diff --git a/src/hotspot/share/opto/addnode.cpp b/src/hotspot/share/opto/addnode.cpp index 926b74288c6..f4931085fa4 100644 --- a/src/hotspot/share/opto/addnode.cpp +++ b/src/hotspot/share/opto/addnode.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -137,7 +137,7 @@ Node *AddNode::Ideal(PhaseGVN *phase, bool can_reshape) { // Check for commutative operation desired if (commute(phase, this)) return this; - AddNode *progress = NULL; // Progress flag + AddNode *progress = nullptr; // Progress flag // Convert "(x+1)+2" into "x+(1+2)". If the right input is a // constant, and the left input is an add of a constant, flatten the @@ -241,7 +241,7 @@ const Type *AddNode::add_of_identity( const Type *t1, const Type *t2 ) const { if( t1->higher_equal( zero ) ) return t2; if( t2->higher_equal( zero ) ) return t1; - return NULL; + return nullptr; } AddNode* AddNode::make(Node* in1, Node* in2, BasicType bt) { @@ -253,7 +253,7 @@ AddNode* AddNode::make(Node* in1, Node* in2, BasicType bt) { default: fatal("Not implemented for %s", type2name(bt)); } - return NULL; + return nullptr; } //============================================================================= @@ -282,7 +282,7 @@ Node* AddNode::IdealIL(PhaseGVN* phase, bool can_reshape, BasicType bt) { // Check for dead cycle: d = (a-b)+(c-d) assert( in1->in(2) != this && in2->in(2) != this, "dead loop in AddINode::Ideal" ); - Node* sub = SubNode::make(NULL, NULL, bt); + Node* sub = SubNode::make(nullptr, nullptr, bt); sub->init_req(1, phase->transform(AddNode::make(in1->in(1), in2->in(1), bt))); sub->init_req(2, phase->transform(AddNode::make(in1->in(2), in2->in(2), bt))); return sub; @@ -313,9 +313,9 @@ Node* AddNode::IdealIL(PhaseGVN* phase, bool 
can_reshape, BasicType bt) { // Associative if (op1 == Op_Mul(bt) && op2 == Op_Mul(bt)) { - Node* add_in1 = NULL; - Node* add_in2 = NULL; - Node* mul_in = NULL; + Node* add_in1 = nullptr; + Node* add_in2 = nullptr; + Node* mul_in = nullptr; if (in1->in(1) == in2->in(1)) { // Convert "a*b+a*c into a*(b+c) @@ -339,7 +339,7 @@ Node* AddNode::IdealIL(PhaseGVN* phase, bool can_reshape, BasicType bt) { mul_in = in1->in(1); } - if (mul_in != NULL) { + if (mul_in != nullptr) { Node* add = phase->transform(AddNode::make(add_in1, add_in2, bt)); return MulNode::make(mul_in, add, bt); } @@ -348,16 +348,16 @@ Node* AddNode::IdealIL(PhaseGVN* phase, bool can_reshape, BasicType bt) { // Convert (x >>> rshift) + (x << lshift) into RotateRight(x, rshift) if (Matcher::match_rule_supported(Op_RotateRight) && ((op1 == Op_URShift(bt) && op2 == Op_LShift(bt)) || (op1 == Op_LShift(bt) && op2 == Op_URShift(bt))) && - in1->in(1) != NULL && in1->in(1) == in2->in(1)) { + in1->in(1) != nullptr && in1->in(1) == in2->in(1)) { Node* rshift = op1 == Op_URShift(bt) ? in1->in(2) : in2->in(2); Node* lshift = op1 == Op_URShift(bt) ? in2->in(2) : in1->in(2); - if (rshift != NULL && lshift != NULL) { + if (rshift != nullptr && lshift != nullptr) { const TypeInt* rshift_t = phase->type(rshift)->isa_int(); const TypeInt* lshift_t = phase->type(lshift)->isa_int(); int bits = bt == T_INT ? 32 : 64; int mask = bt == T_INT ? 
0x1F : 0x3F; - if (lshift_t != NULL && lshift_t->is_con() && - rshift_t != NULL && rshift_t->is_con() && + if (lshift_t != nullptr && lshift_t->is_con() && + rshift_t != nullptr && rshift_t->is_con() && ((lshift_t->get_con() & mask) == (bits - (rshift_t->get_con() & mask)))) { return new RotateRightNode(in1->in(1), phase->intcon(rshift_t->get_con() & mask), TypeInteger::bottom(bt)); } @@ -505,7 +505,7 @@ const Type *AddFNode::add_of_identity( const Type *t1, const Type *t2 ) const { // if( t1->higher_equal( zero ) ) return t2; // if( t2->higher_equal( zero ) ) return t1; - return NULL; + return nullptr; } //------------------------------add_ring--------------------------------------- @@ -520,7 +520,7 @@ const Type *AddFNode::add_ring( const Type *t0, const Type *t1 ) const { //------------------------------Ideal------------------------------------------ Node *AddFNode::Ideal(PhaseGVN *phase, bool can_reshape) { // Floating point additions are not associative because of boundary conditions (infinity) - return commute(phase, this) ? this : NULL; + return commute(phase, this) ? this : nullptr; } @@ -537,7 +537,7 @@ const Type *AddDNode::add_of_identity( const Type *t1, const Type *t2 ) const { // if( t1->higher_equal( zero ) ) return t2; // if( t2->higher_equal( zero ) ) return t1; - return NULL; + return nullptr; } //------------------------------add_ring--------------------------------------- // Supplied function returns the sum of the inputs. @@ -551,7 +551,7 @@ const Type *AddDNode::add_ring( const Type *t0, const Type *t1 ) const { //------------------------------Ideal------------------------------------------ Node *AddDNode::Ideal(PhaseGVN *phase, bool can_reshape) { // Floating point additions are not associative because of boundary conditions (infinity) - return commute(phase, this) ? this : NULL; + return commute(phase, this) ? 
this : nullptr; } @@ -565,7 +565,7 @@ Node* AddPNode::Identity(PhaseGVN* phase) { //------------------------------Idealize--------------------------------------- Node *AddPNode::Ideal(PhaseGVN *phase, bool can_reshape) { // Bail out if dead inputs - if( phase->type( in(Address) ) == Type::TOP ) return NULL; + if( phase->type( in(Address) ) == Type::TOP ) return nullptr; // If the left input is an add of a constant, flatten the expression tree. const Node *n = in(Address); @@ -576,12 +576,12 @@ Node *AddPNode::Ideal(PhaseGVN *phase, bool can_reshape) { "dead loop in AddPNode::Ideal" ); // Type of left input's right input const Type *t = phase->type( addp->in(Offset) ); - if( t == Type::TOP ) return NULL; + if( t == Type::TOP ) return nullptr; const TypeX *t12 = t->is_intptr_t(); if( t12->is_con() ) { // Left input is an add of a constant? // If the right input is a constant, combine constants const Type *temp_t2 = phase->type( in(Offset) ); - if( temp_t2 == Type::TOP ) return NULL; + if( temp_t2 == Type::TOP ) return nullptr; const TypeX *t2 = temp_t2->is_intptr_t(); Node* address; Node* offset; @@ -602,7 +602,7 @@ Node *AddPNode::Ideal(PhaseGVN *phase, bool can_reshape) { // Raw pointers? if( in(Base)->bottom_type() == Type::TOP ) { - // If this is a NULL+long form (from unsafe accesses), switch to a rawptr. + // If this is a null+long form (from unsafe accesses), switch to a rawptr. if (phase->type(in(Address)) == TypePtr::NULL_PTR) { Node* offset = in(Offset); return new CastX2PNode(offset); @@ -623,13 +623,13 @@ Node *AddPNode::Ideal(PhaseGVN *phase, bool can_reshape) { } } - return NULL; // No progress + return nullptr; // No progress } //------------------------------bottom_type------------------------------------ // Bottom-type is the pointer-type with unknown offset. 
const Type *AddPNode::bottom_type() const { - if (in(Address) == NULL) return TypePtr::BOTTOM; + if (in(Address) == nullptr) return TypePtr::BOTTOM; const TypePtr *tp = in(Address)->bottom_type()->isa_ptr(); if( !tp ) return Type::TOP; // TOP input means TOP output assert( in(Offset)->Opcode() != Op_ConP, "" ); @@ -667,7 +667,7 @@ const Type* AddPNode::Value(PhaseGVN* phase) const { //------------------------Ideal_base_and_offset-------------------------------- // Split an oop pointer into a base and offset. // (The offset might be Type::OffsetBot in the case of an array.) -// Return the base, or NULL if failure. +// Return the base, or null if failure. Node* AddPNode::Ideal_base_and_offset(Node* ptr, PhaseTransform* phase, // second return value: intptr_t& offset) { @@ -683,7 +683,7 @@ Node* AddPNode::Ideal_base_and_offset(Node* ptr, PhaseTransform* phase, } } offset = Type::OffsetBot; - return NULL; + return nullptr; } //------------------------------unpack_offsets---------------------------------- @@ -733,20 +733,20 @@ Node* rotate_shift(PhaseGVN* phase, Node* lshift, Node* rshift, int mask) { // val << norm_con_shift | val >> ({32|64} - norm_con_shift) => rotate_left val, norm_con_shift const TypeInt* lshift_t = phase->type(lshift)->isa_int(); const TypeInt* rshift_t = phase->type(rshift)->isa_int(); - if (lshift_t != NULL && lshift_t->is_con() && - rshift_t != NULL && rshift_t->is_con() && + if (lshift_t != nullptr && lshift_t->is_con() && + rshift_t != nullptr && rshift_t->is_con() && ((lshift_t->get_con() & mask) == ((mask + 1) - (rshift_t->get_con() & mask)))) { return phase->intcon(lshift_t->get_con() & mask); } // val << var_shift | val >> ({0|32|64} - var_shift) => rotate_left val, var_shift if (rshift->Opcode() == Op_SubI && rshift->in(2) == lshift && rshift->in(1)->is_Con()){ const TypeInt* shift_t = phase->type(rshift->in(1))->isa_int(); - if (shift_t != NULL && shift_t->is_con() && + if (shift_t != nullptr && shift_t->is_con() && (shift_t->get_con() 
== 0 || shift_t->get_con() == (mask + 1))) { return lshift; } } - return NULL; + return nullptr; } Node* OrINode::Ideal(PhaseGVN* phase, bool can_reshape) { @@ -757,21 +757,21 @@ Node* OrINode::Ideal(PhaseGVN* phase, bool can_reshape) { Node* lshift = in(1)->in(2); Node* rshift = in(2)->in(2); Node* shift = rotate_shift(phase, lshift, rshift, 0x1F); - if (shift != NULL) { + if (shift != nullptr) { return new RotateLeftNode(in(1)->in(1), shift, TypeInt::INT); } - return NULL; + return nullptr; } if (Matcher::match_rule_supported(Op_RotateRight) && lopcode == Op_URShiftI && ropcode == Op_LShiftI && in(1)->in(1) == in(2)->in(1)) { Node* rshift = in(1)->in(2); Node* lshift = in(2)->in(2); Node* shift = rotate_shift(phase, rshift, lshift, 0x1F); - if (shift != NULL) { + if (shift != nullptr) { return new RotateRightNode(in(1)->in(1), shift, TypeInt::INT); } } - return NULL; + return nullptr; } //------------------------------add_ring--------------------------------------- @@ -823,21 +823,21 @@ Node* OrLNode::Ideal(PhaseGVN* phase, bool can_reshape) { Node* lshift = in(1)->in(2); Node* rshift = in(2)->in(2); Node* shift = rotate_shift(phase, lshift, rshift, 0x3F); - if (shift != NULL) { + if (shift != nullptr) { return new RotateLeftNode(in(1)->in(1), shift, TypeLong::LONG); } - return NULL; + return nullptr; } if (Matcher::match_rule_supported(Op_RotateRight) && lopcode == Op_URShiftL && ropcode == Op_LShiftL && in(1)->in(1) == in(2)->in(1)) { Node* rshift = in(1)->in(2); Node* lshift = in(2)->in(2); Node* shift = rotate_shift(phase, rshift, lshift, 0x3F); - if (shift != NULL) { + if (shift != nullptr) { return new RotateRightNode(in(1)->in(1), shift, TypeLong::LONG); } } - return NULL; + return nullptr; } //------------------------------add_ring--------------------------------------- @@ -1005,16 +1005,16 @@ const Type* XorLNode::Value(PhaseGVN* phase) const { Node* MaxNode::build_min_max(Node* a, Node* b, bool is_max, bool is_unsigned, const Type* t, PhaseGVN& gvn) { 
bool is_int = gvn.type(a)->isa_int(); assert(is_int || gvn.type(a)->isa_long(), "int or long inputs"); - assert(is_int == (gvn.type(b)->isa_int() != NULL), "inconsistent inputs"); + assert(is_int == (gvn.type(b)->isa_int() != nullptr), "inconsistent inputs"); BasicType bt = is_int ? T_INT: T_LONG; - Node* hook = NULL; + Node* hook = nullptr; if (gvn.is_IterGVN()) { // Make sure a and b are not destroyed hook = new Node(2); hook->init_req(0, a); hook->init_req(1, b); } - Node* res = NULL; + Node* res = nullptr; if (is_int && !is_unsigned) { if (is_max) { res = gvn.transform(new MaxINode(a, b)); @@ -1024,16 +1024,16 @@ Node* MaxNode::build_min_max(Node* a, Node* b, bool is_max, bool is_unsigned, co assert(gvn.type(res)->is_int()->_lo >= t->is_int()->_lo && gvn.type(res)->is_int()->_hi <= t->is_int()->_hi, "type doesn't match"); } } else { - Node* cmp = NULL; + Node* cmp = nullptr; if (is_max) { cmp = gvn.transform(CmpNode::make(a, b, bt, is_unsigned)); } else { cmp = gvn.transform(CmpNode::make(b, a, bt, is_unsigned)); } Node* bol = gvn.transform(new BoolNode(cmp, BoolTest::lt)); - res = gvn.transform(CMoveNode::make(NULL, bol, a, b, t)); + res = gvn.transform(CMoveNode::make(nullptr, bol, a, b, t)); } - if (hook != NULL) { + if (hook != nullptr) { hook->destruct(&gvn); } return res; @@ -1042,17 +1042,17 @@ Node* MaxNode::build_min_max(Node* a, Node* b, bool is_max, bool is_unsigned, co Node* MaxNode::build_min_max_diff_with_zero(Node* a, Node* b, bool is_max, const Type* t, PhaseGVN& gvn) { bool is_int = gvn.type(a)->isa_int(); assert(is_int || gvn.type(a)->isa_long(), "int or long inputs"); - assert(is_int == (gvn.type(b)->isa_int() != NULL), "inconsistent inputs"); + assert(is_int == (gvn.type(b)->isa_int() != nullptr), "inconsistent inputs"); BasicType bt = is_int ? 
T_INT: T_LONG; Node* zero = gvn.integercon(0, bt); - Node* hook = NULL; + Node* hook = nullptr; if (gvn.is_IterGVN()) { // Make sure a and b are not destroyed hook = new Node(2); hook->init_req(0, a); hook->init_req(1, b); } - Node* cmp = NULL; + Node* cmp = nullptr; if (is_max) { cmp = gvn.transform(CmpNode::make(a, b, bt, false)); } else { @@ -1060,8 +1060,8 @@ Node* MaxNode::build_min_max_diff_with_zero(Node* a, Node* b, bool is_max, const } Node* sub = gvn.transform(SubNode::make(a, b, bt)); Node* bol = gvn.transform(new BoolNode(cmp, BoolTest::lt)); - Node* res = gvn.transform(CMoveNode::make(NULL, bol, sub, zero, t)); - if (hook != NULL) { + Node* res = gvn.transform(CMoveNode::make(nullptr, bol, sub, zero, t)); + if (hook != nullptr) { hook->destruct(&gvn); } return res; @@ -1108,7 +1108,7 @@ Node* MaxINode::Ideal(PhaseGVN* phase, bool can_reshape) { if (x->Opcode() == Op_AddI && // Check for "x+c0" and collect constant x->in(2)->is_Con()) { const Type* t = x->in(2)->bottom_type(); - if (t == Type::TOP) return NULL; // No progress + if (t == Type::TOP) return nullptr; // No progress x_off = t->is_int()->get_con(); x = x->in(1); } @@ -1120,7 +1120,7 @@ Node* MaxINode::Ideal(PhaseGVN* phase, bool can_reshape) { if (y->Opcode() == Op_AddI && // Check for "y+c1" and collect constant y->in(2)->is_Con()) { const Type* t = y->in(2)->bottom_type(); - if (t == Type::TOP) return NULL; // No progress + if (t == Type::TOP) return nullptr; // No progress y_off = t->is_int()->get_con(); y = y->in(1); } @@ -1138,7 +1138,7 @@ Node* MaxINode::Ideal(PhaseGVN* phase, bool can_reshape) { if (y->Opcode() == Op_AddI &&// Check for "y+c1" and collect constant y->in(2)->is_Con()) { const Type* t = y->in(2)->bottom_type(); - if (t == Type::TOP) return NULL; // No progress + if (t == Type::TOP) return nullptr; // No progress y_off = t->is_int()->get_con(); y = y->in(1); } @@ -1148,7 +1148,7 @@ Node* MaxINode::Ideal(PhaseGVN* phase, bool can_reshape) { // Transform MAX2(x + c0, MAX2(x 
+ c1, z)) into MAX2(x + MAX2(c0, c1), z) // if x == y and the additions can't overflow. - if (x == y && tx != NULL && + if (x == y && tx != nullptr && !can_overflow(tx, x_off) && !can_overflow(tx, y_off)) { return new MaxINode(phase->transform(new AddINode(x, phase->intcon(MAX2(x_off, y_off)))), r->in(2)); @@ -1156,13 +1156,13 @@ Node* MaxINode::Ideal(PhaseGVN* phase, bool can_reshape) { } else { // Transform MAX2(x + c0, y + c1) into x + MAX2(c0, c1) // if x == y and the additions can't overflow. - if (x == y && tx != NULL && + if (x == y && tx != nullptr && !can_overflow(tx, x_off) && !can_overflow(tx, y_off)) { return new AddINode(x, phase->intcon(MAX2(x_off, y_off))); } } - return NULL; + return nullptr; } //============================================================================= @@ -1170,7 +1170,7 @@ Node* MaxINode::Ideal(PhaseGVN* phase, bool can_reshape) { // MINs show up in range-check loop limit calculations. Look for // "MIN2(x+c0,MIN2(y,x+c1))". Pick the smaller constant: "MIN2(x+c0,y)" Node *MinINode::Ideal(PhaseGVN *phase, bool can_reshape) { - Node *progress = NULL; + Node *progress = nullptr; // Force a right-spline graph Node *l = in(1); Node *r = in(2); @@ -1191,7 +1191,7 @@ Node *MinINode::Ideal(PhaseGVN *phase, bool can_reshape) { if( x->Opcode() == Op_AddI && // Check for "x+c0" and collect constant x->in(2)->is_Con() ) { const Type *t = x->in(2)->bottom_type(); - if( t == Type::TOP ) return NULL; // No progress + if( t == Type::TOP ) return nullptr; // No progress x_off = t->is_int()->get_con(); x = x->in(1); } @@ -1203,7 +1203,7 @@ Node *MinINode::Ideal(PhaseGVN *phase, bool can_reshape) { if( y->Opcode() == Op_AddI && // Check for "y+c1" and collect constant y->in(2)->is_Con() ) { const Type *t = y->in(2)->bottom_type(); - if( t == Type::TOP ) return NULL; // No progress + if( t == Type::TOP ) return nullptr; // No progress y_off = t->is_int()->get_con(); y = y->in(1); } @@ -1221,7 +1221,7 @@ Node *MinINode::Ideal(PhaseGVN *phase, bool 
can_reshape) { if( y->Opcode() == Op_AddI &&// Check for "y+c1" and collect constant y->in(2)->is_Con() ) { const Type *t = y->in(2)->bottom_type(); - if( t == Type::TOP ) return NULL; // No progress + if( t == Type::TOP ) return nullptr; // No progress y_off = t->is_int()->get_con(); y = y->in(1); } @@ -1231,7 +1231,7 @@ Node *MinINode::Ideal(PhaseGVN *phase, bool can_reshape) { // Transform MIN2(x + c0, MIN2(x + c1, z)) into MIN2(x + MIN2(c0, c1), z) // if x == y and the additions can't overflow. - if (x == y && tx != NULL && + if (x == y && tx != nullptr && !can_overflow(tx, x_off) && !can_overflow(tx, y_off)) { return new MinINode(phase->transform(new AddINode(x, phase->intcon(MIN2(x_off, y_off)))), r->in(2)); @@ -1239,13 +1239,13 @@ Node *MinINode::Ideal(PhaseGVN *phase, bool can_reshape) { } else { // Transform MIN2(x + c0, y + c1) into x + MIN2(c0, c1) // if x == y and the additions can't overflow. - if (x == y && tx != NULL && + if (x == y && tx != nullptr && !can_overflow(tx, x_off) && !can_overflow(tx, y_off)) { return new AddINode(x,phase->intcon(MIN2(x_off,y_off))); } } - return NULL; + return nullptr; } //------------------------------add_ring--------------------------------------- diff --git a/src/hotspot/share/opto/arraycopynode.cpp b/src/hotspot/share/opto/arraycopynode.cpp index cd859b2b847..b1958f2b33a 100644 --- a/src/hotspot/share/opto/arraycopynode.cpp +++ b/src/hotspot/share/opto/arraycopynode.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -34,7 +34,7 @@ #include "utilities/powerOfTwo.hpp" ArrayCopyNode::ArrayCopyNode(Compile* C, bool alloc_tightly_coupled, bool has_negative_length_guard) - : CallNode(arraycopy_type(), NULL, TypePtr::BOTTOM), + : CallNode(arraycopy_type(), nullptr, TypePtr::BOTTOM), _kind(None), _alloc_tightly_coupled(alloc_tightly_coupled), _has_negative_length_guard(has_negative_length_guard), @@ -131,7 +131,7 @@ int ArrayCopyNode::get_count(PhaseGVN *phase) const { return nb_fields; } else { const TypeAryPtr* ary_src = src_type->isa_aryptr(); - assert (ary_src != NULL, "not an array or instance?"); + assert (ary_src != nullptr, "not an array or instance?"); // clone passes a length as a rounded number of longs. If we're // cloning an array we'll do it element by element. If the // length input to ArrayCopyNode is constant, length of input @@ -174,7 +174,7 @@ void ArrayCopyNode::store(BarrierSetC2* bs, PhaseGVN *phase, Node*& ctl, MergeMe Node* ArrayCopyNode::try_clone_instance(PhaseGVN *phase, bool can_reshape, int count) { if (!is_clonebasic()) { - return NULL; + return nullptr; } Node* base_src = in(ArrayCopyNode::Src); @@ -184,8 +184,8 @@ Node* ArrayCopyNode::try_clone_instance(PhaseGVN *phase, bool can_reshape, int c const Type* src_type = phase->type(base_src); const TypeInstPtr* inst_src = src_type->isa_instptr(); - if (inst_src == NULL) { - return NULL; + if (inst_src == nullptr) { + return nullptr; } MergeMemNode* mem = phase->transform(MergeMemNode::make(in_mem))->as_MergeMem(); @@ -264,8 +264,8 @@ bool ArrayCopyNode::prepare_array_copy(PhaseGVN *phase, bool can_reshape, // newly allocated object is guaranteed to not overlap with source object disjoint_bases = is_alloc_tightly_coupled(); - if (ary_src == NULL || ary_src->elem() == Type::BOTTOM || - ary_dest == NULL || ary_dest->elem() == Type::BOTTOM) { + if (ary_src == nullptr || ary_src->elem() == Type::BOTTOM || + ary_dest == nullptr || 
ary_dest->elem() == Type::BOTTOM) { // We don't know if arguments are arrays return false; } @@ -324,7 +324,7 @@ bool ArrayCopyNode::prepare_array_copy(PhaseGVN *phase, bool can_reshape, copy_type = dest_elem; } else { - assert(ary_src != NULL, "should be a clone"); + assert(ary_src != nullptr, "should be a clone"); assert(is_clonebasic(), "should be"); disjoint_bases = true; @@ -372,7 +372,7 @@ void ArrayCopyNode::array_copy_test_overlap(PhaseGVN *phase, bool can_reshape, b if (!disjoint_bases && count > 1) { Node* src_offset = in(ArrayCopyNode::SrcPos); Node* dest_offset = in(ArrayCopyNode::DestPos); - assert(src_offset != NULL && dest_offset != NULL, "should be"); + assert(src_offset != nullptr && dest_offset != nullptr, "should be"); Node* cmp = phase->transform(new CmpINode(src_offset, dest_offset)); Node *bol = phase->transform(new BoolNode(cmp, BoolTest::lt)); IfNode *iff = new IfNode(ctl, bol, PROB_FAIR, COUNT_UNKNOWN); @@ -489,13 +489,13 @@ bool ArrayCopyNode::finish_transform(PhaseGVN *phase, bool can_reshape, CallProjections callprojs; extract_projections(&callprojs, true, false); - if (callprojs.fallthrough_ioproj != NULL) { + if (callprojs.fallthrough_ioproj != nullptr) { igvn->replace_node(callprojs.fallthrough_ioproj, in(TypeFunc::I_O)); } - if (callprojs.fallthrough_memproj != NULL) { + if (callprojs.fallthrough_memproj != nullptr) { igvn->replace_node(callprojs.fallthrough_memproj, mem); } - if (callprojs.fallthrough_catchproj != NULL) { + if (callprojs.fallthrough_catchproj != nullptr) { igvn->replace_node(callprojs.fallthrough_catchproj, ctl); } @@ -525,7 +525,7 @@ Node *ArrayCopyNode::Ideal(PhaseGVN *phase, bool can_reshape) { if (StressArrayCopyMacroNode && !can_reshape) { phase->record_for_igvn(this); - return NULL; + return nullptr; } // See if it's a small array copy and we can inline it as @@ -537,51 +537,51 @@ Node *ArrayCopyNode::Ideal(PhaseGVN *phase, bool can_reshape) { if (!is_clonebasic() && !is_arraycopy_validated() && 
!is_copyofrange_validated() && !is_copyof_validated()) { - return NULL; + return nullptr; } - assert(in(TypeFunc::Control) != NULL && - in(TypeFunc::Memory) != NULL && - in(ArrayCopyNode::Src) != NULL && - in(ArrayCopyNode::Dest) != NULL && - in(ArrayCopyNode::Length) != NULL && - in(ArrayCopyNode::SrcPos) != NULL && - in(ArrayCopyNode::DestPos) != NULL, "broken inputs"); + assert(in(TypeFunc::Control) != nullptr && + in(TypeFunc::Memory) != nullptr && + in(ArrayCopyNode::Src) != nullptr && + in(ArrayCopyNode::Dest) != nullptr && + in(ArrayCopyNode::Length) != nullptr && + in(ArrayCopyNode::SrcPos) != nullptr && + in(ArrayCopyNode::DestPos) != nullptr, "broken inputs"); if (in(TypeFunc::Control)->is_top() || in(TypeFunc::Memory)->is_top() || phase->type(in(ArrayCopyNode::Src)) == Type::TOP || phase->type(in(ArrayCopyNode::Dest)) == Type::TOP || - (in(ArrayCopyNode::SrcPos) != NULL && in(ArrayCopyNode::SrcPos)->is_top()) || - (in(ArrayCopyNode::DestPos) != NULL && in(ArrayCopyNode::DestPos)->is_top())) { - return NULL; + (in(ArrayCopyNode::SrcPos) != nullptr && in(ArrayCopyNode::SrcPos)->is_top()) || + (in(ArrayCopyNode::DestPos) != nullptr && in(ArrayCopyNode::DestPos)->is_top())) { + return nullptr; } int count = get_count(phase); if (count < 0 || count > ArrayCopyLoadStoreMaxElem) { - return NULL; + return nullptr; } Node* mem = try_clone_instance(phase, can_reshape, count); - if (mem != NULL) { - return (mem == NodeSentinel) ? NULL : mem; + if (mem != nullptr) { + return (mem == NodeSentinel) ? 
nullptr : mem; } - Node* adr_src = NULL; - Node* base_src = NULL; - Node* adr_dest = NULL; - Node* base_dest = NULL; + Node* adr_src = nullptr; + Node* base_src = nullptr; + Node* adr_dest = nullptr; + Node* base_dest = nullptr; BasicType copy_type = T_ILLEGAL; - const Type* value_type = NULL; + const Type* value_type = nullptr; bool disjoint_bases = false; if (!prepare_array_copy(phase, can_reshape, adr_src, base_src, adr_dest, base_dest, copy_type, value_type, disjoint_bases)) { - assert(adr_src == NULL, "no node can be left behind"); - assert(adr_dest == NULL, "no node can be left behind"); - return NULL; + assert(adr_src == nullptr, "no node can be left behind"); + assert(adr_dest == nullptr, "no node can be left behind"); + return nullptr; } Node* src = in(ArrayCopyNode::Src); @@ -611,7 +611,7 @@ Node *ArrayCopyNode::Ideal(PhaseGVN *phase, bool can_reshape) { adr_src, base_src, adr_dest, base_dest, copy_type, value_type, count); - Node* ctl = NULL; + Node* ctl = nullptr; if (!forward_ctl->is_top() && !backward_ctl->is_top()) { ctl = new RegionNode(3); ctl->init_req(1, forward_ctl); @@ -648,7 +648,7 @@ Node *ArrayCopyNode::Ideal(PhaseGVN *phase, bool can_reshape) { // put in worklist, so that if it happens to be dead it is removed phase->is_IterGVN()->_worklist.push(mem); } - return NULL; + return nullptr; } return mem; @@ -673,7 +673,7 @@ bool ArrayCopyNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { } bool ArrayCopyNode::may_modify_helper(const TypeOopPtr *t_oop, Node* n, PhaseTransform *phase, CallNode*& call) { - if (n != NULL && + if (n != nullptr && n->is_Call() && n->as_Call()->may_modify(t_oop, phase) && (n->as_Call()->is_ArrayCopy() || n->as_Call()->is_call_to_arraycopystub())) { @@ -691,11 +691,11 @@ bool ArrayCopyNode::may_modify(const TypeOopPtr *t_oop, MemBarNode* mb, PhaseTra // step over g1 gc barrier if we're at e.g. 
a clone with ReduceInitialCardMarks off c = bs->step_over_gc_barrier(c); - CallNode* call = NULL; - guarantee(c != NULL, "step_over_gc_barrier failed, there must be something to step to."); + CallNode* call = nullptr; + guarantee(c != nullptr, "step_over_gc_barrier failed, there must be something to step to."); if (c->is_Region()) { for (uint i = 1; i < c->req(); i++) { - if (c->in(i) != NULL) { + if (c->in(i) != nullptr) { Node* n = c->in(i)->in(0); if (may_modify_helper(t_oop, n, phase, call)) { ac = call->isa_ArrayCopy(); @@ -709,7 +709,7 @@ bool ArrayCopyNode::may_modify(const TypeOopPtr *t_oop, MemBarNode* mb, PhaseTra #ifdef ASSERT bool use_ReduceInitialCardMarks = BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) && static_cast(bs)->use_ReduceInitialCardMarks(); - assert(c == mb->in(0) || (ac != NULL && ac->is_clonebasic() && !use_ReduceInitialCardMarks), "only for clone"); + assert(c == mb->in(0) || (ac != nullptr && ac->is_clonebasic() && !use_ReduceInitialCardMarks), "only for clone"); #endif return true; } else if (mb->trailing_partial_array_copy()) { @@ -736,7 +736,7 @@ bool ArrayCopyNode::modifies(intptr_t offset_lo, intptr_t offset_hi, PhaseTransf const TypeInt *len_t = phase->type(len)->isa_int(); const TypeAryPtr* ary_t = phase->type(dest)->isa_aryptr(); - if (dest_pos_t == NULL || len_t == NULL || ary_t == NULL) { + if (dest_pos_t == nullptr || len_t == nullptr || ary_t == nullptr) { return !must_modify; } diff --git a/src/hotspot/share/opto/arraycopynode.hpp b/src/hotspot/share/opto/arraycopynode.hpp index ba04e43970f..3b4c4930cb0 100644 --- a/src/hotspot/share/opto/arraycopynode.hpp +++ b/src/hotspot/share/opto/arraycopynode.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -142,8 +142,8 @@ public: Node* length, bool alloc_tightly_coupled, bool has_negative_length_guard, - Node* src_klass = NULL, Node* dest_klass = NULL, - Node* src_length = NULL, Node* dest_length = NULL); + Node* src_klass = nullptr, Node* dest_klass = nullptr, + Node* src_length = nullptr, Node* dest_length = nullptr); void connect_outputs(GraphKit* kit, bool deoptimize_on_exception = false); diff --git a/src/hotspot/share/opto/block.cpp b/src/hotspot/share/opto/block.cpp index f729f93b03b..7f2f5167a67 100644 --- a/src/hotspot/share/opto/block.cpp +++ b/src/hotspot/share/opto/block.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -45,7 +45,7 @@ void Block_Array::grow( uint i ) { if( !_size ) { _size = 1; _blocks = (Block**)_arena->Amalloc( _size * sizeof(Block*) ); - _blocks[0] = NULL; + _blocks[0] = nullptr; } uint old = _size; _size = next_power_of_2(i); @@ -313,7 +313,7 @@ void Block::dump_head(const PhaseCFG* cfg, outputStream* st) const { st->print("in( "); for (uint i=1; iget_block_for_node(s); p->dump_pred(cfg, p, st); } else { @@ -332,7 +332,7 @@ void Block::dump_head(const PhaseCFG* cfg, outputStream* st) const { const Block *bhead = this; // Head of self-loop Node *bh = bhead->head(); - if ((cfg != NULL) && bh->is_Loop() && !head()->is_Root()) { + if ((cfg != nullptr) && bh->is_Loop() && !head()->is_Root()) { LoopNode *loop = bh->as_Loop(); const Block *bx = cfg->get_block_for_node(loop->in(LoopNode::LoopBackControl)); while (bx->is_connector()) { @@ -359,7 +359,7 @@ void Block::dump_head(const PhaseCFG* cfg, outputStream* st) const { } void Block::dump() const { - dump(NULL); + dump(nullptr); } void 
Block::dump(const PhaseCFG* cfg) const { @@ -375,11 +375,11 @@ PhaseCFG::PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher) : Phase(CFG) , _root(root) , _block_arena(arena) -, _regalloc(NULL) +, _regalloc(nullptr) , _scheduling_for_pressure(false) , _matcher(matcher) , _node_to_block_mapping(arena) -, _node_latency(NULL) +, _node_latency(nullptr) #ifndef PRODUCT , _trace_opto_pipelining(C->directive()->TraceOptoPipeliningOption) #endif @@ -391,10 +391,10 @@ PhaseCFG::PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher) // I'll need a few machine-specific GotoNodes. Make an Ideal GotoNode, // then Match it into a machine-specific Node. Then clone the machine // Node on demand. - Node *x = new GotoNode(NULL); + Node *x = new GotoNode(nullptr); x->init_req(0, x); _goto = matcher.match_tree(x); - assert(_goto != NULL, ""); + assert(_goto != nullptr, ""); _goto->set_req(0,_goto); // Build the CFG in Reverse Post Order @@ -427,7 +427,7 @@ uint PhaseCFG::build_cfg() { const Node *x = proj->is_block_proj(); // Does the block end with a proper block-ending Node? One of Return, // If or Goto? (This check should be done for visited nodes also). - if (x == NULL) { // Does not end right... + if (x == nullptr) { // Does not end right... Node *g = _goto->clone(); // Force it to end in a Goto g->set_req(0, proj); np->set_req(idx, g); @@ -661,7 +661,7 @@ void PhaseCFG::convert_NeverBranch_to_Goto(Block *b) { // Helper function to move block bx to the slot following b_index. Return // true if the move is successful, otherwise false bool PhaseCFG::move_to_next(Block* bx, uint b_index) { - if (bx == NULL) return false; + if (bx == nullptr) return false; // Return false if bx is already scheduled. uint bx_index = bx->_pre_order; @@ -848,7 +848,7 @@ void PhaseCFG::fixup_flow() { } assert(block->is_Empty() != Block::completely_empty, "Empty blocks should be connectors"); - Block* bnext = (i < number_of_blocks() - 1) ? 
get_block(i + 1) : NULL; + Block* bnext = (i < number_of_blocks() - 1) ? get_block(i + 1) : nullptr; Block* bs0 = block->non_connector_successor(0); // Check for multi-way branches where I cannot negate the test to @@ -1204,7 +1204,7 @@ void PhaseCFG::postalloc_expand(PhaseRegAlloc* _ra) { uint index = b->find_node(n); // Insert new nodes into block and map them in nodes->blocks array // and remember last node in n2. - Node *n2 = NULL; + Node *n2 = nullptr; for (int k = 0; k < new_nodes.length(); ++k) { n2 = new_nodes.at(k); b->insert_node(n2, ++index); @@ -1233,7 +1233,7 @@ void PhaseCFG::postalloc_expand(PhaseRegAlloc* _ra) { assert(remove.at(k)->is_Proj() && (remove.at(k)->in(0)->is_MachBranch()), ""); } } - // If anything has been inserted (n2 != NULL), continue after last node inserted. + // If anything has been inserted (n2 != nullptr), continue after last node inserted. // This does not always work. Some postalloc expands don't insert any nodes, if they // do optimizations (e.g., max(x,x)). In this case we decrement j accordingly. j = n2 ? b->find_node(n2) : j; @@ -1292,7 +1292,7 @@ void PhaseCFG::dump( ) const { void PhaseCFG::dump_headers() { for (uint i = 0; i < number_of_blocks(); i++) { Block* block = get_block(i); - if (block != NULL) { + if (block != nullptr) { block->dump_head(this); } } @@ -1312,7 +1312,7 @@ void PhaseCFG::verify_memory_writer_placement(const Block* b, const Node* n) con break; } home_or_ancestor = home_or_ancestor->parent(); - } while (home_or_ancestor != NULL); + } while (home_or_ancestor != nullptr); assert(found, "block b is not in n's home loop or an ancestor of it"); } @@ -1365,7 +1365,7 @@ void PhaseCFG::verify() const { // when CreateEx node is moved in build_ifg_physical(). 
if (def_block == block && !(block->head()->is_Loop() && n->is_Phi()) && // See (+++) comment in reg_split.cpp - !(n->jvms() != NULL && n->jvms()->is_monitor_use(k))) { + !(n->jvms() != nullptr && n->jvms()->is_monitor_use(k))) { bool is_loop = false; if (n->is_Phi()) { for (uint l = 1; l < def->req(); l++) { @@ -1389,7 +1389,7 @@ void PhaseCFG::verify() const { assert(j >= 1, "a projection cannot be the first instruction in a block"); Node* pred = block->get_node(j - 1); Node* parent = n->in(0); - assert(parent != NULL, "projections must have a parent"); + assert(parent != nullptr, "projections must have a parent"); assert(pred == parent || (pred->is_Proj() && pred->in(0) == parent), "projections must follow their parents or other sibling projections"); } @@ -1483,7 +1483,7 @@ void UnionFind::Union( uint idx1, uint idx2 ) { #ifndef PRODUCT void Trace::dump( ) const { tty->print_cr("Trace (freq %f)", first_block()->_freq); - for (Block *b = first_block(); b != NULL; b = next(b)) { + for (Block *b = first_block(); b != nullptr; b = next(b)) { tty->print(" B%d", b->_pre_order); if (b->head()->is_Loop()) { tty->print(" (L%d)", b->compute_loop_alignment()); @@ -1561,7 +1561,7 @@ extern "C" int trace_frequency_order(const void *p0, const void *p1) { void PhaseBlockLayout::find_edges() { // Walk the blocks, creating edges and Traces uint i; - Trace *tr = NULL; + Trace *tr = nullptr; for (i = 0; i < _cfg.number_of_blocks(); i++) { Block* b = _cfg.get_block(i); tr = new Trace(b, next, prev); @@ -1590,7 +1590,7 @@ void PhaseBlockLayout::find_edges() { assert(n == _cfg.get_block(i), "expecting next block"); tr->append(n); uf->map(n->_pre_order, tr->id()); - traces[n->_pre_order] = NULL; + traces[n->_pre_order] = nullptr; nfallthru = b->num_fall_throughs(); b = n; } @@ -1616,7 +1616,7 @@ void PhaseBlockLayout::find_edges() { assert(b->is_connector(), "connector blocks at the end"); tr->append(b); uf->map(b->_pre_order, tr->id()); - traces[b->_pre_order] = NULL; + 
traces[b->_pre_order] = nullptr; } } @@ -1642,7 +1642,7 @@ void PhaseBlockLayout::union_traces(Trace* updated_trace, Trace* old_trace) { // Union the lower with the higher and remove the pointer // to the higher. uf->Union(lo_id, hi_id); - traces[hi_id] = NULL; + traces[hi_id] = nullptr; } // Append traces together via the most frequently executed edges @@ -1772,7 +1772,7 @@ void PhaseBlockLayout::reorder_traces(int count) { // Compact the traces. for (int i = 0; i < count; i++) { Trace *tr = traces[i]; - if (tr != NULL) { + if (tr != nullptr) { new_traces[new_count++] = tr; } } @@ -1788,9 +1788,9 @@ void PhaseBlockLayout::reorder_traces(int count) { _cfg.clear_blocks(); for (int i = 0; i < new_count; i++) { Trace *tr = new_traces[i]; - if (tr != NULL) { + if (tr != nullptr) { // push blocks onto the CFG list - for (Block* b = tr->first_block(); b != NULL; b = tr->next(b)) { + for (Block* b = tr->first_block(); b != nullptr; b = tr->next(b)) { _cfg.add_block(b); } } @@ -1855,13 +1855,13 @@ bool Trace::backedge(CFGEdge *e) { // Find the last block in the trace that has a conditional // branch. Block *b; - for (b = last_block(); b != NULL; b = prev(b)) { + for (b = last_block(); b != nullptr; b = prev(b)) { if (b->num_fall_throughs() == 2) { break; } } - if (b != last_block() && b != NULL) { + if (b != last_block() && b != nullptr) { loop_rotated = true; // Rotate the loop by doing two-part linked-list surgery. @@ -1873,7 +1873,7 @@ bool Trace::backedge(CFGEdge *e) { // Backbranch to the top of a trace // Scroll forward through the trace from the targ_block. If we find // a loop head before another loop top, use the loop head alignment. 
- for (Block *b = targ_block; b != NULL; b = next(b)) { + for (Block *b = targ_block; b != nullptr; b = next(b)) { if (b->has_loop_alignment()) { break; } diff --git a/src/hotspot/share/opto/block.hpp b/src/hotspot/share/opto/block.hpp index 93f8a9447d3..2a08328b95c 100644 --- a/src/hotspot/share/opto/block.hpp +++ b/src/hotspot/share/opto/block.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -43,7 +43,7 @@ struct Tarjan; //------------------------------Block_Array------------------------------------ // Map dense integer indices to Blocks. Uses classic doubling-array trick. -// Abstractly provides an infinite array of Block*'s, initialized to NULL. +// Abstractly provides an infinite array of Block*'s, initialized to null. // Note that the constructor just zeros things, and since I use Arena // allocation I do not need a destructor to reclaim storage. 
class Block_Array : public ArenaObj { @@ -60,11 +60,11 @@ public: debug_only(_limit=0); _blocks = NEW_ARENA_ARRAY( a, Block *, OptoBlockListSize ); for( int i = 0; i < OptoBlockListSize; i++ ) { - _blocks[i] = NULL; + _blocks[i] = nullptr; } } - Block *lookup( uint i ) const // Lookup, or NULL for not mapped - { return (i_idx, NULL); + _node_to_block_mapping.map(node->_idx, nullptr); } // get the block in which this node resides @@ -588,7 +588,7 @@ class PhaseCFG : public Phase { // does this node reside in a block; return true bool has_block(const Node* node) const { - return (_node_to_block_mapping.lookup(node->_idx) != NULL); + return (_node_to_block_mapping.lookup(node->_idx) != nullptr); } // Use frequency calculations and code shape to predict if the block @@ -691,7 +691,7 @@ protected: Block* _target; // block target double _prob; // probability of edge to block public: - BlockProbPair() : _target(NULL), _prob(0.0) {} + BlockProbPair() : _target(nullptr), _prob(0.0) {} BlockProbPair(Block* b, double p) : _target(b), _prob(p) {} Block* get_target() const { return _target; } @@ -716,9 +716,9 @@ class CFGLoop : public CFGElement { CFGElement(), _id(id), _depth(0), - _parent(NULL), - _sibling(NULL), - _child(NULL), + _parent(nullptr), + _sibling(nullptr), + _child(nullptr), _exit_prob(1.0f) {} CFGLoop* parent() { return _parent; } void push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg); @@ -731,7 +731,7 @@ class CFGLoop : public CFGElement { assert(hd->head()->is_Loop(), "must begin with loop head node"); return hd; } - Block* backedge_block(); // Return the block on the backedge of the loop (else NULL) + Block* backedge_block(); // Return the block on the backedge of the loop (else null) void compute_loop_depth(int depth); void compute_freq(); // compute frequency with loop assuming head freq 1.0f void scale_freq(); // scale frequency by loop trip count (including outer loops) @@ -817,8 +817,8 @@ class Trace : public ResourceObj { void 
break_loop_after(Block *b) { _last = b; _first = next(b); - set_prev(_first, NULL); - set_next(_last, NULL); + set_prev(_first, nullptr); + set_next(_last, nullptr); } public: @@ -829,8 +829,8 @@ class Trace : public ResourceObj { _prev_list(prev_list), _first(b), _last(b) { - set_next(b, NULL); - set_prev(b, NULL); + set_next(b, nullptr); + set_prev(b, nullptr); }; // Return the id number @@ -849,7 +849,7 @@ class Trace : public ResourceObj { // Insert a trace in the middle of this one after b void insert_after(Block *b, Trace *tr) { set_next(tr->last_block(), next(b)); - if (next(b) != NULL) { + if (next(b) != nullptr) { set_prev(next(b), tr->last_block()); } @@ -863,7 +863,7 @@ class Trace : public ResourceObj { void insert_before(Block *b, Trace *tr) { Block *p = prev(b); - assert(p != NULL, "use append instead"); + assert(p != nullptr, "use append instead"); insert_after(p, tr); } diff --git a/src/hotspot/share/opto/buildOopMap.cpp b/src/hotspot/share/opto/buildOopMap.cpp index 9e7904d36b6..e51fa586c08 100644 --- a/src/hotspot/share/opto/buildOopMap.cpp +++ b/src/hotspot/share/opto/buildOopMap.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -71,7 +71,7 @@ // an array of structs, but the struct-of-arrays is generally a little more // efficient). The arrays are indexed by register number (including // stack-slots as registers) and so is bounded by 200 to 300 elements in -// practice. One array will map to a reaching def Node (or NULL for +// practice. One array will map to a reaching def Node (or null for // conflict/dead). The other array will map to a callee-saved register or // OptoReg::Bad for not-callee-saved. 
@@ -80,16 +80,16 @@ struct OopFlow : public ArenaObj { short *_callees; // Array mapping register to callee-saved Node **_defs; // array mapping register to reaching def - // or NULL if dead/conflict + // or null if dead/conflict // OopFlow structs, when not being actively modified, describe the _end_ of // this block. Block *_b; // Block for this struct OopFlow *_next; // Next free OopFlow - // or NULL if dead/conflict + // or null if dead/conflict Compile* C; OopFlow( short *callees, Node **defs, Compile* c ) : _callees(callees), _defs(defs), - _b(NULL), _next(NULL), C(c) { } + _b(nullptr), _next(nullptr), C(c) { } // Given reaching-defs for this block start, compute it for this block end void compute_reach( PhaseRegAlloc *regalloc, int max_reg, Dict *safehash ); @@ -166,19 +166,19 @@ void OopFlow::compute_reach( PhaseRegAlloc *regalloc, int max_reg, Dict *safehas // Merge the given flow into the 'this' flow void OopFlow::merge( OopFlow *flow, int max_reg ) { - assert( _b == NULL, "merging into a happy flow" ); + assert( _b == nullptr, "merging into a happy flow" ); assert( flow->_b, "this flow is still alive" ); assert( flow != this, "no self flow" ); // Do the merge. If there are any differences, drop to 'bottom' which - // is OptoReg::Bad or NULL depending. + // is OptoReg::Bad or null depending. for( int i=0; i_callees[i] ) _callees[i] = OptoReg::Bad; // Merge the reaching defs if( _defs[i] != flow->_defs[i] ) - _defs[i] = NULL; + _defs[i] = nullptr; } } @@ -214,7 +214,7 @@ OopMap *OopFlow::build_oop_map( Node *n, int max_reg, PhaseRegAlloc *regalloc, i memset(dup_check,0,OptoReg::stack0()) ); OopMap *omap = new OopMap( framesize, max_inarg_slot ); - MachCallNode *mcall = n->is_MachCall() ? n->as_MachCall() : NULL; + MachCallNode *mcall = n->is_MachCall() ? n->as_MachCall() : nullptr; JVMState* jvms = n->jvms(); // For all registers do... 
@@ -468,7 +468,7 @@ static void do_liveness(PhaseRegAlloc* regalloc, PhaseCFG* cfg, Block_List* work if( OptoReg::is_valid(first) ) clr_live_bit(tmp_live,first); if( OptoReg::is_valid(second) ) clr_live_bit(tmp_live,second); - MachNode *m = n->is_Mach() ? n->as_Mach() : NULL; + MachNode *m = n->is_Mach() ? n->as_Mach() : nullptr; // Check if m is potentially a CISC alternate instruction (i.e, possibly // synthesized by RegAlloc from a conventional instruction and a @@ -494,7 +494,7 @@ static void do_liveness(PhaseRegAlloc* regalloc, PhaseCFG* cfg, Block_List* work // for this stack location, and set the appropriate bit in the // live vector 4987749. if (is_cisc_alternate && def == fp) { - const TypePtr *adr_type = NULL; + const TypePtr *adr_type = nullptr; intptr_t offset; const Node* base = m->get_base_and_disp(offset, adr_type); if (base == NodeSentinel) { @@ -505,7 +505,7 @@ static void do_liveness(PhaseRegAlloc* regalloc, PhaseCFG* cfg, Block_List* work assert(!def->bottom_type()->isa_oop_ptr(), "expecting non-oop mem input"); } else if (base != fp || offset == Type::OffsetBot) { // Do nothing: the fp operand is either not from a memory use - // (base == NULL) OR the fp is used in a non-memory context + // (base == nullptr) OR the fp is used in a non-memory context // (base is some other register) OR the offset is not constant, // so it is not a stack slot. 
} else { @@ -561,7 +561,7 @@ static void do_liveness(PhaseRegAlloc* regalloc, PhaseCFG* cfg, Block_List* work Block* block = cfg->get_block(i); uint j; for (j = 1; j < block->number_of_nodes(); j++) { - if (block->get_node(j)->jvms() && (*safehash)[block->get_node(j)] == NULL) { + if (block->get_node(j)->jvms() && (*safehash)[block->get_node(j)] == nullptr) { break; } } @@ -596,12 +596,12 @@ void PhaseOutput::BuildOopMaps() { Block_List worklist; // Worklist of pending blocks int max_reg_ints = align_up(max_reg, BitsPerInt)>>LogBitsPerInt; - Dict *safehash = NULL; // Used for assert only + Dict *safehash = nullptr; // Used for assert only // Compute a backwards liveness per register. Needs a bitarray of // #blocks x (#registers, rounded up to ints) safehash = new Dict(cmpkey,hashkey,A); do_liveness( C->regalloc(), C->cfg(), &worklist, max_reg_ints, A, safehash ); - OopFlow *free_list = NULL; // Free, unused + OopFlow *free_list = nullptr; // Free, unused // Array mapping blocks to completed oopflows OopFlow **flows = NEW_ARENA_ARRAY(A, OopFlow*, C->cfg()->number_of_blocks()); @@ -645,7 +645,7 @@ void PhaseOutput::BuildOopMaps() { // If this block has a visited predecessor AND that predecessor has this // last block as his only undone child, we can move the OopFlow from the // pred to this block. Otherwise we have to grab a new OopFlow. 
- OopFlow *flow = NULL; // Flag for finding optimized flow + OopFlow *flow = nullptr; // Flag for finding optimized flow Block *pred = (Block*)((intptr_t)0xdeadbeef); // Scan this block's preds to find a done predecessor for (uint j = 1; j < b->num_preds(); j++) { @@ -679,9 +679,9 @@ void PhaseOutput::BuildOopMaps() { if( !free_list ) free_list = OopFlow::make(A,max_reg,C); flow = free_list; - assert( flow->_b == NULL, "oopFlow is not free" ); + assert( flow->_b == nullptr, "oopFlow is not free" ); free_list = flow->_next; - flow->_next = NULL; + flow->_next = nullptr; // Copy/clone over the data flow->clone(flows[pred->_pre_order], max_reg); @@ -691,7 +691,7 @@ void PhaseOutput::BuildOopMaps() { // because after the first time they are guarded from entering // this code again. assert( flow->_b == pred, "have some prior flow" ); - flow->_b = NULL; + flow->_b = nullptr; // Now push flow forward flows[b->_pre_order] = flow;// Mark flow for this block diff --git a/src/hotspot/share/opto/bytecodeInfo.cpp b/src/hotspot/share/opto/bytecodeInfo.cpp index 461866f69d8..9a5fa2cbdf6 100644 --- a/src/hotspot/share/opto/bytecodeInfo.cpp +++ b/src/hotspot/share/opto/bytecodeInfo.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -44,31 +44,31 @@ InlineTree::InlineTree(Compile* c, JVMState* caller_jvms, int caller_bci, int max_inline_level) : C(c), - _caller_jvms(NULL), + _caller_jvms(nullptr), _method(callee), _late_inline(false), _caller_tree((InlineTree*) caller_tree), _count_inline_bcs(method()->code_size_for_inlining()), _max_inline_level(max_inline_level), - _subtrees(c->comp_arena(), 2, 0, NULL), - _msg(NULL) + _subtrees(c->comp_arena(), 2, 0, nullptr), + _msg(nullptr) { #ifndef PRODUCT _count_inlines = 0; _forced_inline = false; #endif - if (caller_jvms != NULL) { + if (caller_jvms != nullptr) { // Keep a private copy of the caller_jvms: _caller_jvms = new (C) JVMState(caller_jvms->method(), caller_tree->caller_jvms()); _caller_jvms->set_bci(caller_jvms->bci()); assert(!caller_jvms->should_reexecute(), "there should be no reexecute bytecode with inlining"); assert(_caller_jvms->same_calls_as(caller_jvms), "consistent JVMS"); } - assert((caller_tree == NULL ? 0 : caller_tree->stack_depth() + 1) == stack_depth(), "correct (redundant) depth parameter"); + assert((caller_tree == nullptr ? 0 : caller_tree->stack_depth() + 1) == stack_depth(), "correct (redundant) depth parameter"); assert(caller_bci == this->caller_bci(), "correct (redundant) bci parameter"); // Update hierarchical counts, count_inline_bcs() and count_inlines() InlineTree *caller = (InlineTree *)caller_tree; - for( ; caller != NULL; caller = ((InlineTree *)(caller->caller_tree())) ) { + for( ; caller != nullptr; caller = ((InlineTree *)(caller->caller_tree())) ) { caller->_count_inline_bcs += count_inline_bcs(); NOT_PRODUCT(caller->_count_inlines++;) } @@ -198,7 +198,7 @@ bool InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method, // negative filter: should callee NOT be inlined? 
bool InlineTree::should_not_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, bool& should_delay, ciCallProfile& profile) { - const char* fail_msg = NULL; + const char* fail_msg = nullptr; // First check all inlining restrictions which are required for correctness if (callee_method->is_abstract()) { @@ -221,11 +221,11 @@ bool InlineTree::should_not_inline(ciMethod* callee_method, ciMethod* caller_met } // one more inlining restriction - if (fail_msg == NULL && callee_method->has_unloaded_classes_in_signature()) { + if (fail_msg == nullptr && callee_method->has_unloaded_classes_in_signature()) { fail_msg = "unloaded signature classes"; } - if (fail_msg != NULL) { + if (fail_msg != nullptr) { set_msg(fail_msg); return true; } @@ -281,10 +281,10 @@ bool InlineTree::should_not_inline(ciMethod* callee_method, ciMethod* caller_met // don't inline exception code unless the top method belongs to an // exception class - if (caller_tree() != NULL && + if (caller_tree() != nullptr && callee_method->holder()->is_subclass_of(C->env()->Throwable_klass())) { const InlineTree *top = this; - while (top->caller_tree() != NULL) top = top->caller_tree(); + while (top->caller_tree() != nullptr) top = top->caller_tree(); ciInstanceKlass* k = top->method()->holder(); if (!k->is_subclass_of(C->env()->Throwable_klass())) { set_msg("exception method"); @@ -447,8 +447,8 @@ bool InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method, } } // count callers of current method and callee - Node* callee_argument0 = is_compiled_lambda_form ? jvms->map()->argument(jvms, 0)->uncast() : NULL; - for (JVMState* j = jvms->caller(); j != NULL && j->has_method(); j = j->caller()) { + Node* callee_argument0 = is_compiled_lambda_form ? 
jvms->map()->argument(jvms, 0)->uncast() : nullptr; + for (JVMState* j = jvms->caller(); j != nullptr && j->has_method(); j = j->caller()) { if (j->method() == callee_method) { if (is_compiled_lambda_form) { // Since compiled lambda forms are heavily reused we allow recursive inlining. If it is truly @@ -487,7 +487,7 @@ bool InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method, //------------------------------pass_initial_checks---------------------------- bool InlineTree::pass_initial_checks(ciMethod* caller_method, int caller_bci, ciMethod* callee_method) { // Check if a callee_method was suggested - if (callee_method == NULL) { + if (callee_method == nullptr) { return false; } ciInstanceKlass *callee_holder = callee_method->holder(); @@ -529,15 +529,15 @@ const char* InlineTree::check_can_parse(ciMethod* callee) { if (!callee->has_balanced_monitors()) return "not compilable (unbalanced monitors)"; if ( callee->get_flow_analysis()->failing()) return "not compilable (flow analysis failed)"; if (!callee->can_be_parsed()) return "cannot be parsed"; - return NULL; + return nullptr; } //------------------------------print_inlining--------------------------------- void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci, ciMethod* caller_method, bool success) const { const char* inline_msg = msg(); - assert(inline_msg != NULL, "just checking"); - if (C->log() != NULL) { + assert(inline_msg != nullptr, "just checking"); + if (C->log() != nullptr) { if (success) { C->log()->inline_success(inline_msg); } else { @@ -548,10 +548,10 @@ void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci, caller_bci, inline_msg); if (C->print_inlining()) { C->print_inlining(callee_method, inline_level(), caller_bci, inline_msg); - guarantee(callee_method != NULL, "would crash in CompilerEvent::InlineEvent::post"); + guarantee(callee_method != nullptr, "would crash in CompilerEvent::InlineEvent::post"); if (Verbose) { const InlineTree 
*top = this; - while (top->caller_tree() != NULL) { top = top->caller_tree(); } + while (top->caller_tree() != nullptr) { top = top->caller_tree(); } //tty->print(" bcs: %d+%d invoked: %d", top->count_inline_bcs(), callee_method->code_size(), callee_method->interpreter_invocation_count()); } } @@ -565,11 +565,11 @@ void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci, bool InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms, ciCallProfile& profile, bool& should_delay) { #ifdef ASSERT - assert(callee_method != NULL, "caller checks for optimized virtual!"); + assert(callee_method != nullptr, "caller checks for optimized virtual!"); // Make sure the incoming jvms has the same information content as me. // This means that we can eventually make this whole class AllStatic. - if (jvms->caller() == NULL) { - assert(_caller_jvms == NULL, "redundant instance state"); + if (jvms->caller() == nullptr) { + assert(_caller_jvms == nullptr, "redundant instance state"); } else { assert(_caller_jvms->same_calls_as(jvms->caller()), "redundant instance state"); } @@ -587,7 +587,7 @@ bool InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms, ciCallPro // Do some parse checks. set_msg(check_can_parse(callee_method)); - if (msg() != NULL) { + if (msg() != nullptr) { print_inlining(callee_method, caller_bci, caller_method, false /* !success */); return false; } @@ -597,7 +597,7 @@ bool InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms, ciCallPro should_delay); // out if (success) { // Inline! 
- if (msg() == NULL) { + if (msg() == nullptr) { set_msg("inline (hot)"); } print_inlining(callee_method, caller_bci, caller_method, true /* success */); @@ -609,7 +609,7 @@ bool InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms, ciCallPro return true; } else { // Do not inline - if (msg() == NULL) { + if (msg() == nullptr) { set_msg("too cold to inline"); } print_inlining(callee_method, caller_bci, caller_method, false /* !success */ ); @@ -621,11 +621,11 @@ bool InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms, ciCallPro InlineTree *InlineTree::build_inline_tree_for_callee( ciMethod* callee_method, JVMState* caller_jvms, int caller_bci) { // Attempt inlining. InlineTree* old_ilt = callee_at(caller_bci, callee_method); - if (old_ilt != NULL) { + if (old_ilt != nullptr) { return old_ilt; } int max_inline_level_adjust = 0; - if (caller_jvms->method() != NULL) { + if (caller_jvms->method() != nullptr) { if (caller_jvms->method()->is_compiled_lambda_form()) { max_inline_level_adjust += 1; // don't count actions in MH or indy adapter frames } else if (callee_method->is_method_handle_intrinsic() || @@ -660,7 +660,7 @@ InlineTree *InlineTree::callee_at(int bci, ciMethod* callee) const { return sub; } } - return NULL; + return nullptr; } @@ -669,7 +669,7 @@ InlineTree *InlineTree::build_inline_tree_root() { Compile* C = Compile::current(); // Root of inline tree - InlineTree* ilt = new InlineTree(C, NULL, C->method(), NULL, -1, MaxInlineLevel); + InlineTree* ilt = new InlineTree(C, nullptr, C->method(), nullptr, -1, MaxInlineLevel); return ilt; } @@ -688,11 +688,11 @@ InlineTree* InlineTree::find_subtree_from_root(InlineTree* root, JVMState* jvms, assert(jvmsp->method() == iltp->method(), "tree still in sync"); ciMethod* d_callee = (d == depth) ? 
callee : jvms->of_depth(d+1)->method(); InlineTree* sub = iltp->callee_at(jvmsp->bci(), d_callee); - if (sub == NULL) { + if (sub == nullptr) { if (d == depth) { sub = iltp->build_inline_tree_for_callee(d_callee, jvmsp, jvmsp->bci()); } - guarantee(sub != NULL, "should be a sub-ilt here"); + guarantee(sub != nullptr, "should be a sub-ilt here"); return sub; } iltp = sub; diff --git a/src/hotspot/share/opto/c2_CodeStubs.cpp b/src/hotspot/share/opto/c2_CodeStubs.cpp index 52dea559045..8f4ef1b282a 100644 --- a/src/hotspot/share/opto/c2_CodeStubs.cpp +++ b/src/hotspot/share/opto/c2_CodeStubs.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,7 +31,7 @@ #include "opto/output.hpp" C2CodeStubList::C2CodeStubList() : - _stubs(Compile::current()->comp_arena(), 2, 0, NULL) {} + _stubs(Compile::current()->comp_arena(), 2, 0, nullptr) {} void C2CodeStubList::emit(CodeBuffer& cb) { C2_MacroAssembler masm(&cb); @@ -39,7 +39,7 @@ void C2CodeStubList::emit(CodeBuffer& cb) { C2CodeStub* stub = _stubs.at(i); int max_size = stub->max_size(); // Make sure there is enough space in the code buffer - if (cb.insts()->maybe_expand_to_ensure_remaining(max_size) && cb.blob() == NULL) { + if (cb.insts()->maybe_expand_to_ensure_remaining(max_size) && cb.blob() == nullptr) { ciEnv::current()->record_failure("CodeCache is full"); return; } diff --git a/src/hotspot/share/opto/c2_globals.hpp b/src/hotspot/share/opto/c2_globals.hpp index 70b20e3eb3a..b21e16b7f46 100644 --- a/src/hotspot/share/opto/c2_globals.hpp +++ b/src/hotspot/share/opto/c2_globals.hpp @@ -385,7 +385,7 @@ notproduct(ccstr, PrintIdealGraphAddress, "127.0.0.1", \ "IP address to connect to visualizer") \ \ - notproduct(ccstr, PrintIdealGraphFile, NULL, \ + 
notproduct(ccstr, PrintIdealGraphFile, nullptr, \ "File to dump ideal graph to. If set overrides the " \ "use of the network") \ \ diff --git a/src/hotspot/share/opto/c2compiler.cpp b/src/hotspot/share/opto/c2compiler.cpp index a6fa097bdee..f809b7d0fbf 100644 --- a/src/hotspot/share/opto/c2compiler.cpp +++ b/src/hotspot/share/opto/c2compiler.cpp @@ -113,7 +113,7 @@ void C2Compiler::compile_method(ciEnv* env, ciMethod* target, int entry_bci, boo Compile C(env, target, entry_bci, options, directive); // Check result and retry if appropriate. - if (C.failure_reason() != NULL) { + if (C.failure_reason() != nullptr) { if (C.failure_reason_is(retry_class_loading_during_parsing())) { env->report_failure(C.failure_reason()); continue; // retry @@ -229,7 +229,7 @@ bool C2Compiler::is_intrinsic_supported(const methodHandle& method, bool is_virt if (!Matcher::match_rule_supported(Op_AryEq)) return false; break; case vmIntrinsics::_copyMemory: - if (StubRoutines::unsafe_arraycopy() == NULL) return false; + if (StubRoutines::unsafe_arraycopy() == nullptr) return false; break; case vmIntrinsics::_encodeAsciiArray: if (!Matcher::match_rule_supported(Op_EncodeISOArray) || !Matcher::supports_encode_ascii_array) return false; @@ -483,7 +483,7 @@ bool C2Compiler::is_intrinsic_supported(const methodHandle& method, bool is_virt if (!Matcher::match_rule_supported(Op_UMulHiL)) return false; break; case vmIntrinsics::_getCallerClass: - if (vmClasses::reflect_CallerSensitive_klass() == NULL) return false; + if (vmClasses::reflect_CallerSensitive_klass() == nullptr) return false; break; case vmIntrinsics::_onSpinWait: if (!Matcher::match_rule_supported(Op_OnSpinWait)) return false; diff --git a/src/hotspot/share/opto/callGenerator.cpp b/src/hotspot/share/opto/callGenerator.cpp index 3f99875a80d..6f4099d7feb 100644 --- a/src/hotspot/share/opto/callGenerator.cpp +++ b/src/hotspot/share/opto/callGenerator.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2022, Oracle and/or its affiliates. 
All rights reserved. + * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -74,7 +74,7 @@ public: { _is_osr = is_osr; _expected_uses = expected_uses; - assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible"); + assert(InlineTree::check_can_parse(method) == nullptr, "parse must be possible"); } virtual bool is_parse() const { return true; } @@ -93,7 +93,7 @@ JVMState* ParseGenerator::generate(JVMState* jvms) { } if (C->failing()) { - return NULL; // bailing out of the compile; do not try to parse + return nullptr; // bailing out of the compile; do not try to parse } Parse parser(jvms, method(), _expected_uses); @@ -101,8 +101,8 @@ JVMState* ParseGenerator::generate(JVMState* jvms) { GraphKit& exits = parser.exits(); if (C->failing()) { - while (exits.pop_exception_state() != NULL) ; - return NULL; + while (exits.pop_exception_state() != nullptr) ; + return nullptr; } assert(exits.jvms()->same_calls_as(jvms), "sanity"); @@ -147,7 +147,7 @@ JVMState* DirectCallGenerator::generate(JVMState* jvms) { address target = is_static ? 
SharedRuntime::get_resolve_static_call_stub() : SharedRuntime::get_resolve_opt_virtual_call_stub(); - if (kit.C->log() != NULL) { + if (kit.C->log() != nullptr) { kit.C->log()->elem("direct_call bci='%d'", jvms->bci()); } @@ -195,7 +195,7 @@ protected: public: VirtualCallGenerator(ciMethod* method, int vtable_index, bool separate_io_proj) - : CallGenerator(method), _vtable_index(vtable_index), _separate_io_proj(separate_io_proj), _call_node(NULL) + : CallGenerator(method), _vtable_index(vtable_index), _separate_io_proj(separate_io_proj), _call_node(nullptr) { assert(vtable_index == Method::invalid_vtable_index || vtable_index >= 0, "either invalid or usable"); @@ -219,7 +219,7 @@ JVMState* VirtualCallGenerator::generate(JVMState* jvms) { kit.C->print_inlining_update(this); - if (kit.C->log() != NULL) { + if (kit.C->log() != nullptr) { kit.C->log()->elem("virtual_call bci='%d'", jvms->bci()); } @@ -235,7 +235,7 @@ JVMState* VirtualCallGenerator::generate(JVMState* jvms) { kit.inc_sp(arg_size); // restore arguments kit.uncommon_trap(Deoptimization::Reason_null_check, Deoptimization::Action_none, - NULL, "null receiver"); + nullptr, "null receiver"); return kit.transfer_exceptions_into_jvms(); } @@ -244,7 +244,7 @@ JVMState* VirtualCallGenerator::generate(JVMState* jvms) { // However currently the conversion to implicit null checks in // Block::implicit_null_check() only looks for loads and stores, not calls. ciMethod *caller = kit.method(); - ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data(); + ciMethodData *caller_md = (caller == nullptr) ? 
nullptr : caller->method_data(); if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() || ((ImplicitNullCheckThreshold > 0) && caller_md && (caller_md->trap_count(Deoptimization::Reason_null_check) @@ -288,7 +288,7 @@ JVMState* VirtualCallGenerator::generate(JVMState* jvms) { } CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) { - if (InlineTree::check_can_parse(m) != NULL) return NULL; + if (InlineTree::check_can_parse(m) != nullptr) return nullptr; return new ParseGenerator(m, expected_uses); } @@ -296,7 +296,7 @@ CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) { // for the method execution already in progress, not just the JVMS // of the caller. Thus, this CallGenerator cannot be mixed with others! CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) { - if (InlineTree::check_can_parse(m) != NULL) return NULL; + if (InlineTree::check_can_parse(m) != nullptr) return nullptr; float past_uses = m->interpreter_invocation_count(); float expected_uses = past_uses; return new ParseGenerator(m, expected_uses, true); @@ -388,7 +388,7 @@ class LateInlineMHCallGenerator : public LateInlineCallGenerator { public: LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) : - LateInlineCallGenerator(callee, NULL), _caller(caller), _input_not_const(input_not_const) {} + LateInlineCallGenerator(callee, nullptr), _caller(caller), _input_not_const(input_not_const) {} virtual bool is_mh_late_inline() const { return true; } @@ -429,7 +429,7 @@ bool LateInlineMHCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), allow_inline, input_not_const); assert(!input_not_const, "sanity"); // shouldn't have been scheduled for inlining in the first place - if (cg != NULL) { + if (cg != nullptr) { assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline, "we're doing late 
inlining"); _inline_cg = cg; C->dec_number_of_mh_late_inlines(); @@ -466,7 +466,7 @@ class LateInlineVirtualCallGenerator : public VirtualCallGenerator { public: LateInlineVirtualCallGenerator(ciMethod* method, int vtable_index, float prof_factor) : VirtualCallGenerator(method, vtable_index, true /*separate_io_projs*/), - _unique_id(0), _inline_cg(NULL), _callee(NULL), _is_pure_call(false), _prof_factor(prof_factor) { + _unique_id(0), _inline_cg(nullptr), _callee(nullptr), _is_pure_call(false), _prof_factor(prof_factor) { assert(IncrementalInlineVirtual, "required"); } @@ -478,7 +478,7 @@ class LateInlineVirtualCallGenerator : public VirtualCallGenerator { virtual void do_late_inline(); virtual void set_callee_method(ciMethod* m) { - assert(_callee == NULL, "repeated inlining attempt"); + assert(_callee == nullptr, "repeated inlining attempt"); _callee = m; } @@ -488,7 +488,7 @@ class LateInlineVirtualCallGenerator : public VirtualCallGenerator { // through and exceptional uses of the memory and io projections // as is done for allocations and macro expansion. 
JVMState* new_jvms = VirtualCallGenerator::generate(jvms); - if (call_node() != NULL) { + if (call_node() != nullptr) { call_node()->set_generator(this); } return new_jvms; @@ -548,10 +548,10 @@ bool LateInlineVirtualCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms, allow_inline, _prof_factor, - NULL /*speculative_receiver_type*/, + nullptr /*speculative_receiver_type*/, true /*allow_intrinsics*/); - if (cg != NULL) { + if (cg != nullptr) { assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline, "we're doing late inlining"); _inline_cg = cg; return true; @@ -578,7 +578,7 @@ void LateInlineMHCallGenerator::do_late_inline() { } void LateInlineVirtualCallGenerator::do_late_inline() { - assert(_callee != NULL, "required"); // set up in CallDynamicJavaNode::Ideal + assert(_callee != nullptr, "required"); // set up in CallDynamicJavaNode::Ideal CallGenerator::do_late_inline_helper(); } @@ -587,8 +587,8 @@ void CallGenerator::do_late_inline_helper() { // Can't inline it CallNode* call = call_node(); - if (call == NULL || call->outcnt() == 0 || - call->in(0) == NULL || call->in(0)->is_top()) { + if (call == nullptr || call->outcnt() == 0 || + call->in(0) == nullptr || call->in(0)->is_top()) { return; } @@ -620,8 +620,8 @@ void CallGenerator::do_late_inline_helper() { (callprojs.catchall_memproj == call->in(TypeFunc::Memory)) || (callprojs.fallthrough_ioproj == call->in(TypeFunc::I_O)) || (callprojs.catchall_ioproj == call->in(TypeFunc::I_O)) || - (callprojs.resproj != NULL && call->find_edge(callprojs.resproj) != -1) || - (callprojs.exobj != NULL && call->find_edge(callprojs.exobj) != -1)) { + (callprojs.resproj != nullptr && call->find_edge(callprojs.resproj) != -1) || + (callprojs.exobj != nullptr && call->find_edge(callprojs.exobj) != -1)) { return; } @@ -633,7 +633,7 @@ void CallGenerator::do_late_inline_helper() { // The call is marked as pure (no important side effects), but result isn't used. // It's safe to remove the call. 
- bool result_not_used = (callprojs.resproj == NULL || callprojs.resproj->outcnt() == 0); + bool result_not_used = (callprojs.resproj == nullptr || callprojs.resproj->outcnt() == 0); if (is_pure_call() && result_not_used) { GraphKit kit(call->jvms()); @@ -685,7 +685,7 @@ void CallGenerator::do_late_inline_helper() { // Setup default node notes to be picked up by the inlining Node_Notes* old_nn = C->node_notes_at(call->_idx); - if (old_nn != NULL) { + if (old_nn != nullptr) { Node_Notes* entry_nn = old_nn->clone(C); entry_nn->set_jvms(jvms); C->set_default_node_notes(entry_nn); @@ -693,7 +693,7 @@ void CallGenerator::do_late_inline_helper() { // Now perform the inlining using the synthesized JVMState JVMState* new_jvms = inline_cg()->generate(jvms); - if (new_jvms == NULL) return; // no change + if (new_jvms == nullptr) return; // no change if (C->failing()) return; // Capture any exceptional control flow @@ -862,7 +862,7 @@ JVMState* PredictedCallGenerator::generate(JVMState* jvms) { // We share a map with the caller, so his JVMS gets adjusted. Node* receiver = kit.argument(0); CompileLog* log = kit.C->log(); - if (log != NULL) { + if (log != nullptr) { log->elem("predicted_call bci='%d' exact='%d' klass='%d'", jvms->bci(), (_exact_check ? 1 : 0), log->identify(_predicted_receiver)); } @@ -877,7 +877,7 @@ JVMState* PredictedCallGenerator::generate(JVMState* jvms) { replaced_nodes.clone(); Node* casted_receiver = receiver; // will get updated in place... 
- Node* slow_ctl = NULL; + Node* slow_ctl = nullptr; if (_exact_check) { slow_ctl = kit.type_check_receiver(receiver, _predicted_receiver, _hit_prob, &casted_receiver); @@ -886,15 +886,15 @@ JVMState* PredictedCallGenerator::generate(JVMState* jvms) { &casted_receiver); } - SafePointNode* slow_map = NULL; - JVMState* slow_jvms = NULL; + SafePointNode* slow_map = nullptr; + JVMState* slow_jvms = nullptr; { PreserveJVMState pjvms(&kit); kit.set_control(slow_ctl); if (!kit.stopped()) { slow_jvms = _if_missed->generate(kit.sync_jvms()); if (kit.failing()) - return NULL; // might happen because of NodeCountInliningCutoff - assert(slow_jvms != NULL, "must be"); + return nullptr; // might happen because of NodeCountInliningCutoff + assert(slow_jvms != nullptr, "must be"); kit.add_exception_states_from(slow_jvms); kit.set_map(slow_jvms->map()); if (!kit.stopped()) @@ -913,7 +913,7 @@ JVMState* PredictedCallGenerator::generate(JVMState* jvms) { // Make the hot call: JVMState* new_jvms = _if_hit->generate(kit.sync_jvms()); - if (new_jvms == NULL) { + if (new_jvms == nullptr) { // Inline failed, so make a direct call. assert(_if_hit->is_inline(), "must have been a failed inline"); CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method()); @@ -923,7 +923,7 @@ JVMState* PredictedCallGenerator::generate(JVMState* jvms) { kit.set_jvms(new_jvms); // Need to merge slow and fast? - if (slow_map == NULL) { + if (slow_map == nullptr) { // The fast path is the only path remaining. 
return kit.transfer_exceptions_into_jvms(); } @@ -983,7 +983,7 @@ CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* c bool input_not_const; CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, allow_inline, input_not_const); Compile* C = Compile::current(); - if (cg != NULL) { + if (cg != nullptr) { if (AlwaysIncrementalInline) { return CallGenerator::for_late_inline(callee, cg); } else { @@ -1020,14 +1020,14 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* if (receiver->Opcode() == Op_ConP) { input_not_const = false; const TypeOopPtr* recv_toop = receiver->bottom_type()->isa_oopptr(); - if (recv_toop != NULL) { + if (recv_toop != nullptr) { ciMethod* target = recv_toop->const_oop()->as_method_handle()->get_vmtarget(); const int vtable_index = Method::invalid_vtable_index; if (!ciMethod::is_consistent_info(callee, target)) { print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(), "signatures mismatch"); - return NULL; + return nullptr; } CallGenerator *cg = C->call_generator(target, vtable_index, @@ -1064,7 +1064,7 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* if (!ciMethod::is_consistent_info(callee, target)) { print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(), "signatures mismatch"); - return NULL; + return nullptr; } // In lambda forms we erase signature types to avoid resolving issues @@ -1078,7 +1078,7 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* Node* arg = kit.argument(0); const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr(); const Type* sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass()); - if (arg_type != NULL && !arg_type->higher_equal(sig_type)) { + if (arg_type != nullptr && !arg_type->higher_equal(sig_type)) { const Type* recv_type = arg_type->filter_speculative(sig_type); // keep speculative part Node* cast_obj = gvn.transform(new 
CheckCastPPNode(kit.control(), arg, recv_type)); kit.set_argument(0, cast_obj); @@ -1091,7 +1091,7 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* Node* arg = kit.argument(receiver_skip + j); const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr(); const Type* sig_type = TypeOopPtr::make_from_klass(t->as_klass()); - if (arg_type != NULL && !arg_type->higher_equal(sig_type)) { + if (arg_type != nullptr && !arg_type->higher_equal(sig_type)) { const Type* narrowed_arg_type = arg_type->filter_speculative(sig_type); // keep speculative part Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, narrowed_arg_type)); kit.set_argument(receiver_skip + j, cast_obj); @@ -1106,7 +1106,7 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* int vtable_index = Method::invalid_vtable_index; bool call_does_dispatch = false; - ciKlass* speculative_receiver_type = NULL; + ciKlass* speculative_receiver_type = nullptr; if (is_virtual_or_interface) { ciInstanceKlass* klass = target->holder(); Node* receiver_node = kit.argument(0); @@ -1121,7 +1121,7 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* false /* check_access */); // We lack profiling at this call but type speculation may // provide us with a type - speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL; + speculative_receiver_type = (receiver_type != nullptr) ? 
receiver_type->speculative_type() : nullptr; } CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, allow_inline, @@ -1144,7 +1144,7 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid)); break; } - return NULL; + return nullptr; } //------------------------PredicatedIntrinsicGenerator------------------------------ @@ -1178,7 +1178,7 @@ CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic, JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) { // The code we want to generate here is: - // if (receiver == NULL) + // if (receiver == nullptr) // uncommon_Trap // if (predicate(0)) // do_intrinsic(0) @@ -1193,7 +1193,7 @@ JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) { PhaseGVN& gvn = kit.gvn(); CompileLog* log = kit.C->log(); - if (log != NULL) { + if (log != nullptr) { log->elem("predicated_intrinsic bci='%d' method='%d'", jvms->bci(), log->identify(method())); } @@ -1237,7 +1237,7 @@ JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) { PreserveJVMState pjvms(&kit); // Generate intrinsic code: JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms()); - if (new_jvms == NULL) { + if (new_jvms == nullptr) { // Intrinsic failed, use normal compilation path for this predicate. 
slow_region->add_req(kit.control()); } else { @@ -1248,7 +1248,7 @@ JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) { } } } - if (else_ctrl == NULL) { + if (else_ctrl == nullptr) { else_ctrl = kit.C->top(); } kit.set_control(else_ctrl); @@ -1263,8 +1263,8 @@ JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) { kit.set_control(gvn.transform(slow_region)); JVMState* new_jvms = _cg->generate(kit.sync_jvms()); if (kit.failing()) - return NULL; // might happen because of NodeCountInliningCutoff - assert(new_jvms != NULL, "must be"); + return nullptr; // might happen because of NodeCountInliningCutoff + assert(new_jvms != nullptr, "must be"); kit.add_exception_states_from(new_jvms); kit.set_jvms(new_jvms); if (!kit.stopped()) { @@ -1327,7 +1327,7 @@ JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) { for (int j = 1; j < results; j++) { JVMState* jvms = result_jvms[j]; Node* jmap = jvms->map(); - Node* m = NULL; + Node* m = nullptr; if (jmap->req() > i) { m = jmap->in(i); if (m != n) { @@ -1397,7 +1397,7 @@ JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) { // of a class cast failure for a monomorphic call as it will never let us convert // the call to either bi-morphic or megamorphic and can lead to unc-trap loops bool keep_exact_action = true; - kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action); + kit.uncommon_trap(_reason, _action, nullptr, "monomorphic vcall checkcast", false, keep_exact_action); } else { kit.uncommon_trap(_reason, _action); } diff --git a/src/hotspot/share/opto/callGenerator.hpp b/src/hotspot/share/opto/callGenerator.hpp index 39d1cabb428..5b76c2c538e 100644 --- a/src/hotspot/share/opto/callGenerator.hpp +++ b/src/hotspot/share/opto/callGenerator.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -44,9 +44,9 @@ class CallGenerator : public ArenaObj { void do_late_inline_helper(); - virtual bool do_late_inline_check(Compile* C, JVMState* jvms) { ShouldNotReachHere(); return false; } - virtual CallGenerator* inline_cg() const { ShouldNotReachHere(); return NULL; } - virtual bool is_pure_call() const { ShouldNotReachHere(); return false; } + virtual bool do_late_inline_check(Compile* C, JVMState* jvms) { ShouldNotReachHere(); return false; } + virtual CallGenerator* inline_cg() const { ShouldNotReachHere(); return nullptr;} + virtual bool is_pure_call() const { ShouldNotReachHere(); return false; } public: // Accessors @@ -80,7 +80,7 @@ class CallGenerator : public ArenaObj { // Replace the call with an inline version of the code virtual void do_late_inline() { ShouldNotReachHere(); } - virtual CallNode* call_node() const { return NULL; } + virtual CallNode* call_node() const { return nullptr; } virtual CallGenerator* with_call_node(CallNode* call) { return this; } virtual void set_unique_id(jlong id) { fatal("unique id only for late inlines"); }; @@ -119,7 +119,7 @@ class CallGenerator : public ArenaObj { // If the call traps, the returned map must have a control edge of top. // If the call can throw, the returned map must report has_exceptions(). // - // If the result is NULL, it means that this CallGenerator was unable + // If the result is null, it means that this CallGenerator was unable // to handle the given call, and another CallGenerator should be consulted. 
virtual JVMState* generate(JVMState* jvms) = 0; @@ -169,7 +169,7 @@ class CallGenerator : public ArenaObj { static void register_intrinsic(ciMethod* m, CallGenerator* cg); static CallGenerator* for_predicated_intrinsic(CallGenerator* intrinsic, CallGenerator* cg); - virtual Node* generate_predicate(JVMState* jvms, int predicate) { return NULL; }; + virtual Node* generate_predicate(JVMState* jvms, int predicate) { return nullptr; }; virtual void print_inlining_late(const char* msg) { ShouldNotReachHere(); } diff --git a/src/hotspot/share/opto/callnode.cpp b/src/hotspot/share/opto/callnode.cpp index ae1741416fa..8db160d57b1 100644 --- a/src/hotspot/share/opto/callnode.cpp +++ b/src/hotspot/share/opto/callnode.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -63,7 +63,7 @@ void StartNode::dump_compact_spec(outputStream *st) const { /* empty */ } //------------------------------Ideal------------------------------------------ Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){ - return remove_dead_region(phase, can_reshape) ? this : NULL; + return remove_dead_region(phase, can_reshape) ? this : nullptr; } //------------------------------calling_convention----------------------------- @@ -99,7 +99,7 @@ Node *StartNode::match( const ProjNode *proj, const Matcher *match ) { return new MachProjNode(this,proj->_con,rm,ideal_reg); } } - return NULL; + return nullptr; } //------------------------------StartOSRNode---------------------------------- @@ -169,7 +169,7 @@ ReturnNode::ReturnNode(uint edges, Node *cntrl, Node *i_o, Node *memory, Node *f } Node *ReturnNode::Ideal(PhaseGVN *phase, bool can_reshape){ - return remove_dead_region(phase, can_reshape) ? 
this : NULL; + return remove_dead_region(phase, can_reshape) ? this : nullptr; } const Type* ReturnNode::Value(PhaseGVN* phase) const { @@ -219,7 +219,7 @@ RethrowNode::RethrowNode( } Node *RethrowNode::Ideal(PhaseGVN *phase, bool can_reshape){ - return remove_dead_region(phase, can_reshape) ? this : NULL; + return remove_dead_region(phase, can_reshape) ? this : nullptr; } const Type* RethrowNode::Value(PhaseGVN* phase) const { @@ -264,13 +264,13 @@ uint TailJumpNode::match_edge(uint idx) const { //============================================================================= JVMState::JVMState(ciMethod* method, JVMState* caller) : _method(method) { - assert(method != NULL, "must be valid call site"); + assert(method != nullptr, "must be valid call site"); _bci = InvocationEntryBci; _reexecute = Reexecute_Undefined; debug_only(_bci = -99); // random garbage value debug_only(_map = (SafePointNode*)-1); _caller = caller; - _depth = 1 + (caller == NULL ? 0 : caller->depth()); + _depth = 1 + (caller == nullptr ? 
0 : caller->depth()); _locoff = TypeFunc::Parms; _stkoff = _locoff + _method->max_locals(); _monoff = _stkoff + _method->max_stack(); @@ -279,11 +279,11 @@ JVMState::JVMState(ciMethod* method, JVMState* caller) : _sp = 0; } JVMState::JVMState(int stack_size) : - _method(NULL) { + _method(nullptr) { _bci = InvocationEntryBci; _reexecute = Reexecute_Undefined; debug_only(_map = (SafePointNode*)-1); - _caller = NULL; + _caller = nullptr; _depth = 1; _locoff = TypeFunc::Parms; _stkoff = _locoff; @@ -312,13 +312,13 @@ bool JVMState::same_calls_as(const JVMState* that) const { const JVMState* q = that; for (;;) { if (p->_method != q->_method) return false; - if (p->_method == NULL) return true; // bci is irrelevant + if (p->_method == nullptr) return true; // bci is irrelevant if (p->_bci != q->_bci) return false; if (p->_reexecute != q->_reexecute) return false; p = p->caller(); q = q->caller(); if (p == q) return true; - assert(p != NULL && q != NULL, "depth check ensures we don't run off end"); + assert(p != nullptr && q != nullptr, "depth check ensures we don't run off end"); } } @@ -339,7 +339,7 @@ uint JVMState::debug_end() const { //------------------------------debug_depth------------------------------------ uint JVMState::debug_depth() const { uint total = 0; - for (const JVMState* jvmp = this; jvmp != NULL; jvmp = jvmp->caller()) { + for (const JVMState* jvmp = this; jvmp != nullptr; jvmp = jvmp->caller()) { total += jvmp->debug_size(); } return total; @@ -351,7 +351,7 @@ uint JVMState::debug_depth() const { // Given an allocation (a Chaitin object) and a Node decide if the Node carries // any defined value or not. If it does, print out the register or constant. static void format_helper( PhaseRegAlloc *regalloc, outputStream* st, Node *n, const char *msg, uint i, GrowableArray *scobjs ) { - if (n == NULL) { st->print(" NULL"); return; } + if (n == nullptr) { st->print(" null"); return; } if (n->is_SafePointScalarObject()) { // Scalar replacement. 
SafePointScalarObjectNode* spobj = n->as_SafePointScalarObject(); @@ -374,7 +374,7 @@ static void format_helper( PhaseRegAlloc *regalloc, outputStream* st, Node *n, c break; case Type::AnyPtr: assert( t == TypePtr::NULL_PTR || n->in_dump(), "" ); - st->print(" %s%d]=#NULL",msg,i); + st->print(" %s%d]=#null",msg,i); break; case Type::AryPtr: case Type::InstPtr: @@ -477,7 +477,7 @@ void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) ciKlass* cik = spobj->bottom_type()->is_oopptr()->exact_klass(); assert(cik->is_instance_klass() || cik->is_array_klass(), "Not supported allocation."); - ciInstanceKlass *iklass = NULL; + ciInstanceKlass *iklass = nullptr; if (cik->is_instance_klass()) { cik->print_name_on(st); iklass = cik->as_instance_klass(); @@ -505,7 +505,7 @@ void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) uint first_ind = spobj->first_index(mcall->jvms()); Node* fld_node = mcall->in(first_ind); ciField* cifield; - if (iklass != NULL) { + if (iklass != nullptr) { st->print(" ["); cifield = iklass->nonstatic_field_at(0); cifield->print_name_on(st); @@ -515,7 +515,7 @@ void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) } for (uint j = 1; j < nf; j++) { fld_node = mcall->in(first_ind+j); - if (iklass != NULL) { + if (iklass != nullptr) { st->print(", ["); cifield = iklass->nonstatic_field_at(j); cifield->print_name_on(st); @@ -529,12 +529,12 @@ void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) } } st->cr(); - if (caller() != NULL) caller()->format(regalloc, n, st); + if (caller() != nullptr) caller()->format(regalloc, n, st); } void JVMState::dump_spec(outputStream *st) const { - if (_method != NULL) { + if (_method != nullptr) { bool printed = false; if (!Verbose) { // The JVMS dumps make really, really long lines. 
@@ -546,8 +546,8 @@ void JVMState::dump_spec(outputStream *st) const { const char* name = namest.base(); if (name[0] == ' ') ++name; const char* endcn = strchr(name, ':'); // end of class name - if (endcn == NULL) endcn = strchr(name, '('); - if (endcn == NULL) endcn = name + strlen(name); + if (endcn == nullptr) endcn = strchr(name, '('); + if (endcn == nullptr) endcn = name + strlen(name); while (endcn > name && endcn[-1] != '.' && endcn[-1] != '/') --endcn; st->print(" %s", endcn); @@ -560,30 +560,30 @@ void JVMState::dump_spec(outputStream *st) const { } else { st->print(" runtime stub"); } - if (caller() != NULL) caller()->dump_spec(st); + if (caller() != nullptr) caller()->dump_spec(st); } void JVMState::dump_on(outputStream* st) const { bool print_map = _map && !((uintptr_t)_map & 1) && - ((caller() == NULL) || (caller()->map() != _map)); + ((caller() == nullptr) || (caller()->map() != _map)); if (print_map) { if (_map->len() > _map->req()) { // _map->has_exceptions() Node* ex = _map->in(_map->req()); // _map->next_exception() // skip the first one; it's already being printed - while (ex != NULL && ex->len() > ex->req()) { + while (ex != nullptr && ex->len() > ex->req()) { ex = ex->in(ex->req()); // ex->next_exception() ex->dump(1); } } _map->dump(Verbose ? 
2 : 1); } - if (caller() != NULL) { + if (caller() != nullptr) { caller()->dump_on(st); } st->print("JVMS depth=%d loc=%d stk=%d arg=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d reexecute=%s method=", depth(), locoff(), stkoff(), argoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci(), should_reexecute()?"true":"false"); - if (_method == NULL) { + if (_method == nullptr) { st->print_cr("(none)"); } else { _method->print_name(st); @@ -620,7 +620,7 @@ JVMState* JVMState::clone_shallow(Compile* C) const { //---------------------------clone_deep---------------------------------------- JVMState* JVMState::clone_deep(Compile* C) const { JVMState* n = clone_shallow(C); - for (JVMState* p = n; p->_caller != NULL; p = p->_caller) { + for (JVMState* p = n; p->_caller != nullptr; p = p->_caller) { p->_caller = p->_caller->clone_shallow(C); } assert(n->depth() == depth(), "sanity"); @@ -632,7 +632,7 @@ JVMState* JVMState::clone_deep(Compile* C) const { * Reset map for all callers */ void JVMState::set_map_deep(SafePointNode* map) { - for (JVMState* p = this; p != NULL; p = p->_caller) { + for (JVMState* p = this; p != nullptr; p = p->_caller) { p->set_map(map); } } @@ -646,7 +646,7 @@ void JVMState::bind_map(SafePointNode* map) { // Adapt offsets in in-array after adding or removing an edge. // Prerequisite is that the JVMState is used by only one node. 
void JVMState::adapt_position(int delta) { - for (JVMState* jvms = this; jvms != NULL; jvms = jvms->caller()) { + for (JVMState* jvms = this; jvms != nullptr; jvms = jvms->caller()) { jvms->set_locoff(jvms->locoff() + delta); jvms->set_stkoff(jvms->stkoff() + delta); jvms->set_monoff(jvms->monoff() + delta); @@ -665,7 +665,7 @@ int JVMState::interpreter_frame_size() const { int callee_locals = 0; int extra_args = method()->max_stack() - stk_size(); - while (jvms != NULL) { + while (jvms != nullptr) { int locks = jvms->nof_monitors(); int temps = jvms->stk_size(); bool is_top_frame = (jvms == this); @@ -710,9 +710,9 @@ void CallNode::dump_req(outputStream *st, DumpConfig* dc) const { void CallNode::dump_spec(outputStream *st) const { st->print(" "); - if (tf() != NULL) tf()->dump_on(st); + if (tf() != nullptr) tf()->dump_on(st); if (_cnt != COUNT_UNKNOWN) st->print(" C=%f",_cnt); - if (jvms() != NULL) jvms()->dump_spec(st); + if (jvms() != nullptr) jvms()->dump_spec(st); } #endif @@ -774,7 +774,7 @@ Node *CallNode::match( const ProjNode *proj, const Matcher *match ) { default: ShouldNotReachHere(); } - return NULL; + return nullptr; } // Do we Match on this edge index or not? Match no edges @@ -787,10 +787,10 @@ uint CallNode::match_edge(uint idx) const { // instance at the specified offset. // bool CallNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { - assert((t_oop != NULL), "sanity"); + assert((t_oop != nullptr), "sanity"); if (is_call_to_arraycopystub() && strcmp(_name, "unsafe_arraycopy") != 0) { const TypeTuple* args = _tf->domain(); - Node* dest = NULL; + Node* dest = nullptr; // Stubs that can be called once an ArrayCopyNode is expanded have // different signatures. Look for the second pointer argument, // that is the destination of the copy. 
@@ -803,7 +803,7 @@ bool CallNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { } } } - guarantee(dest != NULL, "Call had only one ptr in, broken IR!"); + guarantee(dest != nullptr, "Call had only one ptr in, broken IR!"); if (!dest->is_top() && may_modify_arraycopy_helper(phase->type(dest)->is_oopptr(), t_oop, phase)) { return true; } @@ -819,29 +819,29 @@ bool CallNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { if (is_CallStaticJava() && as_CallStaticJava()->is_boxing_method()) { // Skip unrelated boxing methods. Node* proj = proj_out_or_null(TypeFunc::Parms); - if ((proj == NULL) || (phase->type(proj)->is_instptr()->instance_klass() != boxing_klass)) { + if ((proj == nullptr) || (phase->type(proj)->is_instptr()->instance_klass() != boxing_klass)) { return false; } } - if (is_CallJava() && as_CallJava()->method() != NULL) { + if (is_CallJava() && as_CallJava()->method() != nullptr) { ciMethod* meth = as_CallJava()->method(); if (meth->is_getter()) { return false; } // May modify (by reflection) if an boxing object is passed // as argument or returned. - Node* proj = returns_pointer() ? proj_out_or_null(TypeFunc::Parms) : NULL; - if (proj != NULL) { + Node* proj = returns_pointer() ? 
proj_out_or_null(TypeFunc::Parms) : nullptr; + if (proj != nullptr) { const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr(); - if ((inst_t != NULL) && (!inst_t->klass_is_exact() || - (inst_t->instance_klass() == boxing_klass))) { + if ((inst_t != nullptr) && (!inst_t->klass_is_exact() || + (inst_t->instance_klass() == boxing_klass))) { return true; } } const TypeTuple* d = tf()->domain(); for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr(); - if ((inst_t != NULL) && (!inst_t->klass_is_exact() || + if ((inst_t != nullptr) && (!inst_t->klass_is_exact() || (inst_t->instance_klass() == boxing_klass))) { return true; } @@ -866,18 +866,18 @@ bool CallNode::has_non_debug_use(Node *n) { // Returns the unique CheckCastPP of a call // or 'this' if there are several CheckCastPP or unexpected uses -// or returns NULL if there is no one. +// or returns null if there is no one. Node *CallNode::result_cast() { - Node *cast = NULL; + Node *cast = nullptr; Node *p = proj_out_or_null(TypeFunc::Parms); - if (p == NULL) - return NULL; + if (p == nullptr) + return nullptr; for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) { Node *use = p->fast_out(i); if (use->is_CheckCastPP()) { - if (cast != NULL) { + if (cast != nullptr) { return this; // more than 1 CheckCastPP } cast = use; @@ -896,15 +896,15 @@ Node *CallNode::result_cast() { void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts) { - projs->fallthrough_proj = NULL; - projs->fallthrough_catchproj = NULL; - projs->fallthrough_ioproj = NULL; - projs->catchall_ioproj = NULL; - projs->catchall_catchproj = NULL; - projs->fallthrough_memproj = NULL; - projs->catchall_memproj = NULL; - projs->resproj = NULL; - projs->exobj = NULL; + projs->fallthrough_proj = nullptr; + projs->fallthrough_catchproj = nullptr; + projs->fallthrough_ioproj = nullptr; + projs->catchall_ioproj = nullptr; + projs->catchall_catchproj = 
nullptr; + projs->fallthrough_memproj = nullptr; + projs->catchall_memproj = nullptr; + projs->resproj = nullptr; + projs->exobj = nullptr; for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) { ProjNode *pn = fast_out(i)->as_Proj(); @@ -915,8 +915,8 @@ void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj projs->fallthrough_proj = pn; const Node* cn = pn->unique_ctrl_out_or_null(); - if (cn != NULL && cn->is_Catch()) { - ProjNode *cpn = NULL; + if (cn != nullptr && cn->is_Catch()) { + ProjNode *cpn = nullptr; for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) { cpn = cn->fast_out(k)->as_Proj(); assert(cpn->is_CatchProj(), "must be a CatchProjNode"); @@ -938,7 +938,7 @@ void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj for (DUIterator j = pn->outs(); pn->has_out(j); j++) { Node* e = pn->out(j); if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) { - assert(projs->exobj == NULL, "only one"); + assert(projs->exobj == nullptr, "only one"); projs->exobj = e; } } @@ -960,15 +960,15 @@ void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj // The resproj may not exist because the result could be ignored // and the exception object may not exist if an exception handler // swallows the exception but all the other must exist and be found. 
- assert(projs->fallthrough_proj != NULL, "must be found"); + assert(projs->fallthrough_proj != nullptr, "must be found"); do_asserts = do_asserts && !Compile::current()->inlining_incrementally(); - assert(!do_asserts || projs->fallthrough_catchproj != NULL, "must be found"); - assert(!do_asserts || projs->fallthrough_memproj != NULL, "must be found"); - assert(!do_asserts || projs->fallthrough_ioproj != NULL, "must be found"); - assert(!do_asserts || projs->catchall_catchproj != NULL, "must be found"); + assert(!do_asserts || projs->fallthrough_catchproj != nullptr, "must be found"); + assert(!do_asserts || projs->fallthrough_memproj != nullptr, "must be found"); + assert(!do_asserts || projs->fallthrough_ioproj != nullptr, "must be found"); + assert(!do_asserts || projs->catchall_catchproj != nullptr, "must be found"); if (separate_io_proj) { - assert(!do_asserts || projs->catchall_memproj != NULL, "must be found"); - assert(!do_asserts || projs->catchall_ioproj != NULL, "must be found"); + assert(!do_asserts || projs->catchall_memproj != nullptr, "must be found"); + assert(!do_asserts || projs->catchall_ioproj != nullptr, "must be found"); } } @@ -976,7 +976,7 @@ Node* CallNode::Ideal(PhaseGVN* phase, bool can_reshape) { #ifdef ASSERT // Validate attached generator CallGenerator* cg = generator(); - if (cg != NULL) { + if (cg != nullptr) { assert(is_CallStaticJava() && cg->is_mh_late_inline() || is_CallDynamicJava() && cg->is_virtual_late_inline(), "mismatch"); } @@ -985,7 +985,7 @@ Node* CallNode::Ideal(PhaseGVN* phase, bool can_reshape) { } bool CallNode::is_call_to_arraycopystub() const { - if (_name != NULL && strstr(_name, "arraycopy") != 0) { + if (_name != nullptr && strstr(_name, "arraycopy") != 0) { return true; } return false; @@ -1013,7 +1013,7 @@ void CallJavaNode::copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt for (uint i = old_dbg_start; i < sfpt->req(); i++) { Node* old_in = sfpt->in(i); // Clone old SafePointScalarObjectNodes, 
adjusting their field contents. - if (old_in != NULL && old_in->is_SafePointScalarObject()) { + if (old_in != nullptr && old_in->is_SafePointScalarObject()) { SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject(); bool new_node; Node* new_in = old_sosn->clone(sosn_map, new_node); @@ -1027,8 +1027,8 @@ void CallJavaNode::copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt } // JVMS may be shared so clone it before we modify it - set_jvms(sfpt->jvms() != NULL ? sfpt->jvms()->clone_deep(C) : NULL); - for (JVMState *jvms = this->jvms(); jvms != NULL; jvms = jvms->caller()) { + set_jvms(sfpt->jvms() != nullptr ? sfpt->jvms()->clone_deep(C) : nullptr); + for (JVMState *jvms = this->jvms(); jvms != nullptr; jvms = jvms->caller()) { jvms->set_map(this); jvms->set_locoff(jvms->locoff()+jvms_adj); jvms->set_stkoff(jvms->stkoff()+jvms_adj); @@ -1040,7 +1040,7 @@ void CallJavaNode::copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt #ifdef ASSERT bool CallJavaNode::validate_symbolic_info() const { - if (method() == NULL) { + if (method() == nullptr) { return true; // call into runtime or uncommon trap } ciMethod* symbolic_info = jvms()->method()->get_method_at_bci(jvms()->bci()); @@ -1077,7 +1077,7 @@ bool CallStaticJavaNode::cmp( const Node &n ) const { Node* CallStaticJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) { CallGenerator* cg = generator(); - if (can_reshape && cg != NULL) { + if (can_reshape && cg != nullptr) { assert(IncrementalInlineMH, "required"); assert(cg->call_node() == this, "mismatch"); assert(cg->is_mh_late_inline(), "not virtual"); @@ -1088,7 +1088,7 @@ Node* CallStaticJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) { if (iid == vmIntrinsics::_invokeBasic) { if (in(TypeFunc::Parms)->Opcode() == Op_ConP) { phase->C->prepend_late_inline(cg); - set_generator(NULL); + set_generator(nullptr); } } else if (iid == vmIntrinsics::_linkToNative) { // never retry @@ -1096,7 +1096,7 @@ Node* 
CallStaticJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) { assert(callee->has_member_arg(), "wrong type of call?"); if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) { phase->C->prepend_late_inline(cg); - set_generator(NULL); + set_generator(nullptr); } } } @@ -1106,7 +1106,7 @@ Node* CallStaticJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) { //----------------------------uncommon_trap_request---------------------------- // If this is an uncommon trap, return the request code, else zero. int CallStaticJavaNode::uncommon_trap_request() const { - if (_name != NULL && !strcmp(_name, "uncommon_trap")) { + if (_name != nullptr && !strcmp(_name, "uncommon_trap")) { return extract_uncommon_trap_request(this); } return 0; @@ -1114,7 +1114,7 @@ int CallStaticJavaNode::uncommon_trap_request() const { int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) { #ifndef PRODUCT if (!(call->req() > TypeFunc::Parms && - call->in(TypeFunc::Parms) != NULL && + call->in(TypeFunc::Parms) != nullptr && call->in(TypeFunc::Parms)->is_Con() && call->in(TypeFunc::Parms)->bottom_type()->isa_int())) { assert(in_dump() != 0, "OK if dumping"); @@ -1128,7 +1128,7 @@ int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) { #ifndef PRODUCT void CallStaticJavaNode::dump_spec(outputStream *st) const { st->print("# Static "); - if (_name != NULL) { + if (_name != nullptr) { st->print("%s", _name); int trap_req = uncommon_trap_request(); if (trap_req != 0) { @@ -1162,7 +1162,7 @@ bool CallDynamicJavaNode::cmp( const Node &n ) const { Node* CallDynamicJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) { CallGenerator* cg = generator(); - if (can_reshape && cg != NULL) { + if (can_reshape && cg != nullptr) { assert(IncrementalInlineVirtual, "required"); assert(cg->call_node() == this, "mismatch"); assert(cg->is_virtual_late_inline(), "not virtual"); @@ -1195,7 +1195,7 @@ Node* CallDynamicJavaNode::Ideal(PhaseGVN* phase, bool can_reshape) { // 
Register for late inlining. cg->set_callee_method(callee); phase->C->prepend_late_inline(cg); // MH late inlining prepends to the list, so do the same - set_generator(NULL); + set_generator(nullptr); } } return CallNode::Ideal(phase, can_reshape); @@ -1285,9 +1285,9 @@ bool SafePointNode::cmp( const Node &n ) const { //-------------------------set_next_exception---------------------------------- void SafePointNode::set_next_exception(SafePointNode* n) { - assert(n == NULL || n->Opcode() == Op_SafePoint, "correct value for next_exception"); + assert(n == nullptr || n->Opcode() == Op_SafePoint, "correct value for next_exception"); if (len() == req()) { - if (n != NULL) add_prec(n); + if (n != nullptr) add_prec(n); } else { set_prec(req(), n); } @@ -1297,10 +1297,10 @@ void SafePointNode::set_next_exception(SafePointNode* n) { //----------------------------next_exception----------------------------------- SafePointNode* SafePointNode::next_exception() const { if (len() == req()) { - return NULL; + return nullptr; } else { Node* n = in(req()); - assert(n == NULL || n->Opcode() == Op_SafePoint, "no other uses of prec edges"); + assert(n == nullptr || n->Opcode() == Op_SafePoint, "no other uses of prec edges"); return (SafePointNode*) n; } } @@ -1309,8 +1309,8 @@ SafePointNode* SafePointNode::next_exception() const { //------------------------------Ideal------------------------------------------ // Skip over any collapsed Regions Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) { - assert(_jvms == NULL || ((uintptr_t)_jvms->map() & 1) || _jvms->map() == this, "inconsistent JVMState"); - return remove_dead_region(phase, can_reshape) ? this : NULL; + assert(_jvms == nullptr || ((uintptr_t)_jvms->map() & 1) || _jvms->map() == this, "inconsistent JVMState"); + return remove_dead_region(phase, can_reshape) ? 
this : nullptr; } //------------------------------Identity--------------------------------------- @@ -1322,7 +1322,7 @@ Node* SafePointNode::Identity(PhaseGVN* phase) { Node* out_c = unique_ctrl_out_or_null(); // This can be the safepoint of an outer strip mined loop if the inner loop's backedge was removed. Replacing the // outer loop's safepoint could confuse removal of the outer loop. - if (out_c != NULL && !out_c->is_OuterStripMinedLoopEnd()) { + if (out_c != nullptr && !out_c->is_OuterStripMinedLoopEnd()) { return in(TypeFunc::Control); } } @@ -1508,7 +1508,7 @@ uint SafePointScalarObjectNode::match_edge(uint idx) const { SafePointScalarObjectNode* SafePointScalarObjectNode::clone(Dict* sosn_map, bool& new_node) const { void* cached = (*sosn_map)[(void*)this]; - if (cached != NULL) { + if (cached != nullptr) { new_node = false; return (SafePointScalarObjectNode*)cached; } @@ -1533,7 +1533,7 @@ uint AllocateNode::size_of() const { return sizeof(*this); } AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio, Node *size, Node *klass_node, Node *initial_test) - : CallNode(atype, NULL, TypeRawPtr::BOTTOM) + : CallNode(atype, nullptr, TypeRawPtr::BOTTOM) { init_class_id(Class_Allocate); init_flags(Flag_is_macro); @@ -1557,12 +1557,12 @@ AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype, void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer) { - assert(initializer != NULL && + assert(initializer != nullptr && initializer->is_initializer() && !initializer->is_static(), "unexpected initializer method"); BCEscapeAnalyzer* analyzer = initializer->get_bcea(); - if (analyzer == NULL) { + if (analyzer == nullptr) { return; } @@ -1572,7 +1572,7 @@ void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer) } } Node *AllocateNode::make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem) { - Node* mark_node = NULL; + Node* mark_node = nullptr; // For now only enable fast locking for 
non-array types mark_node = phase->MakeConX(markWord::prototype().value()); return mark_node; @@ -1580,15 +1580,15 @@ Node *AllocateNode::make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, N // Retrieve the length from the AllocateArrayNode. Narrow the type with a // CastII, if appropriate. If we are not allowed to create new nodes, and -// a CastII is appropriate, return NULL. +// a CastII is appropriate, return null. Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseTransform *phase, bool allow_new_nodes) { Node *length = in(AllocateNode::ALength); - assert(length != NULL, "length is not null"); + assert(length != nullptr, "length is not null"); const TypeInt* length_type = phase->find_int_type(length); const TypeAryPtr* ary_type = oop_type->isa_aryptr(); - if (ary_type != NULL && length_type != NULL) { + if (ary_type != nullptr && length_type != nullptr) { const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type); if (narrow_length_type != length_type) { // Assert one of: @@ -1601,14 +1601,14 @@ Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseTran narrow_length_type->_lo >= length_type->_lo), "narrow type must be narrower than length type"); - // Return NULL if new nodes are not allowed + // Return null if new nodes are not allowed if (!allow_new_nodes) { - return NULL; + return nullptr; } // Create a cast which is control dependent on the initialization to // propagate the fact that the array length must be positive. 
InitializeNode* init = initialization(); - if (init != NULL) { + if (init != nullptr) { length = new CastIINode(length, narrow_length_type); length->set_req(TypeFunc::Control, init->proj_out_or_null(TypeFunc::Control)); } @@ -1749,13 +1749,13 @@ uint LockNode::size_of() const { return sizeof(*this); } // - eliminated locking nodes // static Node *next_control(Node *ctrl) { - if (ctrl == NULL) - return NULL; + if (ctrl == nullptr) + return nullptr; while (1) { if (ctrl->is_Region()) { RegionNode *r = ctrl->as_Region(); Node *n = r->is_copy(); - if (n == NULL) + if (n == nullptr) break; // hit a region, return it else ctrl = n; @@ -1778,10 +1778,10 @@ static Node *next_control(Node *ctrl) { // bool AbstractLockNode::find_matching_unlock(const Node* ctrl, LockNode* lock, GrowableArray &lock_ops) { - ProjNode *ctrl_proj = (ctrl->is_Proj()) ? ctrl->as_Proj() : NULL; - if (ctrl_proj != NULL && ctrl_proj->_con == TypeFunc::Control) { + ProjNode *ctrl_proj = (ctrl->is_Proj()) ? ctrl->as_Proj() : nullptr; + if (ctrl_proj != nullptr && ctrl_proj->_con == TypeFunc::Control) { Node *n = ctrl_proj->in(0); - if (n != NULL && n->is_Unlock()) { + if (n != nullptr && n->is_Unlock()) { UnlockNode *unlock = n->as_Unlock(); BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node()); @@ -1801,11 +1801,11 @@ bool AbstractLockNode::find_matching_unlock(const Node* ctrl, LockNode* lock, // Find the lock matching an unlock. Returns null if a safepoint // or complicated control is encountered first. 
LockNode *AbstractLockNode::find_matching_lock(UnlockNode* unlock) { - LockNode *lock_result = NULL; + LockNode *lock_result = nullptr; // find the matching lock, or an intervening safepoint Node *ctrl = next_control(unlock->in(0)); while (1) { - assert(ctrl != NULL, "invalid control graph"); + assert(ctrl != nullptr, "invalid control graph"); assert(!ctrl->is_Start(), "missing lock for unlock"); if (ctrl->is_top()) break; // dead control path if (ctrl->is_Proj()) ctrl = ctrl->in(0); @@ -1813,7 +1813,7 @@ LockNode *AbstractLockNode::find_matching_lock(UnlockNode* unlock) { break; // found a safepoint (may be the lock we are searching for) } else if (ctrl->is_Region()) { // Check for a simple diamond pattern. Punt on anything more complicated - if (ctrl->req() == 3 && ctrl->in(1) != NULL && ctrl->in(2) != NULL) { + if (ctrl->req() == 3 && ctrl->in(1) != nullptr && ctrl->in(2) != nullptr) { Node *in1 = next_control(ctrl->in(1)); Node *in2 = next_control(ctrl->in(2)); if (((in1->is_IfTrue() && in2->is_IfFalse()) || @@ -1852,7 +1852,7 @@ bool AbstractLockNode::find_lock_and_unlock_through_if(Node* node, LockNode* loc if (if_node->is_If() && if_node->outcnt() == 2 && (if_true || node->is_IfFalse())) { Node *lock_ctrl = next_control(if_node->in(0)); if (find_matching_unlock(lock_ctrl, lock, lock_ops)) { - Node* lock1_node = NULL; + Node* lock1_node = nullptr; ProjNode* proj = if_node->as_If()->proj_out(!if_true); if (if_true) { if (proj->is_IfFalse() && proj->outcnt() == 1) { @@ -1863,7 +1863,7 @@ bool AbstractLockNode::find_lock_and_unlock_through_if(Node* node, LockNode* loc lock1_node = proj->unique_out(); } } - if (lock1_node != NULL && lock1_node->is_Lock()) { + if (lock1_node != nullptr && lock1_node->is_Lock()) { LockNode *lock1 = lock1_node->as_Lock(); BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node()); @@ -1888,7 +1888,7 @@ bool AbstractLockNode::find_unlocks_for_region(const RegionNode* 
region, LockNod // in(0) should be self edge so skip it. for (int i = 1; i < (int)region->req(); i++) { Node *in_node = next_control(region->in(i)); - if (in_node != NULL) { + if (in_node != nullptr) { if (find_matching_unlock(in_node, lock, lock_ops)) { // found a match so keep on checking. continue; @@ -1943,11 +1943,11 @@ void AbstractLockNode::dump_compact_spec(outputStream* st) const { //============================================================================= Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) { - // perform any generic optimizations first (returns 'this' or NULL) + // perform any generic optimizations first (returns 'this' or null) Node *result = SafePointNode::Ideal(phase, can_reshape); - if (result != NULL) return result; + if (result != nullptr) return result; // Don't bother trying to transform a dead node - if (in(0) && in(0)->is_top()) return NULL; + if (in(0) && in(0)->is_top()) return nullptr; // Now see if we can optimize away this lock. We don't actually // remove the locking here, we simply set the _eliminate flag which @@ -1959,7 +1959,7 @@ Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) { // If we are locking an non-escaped object, the lock/unlock is unnecessary // ConnectionGraph *cgr = phase->C->congraph(); - if (cgr != NULL && cgr->not_global_escape(obj_node())) { + if (cgr != nullptr && cgr->not_global_escape(obj_node())) { assert(!is_eliminated() || is_coarsened(), "sanity"); // The lock could be marked eliminated by lock coarsening // code during first IGVN before EA. 
Replace coarsened flag @@ -1978,7 +1978,7 @@ Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) { // Try lock coarsening // PhaseIterGVN* iter = phase->is_IterGVN(); - if (iter != NULL && !is_eliminated()) { + if (iter != nullptr && !is_eliminated()) { GrowableArray lock_ops; @@ -2058,10 +2058,10 @@ Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) { //============================================================================= bool LockNode::is_nested_lock_region() { - return is_nested_lock_region(NULL); + return is_nested_lock_region(nullptr); } -// p is used for access to compilation log; no logging if NULL +// p is used for access to compilation log; no logging if null bool LockNode::is_nested_lock_region(Compile * c) { BoxLockNode* box = box_node()->as_BoxLock(); int stk_slot = box->stack_slot(); @@ -2074,8 +2074,8 @@ bool LockNode::is_nested_lock_region(Compile * c) { // Ignore complex cases: merged locks or multiple locks. Node* obj = obj_node(); - LockNode* unique_lock = NULL; - Node* bad_lock = NULL; + LockNode* unique_lock = nullptr; + Node* bad_lock = nullptr; if (!box->is_simple_lock_region(&unique_lock, obj, &bad_lock)) { #ifdef ASSERT this->log_lock_optimization(c, "eliminate_lock_INLR_2a", bad_lock); @@ -2084,7 +2084,7 @@ bool LockNode::is_nested_lock_region(Compile * c) { } if (unique_lock != this) { #ifdef ASSERT - this->log_lock_optimization(c, "eliminate_lock_INLR_2b", (unique_lock != NULL ? unique_lock : bad_lock)); + this->log_lock_optimization(c, "eliminate_lock_INLR_2b", (unique_lock != nullptr ? 
unique_lock : bad_lock)); if (PrintEliminateLocks && Verbose) { tty->print_cr("=============== unique_lock != this ============"); tty->print(" this: "); @@ -2093,11 +2093,11 @@ bool LockNode::is_nested_lock_region(Compile * c) { box->dump(); tty->print(" obj: "); obj->dump(); - if (unique_lock != NULL) { + if (unique_lock != nullptr) { tty->print(" unique_lock: "); unique_lock->dump(); } - if (bad_lock != NULL) { + if (bad_lock != nullptr) { tty->print(" bad_lock: "); bad_lock->dump(); } @@ -2138,11 +2138,11 @@ uint UnlockNode::size_of() const { return sizeof(*this); } //============================================================================= Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) { - // perform any generic optimizations first (returns 'this' or NULL) + // perform any generic optimizations first (returns 'this' or null) Node *result = SafePointNode::Ideal(phase, can_reshape); - if (result != NULL) return result; + if (result != nullptr) return result; // Don't bother trying to transform a dead node - if (in(0) && in(0)->is_top()) return NULL; + if (in(0) && in(0)->is_top()) return nullptr; // Now see if we can optimize away this unlock. We don't actually // remove the unlocking here, we simply set the _eliminate flag which @@ -2155,7 +2155,7 @@ Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) { // If we are unlocking an non-escaped object, the lock/unlock is unnecessary. // ConnectionGraph *cgr = phase->C->congraph(); - if (cgr != NULL && cgr->not_global_escape(obj_node())) { + if (cgr != nullptr && cgr->not_global_escape(obj_node())) { assert(!is_eliminated() || is_coarsened(), "sanity"); // The lock could be marked eliminated by lock coarsening // code during first IGVN before EA. 
Replace coarsened flag @@ -2170,24 +2170,24 @@ Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) { } void AbstractLockNode::log_lock_optimization(Compile *C, const char * tag, Node* bad_lock) const { - if (C == NULL) { + if (C == nullptr) { return; } CompileLog* log = C->log(); - if (log != NULL) { + if (log != nullptr) { Node* box = box_node(); Node* obj = obj_node(); - int box_id = box != NULL ? box->_idx : -1; - int obj_id = obj != NULL ? obj->_idx : -1; + int box_id = box != nullptr ? box->_idx : -1; + int obj_id = obj != nullptr ? obj->_idx : -1; log->begin_head("%s compile_id='%d' lock_id='%d' class='%s' kind='%s' box_id='%d' obj_id='%d' bad_id='%d'", tag, C->compile_id(), this->_idx, is_Unlock() ? "unlock" : is_Lock() ? "lock" : "?", - kind_as_string(), box_id, obj_id, (bad_lock != NULL ? bad_lock->_idx : -1)); + kind_as_string(), box_id, obj_id, (bad_lock != nullptr ? bad_lock->_idx : -1)); log->stamp(); log->end_head(); JVMState* p = is_Unlock() ? (as_Unlock()->dbg_jvms()) : jvms(); - while (p != NULL) { + while (p != nullptr) { log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method())); p = p->caller(); } diff --git a/src/hotspot/share/opto/callnode.hpp b/src/hotspot/share/opto/callnode.hpp index 955d0deba26..90cf066b9e9 100644 --- a/src/hotspot/share/opto/callnode.hpp +++ b/src/hotspot/share/opto/callnode.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -247,7 +247,7 @@ public: int bci() const { return _bci; } bool should_reexecute() const { return _reexecute==Reexecute_True; } bool is_reexecute_undefined() const { return _reexecute==Reexecute_Undefined; } - bool has_method() const { return _method != NULL; } + bool has_method() const { return _method != nullptr; } ciMethod* method() const { assert(has_method(), ""); return _method; } JVMState* caller() const { return _caller; } SafePointNode* map() const { return _map; } @@ -335,13 +335,13 @@ protected: bool _has_ea_local_in_scope; // NoEscape or ArgEscape objects in JVM States void set_jvms(JVMState* s) { - assert(s != nullptr, "assign NULL value to _jvms"); + assert(s != nullptr, "assign null value to _jvms"); *(JVMState**)&_jvms = s; // override const attribute in the accessor } public: SafePointNode(uint edges, JVMState* jvms, - // A plain safepoint advertises no memory effects (NULL): - const TypePtr* adr_type = NULL) + // A plain safepoint advertises no memory effects (null): + const TypePtr* adr_type = nullptr) : MultiNode( edges ), _jvms(jvms), _adr_type(adr_type), @@ -353,7 +353,7 @@ public: JVMState* jvms() const { return _jvms; } virtual bool needs_deep_clone_jvms(Compile* C) { return false; } void clone_jvms(Compile* C) { - if (jvms() != NULL) { + if (jvms() != nullptr) { if (needs_deep_clone_jvms(C)) { set_jvms(jvms()->clone_deep(C)); jvms()->set_map_deep(this); @@ -434,7 +434,7 @@ public: } // The parser marks useless maps as dead when it's done with them: - bool is_killed() { return in(TypeFunc::Control) == NULL; } + bool is_killed() { return in(TypeFunc::Control) == nullptr; } // Exception states bubbling out of subgraphs such as inlined calls // are recorded here. (There might be more than one, hence the "next".) @@ -442,7 +442,7 @@ public: // for JVM states during parsing, intrinsic expansion, etc. 
SafePointNode* next_exception() const; void set_next_exception(SafePointNode* n); - bool has_exceptions() const { return next_exception() != NULL; } + bool has_exceptions() const { return next_exception() != nullptr; } // Helper methods to operate on replaced nodes ReplacedNodes replaced_nodes() const { @@ -531,7 +531,7 @@ public: virtual uint match_edge(uint idx) const; uint first_index(JVMState* jvms) const { - assert(jvms != NULL, "missed JVMS"); + assert(jvms != nullptr, "missed JVMS"); return jvms->scloff() + _first_index; } uint n_fields() const { return _n_fields; } @@ -588,15 +588,15 @@ public: address _entry_point; // Address of method being called float _cnt; // Estimate of number of times called CallGenerator* _generator; // corresponding CallGenerator for some late inline calls - const char* _name; // Printable name, if _method is NULL + const char* _name; // Printable name, if _method is null CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr) : SafePointNode(tf->domain()->cnt(), jvms, adr_type), _tf(tf), _entry_point(addr), _cnt(COUNT_UNKNOWN), - _generator(NULL), - _name(NULL) + _generator(nullptr), + _name(nullptr) { init_class_id(Class_Call); } @@ -634,7 +634,7 @@ public: bool has_non_debug_use(Node* n); // Returns the unique CheckCastPP of a call // or result projection is there are several CheckCastPP - // or returns NULL if there is no one. + // or returns null if there is no one. Node* result_cast(); // Does this node returns pointer? 
bool returns_pointer() const { @@ -720,13 +720,13 @@ public: CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method) : CallJavaNode(tf, addr, method) { init_class_id(Class_CallStaticJava); - if (C->eliminate_boxing() && (method != NULL) && method->is_boxing_method()) { + if (C->eliminate_boxing() && (method != nullptr) && method->is_boxing_method()) { init_flags(Flag_is_macro); C->add_macro_node(this); } } CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type) - : CallJavaNode(tf, addr, NULL) { + : CallJavaNode(tf, addr, nullptr) { init_class_id(Class_CallStaticJava); // This node calls a runtime stub, which often has narrow memory effects. _adr_type = adr_type; @@ -738,7 +738,7 @@ public: static int extract_uncommon_trap_request(const Node* call); bool is_boxing_method() const { - return is_macro() && (method() != NULL) && method()->is_boxing_method(); + return is_macro() && (method() != nullptr) && method()->is_boxing_method(); } // Late inlining modifies the JVMState, so we need to deep clone it // when the call node is cloned (because it is macro node). @@ -933,7 +933,7 @@ public: // Dig the klass operand out of a (possible) allocation site. static Node* Ideal_klass(Node* ptr, PhaseTransform* phase) { AllocateNode* allo = Ideal_allocation(ptr, phase); - return (allo == NULL) ? NULL : allo->in(KlassNode); + return (allo == nullptr) ? nullptr : allo->in(KlassNode); } // Conservatively small estimate of offset of first non-header byte. @@ -954,13 +954,13 @@ public: // Return true if allocation doesn't escape thread, its escape state // needs be noEscape or ArgEscape. InitializeNode._does_not_escape // is true when its allocation's escape state is noEscape or - // ArgEscape. In case allocation's InitializeNode is NULL, check + // ArgEscape. In case allocation's InitializeNode is null, check // AlllocateNode._is_non_escaping flag. 
// AlllocateNode._is_non_escaping is true when its escape state is // noEscape. bool does_not_escape_thread() { - InitializeNode* init = NULL; - return _is_non_escaping || (((init = initialization()) != NULL) && init->does_not_escape()); + InitializeNode* init = nullptr; + return _is_non_escaping || (((init = initialization()) != nullptr) && init->does_not_escape()); } // If object doesn't escape in <.init> method and there is memory barrier @@ -1003,8 +1003,8 @@ public: // Return null if no allocation is recognized. static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) { AllocateNode* allo = Ideal_allocation(ptr, phase); - return (allo == NULL || !allo->is_AllocateArray()) - ? NULL : allo->as_AllocateArray(); + return (allo == nullptr || !allo->is_AllocateArray()) + ? nullptr : allo->as_AllocateArray(); } }; @@ -1041,11 +1041,11 @@ protected: public: AbstractLockNode(const TypeFunc *tf) - : CallNode(tf, NULL, TypeRawPtr::BOTTOM), + : CallNode(tf, nullptr, TypeRawPtr::BOTTOM), _kind(Regular) { #ifndef PRODUCT - _counter = NULL; + _counter = nullptr; #endif } virtual int Opcode() const = 0; @@ -1064,7 +1064,7 @@ public: bool is_nested() const { return (_kind == Nested); } const char * kind_as_string() const; - void log_lock_optimization(Compile* c, const char * tag, Node* bad_lock = NULL) const; + void log_lock_optimization(Compile* c, const char * tag, Node* bad_lock = nullptr) const; void set_non_esc_obj() { _kind = NonEscObj; set_eliminated_lock_counter(); } void set_coarsened() { _kind = Coarsened; set_eliminated_lock_counter(); } @@ -1138,7 +1138,7 @@ public: virtual uint size_of() const; // Size is bigger UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) #ifdef ASSERT - , _dbg_jvms(NULL) + , _dbg_jvms(nullptr) #endif { init_class_id(Class_Unlock); @@ -1154,7 +1154,7 @@ public: } JVMState* dbg_jvms() const { return _dbg_jvms; } #else - JVMState* dbg_jvms() const { return NULL; } + JVMState* dbg_jvms() const { 
return nullptr; } #endif }; #endif // SHARE_OPTO_CALLNODE_HPP diff --git a/src/hotspot/share/opto/castnode.cpp b/src/hotspot/share/opto/castnode.cpp index 828de9e69c1..d0d05b5bb98 100644 --- a/src/hotspot/share/opto/castnode.cpp +++ b/src/hotspot/share/opto/castnode.cpp @@ -37,7 +37,7 @@ // If input is already higher or equal to cast type, then this is an identity. Node* ConstraintCastNode::Identity(PhaseGVN* phase) { Node* dom = dominating_cast(phase, phase); - if (dom != NULL) { + if (dom != nullptr) { return dom; } if (_dependency != RegularDependency) { @@ -97,7 +97,7 @@ const Type* ConstraintCastNode::Value(PhaseGVN* phase) const { // Return a node which is more "ideal" than the current node. Strip out // control copies Node *ConstraintCastNode::Ideal(PhaseGVN *phase, bool can_reshape) { - return (in(0) && remove_dead_region(phase, can_reshape)) ? this : NULL; + return (in(0) && remove_dead_region(phase, can_reshape)) ? this : nullptr; } bool ConstraintCastNode::cmp(const Node &n) const { @@ -144,7 +144,7 @@ Node* ConstraintCastNode::make_cast(int opcode, Node* c, Node *n, const Type *t, default: fatal("Bad opcode %d", opcode); } - return NULL; + return nullptr; } Node* ConstraintCastNode::make(Node* c, Node *n, const Type *t, DependencyType dependency, BasicType bt) { @@ -158,34 +158,34 @@ Node* ConstraintCastNode::make(Node* c, Node *n, const Type *t, DependencyType d default: fatal("Bad basic type %s", type2name(bt)); } - return NULL; + return nullptr; } TypeNode* ConstraintCastNode::dominating_cast(PhaseGVN* gvn, PhaseTransform* pt) const { if (_dependency == UnconditionalDependency) { - return NULL; + return nullptr; } Node* val = in(1); Node* ctl = in(0); int opc = Opcode(); - if (ctl == NULL) { - return NULL; + if (ctl == nullptr) { + return nullptr; } // Range check CastIIs may all end up under a single range check and // in that case only the narrower CastII would be kept by the code // below which would be incorrect. 
if (is_CastII() && as_CastII()->has_range_check()) { - return NULL; + return nullptr; } - if (type()->isa_rawptr() && (gvn->type_or_null(val) == NULL || gvn->type(val)->isa_oopptr())) { - return NULL; + if (type()->isa_rawptr() && (gvn->type_or_null(val) == nullptr || gvn->type(val)->isa_oopptr())) { + return nullptr; } for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) { Node* u = val->fast_out(i); if (u != this && u->outcnt() > 0 && u->Opcode() == opc && - u->in(0) != NULL && + u->in(0) != nullptr && u->bottom_type()->higher_equal(type())) { if (pt->is_dominator(u->in(0), ctl)) { return u->as_Type(); @@ -199,7 +199,7 @@ TypeNode* ConstraintCastNode::dominating_cast(PhaseGVN* gvn, PhaseTransform* pt) } } } - return NULL; + return nullptr; } #ifndef PRODUCT @@ -245,7 +245,7 @@ const Type* CastIINode::Value(PhaseGVN* phase) const { // CastIINode // if (carry_dependency()) { - if (in(0) != NULL && in(0)->in(0) != NULL && in(0)->in(0)->is_If()) { + if (in(0) != nullptr && in(0)->in(0) != nullptr && in(0)->in(0)->is_If()) { assert(in(0)->is_IfFalse() || in(0)->is_IfTrue(), "should be If proj"); Node* proj = in(0); if (proj->in(0)->in(1)->is_Bool()) { @@ -307,7 +307,7 @@ const Type* CastIINode::Value(PhaseGVN* phase) const { static Node* find_or_make_integer_cast(PhaseIterGVN* igvn, Node* parent, Node* control, const TypeInteger* type, ConstraintCastNode::DependencyType dependency, BasicType bt) { Node* n = ConstraintCastNode::make(control, parent, type, dependency, bt); Node* existing = igvn->hash_find_insert(n); - if (existing != NULL) { + if (existing != nullptr) { n->destruct(igvn); return existing; } @@ -316,7 +316,7 @@ static Node* find_or_make_integer_cast(PhaseIterGVN* igvn, Node* parent, Node* c Node *CastIINode::Ideal(PhaseGVN *phase, bool can_reshape) { Node* progress = ConstraintCastNode::Ideal(phase, can_reshape); - if (progress != NULL) { + if (progress != nullptr) { return progress; } if (can_reshape && !_range_check_dependency && 
!phase->C->post_loop_opts_phase()) { @@ -326,7 +326,7 @@ Node *CastIINode::Ideal(PhaseGVN *phase, bool can_reshape) { if (!_range_check_dependency) { return optimize_integer_cast(phase, T_INT); } - return NULL; + return nullptr; } Node* CastIINode::Identity(PhaseGVN* phase) { @@ -373,7 +373,7 @@ const Type* CastLLNode::Value(PhaseGVN* phase) const { Node* CastLLNode::Ideal(PhaseGVN* phase, bool can_reshape) { Node* progress = ConstraintCastNode::Ideal(phase, can_reshape); - if (progress != NULL) { + if (progress != nullptr) { return progress; } if (!phase->C->post_loop_opts_phase()) { @@ -383,7 +383,7 @@ Node* CastLLNode::Ideal(PhaseGVN* phase, bool can_reshape) { // transform (CastLL (ConvI2L ..)) into (ConvI2L (CastII ..)) if the type of the CastLL is narrower than the type of // the ConvI2L. Node* in1 = in(1); - if (in1 != NULL && in1->Opcode() == Op_ConvI2L) { + if (in1 != nullptr && in1->Opcode() == Op_ConvI2L) { const Type* t = Value(phase); const Type* t_in = phase->type(in1); if (t != Type::TOP && t_in != Type::TOP) { @@ -418,7 +418,7 @@ const Type* CheckCastPPNode::Value(PhaseGVN* phase) const { const TypePtr *in_type = inn->isa_ptr(); const TypePtr *my_type = _type->isa_ptr(); const Type *result = _type; - if (in_type != NULL && my_type != NULL) { + if (in_type != nullptr && my_type != nullptr) { TypePtr::PTR in_ptr = in_type->ptr(); if (in_ptr == TypePtr::Null) { result = in_type; @@ -492,7 +492,7 @@ Node *CastX2PNode::Ideal(PhaseGVN *phase, bool can_reshape) { } break; } - return NULL; + return nullptr; } //------------------------------Identity--------------------------------------- @@ -514,7 +514,7 @@ const Type* CastP2XNode::Value(PhaseGVN* phase) const { } Node *CastP2XNode::Ideal(PhaseGVN *phase, bool can_reshape) { - return (in(0) && remove_dead_region(phase, can_reshape)) ? this : NULL; + return (in(0) && remove_dead_region(phase, can_reshape)) ? 
this : nullptr; } //------------------------------Identity--------------------------------------- @@ -524,7 +524,7 @@ Node* CastP2XNode::Identity(PhaseGVN* phase) { } Node* ConstraintCastNode::make_cast_for_type(Node* c, Node* in, const Type* type, DependencyType dependency) { - Node* cast= NULL; + Node* cast= nullptr; if (type->isa_int()) { cast = make_cast(Op_CastII, c, in, type, dependency); } else if (type->isa_long()) { @@ -545,15 +545,15 @@ Node* ConstraintCastNode::optimize_integer_cast(PhaseGVN* phase, BasicType bt) { PhaseIterGVN *igvn = phase->is_IterGVN(); const TypeInteger* this_type = this->type()->is_integer(bt); Node* z = in(1); - const TypeInteger* rx = NULL; - const TypeInteger* ry = NULL; + const TypeInteger* rx = nullptr; + const TypeInteger* ry = nullptr; // Similar to ConvI2LNode::Ideal() for the same reasons if (Compile::push_thru_add(phase, z, this_type, rx, ry, bt, bt)) { - if (igvn == NULL) { + if (igvn == nullptr) { // Postpone this optimization to iterative GVN, where we can handle deep // AddI chains without an exponential number of recursive Ideal() calls. 
phase->record_for_igvn(this); - return NULL; + return nullptr; } int op = z->Opcode(); Node* x = z->in(1); @@ -567,9 +567,9 @@ Node* ConstraintCastNode::optimize_integer_cast(PhaseGVN* phase, BasicType bt) { assert(op == Op_Sub(bt), ""); return SubNode::make(cx, cy, bt); } - return NULL; + return nullptr; } - return NULL; + return nullptr; } const Type* ConstraintCastNode::widen_type(const PhaseGVN* phase, const Type* res, BasicType bt) const { @@ -578,7 +578,7 @@ const Type* ConstraintCastNode::widen_type(const PhaseGVN* phase, const Type* re } const TypeInteger* this_type = res->is_integer(bt); const TypeInteger* in_type = phase->type(in(1))->isa_integer(bt); - if (in_type != NULL && + if (in_type != nullptr && (in_type->lo_as_long() != this_type->lo_as_long() || in_type->hi_as_long() != this_type->hi_as_long())) { jlong lo1 = this_type->lo_as_long(); diff --git a/src/hotspot/share/opto/castnode.hpp b/src/hotspot/share/opto/castnode.hpp index b6731644b7d..cdab33dd603 100644 --- a/src/hotspot/share/opto/castnode.hpp +++ b/src/hotspot/share/opto/castnode.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -190,7 +190,7 @@ class CheckCastPPNode: public ConstraintCastNode { // convert a machine-pointer-sized integer to a raw pointer class CastX2PNode : public Node { public: - CastX2PNode( Node *n ) : Node(NULL, n) {} + CastX2PNode( Node *n ) : Node(nullptr, n) {} virtual int Opcode() const; virtual const Type* Value(PhaseGVN* phase) const; virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); diff --git a/src/hotspot/share/opto/cfgnode.cpp b/src/hotspot/share/opto/cfgnode.cpp index cbbb6a58f35..20e65d18844 100644 --- a/src/hotspot/share/opto/cfgnode.cpp +++ b/src/hotspot/share/opto/cfgnode.cpp @@ -77,8 +77,8 @@ Node* RegionNode::Identity(PhaseGVN* phase) { // hard to do if there is stuff that has to happen static Node *merge_region(RegionNode *region, PhaseGVN *phase) { if( region->Opcode() != Op_Region ) // Do not do to LoopNodes - return NULL; - Node *progress = NULL; // Progress flag + return nullptr; + Node *progress = nullptr; // Progress flag PhaseIterGVN *igvn = phase->is_IterGVN(); uint rreq = region->req(); @@ -91,7 +91,7 @@ static Node *merge_region(RegionNode *region, PhaseGVN *phase) { assert(!r->as_Region()->has_phi(), "no phi users"); if( !progress ) { // No progress if (region->has_phi()) { - return NULL; // Only flatten if no Phi users + return nullptr; // Only flatten if no Phi users // igvn->hash_delete( phi ); } igvn->hash_delete( region ); @@ -126,7 +126,7 @@ static Node *merge_region(RegionNode *region, PhaseGVN *phase) { //--------------------------------has_phi-------------------------------------- -// Helper function: Return any PhiNode that uses this region or NULL +// Helper function: Return any PhiNode that uses this region or null PhiNode* RegionNode::has_phi() const { for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) { Node* phi = fast_out(i); @@ -136,23 +136,23 @@ PhiNode* RegionNode::has_phi() const { } } - return NULL; + return nullptr; } 
//-----------------------------has_unique_phi---------------------------------- -// Helper function: Return the only PhiNode that uses this region or NULL +// Helper function: Return the only PhiNode that uses this region or null PhiNode* RegionNode::has_unique_phi() const { // Check that only one use is a Phi - PhiNode* only_phi = NULL; + PhiNode* only_phi = nullptr; for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) { Node* phi = fast_out(i); if (phi->is_Phi()) { // Check for Phi users assert(phi->in(0) == (Node*)this, "phi uses region only via in(0)"); - if (only_phi == NULL) { + if (only_phi == nullptr) { only_phi = phi->as_Phi(); } else { - return NULL; // multiple phis + return nullptr; // multiple phis } } } @@ -165,9 +165,9 @@ PhiNode* RegionNode::has_unique_phi() const { // Helper function for RegionNode's identification of FP clipping // Check inputs to the Phi static bool check_phi_clipping( PhiNode *phi, ConNode * &min, uint &min_idx, ConNode * &max, uint &max_idx, Node * &val, uint &val_idx ) { - min = NULL; - max = NULL; - val = NULL; + min = nullptr; + max = nullptr; + val = nullptr; min_idx = 0; max_idx = 0; val_idx = 0; @@ -179,11 +179,11 @@ static bool check_phi_clipping( PhiNode *phi, ConNode * &min, uint &min_idx, Con switch( opcode ) { case Op_ConI: { - if( min == NULL ) { - min = n->Opcode() == Op_ConI ? (ConNode*)n : NULL; + if( min == nullptr ) { + min = n->Opcode() == Op_ConI ? (ConNode*)n : nullptr; min_idx = j; } else { - max = n->Opcode() == Op_ConI ? (ConNode*)n : NULL; + max = n->Opcode() == Op_ConI ? 
(ConNode*)n : nullptr; max_idx = j; if( min->get_int() > max->get_int() ) { // Swap min and max @@ -220,8 +220,8 @@ static bool check_phi_clipping( PhiNode *phi, ConNode * &min, uint &min_idx, Con // RegionNode_inputs // static bool check_if_clipping( const RegionNode *region, IfNode * &bot_if, IfNode * &top_if ) { - top_if = NULL; - bot_if = NULL; + top_if = nullptr; + bot_if = nullptr; // Check control structure above RegionNode for (if ( if ) ) Node *in1 = region->in(1); @@ -233,14 +233,14 @@ static bool check_if_clipping( const RegionNode *region, IfNode * &bot_if, IfNod Node *in20 = in2->in(0); Node *in30 = in3->in(0); // Check that #1 and #2 are ifTrue and ifFalse from same If - if( in10 != NULL && in10->is_If() && - in20 != NULL && in20->is_If() && - in30 != NULL && in30->is_If() && in10 == in20 && + if( in10 != nullptr && in10->is_If() && + in20 != nullptr && in20->is_If() && + in30 != nullptr && in30->is_If() && in10 == in20 && (in1->Opcode() != in2->Opcode()) ) { Node *in100 = in10->in(0); - Node *in1000 = (in100 != NULL && in100->is_Proj()) ? in100->in(0) : NULL; + Node *in1000 = (in100 != nullptr && in100->is_Proj()) ? 
in100->in(0) : nullptr; // Check that control for in10 comes from other branch of IF from in3 - if( in1000 != NULL && in1000->is_If() && + if( in1000 != nullptr && in1000->is_If() && in30 == in1000 && (in3->Opcode() != in100->Opcode()) ) { // Control pattern checks top_if = (IfNode*)in1000; @@ -249,7 +249,7 @@ static bool check_if_clipping( const RegionNode *region, IfNode * &bot_if, IfNod } } - return (top_if != NULL); + return (top_if != nullptr); } @@ -257,7 +257,7 @@ static bool check_if_clipping( const RegionNode *region, IfNode * &bot_if, IfNod // Helper function for RegionNode's identification of FP clipping // Verify that the value input to the phi comes from "ConvF2I; LShift; RShift" static bool check_convf2i_clipping( PhiNode *phi, uint idx, ConvF2INode * &convf2i, Node *min, Node *max) { - convf2i = NULL; + convf2i = nullptr; // Check for the RShiftNode Node *rshift = phi->in(idx); @@ -318,7 +318,7 @@ static bool check_compare_clipping( bool less_than, IfNode *iff, ConNode *limit, // Check if the RegionNode is part of an unsafe loop and unreachable from root. 
bool RegionNode::is_unreachable_region(const PhaseGVN* phase) { Node* top = phase->C->top(); - assert(req() == 2 || (req() == 3 && in(1) != NULL && in(2) == top), "sanity check arguments"); + assert(req() == 2 || (req() == 3 && in(1) != nullptr && in(2) == top), "sanity check arguments"); if (_is_unreachable_region) { // Return cached result from previous evaluation which should still be valid assert(is_unreachable_from_root(phase), "walk the graph again and check if its indeed unreachable"); @@ -342,7 +342,7 @@ bool RegionNode::is_possible_unsafe_loop(const PhaseGVN* phase) const { uint i; for (i = 0; i < max; i++) { Node* n = raw_out(i); - if (n != NULL && n->is_Phi()) { + if (n != nullptr && n->is_Phi()) { PhiNode* phi = n->as_Phi(); assert(phi->in(0) == this, "sanity check phi"); if (phi->outcnt() == 0) { @@ -352,7 +352,7 @@ bool RegionNode::is_possible_unsafe_loop(const PhaseGVN* phase) const { Node* u = phi->raw_out(0); // Skip if only one use is an other Phi or Call or Uncommon trap. // It is safe to consider this case as fallthrough. - if (u != NULL && (u->is_Phi() || u->is_CFG())) { + if (u != nullptr && (u->is_Phi() || u->is_CFG())) { continue; } } @@ -382,7 +382,7 @@ bool RegionNode::is_unreachable_from_root(const PhaseGVN* phase) const { uint max = n->outcnt(); for (uint i = 0; i < max; i++) { Node* m = n->raw_out(i); - if (m != NULL && m->is_CFG()) { + if (m != nullptr && m->is_CFG()) { if (m == this) { return false; // We reached the Region node - it is not dead. } @@ -469,7 +469,7 @@ bool RegionNode::try_clean_mem_phi(PhaseGVN *phase) { PhiNode* phi = has_unique_phi(); if (phi && phi->type() == Type::MEMORY && req() == 3 && phi->is_diamond_phi(true)) { - MergeMemNode* m = NULL; + MergeMemNode* m = nullptr; assert(phi->req() == 3, "same as region"); for (uint i = 1; i < 3; ++i) { Node *mem = phi->in(i); @@ -494,14 +494,14 @@ bool RegionNode::try_clean_mem_phi(PhaseGVN *phase) { // Return a node which is more "ideal" than the current node. 
Must preserve // the CFG, but we can still strip out dead paths. Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) { - if( !can_reshape && !in(0) ) return NULL; // Already degraded to a Copy + if( !can_reshape && !in(0) ) return nullptr; // Already degraded to a Copy assert(!in(0) || !in(0)->is_Root(), "not a specially hidden merge"); // Check for RegionNode with no Phi users and both inputs come from either // arm of the same IF. If found, then the control-flow split is useless. bool has_phis = false; if (can_reshape) { // Need DU info to check for Phi users - has_phis = (has_phi() != NULL); // Cache result + has_phis = (has_phi() != nullptr); // Cache result if (has_phis && try_clean_mem_phi(phase)) { has_phis = false; } @@ -519,7 +519,7 @@ Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) { // will be eliminated if dead. phase->is_IterGVN()->add_users_to_worklist(iff); set_req(i, iff->in(0));// Skip around the useless IF diamond - set_req(j, NULL); + set_req(j, nullptr); return this; // Record progress } } @@ -527,7 +527,7 @@ Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) { } } - // Remove TOP or NULL input paths. If only 1 input path remains, this Region + // Remove TOP or null input paths. If only 1 input path remains, this Region // degrades to a copy. bool add_to_worklist = true; bool modified = false; @@ -538,7 +538,7 @@ Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) { // For all inputs... 
for( uint i=1; iis_Region() && n->as_Region()->is_copy() ) { set_req(i, n->nonnull_req()); @@ -556,7 +556,7 @@ Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) { } } if( phase->type(n) == Type::TOP ) { - set_req_X(i, NULL, phase); // Ignore TOP inputs + set_req_X(i, nullptr, phase); // Ignore TOP inputs modified = true; found_top = true; i--; @@ -578,7 +578,7 @@ Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) { if( n->req() != req() && n->is_Phi() ) { assert( n->in(0) == this, "" ); igvn->hash_delete(n); // Yank from hash before hacking edges - n->set_req_X(i,NULL,igvn);// Correct DU info + n->set_req_X(i,nullptr,igvn);// Correct DU info n->del_req(i); // Yank path from Phis if( max != outcnt() ) { progress = true; @@ -624,10 +624,10 @@ Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) { } if( cnt <= 1 ) { // Only 1 path in? - set_req(0, NULL); // Null control input for region copy + set_req(0, nullptr); // Null control input for region copy if( cnt == 0 && !can_reshape) { // Parse phase - leave the node as it is. - // No inputs or all inputs are NULL. - return NULL; + // No inputs or all inputs are null. + return nullptr; } else if (can_reshape) { // Optimization phase - remove the node PhaseIterGVN *igvn = phase->is_IterGVN(); // Strip mined (inner) loop is going away, remove outer loop. 
@@ -635,7 +635,7 @@ Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) { as_Loop()->is_strip_mined()) { Node* outer_sfpt = as_CountedLoop()->outer_safepoint(); Node* outer_out = as_CountedLoop()->outer_loop_exit(); - if (outer_sfpt != NULL && outer_out != NULL) { + if (outer_sfpt != nullptr && outer_out != nullptr) { Node* in = outer_sfpt->in(0); igvn->replace_node(outer_out, in); LoopNode* outer = as_CountedLoop()->outer_loop(); @@ -644,7 +644,7 @@ Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) { } if (is_CountedLoop()) { Node* opaq = as_CountedLoop()->is_canonical_loop_entry(); - if (opaq != NULL) { + if (opaq != nullptr) { // This is not a loop anymore. No need to keep the Opaque1 node on the test that guards the loop as it won't be // subject to further loop opts. assert(opaq->Opcode() == Op_OpaqueZeroTripGuard, ""); @@ -662,7 +662,7 @@ Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) { } else { // The fallthrough case since we already checked dead loops above. 
parent_ctrl = in(1); - assert(parent_ctrl != NULL, "Region is a copy of some non-null control"); + assert(parent_ctrl != nullptr, "Region is a copy of some non-null control"); assert(parent_ctrl != this, "Close dead loop"); } if (add_to_worklist) { @@ -685,7 +685,7 @@ Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) { assert( n->req() == 1, "No data inputs expected" ); in = parent_ctrl; // replaced by top } else { - assert( n->req() == 2 && n->in(1) != NULL, "Only one data input expected" ); + assert( n->req() == 2 && n->in(1) != nullptr, "Only one data input expected" ); in = n->in(1); // replaced by unique input if( n->as_Phi()->is_unsafe_data_reference(in) ) in = phase->C->top(); // replaced by top @@ -711,7 +711,7 @@ Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) { } // Remove the RegionNode itself from DefUse info igvn->remove_dead_node(this); - return NULL; + return nullptr; } return this; // Record progress } @@ -720,14 +720,14 @@ Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) { // If a Region flows into a Region, merge into one big happy merge. 
if (can_reshape) { Node *m = merge_region(this, phase); - if (m != NULL) return m; + if (m != nullptr) return m; } // Check if this region is the root of a clipping idiom on floats if( ConvertFloat2IntClipping && can_reshape && req() == 4 ) { // Check that only one use is a Phi and that it simplifies to two constants + PhiNode* phi = has_unique_phi(); - if (phi != NULL) { // One Phi user + if (phi != nullptr) { // One Phi user // Check inputs to the Phi ConNode *min; ConNode *max; @@ -740,13 +740,13 @@ Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) { IfNode *bot_if; if( check_if_clipping( this, bot_if, top_if ) ) { // Control pattern checks, now verify compares - Node *top_in = NULL; // value being compared against - Node *bot_in = NULL; + Node *top_in = nullptr; // value being compared against + Node *bot_in = nullptr; if( check_compare_clipping( true, bot_if, min, bot_in ) && check_compare_clipping( false, top_if, max, top_in ) ) { if( bot_in == top_in ) { PhaseIterGVN *gvn = phase->is_IterGVN(); - assert( gvn != NULL, "Only had DefUse info in IterGVN"); + assert( gvn != nullptr, "Only had DefUse info in IterGVN"); // Only remaining check is that bot_in == top_in == (Phi's val + mods) // Check for the ConvF2INode @@ -788,7 +788,7 @@ Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) { modified |= optimize_trichotomy(phase->is_IterGVN()); } - return modified ? this : NULL; + return modified ? this : nullptr; } //--------------------------remove_unreachable_subgraph---------------------- @@ -876,19 +876,19 @@ void RegionNode::remove_unreachable_subgraph(PhaseIterGVN* igvn) { // The method returns true if 'this' is modified and false otherwise. 
bool RegionNode::optimize_trichotomy(PhaseIterGVN* igvn) { int idx1 = 1, idx2 = 2; - Node* region = NULL; - if (req() == 3 && in(1) != NULL && in(2) != NULL) { + Node* region = nullptr; + if (req() == 3 && in(1) != nullptr && in(2) != nullptr) { // Shape 1: Check if one of the inputs is a region that merges two control // inputs and has no other users (especially no Phi users). region = in(1)->isa_Region() ? in(1) : in(2)->isa_Region(); - if (region == NULL || region->outcnt() != 2 || region->req() != 3) { + if (region == nullptr || region->outcnt() != 2 || region->req() != 3) { return false; // No suitable region input found } } else if (req() == 4) { // Shape 2: Check if two control inputs map to the same value of the unique phi // user and treat these as if they would come from another region (shape (1)). PhiNode* phi = has_unique_phi(); - if (phi == NULL) { + if (phi == nullptr) { return false; // No unique phi user } if (phi->in(idx1) != phi->in(idx2)) { @@ -903,22 +903,22 @@ bool RegionNode::optimize_trichotomy(PhaseIterGVN* igvn) { assert(phi->in(idx1) == phi->in(idx2), "must be"); // Region is merging same value region = this; } - if (region == NULL || region->in(idx1) == NULL || region->in(idx2) == NULL) { + if (region == nullptr || region->in(idx1) == nullptr || region->in(idx2) == nullptr) { return false; // Region does not merge two control inputs } // At this point we know that region->in(idx1) and region->(idx2) map to the same // value and control flow. Now search for ifs that feed into these region inputs. 
ProjNode* proj1 = region->in(idx1)->isa_Proj(); ProjNode* proj2 = region->in(idx2)->isa_Proj(); - if (proj1 == NULL || proj1->outcnt() != 1 || - proj2 == NULL || proj2->outcnt() != 1) { + if (proj1 == nullptr || proj1->outcnt() != 1 || + proj2 == nullptr || proj2->outcnt() != 1) { return false; // No projection inputs with region as unique user found } assert(proj1 != proj2, "should be different projections"); IfNode* iff1 = proj1->in(0)->isa_If(); IfNode* iff2 = proj2->in(0)->isa_If(); - if (iff1 == NULL || iff1->outcnt() != 2 || - iff2 == NULL || iff2->outcnt() != 2) { + if (iff1 == nullptr || iff1->outcnt() != 2 || + iff2 == nullptr || iff2->outcnt() != 2) { return false; // No ifs found } if (iff1 == iff2) { @@ -929,7 +929,7 @@ bool RegionNode::optimize_trichotomy(PhaseIterGVN* igvn) { } BoolNode* bol1 = iff1->in(1)->isa_Bool(); BoolNode* bol2 = iff2->in(1)->isa_Bool(); - if (bol1 == NULL || bol2 == NULL) { + if (bol1 == nullptr || bol2 == nullptr) { return false; // No bool inputs found } Node* cmp1 = bol1->in(1); @@ -1015,7 +1015,7 @@ Node *Node::nonnull_req() const { if( in(i) ) return in(i); ShouldNotReachHere(); - return NULL; + return nullptr; } @@ -1030,7 +1030,7 @@ bool PhiNode::cmp( const Node &n ) const { } static inline const TypePtr* flatten_phi_adr_type(const TypePtr* at) { - if (at == NULL || at == TypePtr::BOTTOM) return at; + if (at == nullptr || at == TypePtr::BOTTOM) return at; return Compile::current()->alias_type(at)->adr_type(); } @@ -1042,20 +1042,20 @@ PhiNode* PhiNode::make(Node* r, Node* x, const Type *t, const TypePtr* at) { PhiNode* p = new PhiNode(r, t, at); for (uint j = 1; j < preds; j++) { // Fill in all inputs, except those which the region does not yet have - if (r->in(j) != NULL) + if (r->in(j) != nullptr) p->init_req(j, x); } return p; } PhiNode* PhiNode::make(Node* r, Node* x) { const Type* t = x->bottom_type(); - const TypePtr* at = NULL; + const TypePtr* at = nullptr; if (t == Type::MEMORY) at = 
flatten_phi_adr_type(x->adr_type()); return make(r, x, t, at); } PhiNode* PhiNode::make_blank(Node* r, Node* x) { const Type* t = x->bottom_type(); - const TypePtr* at = NULL; + const TypePtr* at = nullptr; if (t == Type::MEMORY) at = flatten_phi_adr_type(x->adr_type()); return new PhiNode(r, t, at); } @@ -1078,7 +1078,7 @@ PhiNode* PhiNode::slice_memory(const TypePtr* adr_type) const { // Split out an instance type from a bottom phi. PhiNode* PhiNode::split_out_instance(const TypePtr* at, PhaseIterGVN *igvn) const { const TypeOopPtr *t_oop = at->isa_oopptr(); - assert(t_oop != NULL && t_oop->is_known_instance(), "expecting instance oopptr"); + assert(t_oop != nullptr && t_oop->is_known_instance(), "expecting instance oopptr"); // Check if an appropriate node already exists. Node *region = in(0); @@ -1107,13 +1107,13 @@ PhiNode* PhiNode::split_out_instance(const TypePtr* at, PhaseIterGVN *igvn) cons nphi = node_map[ophi->_idx]->as_Phi(); for (; i < ophi->req(); i++) { Node *in = ophi->in(i); - if (in == NULL || igvn->type(in) == Type::TOP) + if (in == nullptr || igvn->type(in) == Type::TOP) continue; - Node *opt = MemNode::optimize_simple_memory_chain(in, t_oop, NULL, igvn); - PhiNode *optphi = opt->is_Phi() ? opt->as_Phi() : NULL; - if (optphi != NULL && optphi->adr_type() == TypePtr::BOTTOM) { + Node *opt = MemNode::optimize_simple_memory_chain(in, t_oop, nullptr, igvn); + PhiNode *optphi = opt->is_Phi() ? 
opt->as_Phi() : nullptr; + if (optphi != nullptr && optphi->adr_type() == TypePtr::BOTTOM) { opt = node_map[optphi->_idx]; - if (opt == NULL) { + if (opt == nullptr) { stack.push(ophi, i); nphi = optphi->slice_memory(at); igvn->register_new_node_with_optimizer( nphi ); @@ -1144,7 +1144,7 @@ void PhiNode::verify_adr_type(VectorSet& visited, const TypePtr* at) const { // walk around for (uint i = 1; i < req(); i++) { Node* n = in(i); - if (n == NULL) continue; + if (n == nullptr) continue; const Node* np = in(i); if (np->is_Phi()) { np->as_Phi()->verify_adr_type(visited, at); @@ -1154,7 +1154,7 @@ void PhiNode::verify_adr_type(VectorSet& visited, const TypePtr* at) const { } else { const TypePtr* nat = flatten_phi_adr_type(n->adr_type()); // recheck phi/non-phi consistency at leaves: - assert((nat != NULL) == (at != NULL), ""); + assert((nat != nullptr) == (at != nullptr), ""); assert(nat == at || nat == TypePtr::BOTTOM, "adr_type must be consistent at leaves of phi nest"); } @@ -1166,7 +1166,7 @@ void PhiNode::verify_adr_type(bool recursive) const { if (VMError::is_error_reported()) return; // muzzle asserts when debugging an error if (Node::in_dump()) return; // muzzle asserts when printing - assert((_type == Type::MEMORY) == (_adr_type != NULL), "adr_type for memory phis only"); + assert((_type == Type::MEMORY) == (_adr_type != nullptr), "adr_type for memory phis only"); if (!VerifyAliases) return; // verify thoroughly only if requested @@ -1194,18 +1194,18 @@ const Type* PhiNode::Value(PhaseGVN* phase) const { return Type::TOP; // Check for trip-counted loop. If so, be smarter. - BaseCountedLoopNode* l = r->is_BaseCountedLoop() ? r->as_BaseCountedLoop() : NULL; + BaseCountedLoopNode* l = r->is_BaseCountedLoop() ? r->as_BaseCountedLoop() : nullptr; if (l && ((const Node*)l->phi() == this)) { // Trip counted loop! 
- // protect against init_trip() or limit() returning NULL + // protect against init_trip() or limit() returning null if (l->can_be_counted_loop(phase)) { const Node* init = l->init_trip(); const Node* limit = l->limit(); const Node* stride = l->stride(); - if (init != NULL && limit != NULL && stride != NULL) { + if (init != nullptr && limit != nullptr && stride != nullptr) { const TypeInteger* lo = phase->type(init)->isa_integer(l->bt()); const TypeInteger* hi = phase->type(limit)->isa_integer(l->bt()); const TypeInteger* stride_t = phase->type(stride)->isa_integer(l->bt()); - if (lo != NULL && hi != NULL && stride_t != NULL) { // Dying loops might have TOP here + if (lo != nullptr && hi != nullptr && stride_t != nullptr) { // Dying loops might have TOP here assert(stride_t->is_con(), "bad stride type"); BoolTest::mask bt = l->loopexit()->test_trip(); // If the loop exit condition is "not equal", the condition @@ -1259,8 +1259,8 @@ const Type* PhiNode::Value(PhaseGVN* phase) const { } } } - } else if (l->in(LoopNode::LoopBackControl) != NULL && - in(LoopNode::EntryControl) != NULL && + } else if (l->in(LoopNode::LoopBackControl) != nullptr && + in(LoopNode::EntryControl) != nullptr && phase->type(l->in(LoopNode::LoopBackControl)) == Type::TOP) { // During CCP, if we saturate the type of a counted loop's Phi // before the special code for counted loop above has a chance @@ -1387,19 +1387,19 @@ Node* PhiNode::is_cmove_id(PhaseTransform* phase, int true_path) { Node* tval = in(true_path); Node* fval = in(3-true_path); Node* id = CMoveNode::is_cmove_id(phase, cmp, tval, fval, b); - if (id == NULL) - return NULL; + if (id == nullptr) + return nullptr; // Either value might be a cast that depends on a branch of 'iff'. // Since the 'id' value will float free of the diamond, either // decast or return failure. 
Node* ctl = id->in(0); - if (ctl != NULL && ctl->in(0) == iff) { + if (ctl != nullptr && ctl->in(0) == iff) { if (id->is_ConstraintCast()) { return id->in(1); } else { // Don't know how to disentangle this value. - return NULL; + return nullptr; } } @@ -1418,7 +1418,7 @@ Node* PhiNode::Identity(PhaseGVN* phase) { // trivially, perhaps with a single cast. The unique_input method // does all this and more, by reducing such tributaries to 'this'.) Node* uin = unique_input(phase, false); - if (uin != NULL) { + if (uin != nullptr) { return uin; } @@ -1426,7 +1426,7 @@ Node* PhiNode::Identity(PhaseGVN* phase) { // Delay CMove'ing identity if Ideal has not had the chance to handle unsafe cases, yet. if (true_path != 0 && !(phase->is_IterGVN() && wait_for_region_igvn(phase))) { Node* id = is_cmove_id(phase, true_path); - if (id != NULL) { + if (id != nullptr) { return id; } } @@ -1444,11 +1444,11 @@ Node* PhiNode::Identity(PhaseGVN* phase) { u->req() == phi_len) { for (uint j = 1; j < phi_len; j++) { if (in(j) != u->in(j)) { - u = NULL; + u = nullptr; break; } } - if (u != NULL) { + if (u != nullptr) { return u; } } @@ -1474,21 +1474,21 @@ Node* PhiNode::unique_input(PhaseTransform* phase, bool uncast) { // phi / -- Node* r = in(0); // RegionNode - Node* input = NULL; // The unique direct input (maybe uncasted = ConstraintCasts removed) + Node* input = nullptr; // The unique direct input (maybe uncasted = ConstraintCasts removed) for (uint i = 1, cnt = req(); i < cnt; ++i) { Node* rc = r->in(i); - if (rc == NULL || phase->type(rc) == Type::TOP) + if (rc == nullptr || phase->type(rc) == Type::TOP) continue; // ignore unreachable control path Node* n = in(i); - if (n == NULL) + if (n == nullptr) continue; Node* un = n; if (uncast) { #ifdef ASSERT Node* m = un->uncast(); #endif - while (un != NULL && un->req() == 2 && un->is_ConstraintCast()) { + while (un != nullptr && un->req() == 2 && un->is_ConstraintCast()) { Node* next = un->in(1); if (phase->type(next)->isa_rawptr() && 
phase->type(un)->isa_oopptr()) { // risk exposing raw ptr at safepoint @@ -1498,17 +1498,17 @@ Node* PhiNode::unique_input(PhaseTransform* phase, bool uncast) { } assert(m == un || un->in(1) == m, "Only expected at CheckCastPP from allocation"); } - if (un == NULL || un == this || phase->type(un) == Type::TOP) { + if (un == nullptr || un == this || phase->type(un) == Type::TOP) { continue; // ignore if top, or in(i) and "this" are in a data cycle } // Check for a unique input (maybe uncasted) - if (input == NULL) { + if (input == nullptr) { input = un; } else if (input != un) { input = NodeSentinel; // no unique input } } - if (input == NULL) { + if (input == nullptr) { return phase->C->top(); // no inputs } @@ -1517,7 +1517,7 @@ Node* PhiNode::unique_input(PhaseTransform* phase, bool uncast) { } // Nothing. - return NULL; + return nullptr; } //------------------------------is_x2logic------------------------------------- @@ -1547,25 +1547,25 @@ static Node *is_x2logic( PhaseGVN *phase, PhiNode *phi, int true_path ) { if( tcmp != TypeInt::ZERO && tcmp != TypePtr::NULL_PTR ) { // Allow cmp-vs-1 if the other input is bounded by 0-1 if( !(tcmp == TypeInt::ONE && phase->type(cmp->in(1)) == TypeInt::BOOL) ) - return NULL; + return nullptr; flipped = 1-flipped; // Test is vs 1 instead of 0! 
} // Check for setting zero/one opposite expected if( tzero == TypeInt::ZERO ) { if( tone == TypeInt::ONE ) { - } else return NULL; + } else return nullptr; } else if( tzero == TypeInt::ONE ) { if( tone == TypeInt::ZERO ) { flipped = 1-flipped; - } else return NULL; - } else return NULL; + } else return nullptr; + } else return nullptr; // Check for boolean test backwards if( b->_test._test == BoolTest::ne ) { } else if( b->_test._test == BoolTest::eq ) { flipped = 1-flipped; - } else return NULL; + } else return nullptr; // Build int->bool conversion Node *n = new Conv2BNode(cmp->in(1)); @@ -1594,16 +1594,16 @@ static Node* is_cond_add(PhaseGVN *phase, PhiNode *phi, int true_path) { const CmpNode *cmp = (CmpNode*)b->in(1); // Make sure only merging this one phi here - if (region->has_unique_phi() != phi) return NULL; + if (region->has_unique_phi() != phi) return nullptr; // Make sure each arm of the diamond has exactly one output, which we assume // is the region. Otherwise, the control flow won't disappear. 
- if (region->in(1)->outcnt() != 1) return NULL; - if (region->in(2)->outcnt() != 1) return NULL; + if (region->in(1)->outcnt() != 1) return nullptr; + if (region->in(2)->outcnt() != 1) return nullptr; // Check for "(P < Q)" of type signed int - if (b->_test._test != BoolTest::lt) return NULL; - if (cmp->Opcode() != Op_CmpI) return NULL; + if (b->_test._test != BoolTest::lt) return nullptr; + if (cmp->Opcode() != Op_CmpI) return nullptr; Node *p = cmp->in(1); Node *q = cmp->in(2); @@ -1616,19 +1616,19 @@ static Node* is_cond_add(PhaseGVN *phase, PhiNode *phi, int true_path) { op != Op_AddP && op != Op_XorI && op != Op_OrI*/ ) - return NULL; + return nullptr; Node *x = n2; - Node *y = NULL; + Node *y = nullptr; if( x == n1->in(1) ) { y = n1->in(2); } else if( x == n1->in(2) ) { y = n1->in(1); - } else return NULL; + } else return nullptr; // Not so profitable if compare and add are constants if( q->is_Con() && phase->type(q) != TypeInt::ZERO && y->is_Con() ) - return NULL; + return nullptr; Node *cmplt = phase->transform( new CmpLTMaskNode(p,q) ); Node *j_and = phase->transform( new AndINode(cmplt,y) ); @@ -1659,7 +1659,7 @@ static Node* is_absolute( PhaseGVN *phase, PhiNode *phi_root, int true_path) { case BoolTest::le: cmp_zero_idx = 2; phi_x_idx = false_path; break; case BoolTest::gt: cmp_zero_idx = 2; phi_x_idx = true_path; break; case BoolTest::ge: cmp_zero_idx = 1; phi_x_idx = false_path; break; - default: return NULL; break; + default: return nullptr; break; } } else if (cmp->Opcode() == Op_CmpI || cmp->Opcode() == Op_CmpL) { switch (bol->_test._test) { @@ -1667,22 +1667,22 @@ static Node* is_absolute( PhaseGVN *phase, PhiNode *phi_root, int true_path) { case BoolTest::le: cmp_zero_idx = 2; phi_x_idx = false_path; break; case BoolTest::gt: case BoolTest::ge: cmp_zero_idx = 2; phi_x_idx = true_path; break; - default: return NULL; break; + default: return nullptr; break; } } // Test is next - const Type *tzero = NULL; + const Type *tzero = nullptr; switch 
(cmp->Opcode()) { case Op_CmpI: tzero = TypeInt::ZERO; break; // Integer ABS case Op_CmpL: tzero = TypeLong::ZERO; break; // Long ABS case Op_CmpF: tzero = TypeF::ZERO; break; // Float ABS case Op_CmpD: tzero = TypeD::ZERO; break; // Double ABS - default: return NULL; + default: return nullptr; } // Find zero input of compare; the other input is being abs'd - Node *x = NULL; + Node *x = nullptr; bool flip = false; if( phase->type(cmp->in(cmp_zero_idx)) == tzero ) { x = cmp->in(3 - cmp_zero_idx); @@ -1691,12 +1691,12 @@ static Node* is_absolute( PhaseGVN *phase, PhiNode *phi_root, int true_path) { x = cmp->in(cmp_zero_idx); flip = true; } else { - return NULL; + return nullptr; } // Next get the 2 pieces being selected, one is the original value // and the other is the negated value. - if( phi_root->in(phi_x_idx) != x ) return NULL; + if( phi_root->in(phi_x_idx) != x ) return nullptr; // Check other phi input for subtract node Node *sub = phi_root->in(3 - phi_x_idx); @@ -1705,7 +1705,7 @@ static Node* is_absolute( PhaseGVN *phase, PhiNode *phi_root, int true_path) { sub->Opcode() == Op_SubI || sub->Opcode() == Op_SubL; // Allow only Sub(0,X) and fail out for all others; Neg is not OK - if (!is_sub || phase->type(sub->in(1)) != tzero || sub->in(2) != x) return NULL; + if (!is_sub || phase->type(sub->in(1)) != tzero || sub->in(2) != x) return nullptr; if (tzero == TypeF::ZERO) { x = new AbsFNode(x); @@ -1727,7 +1727,7 @@ static Node* is_absolute( PhaseGVN *phase, PhiNode *phi_root, int true_path) { if (flip) { x = new SubLNode(sub->in(1), phase->transform(x)); } - } else return NULL; + } else return nullptr; return x; } @@ -1773,21 +1773,21 @@ static Node* split_flow_path(PhaseGVN *phase, PhiNode *phi) { } BasicType bt = phi->type()->basic_type(); if( bt == T_ILLEGAL || type2size[bt] <= 0 ) - return NULL; // Bail out on funny non-value stuff + return nullptr; // Bail out on funny non-value stuff if( phi->req() <= 3 ) // Need at least 2 matched inputs and a - return 
NULL; // third unequal input to be worth doing + return nullptr; // third unequal input to be worth doing // Scan for a constant uint i; for( i = 1; i < phi->req()-1; i++ ) { Node *n = phi->in(i); - if( !n ) return NULL; - if( phase->type(n) == Type::TOP ) return NULL; + if( !n ) return nullptr; + if( phase->type(n) == Type::TOP ) return nullptr; if( n->Opcode() == Op_ConP || n->Opcode() == Op_ConN || n->Opcode() == Op_ConNKlass ) break; } if( i >= phi->req() ) // Only split for constants - return NULL; + return nullptr; Node *val = phi->in(i); // Constant to split for uint hit = 0; // Number of times it occurs @@ -1795,19 +1795,19 @@ static Node* split_flow_path(PhaseGVN *phase, PhiNode *phi) { for( ; i < phi->req(); i++ ){ // Count occurrences of constant Node *n = phi->in(i); - if( !n ) return NULL; - if( phase->type(n) == Type::TOP ) return NULL; + if( !n ) return nullptr; + if( phase->type(n) == Type::TOP ) return nullptr; if( phi->in(i) == val ) { hit++; - if (PhaseIdealLoop::find_predicate(r->in(i)) != NULL) { - return NULL; // don't split loop entry path + if (PhaseIdealLoop::find_predicate(r->in(i)) != nullptr) { + return nullptr; // don't split loop entry path } } } if( hit <= 1 || // Make sure we find 2 or more hit == phi->req()-1 ) // and not ALL the same value - return NULL; + return nullptr; // Now start splitting out the flow paths that merge the same value. // Split first the RegionNode. @@ -1860,7 +1860,7 @@ PhiNode::LoopSafety PhiNode::simple_data_loop_check(Node *in) const { // Unsafe loop if the phi node references itself through an unsafe data node. // Exclude cases with null inputs or data nodes which could reference // itself (safe for dead loops). - if (in != NULL && !in->is_dead_loop_safe()) { + if (in != nullptr && !in->is_dead_loop_safe()) { // Check inputs of phi's inputs also. // It is much less expensive then full graph walk. 
uint cnt = in->req(); @@ -1869,13 +1869,13 @@ PhiNode::LoopSafety PhiNode::simple_data_loop_check(Node *in) const { Node* m = in->in(i); if (m == (Node*)this) return UnsafeLoop; // Unsafe loop - if (m != NULL && !m->is_dead_loop_safe()) { + if (m != nullptr && !m->is_dead_loop_safe()) { // Check the most common case (about 30% of all cases): // phi->Load/Store->AddP->(ConP ConP Con)/(Parm Parm Con). - Node *m1 = (m->is_AddP() && m->req() > 3) ? m->in(1) : NULL; + Node *m1 = (m->is_AddP() && m->req() > 3) ? m->in(1) : nullptr; if (m1 == (Node*)this) return UnsafeLoop; // Unsafe loop - if (m1 != NULL && m1 == m->in(2) && + if (m1 != nullptr && m1 == m->in(2) && m1->is_dead_loop_safe() && m->in(3)->is_Con()) { continue; // Safe case } @@ -1918,7 +1918,7 @@ bool PhiNode::is_unsafe_data_reference(Node *in) const { if (m == (Node*)this) { return true; // Data loop } - if (m != NULL && !m->is_dead_loop_safe()) { // Only look for unsafe cases. + if (m != nullptr && !m->is_dead_loop_safe()) { // Only look for unsafe cases. if (!visited.test_set(m->_idx)) nstack.push(m); } @@ -1937,19 +1937,19 @@ bool PhiNode::wait_for_region_igvn(PhaseGVN* phase) { for (uint j = 1; j < req(); j++) { Node* rc = r->in(j); Node* n = in(j); - if (rc != NULL && + if (rc != nullptr && rc->is_Proj()) { if (worklist.member(rc)) { delay = true; - } else if (rc->in(0) != NULL && + } else if (rc->in(0) != nullptr && rc->in(0)->is_If()) { if (worklist.member(rc->in(0))) { delay = true; - } else if (rc->in(0)->in(1) != NULL && + } else if (rc->in(0)->in(1) != nullptr && rc->in(0)->in(1)->is_Bool()) { if (worklist.member(rc->in(0)->in(1))) { delay = true; - } else if (rc->in(0)->in(1)->in(1) != NULL && + } else if (rc->in(0)->in(1)->in(1) != nullptr && rc->in(0)->in(1)->in(1)->is_Cmp()) { if (worklist.member(rc->in(0)->in(1)->in(1))) { delay = true; @@ -1995,19 +1995,19 @@ bool PhiNode::must_wait_for_region_in_irreducible_loop(PhaseGVN* phase) const { // the CFG, but we can still strip out dead paths. 
Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) { Node *r = in(0); // RegionNode - assert(r != NULL && r->is_Region(), "this phi must have a region"); - assert(r->in(0) == NULL || !r->in(0)->is_Root(), "not a specially hidden merge"); + assert(r != nullptr && r->is_Region(), "this phi must have a region"); + assert(r->in(0) == nullptr || !r->in(0)->is_Root(), "not a specially hidden merge"); // Note: During parsing, phis are often transformed before their regions. // This means we have to use type_or_null to defend against untyped regions. if( phase->type_or_null(r) == Type::TOP ) // Dead code? - return NULL; // No change + return nullptr; // No change Node *top = phase->C->top(); bool new_phi = (outcnt() == 0); // transforming new Phi // No change for igvn if new phi is not hooked if (new_phi && can_reshape) - return NULL; + return nullptr; if (must_wait_for_region_in_irreducible_loop(phase)) { return nullptr; @@ -2018,15 +2018,15 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) { // One: region is not loop - replace phi with this input. // Two: region is loop - replace phi with top since this data path is dead // and we need to break the dead data loop. - Node* progress = NULL; // Record if any progress made + Node* progress = nullptr; // Record if any progress made for( uint j = 1; j < req(); ++j ){ // For all paths in // Check unreachable control paths Node* rc = r->in(j); Node* n = in(j); // Get the input - if (rc == NULL || phase->type(rc) == Type::TOP) { + if (rc == nullptr || phase->type(rc) == Type::TOP) { if (n != top) { // Not already top? 
PhaseIterGVN *igvn = phase->is_IterGVN(); - if (can_reshape && igvn != NULL) { + if (can_reshape && igvn != nullptr) { igvn->_worklist.push(r); } // Nuke it down @@ -2044,7 +2044,7 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) { bool uncasted = false; Node* uin = unique_input(phase, false); - if (uin == NULL && can_reshape && + if (uin == nullptr && can_reshape && // If there is a chance that the region can be optimized out do // not add a cast node that we can't remove yet. !wait_for_region_igvn(phase)) { @@ -2055,12 +2055,12 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) { if (can_reshape) // IGVN transformation return top; else - return NULL; // Identity will return TOP - } else if (uin != NULL) { - // Only one not-NULL unique input path is left. + return nullptr; // Identity will return TOP + } else if (uin != nullptr) { + // Only one not-null unique input path is left. // Determine if this input is backedge of a loop. // (Skip new phis which have no uses and dead regions). - if (outcnt() > 0 && r->in(0) != NULL) { + if (outcnt() > 0 && r->in(0) != nullptr) { if (is_data_loop(r->as_Region(), uin, phase)) { // Break this data loop to avoid creation of a dead loop. if (can_reshape) { @@ -2069,7 +2069,7 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) { // We can't return top if we are in Parse phase - cut inputs only // let Identity to handle the case. 
replace_edge(uin, top, phase); - return NULL; + return nullptr; } } } @@ -2081,7 +2081,7 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) { const Type* phi_type = bottom_type(); // Add casts to carry the control dependency of the Phi that is // going away - Node* cast = NULL; + Node* cast = nullptr; if (phi_type->isa_ptr()) { const Type* uin_type = phase->type(uin); if (!phi_type->isa_oopptr() && !uin_type->isa_oopptr()) { @@ -2103,20 +2103,20 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) { // that of phi if (phi_type->join_speculative(TypePtr::NOTNULL) != uin_type->join_speculative(TypePtr::NOTNULL)) { Node* n = uin; - if (cast != NULL) { + if (cast != nullptr) { cast = phase->transform(cast); n = cast; } cast = ConstraintCastNode::make_cast(Op_CheckCastPP, r, n, phi_type, ConstraintCastNode::StrongDependency); } - if (cast == NULL) { + if (cast == nullptr) { cast = ConstraintCastNode::make_cast(Op_CastPP, r, uin, phi_type, ConstraintCastNode::StrongDependency); } } } else { cast = ConstraintCastNode::make_cast_for_type(r, uin, phi_type, ConstraintCastNode::StrongDependency); } - assert(cast != NULL, "cast should be set"); + assert(cast != nullptr, "cast should be set"); cast = phase->transform(cast); // set all inputs to the new cast(s) so the Phi is removed by Identity PhaseIterGVN* igvn = phase->is_IterGVN(); @@ -2140,10 +2140,10 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) { #endif // Identity may not return the expected uin, if it has to wait for the region, in irreducible case assert(ident == uin || ident->is_top() || must_wait_for_region_in_irreducible_loop(phase), "Identity must clean this up"); - return NULL; + return nullptr; } - Node* opt = NULL; + Node* opt = nullptr; int true_path = is_diamond_phi(); if (true_path != 0 && // If one of the diamond's branch is in the process of dying then, the Phi's input for that branch might transform @@ -2153,24 +2153,24 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) { 
// Check for CMove'ing identity. If it would be unsafe, // handle it here. In the safe case, let Identity handle it. Node* unsafe_id = is_cmove_id(phase, true_path); - if( unsafe_id != NULL && is_unsafe_data_reference(unsafe_id) ) + if( unsafe_id != nullptr && is_unsafe_data_reference(unsafe_id) ) opt = unsafe_id; // Check for simple convert-to-boolean pattern - if( opt == NULL ) + if( opt == nullptr ) opt = is_x2logic(phase, this, true_path); // Check for absolute value - if( opt == NULL ) + if( opt == nullptr ) opt = is_absolute(phase, this, true_path); // Check for conditional add - if( opt == NULL && can_reshape ) + if( opt == nullptr && can_reshape ) opt = is_cond_add(phase, this, true_path); // These 4 optimizations could subsume the phi: // have to check for a dead data loop creation. - if( opt != NULL ) { + if( opt != nullptr ) { if( opt == unsafe_id || is_unsafe_data_reference(opt) ) { // Found dead loop. if( can_reshape ) @@ -2180,7 +2180,7 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) { assert(req() == 3, "only diamond merge phi here"); set_req(1, top); set_req(2, top); - return NULL; + return nullptr; } else { return opt; } @@ -2191,11 +2191,11 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) { if (can_reshape) { opt = split_flow_path(phase, this); // This optimization only modifies phi - don't need to check for dead loop. 
- assert(opt == NULL || opt == this, "do not elide phi"); - if (opt != NULL) return opt; + assert(opt == nullptr || opt == this, "do not elide phi"); + if (opt != nullptr) return opt; } - if (in(1) != NULL && in(1)->Opcode() == Op_AddP && can_reshape) { + if (in(1) != nullptr && in(1)->Opcode() == Op_AddP && can_reshape) { // Try to undo Phi of AddP: // (Phi (AddP base address offset) (AddP base2 address2 offset2)) // becomes: @@ -2211,7 +2211,7 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) { Node* base = addp->in(AddPNode::Base); Node* address = addp->in(AddPNode::Address); Node* offset = addp->in(AddPNode::Offset); - if (base != NULL && address != NULL && offset != NULL && + if (base != nullptr && address != nullptr && offset != nullptr && !base->is_top() && !address->is_top() && !offset->is_top()) { const Type* base_type = base->bottom_type(); const Type* address_type = address->bottom_type(); @@ -2219,11 +2219,11 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) { // i.e. 
AddP with base == address and same offset as first AddP bool doit = true; for (uint i = 2; i < req(); i++) { - if (in(i) == NULL || + if (in(i) == nullptr || in(i)->Opcode() != Op_AddP || - in(i)->in(AddPNode::Base) == NULL || - in(i)->in(AddPNode::Address) == NULL || - in(i)->in(AddPNode::Offset) == NULL || + in(i)->in(AddPNode::Base) == nullptr || + in(i)->in(AddPNode::Address) == nullptr || + in(i)->in(AddPNode::Offset) == nullptr || in(i)->in(AddPNode::Base)->is_top() || in(i)->in(AddPNode::Address)->is_top() || in(i)->in(AddPNode::Offset)->is_top()) { @@ -2231,27 +2231,27 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) { break; } if (in(i)->in(AddPNode::Base) != base) { - base = NULL; + base = nullptr; } if (in(i)->in(AddPNode::Offset) != offset) { - offset = NULL; + offset = nullptr; } if (in(i)->in(AddPNode::Address) != address) { - address = NULL; + address = nullptr; } // Accumulate type for resulting Phi base_type = base_type->meet_speculative(in(i)->in(AddPNode::Base)->bottom_type()); address_type = address_type->meet_speculative(in(i)->in(AddPNode::Address)->bottom_type()); } - if (doit && base == NULL) { + if (doit && base == nullptr) { // Check for neighboring AddP nodes in a tree. // If they have a base, use that it. 
for (DUIterator_Fast kmax, k = this->fast_outs(kmax); k < kmax; k++) { Node* u = this->fast_out(k); if (u->is_AddP()) { Node* base2 = u->in(AddPNode::Base); - if (base2 != NULL && !base2->is_top()) { - if (base == NULL) + if (base2 != nullptr && !base2->is_top()) { + if (base == nullptr) base = base2; else if (base != base2) { doit = false; break; } @@ -2260,22 +2260,22 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) { } } if (doit) { - if (base == NULL) { - base = new PhiNode(in(0), base_type, NULL); + if (base == nullptr) { + base = new PhiNode(in(0), base_type, nullptr); for (uint i = 1; i < req(); i++) { base->init_req(i, in(i)->in(AddPNode::Base)); } phase->is_IterGVN()->register_new_node_with_optimizer(base); } - if (address == NULL) { - address = new PhiNode(in(0), address_type, NULL); + if (address == nullptr) { + address = new PhiNode(in(0), address_type, nullptr); for (uint i = 1; i < req(); i++) { address->init_req(i, in(i)->in(AddPNode::Address)); } phase->is_IterGVN()->register_new_node_with_optimizer(address); } - if (offset == NULL) { - offset = new PhiNode(in(0), TypeX_X, NULL); + if (offset == nullptr) { + offset = new PhiNode(in(0), TypeX_X, nullptr); for (uint i = 1; i < req(); i++) { offset->init_req(i, in(i)->in(AddPNode::Offset)); } @@ -2292,7 +2292,7 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) { // (Do not attempt this optimization unless parsing is complete. // It would make the parser's memory-merge logic sick.) // (MergeMemNode is not dead_loop_safe - need to check for dead loop.) - if (progress == NULL && can_reshape && type() == Type::MEMORY) { + if (progress == nullptr && can_reshape && type() == Type::MEMORY) { // see if this phi should be sliced uint merge_width = 0; bool saw_self = false; @@ -2302,7 +2302,7 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) { // Phi references itself through all other inputs then splitting the // Phi through memory merges would create dead loop at later stage. 
if (ii == top) { - return NULL; // Delay optimization until graph is cleaned. + return nullptr; // Delay optimization until graph is cleaned. } if (ii->is_MergeMem()) { MergeMemNode* n = ii->as_MergeMem(); @@ -2371,7 +2371,7 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) { // Phi(...MergeMem(m0, m1:AT1, m2:AT2)...) into // MergeMem(Phi(...m0...), Phi:AT1(...m1...), Phi:AT2(...m2...)) PhaseIterGVN* igvn = phase->is_IterGVN(); - assert(igvn != NULL, "sanity check"); + assert(igvn != nullptr, "sanity check"); Node* hook = new Node(1); PhiNode* new_base = (PhiNode*) clone(); // Must eagerly register phis, since they participate in loops. @@ -2431,7 +2431,7 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) { const TypePtr* at = adr_type(); for( uint i=1; iis_DecodeNarrowPtr()) { assert(ii->bottom_type() == bottom_type(), "sanity"); new_ii = ii->in(1); @@ -2502,7 +2502,7 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) { #endif // Phi (VB ... VB) => VB (Phi ...) (Phi ...) 
- if (EnableVectorReboxing && can_reshape && progress == NULL && type()->isa_oopptr()) { + if (EnableVectorReboxing && can_reshape && progress == nullptr && type()->isa_oopptr()) { progress = merge_through_phi(this, phase->is_IterGVN()); } @@ -2527,7 +2527,7 @@ Node* PhiNode::clone_through_phi(Node* root_phi, const Type* t, uint c, PhaseIte if (idx < n->req()) { stack.set_index(idx + 1); Node* def = n->in(idx); - if (def == NULL) { + if (def == nullptr) { continue; // ignore dead path } else if (def->is_Phi()) { // inner node Node* new_phi = node_map[n->_idx]; @@ -2543,7 +2543,7 @@ Node* PhiNode::clone_through_phi(Node* root_phi, const Type* t, uint c, PhaseIte new_phi->set_req(idx, def->in(c)); } else { assert(false, "not optimizeable"); - return NULL; + return nullptr; } } else { Node* new_phi = node_map[n->_idx]; @@ -2561,14 +2561,14 @@ Node* PhiNode::merge_through_phi(Node* root_phi, PhaseIterGVN* igvn) { stack.push(root_phi, 1); // ignore control visited.set(root_phi->_idx); - VectorBoxNode* cached_vbox = NULL; + VectorBoxNode* cached_vbox = nullptr; while (stack.is_nonempty()) { Node* n = stack.node(); uint idx = stack.index(); if (idx < n->req()) { stack.set_index(idx + 1); Node* in = n->in(idx); - if (in == NULL) { + if (in == nullptr) { continue; // ignore dead path } else if (in->isa_Phi()) { if (!visited.test_set(in->_idx)) { @@ -2576,18 +2576,18 @@ Node* PhiNode::merge_through_phi(Node* root_phi, PhaseIterGVN* igvn) { } } else if (in->Opcode() == Op_VectorBox) { VectorBoxNode* vbox = static_cast(in); - if (cached_vbox == NULL) { + if (cached_vbox == nullptr) { cached_vbox = vbox; } else if (vbox->vec_type() != cached_vbox->vec_type()) { // TODO: vector type mismatch can be handled with additional reinterpret casts assert(Type::cmp(vbox->vec_type(), cached_vbox->vec_type()) != 0, "inconsistent"); - return NULL; // not optimizable: vector type mismatch + return nullptr; // not optimizable: vector type mismatch } else if (vbox->box_type() != 
cached_vbox->box_type()) { assert(Type::cmp(vbox->box_type(), cached_vbox->box_type()) != 0, "inconsistent"); - return NULL; // not optimizable: box type mismatch + return nullptr; // not optimizable: box type mismatch } } else { - return NULL; // not optimizable: neither Phi nor VectorBox + return nullptr; // not optimizable: neither Phi nor VectorBox } } else { stack.pop(); @@ -2623,7 +2623,7 @@ bool PhiNode::is_data_loop(RegionNode* r, Node* uin, const PhaseGVN* phase) { //------------------------------is_tripcount----------------------------------- bool PhiNode::is_tripcount(BasicType bt) const { - return (in(0) != NULL && in(0)->is_BaseCountedLoop() && + return (in(0) != nullptr && in(0)->is_BaseCountedLoop() && in(0)->as_BaseCountedLoop()->bt() == bt && in(0)->as_BaseCountedLoop()->phi() == this); } @@ -2708,7 +2708,7 @@ const Type* PCTableNode::Value(PhaseGVN* phase) const { // Return a node which is more "ideal" than the current node. Strip out // control copies Node *PCTableNode::Ideal(PhaseGVN *phase, bool can_reshape) { - return remove_dead_region(phase, can_reshape) ? this : NULL; + return remove_dead_region(phase, can_reshape) ? this : nullptr; } //============================================================================= @@ -2745,7 +2745,7 @@ const Type* CatchNode::Value(PhaseGVN* phase) const { for( uint i = 0; i < _size; i++ ) f[i] = Type::CONTROL; // Identify cases that will always throw an exception // () rethrow call - // () virtual or interface call with NULL receiver + // () virtual or interface call with null receiver // () call is a check cast with incompatible arguments if( in(1)->is_Proj() ) { Node *i10 = in(1)->in(0); @@ -2803,7 +2803,7 @@ Node* CatchProjNode::Identity(PhaseGVN* phase) { // an exception) or for "rethrow", because a further optimization will // yank the rethrow (happens when we inline a function that can throw an // exception and the caller has no handler). 
Not legal, e.g., for passing - // a NULL receiver to a v-call, or passing bad types to a slow-check-cast. + // a null receiver to a v-call, or passing bad types to a slow-check-cast. // These cases MUST throw an exception via the runtime system, so the VM // will be looking for a table entry. Node *proj = in(0)->in(1); // Expect a proj feeding CatchNode @@ -2866,12 +2866,12 @@ Node *NeverBranchNode::Ideal(PhaseGVN *phase, bool can_reshape) { // Dead code elimination can sometimes delete this projection so // if it's not there, there's nothing to do. Node* fallthru = proj_out_or_null(0); - if (fallthru != NULL) { + if (fallthru != nullptr) { phase->is_IterGVN()->replace_node(fallthru, in(0)); } return phase->C->top(); } - return NULL; + return nullptr; } #ifndef PRODUCT @@ -2886,7 +2886,7 @@ void BlackholeNode::format(PhaseRegAlloc* ra, outputStream* st) const { bool first = true; for (uint i = 0; i < req(); i++) { Node* n = in(i); - if (n != NULL && OptoReg::is_valid(ra->get_reg_first(n))) { + if (n != nullptr && OptoReg::is_valid(ra->get_reg_first(n))) { if (first) { first = false; } else { diff --git a/src/hotspot/share/opto/cfgnode.hpp b/src/hotspot/share/opto/cfgnode.hpp index 1d7c63efc64..a4ec796ccd3 100644 --- a/src/hotspot/share/opto/cfgnode.hpp +++ b/src/hotspot/share/opto/cfgnode.hpp @@ -100,12 +100,12 @@ public: Node* is_copy() const { const Node* r = _in[Region]; - if (r == NULL) + if (r == nullptr) return nonnull_req(); - return NULL; // not a copy! + return nullptr; // not a copy! } - PhiNode* has_phi() const; // returns an arbitrary phi user, or NULL - PhiNode* has_unique_phi() const; // returns the unique phi user, or NULL + PhiNode* has_phi() const; // returns an arbitrary phi user, or null + PhiNode* has_unique_phi() const; // returns the unique phi user, or null // Is this region node unreachable from root? 
bool is_unreachable_region(const PhaseGVN* phase); #ifdef ASSERT @@ -182,7 +182,7 @@ public: Input // Input values are [1..len) }; - PhiNode( Node *r, const Type *t, const TypePtr* at = NULL, + PhiNode( Node *r, const Type *t, const TypePtr* at = nullptr, const int imid = -1, const int iid = TypeOopPtr::InstanceTop, const int iidx = Compile::AliasIdxTop, @@ -201,7 +201,7 @@ public: // create a new phi with in edges matching r and set (initially) to x static PhiNode* make( Node* r, Node* x ); // extra type arguments override the new phi's bottom_type and adr_type - static PhiNode* make( Node* r, Node* x, const Type *t, const TypePtr* at = NULL ); + static PhiNode* make( Node* r, Node* x, const Type *t, const TypePtr* at = nullptr ); // create a new phi with narrowed memory type PhiNode* slice_memory(const TypePtr* adr_type) const; PhiNode* split_out_instance(const TypePtr* at, PhaseIterGVN *igvn) const; @@ -214,11 +214,11 @@ public: bool is_tripcount(BasicType bt) const; // Determine a unique non-trivial input, if any. - // Ignore casts if it helps. Return NULL on failure. + // Ignore casts if it helps. Return null on failure. Node* unique_input(PhaseTransform *phase, bool uncast); Node* unique_input(PhaseTransform *phase) { Node* uin = unique_input(phase, false); - if (uin == NULL) { + if (uin == nullptr) { uin = unique_input(phase, true); } return uin; @@ -426,7 +426,7 @@ public: // Takes the type of val and filters it through the test represented // by if_proj and returns a more refined type if one is produced. - // Returns NULL is it couldn't improve the type. + // Returns null is it couldn't improve the type. 
static const TypeInt* filtered_int_type(PhaseGVN* phase, Node* val, Node* if_proj); #ifndef PRODUCT diff --git a/src/hotspot/share/opto/chaitin.cpp b/src/hotspot/share/opto/chaitin.cpp index 899e004ca5a..765ee800224 100644 --- a/src/hotspot/share/opto/chaitin.cpp +++ b/src/hotspot/share/opto/chaitin.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -61,7 +61,7 @@ void LRG::dump() const { if( is_multidef() ) { tty->print("MultiDef "); - if (_defs != NULL) { + if (_defs != nullptr) { tty->print("("); for (int i = 0; i < _defs->length(); i++) { tty->print("N%d ", _defs->at(i)->_idx); @@ -200,7 +200,7 @@ PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher, bool sc #ifndef PRODUCT print_chaitin_statistics #else - NULL + nullptr #endif ) , _live(0) @@ -234,7 +234,7 @@ PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher, bool sc cutoff *= 0.001; buckval[i] = cutoff; for (uint j = 0; j < _cfg.number_of_blocks(); j++) { - buckets[i][j] = NULL; + buckets[i][j] = nullptr; } } // Sort blocks into buckets @@ -379,7 +379,7 @@ void PhaseChaitin::Register_Allocate() { { Compile::TracePhase tp("computeLive", &timers[_t_computeLive]); - _live = NULL; // Mark live as being not available + _live = nullptr; // Mark live as being not available rm.reset_to_mark(); // Reclaim working storage IndexSet::reset_memory(C, &live_arena); ifg.init(_lrg_map.max_lrg_id()); // Empty IFG @@ -397,7 +397,7 @@ void PhaseChaitin::Register_Allocate() { if (stretch_base_pointer_live_ranges(&live_arena)) { Compile::TracePhase tp("computeLive (sbplr)", &timers[_t_computeLive]); // Since some live range stretched, I need to recompute live - _live = NULL; + _live = nullptr; rm.reset_to_mark(); // 
Reclaim working storage IndexSet::reset_memory(C, &live_arena); ifg.init(_lrg_map.max_lrg_id()); @@ -436,7 +436,7 @@ void PhaseChaitin::Register_Allocate() { // To color, we need the IFG and for that we need LIVE. { Compile::TracePhase tp("computeLive", &timers[_t_computeLive]); - _live = NULL; + _live = nullptr; rm.reset_to_mark(); // Reclaim working storage IndexSet::reset_memory(C, &live_arena); ifg.init(_lrg_map.max_lrg_id()); @@ -474,7 +474,7 @@ void PhaseChaitin::Register_Allocate() { { Compile::TracePhase tp("computeLive", &timers[_t_computeLive]); - _live = NULL; + _live = nullptr; rm.reset_to_mark(); // Reclaim working storage IndexSet::reset_memory(C, &live_arena); ifg.init(_lrg_map.max_lrg_id()); // Build a new interference graph @@ -544,7 +544,7 @@ void PhaseChaitin::Register_Allocate() { // Nuke the live-ness and interference graph and LiveRanGe info { Compile::TracePhase tp("computeLive", &timers[_t_computeLive]); - _live = NULL; + _live = nullptr; rm.reset_to_mark(); // Reclaim working storage IndexSet::reset_memory(C, &live_arena); ifg.init(_lrg_map.max_lrg_id()); @@ -622,7 +622,7 @@ void PhaseChaitin::Register_Allocate() { // Log regalloc results CompileLog* log = Compile::current()->log(); - if (log != NULL) { + if (log != nullptr) { log->elem("regalloc attempts='%d' success='%d'", _trip_cnt, !C->failing()); } @@ -682,9 +682,9 @@ void PhaseChaitin::Register_Allocate() { } // Done! 
- _live = NULL; - _ifg = NULL; - C->set_indexSet_arena(NULL); // ResourceArea is at end of scope + _live = nullptr; + _ifg = nullptr; + C->set_indexSet_arena(nullptr); // ResourceArea is at end of scope } void PhaseChaitin::de_ssa() { @@ -791,10 +791,10 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) { copy_src._has_copy = 1; } - if (trace_spilling() && lrg._def != NULL) { + if (trace_spilling() && lrg._def != nullptr) { // collect defs for MultiDef printing - if (lrg._defs == NULL) { - lrg._defs = new (_ifg->_arena) GrowableArray(_ifg->_arena, 2, 0, NULL); + if (lrg._defs == nullptr) { + lrg._defs = new (_ifg->_arena) GrowableArray(_ifg->_arena, 2, 0, nullptr); lrg._defs->append(lrg._def); } lrg._defs->append(n); @@ -802,7 +802,7 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) { #endif // Check for a single def LRG; these can spill nicely - // via rematerialization. Flag as NULL for no def found + // via rematerialization. Flag as null for no def found // yet, or 'n' for single def or -1 for many defs. lrg._def = lrg._def ? 
NodeSentinel : n; @@ -844,7 +844,7 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) { lrg.set_scalable_reg_slots(Matcher::scalable_predicate_reg_slots()); } } - assert(n_type->isa_vect() == NULL || lrg._is_vector || + assert(n_type->isa_vect() == nullptr || lrg._is_vector || ireg == Op_RegD || ireg == Op_RegL || ireg == Op_RegVectMask, "vector must be in vector registers"); @@ -1063,7 +1063,7 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) { const RegMask &lrgmask = lrg.mask(); uint kreg = n->in(k)->ideal_reg(); bool is_vect = RegMask::is_vector(kreg); - assert(n->in(k)->bottom_type()->isa_vect() == NULL || is_vect || + assert(n->in(k)->bottom_type()->isa_vect() == nullptr || is_vect || kreg == Op_RegD || kreg == Op_RegL || kreg == Op_RegVectMask, "vector must be in vector registers"); if (lrgmask.is_bound(kreg)) @@ -1092,7 +1092,7 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) { // if the LRG is an unaligned pair, we will have to spill // so clear the LRG's register mask if it is not already spilled if (!is_vect && !n->is_SpillCopy() && - (lrg._def == NULL || lrg.is_multidef() || !lrg._def->is_SpillCopy()) && + (lrg._def == nullptr || lrg.is_multidef() || !lrg._def->is_SpillCopy()) && lrgmask.is_misaligned_pair()) { lrg.Clear(); } @@ -1777,22 +1777,22 @@ Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derive // See if this happens to be a base. // NOTE: we use TypePtr instead of TypeOopPtr because we can have - // pointers derived from NULL! These are always along paths that + // pointers derived from null! These are always along paths that // can't happen at run-time but the optimizer cannot deduce it so // we have to handle it gracefully. assert(!derived->bottom_type()->isa_narrowoop() || derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity"); const TypePtr *tj = derived->bottom_type()->isa_ptr(); // If its an OOP with a non-zero offset, then it is derived. 
- if( tj == NULL || tj->_offset == 0 ) { + if( tj == nullptr || tj->_offset == 0 ) { derived_base_map[derived->_idx] = derived; return derived; } - // Derived is NULL+offset? Base is NULL! + // Derived is null+offset? Base is null! if( derived->is_Con() ) { Node *base = _matcher.mach_null(); - assert(base != NULL, "sanity"); - if (base->in(0) == NULL) { + assert(base != nullptr, "sanity"); + if (base->in(0) == nullptr) { // Initialize it once and make it shared: // set control to _root and place it into Start block // (where top() node is placed). @@ -1817,7 +1817,7 @@ Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derive if (_lrg_map.live_range_id(base) == 0) { new_lrg(base, maxlrg++); } - assert(base->in(0) == _cfg.get_root_node() && _cfg.get_block_for_node(base) == _cfg.get_block_for_node(C->top()), "base NULL should be shared"); + assert(base->in(0) == _cfg.get_root_node() && _cfg.get_block_for_node(base) == _cfg.get_block_for_node(C->top()), "base null should be shared"); derived_base_map[derived->_idx] = base; return base; } @@ -1866,7 +1866,7 @@ Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derive uint j; for( j = 1; j < base->req(); j++ ) if( phi->in(j) != base->in(j) && - !(phi->in(j)->is_Con() && base->in(j)->is_Con()) ) // allow different NULLs + !(phi->in(j)->is_Con() && base->in(j)->is_Con()) ) // allow different nulls break; if( j == base->req() ) { // All inputs match? 
base = phi; // Then use existing 'phi' and drop 'base' @@ -2428,7 +2428,7 @@ void PhaseChaitin::verify_base_ptrs(ResourceArea* a) const { if (n->is_MachSafePoint()) { MachSafePointNode* sfpt = n->as_MachSafePoint(); JVMState* jvms = sfpt->jvms(); - if (jvms != NULL) { + if (jvms != nullptr) { // Now scan for a live derived pointer if (jvms->oopoff() < sfpt->req()) { // Check each derived/base pair @@ -2452,11 +2452,11 @@ void PhaseChaitin::verify_base_ptrs(ResourceArea* a) const { } } else if (check->is_Con()) { if (is_derived && check->bottom_type()->is_ptr()->_offset != 0) { - // Derived is NULL+non-zero offset, base must be NULL. + // Derived is null+non-zero offset, base must be null. assert(check->bottom_type()->is_ptr()->ptr() == TypePtr::Null, "Bad derived pointer"); } else { assert(check->bottom_type()->is_ptr()->_offset == 0, "Bad base pointer"); - // Base either ConP(NULL) or loadConP + // Base either ConP(nullptr) or loadConP if (check->is_Mach()) { assert(check->as_Mach()->ideal_Opcode() == Op_ConP, "Bad base pointer"); } else { diff --git a/src/hotspot/share/opto/chaitin.hpp b/src/hotspot/share/opto/chaitin.hpp index 646ea4b12ea..dd917571b06 100644 --- a/src/hotspot/share/opto/chaitin.hpp +++ b/src/hotspot/share/opto/chaitin.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -217,7 +217,7 @@ public: // Alive if non-zero, dead if zero - bool alive() const { return _def != NULL; } + bool alive() const { return _def != nullptr; } bool is_multidef() const { return _def == NodeSentinel; } bool is_singledef() const { return _def != NodeSentinel; } @@ -747,7 +747,7 @@ private: Node* _def; Node* _first_use; public: - RegDefUse() : _def(NULL), _first_use(NULL) { } + RegDefUse() : _def(nullptr), _first_use(nullptr) { } Node* def() const { return _def; } Node* first_use() const { return _first_use; } @@ -758,8 +758,8 @@ private: } } void clear() { - _def = NULL; - _first_use = NULL; + _def = nullptr; + _first_use = nullptr; } }; typedef GrowableArray RegToDefUseMap; diff --git a/src/hotspot/share/opto/coalesce.cpp b/src/hotspot/share/opto/coalesce.cpp index b95987c4b09..9625a97666d 100644 --- a/src/hotspot/share/opto/coalesce.cpp +++ b/src/hotspot/share/opto/coalesce.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -260,7 +260,7 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) { Node *def = n->in(cidx); if (_phc._lrg_map.find(n) == _phc._lrg_map.find(def)) { n->replace_by(def); - n->set_req(cidx,NULL); + n->set_req(cidx,nullptr); b->remove_node(l); l--; continue; @@ -503,7 +503,7 @@ void PhaseConservativeCoalesce::union_helper( Node *lr1_node, Node *lr2_node, ui lrgs(lr1)._def = (lrgs(lr1).is_multidef() || lrgs(lr2).is_multidef() ) ? 
NodeSentinel : src_def; - lrgs(lr2)._def = NULL; // No def for lrg 2 + lrgs(lr2)._def = nullptr; // No def for lrg 2 lrgs(lr2).Clear(); // Force empty mask for LRG 2 //lrgs(lr2)._size = 0; // Live-range 2 goes dead lrgs(lr1)._is_oop |= lrgs(lr2)._is_oop; @@ -520,7 +520,7 @@ void PhaseConservativeCoalesce::union_helper( Node *lr1_node, Node *lr2_node, ui // _phc.free_spillcopy(b->_nodes[bindex]); assert( b->get_node(bindex) == dst_copy, "" ); dst_copy->replace_by( dst_copy->in(didx) ); - dst_copy->set_req( didx, NULL); + dst_copy->set_req( didx, nullptr); b->remove_node(bindex); if( bindex < b->_ihrp_index ) b->_ihrp_index--; if( bindex < b->_fhrp_index ) b->_fhrp_index--; diff --git a/src/hotspot/share/opto/compile.cpp b/src/hotspot/share/opto/compile.cpp index 32f54a0e982..eca839f76dd 100644 --- a/src/hotspot/share/opto/compile.cpp +++ b/src/hotspot/share/opto/compile.cpp @@ -85,7 +85,7 @@ // -------------------- Compile::mach_constant_base_node ----------------------- // Constant table base node singleton. MachConstantBaseNode* Compile::mach_constant_base_node() { - if (_mach_constant_base_node == NULL) { + if (_mach_constant_base_node == nullptr) { _mach_constant_base_node = new MachConstantBaseNode(); _mach_constant_base_node->add_req(C->root()); } @@ -153,7 +153,7 @@ CallGenerator* Compile::find_intrinsic(ciMethod* m, bool is_virtual) { if (m->intrinsic_id() != vmIntrinsics::_none && m->intrinsic_id() <= vmIntrinsics::LAST_COMPILER_INLINE) { CallGenerator* cg = make_vm_intrinsic(m, is_virtual); - if (cg != NULL) { + if (cg != nullptr) { // Save it for next time: register_intrinsic(cg); return cg; @@ -161,7 +161,7 @@ CallGenerator* Compile::find_intrinsic(ciMethod* m, bool is_virtual) { gather_intrinsic_statistics(m->intrinsic_id(), is_virtual, _intrinsic_disabled); } } - return NULL; + return nullptr; } // Compile::make_vm_intrinsic is defined in library_call.cpp. 
@@ -228,7 +228,7 @@ static char* format_flags(int flags, char* buf) { void Compile::print_intrinsic_statistics() { char flagsbuf[100]; ttyLocker ttyl; - if (xtty != NULL) xtty->head("statistics type='intrinsic'"); + if (xtty != nullptr) xtty->head("statistics type='intrinsic'"); tty->print_cr("Compiler intrinsic usage:"); juint total = _intrinsic_hist_count[as_int(vmIntrinsics::_none)]; if (total == 0) total = 1; // avoid div0 in case of no successes @@ -242,12 +242,12 @@ void Compile::print_intrinsic_statistics() { } } PRINT_STAT_LINE("total", total, format_flags(_intrinsic_hist_flags[as_int(vmIntrinsics::_none)], flagsbuf)); - if (xtty != NULL) xtty->tail("statistics"); + if (xtty != nullptr) xtty->tail("statistics"); } void Compile::print_statistics() { { ttyLocker ttyl; - if (xtty != NULL) xtty->head("statistics type='opto'"); + if (xtty != nullptr) xtty->head("statistics type='opto'"); Parse::print_statistics(); PhaseStringOpts::print_statistics(); PhaseCCP::print_statistics(); @@ -257,7 +257,7 @@ void Compile::print_statistics() { PhaseIdealLoop::print_statistics(); ConnectionGraph::print_statistics(); PhaseMacroExpand::print_statistics(); - if (xtty != NULL) xtty->tail("statistics"); + if (xtty != nullptr) xtty->tail("statistics"); } if (_intrinsic_hist_flags[as_int(vmIntrinsics::_none)] != 0) { // put this under its own element. @@ -295,12 +295,12 @@ void Compile::gvn_replace_by(Node* n, Node* nn) { // recursive traversal is slower. 
void Compile::identify_useful_nodes(Unique_Node_List &useful) { int estimated_worklist_size = live_nodes(); - useful.map( estimated_worklist_size, NULL ); // preallocate space + useful.map( estimated_worklist_size, nullptr ); // preallocate space // Initialize worklist - if (root() != NULL) { useful.push(root()); } + if (root() != nullptr) { useful.push(root()); } // If 'top' is cached, declare it useful to preserve cached node - if( cached_top_node() ) { useful.push(cached_top_node()); } + if (cached_top_node()) { useful.push(cached_top_node()); } // Push all useful nodes onto the list, breadthfirst for( uint next = 0; next < useful.size(); ++next ) { @@ -348,7 +348,7 @@ void Compile::remove_useless_late_inlines(GrowableArray* inlines } void Compile::remove_useless_late_inlines(GrowableArray* inlines, Node* dead) { - assert(dead != NULL && dead->is_Call(), "sanity"); + assert(dead != nullptr && dead->is_Call(), "sanity"); int found = 0; for (int i = 0; i < inlines->length(); i++) { if (inlines->at(i)->call_node() == dead) { @@ -442,7 +442,7 @@ void Compile::disconnect_useless_nodes(Unique_Node_List &useful, Unique_Node_Lis remove_useless_unstable_if_traps(useful); // remove useless unstable_if traps remove_useless_coarsened_locks(useful); // remove useless coarsened locks nodes #ifdef ASSERT - if (_modified_nodes != NULL) { + if (_modified_nodes != nullptr) { _modified_nodes->remove_useless_nodes(useful.member_set()); } #endif @@ -471,17 +471,17 @@ CompileWrapper::CompileWrapper(Compile* compile) : _compile(compile) { // the Compile* pointer is stored in the current ciEnv: ciEnv* env = compile->env(); assert(env == ciEnv::current(), "must already be a ciEnv active"); - assert(env->compiler_data() == NULL, "compile already active?"); + assert(env->compiler_data() == nullptr, "compile already active?"); env->set_compiler_data(compile); assert(compile == Compile::current(), "sanity"); - compile->set_type_dict(NULL); + compile->set_type_dict(nullptr); 
compile->set_clone_map(new Dict(cmpkey, hashkey, _compile->comp_arena())); compile->clone_map().set_clone_idx(0); compile->set_type_last_size(0); - compile->set_last_tf(NULL, NULL); - compile->set_indexSet_arena(NULL); - compile->set_indexSet_free_block_list(NULL); + compile->set_last_tf(nullptr, nullptr); + compile->set_indexSet_arena(nullptr); + compile->set_indexSet_free_block_list(nullptr); compile->init_type_arena(); Type::Initialize(compile); _compile->begin_method(); @@ -492,7 +492,7 @@ CompileWrapper::~CompileWrapper() { assert(CICrashAt < 0 || _compile->compile_id() != CICrashAt, "just as planned"); _compile->end_method(); - _compile->env()->set_compiler_data(NULL); + _compile->env()->set_compiler_data(nullptr); } @@ -555,7 +555,7 @@ void Compile::print_ideal_ir(const char* phase_name) { // This output goes directly to the tty, not the compiler log. // To enable tools to match it up with the compilation activity, // be sure to tag this tty output with the compile ID. - if (xtty != NULL) { + if (xtty != nullptr) { xtty->head("ideal compile_id='%d'%s compile_phase='%s'", compile_id(), is_osr_compilation() ? 
" compile_kind='osr'" : "", @@ -570,7 +570,7 @@ void Compile::print_ideal_ir(const char* phase_name) { _output->print_scheduling(); } - if (xtty != NULL) { + if (xtty != nullptr) { xtty->tail("ideal"); } } @@ -591,10 +591,10 @@ Compile::Compile( ciEnv* ci_env, ciMethod* target, int osr_bci, _options(options), _method(target), _entry_bci(osr_bci), - _ilt(NULL), - _stub_function(NULL), - _stub_name(NULL), - _stub_entry_point(NULL), + _ilt(nullptr), + _stub_function(nullptr), + _stub_name(nullptr), + _stub_entry_point(nullptr), _max_node_limit(MaxNodeLimit), _post_loop_opts_phase(false), _inlining_progress(false), @@ -613,40 +613,40 @@ Compile::Compile( ciEnv* ci_env, ciMethod* target, int osr_bci, _env(ci_env), _directive(directive), _log(ci_env->log()), - _failure_reason(NULL), - _intrinsics (comp_arena(), 0, 0, NULL), - _macro_nodes (comp_arena(), 8, 0, NULL), - _predicate_opaqs (comp_arena(), 8, 0, NULL), - _skeleton_predicate_opaqs (comp_arena(), 8, 0, NULL), - _expensive_nodes (comp_arena(), 8, 0, NULL), - _for_post_loop_igvn(comp_arena(), 8, 0, NULL), - _unstable_if_traps (comp_arena(), 8, 0, NULL), - _coarsened_locks (comp_arena(), 8, 0, NULL), - _congraph(NULL), - NOT_PRODUCT(_igv_printer(NULL) COMMA) + _failure_reason(nullptr), + _intrinsics (comp_arena(), 0, 0, nullptr), + _macro_nodes (comp_arena(), 8, 0, nullptr), + _predicate_opaqs (comp_arena(), 8, 0, nullptr), + _skeleton_predicate_opaqs (comp_arena(), 8, 0, nullptr), + _expensive_nodes (comp_arena(), 8, 0, nullptr), + _for_post_loop_igvn(comp_arena(), 8, 0, nullptr), + _unstable_if_traps (comp_arena(), 8, 0, nullptr), + _coarsened_locks (comp_arena(), 8, 0, nullptr), + _congraph(nullptr), + NOT_PRODUCT(_igv_printer(nullptr) COMMA) _dead_node_list(comp_arena()), _dead_node_count(0), _node_arena(mtCompiler), _old_arena(mtCompiler), - _mach_constant_base_node(NULL), + _mach_constant_base_node(nullptr), _Compile_types(mtCompiler), - _initial_gvn(NULL), - _for_igvn(NULL), - _late_inlines(comp_arena(), 2, 
0, NULL), - _string_late_inlines(comp_arena(), 2, 0, NULL), - _boxing_late_inlines(comp_arena(), 2, 0, NULL), - _vector_reboxing_late_inlines(comp_arena(), 2, 0, NULL), + _initial_gvn(nullptr), + _for_igvn(nullptr), + _late_inlines(comp_arena(), 2, 0, nullptr), + _string_late_inlines(comp_arena(), 2, 0, nullptr), + _boxing_late_inlines(comp_arena(), 2, 0, nullptr), + _vector_reboxing_late_inlines(comp_arena(), 2, 0, nullptr), _late_inlines_pos(0), _number_of_mh_late_inlines(0), _print_inlining_stream(new (mtCompiler) stringStream()), - _print_inlining_list(NULL), + _print_inlining_list(nullptr), _print_inlining_idx(0), - _print_inlining_output(NULL), - _replay_inline_data(NULL), + _print_inlining_output(nullptr), + _replay_inline_data(nullptr), _java_calls(0), _inner_loops(0), _interpreter_frame_size(0), - _output(NULL) + _output(nullptr) #ifndef PRODUCT , _in_dump_cnt(0) #endif @@ -662,7 +662,7 @@ Compile::Compile( ciEnv* ci_env, ciMethod* target, int osr_bci, tty->print(" "); } TraceTime t1("Total compilation time", &_t_totalCompilation, CITime, CITimeVerbose); - TraceTime t2(NULL, &_t_methodCompilation, CITime, false); + TraceTime t2(nullptr, &_t_methodCompilation, CITime, false); #if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY) bool print_opto_assembly = directive->PrintOptoAssemblyOption; @@ -722,7 +722,7 @@ Compile::Compile( ciEnv* ci_env, ciMethod* target, int osr_bci, initial_gvn()->transform_no_reclaim(top()); // Set up tf(), start(), and find a CallGenerator. - CallGenerator* cg = NULL; + CallGenerator* cg = nullptr; if (is_osr_compilation()) { const TypeTuple *domain = StartOSRNode::osr_domain(); const TypeTuple *range = TypeTuple::make_range(method()->signature()); @@ -745,14 +745,14 @@ Compile::Compile( ciEnv* ci_env, ciMethod* target, int osr_bci, // the pre-barrier code. 
cg = find_intrinsic(method(), false); } - if (cg == NULL) { + if (cg == nullptr) { float past_uses = method()->interpreter_invocation_count(); float expected_uses = past_uses; cg = CallGenerator::for_inline(method(), expected_uses); } } if (failing()) return; - if (cg == NULL) { + if (cg == nullptr) { record_method_not_compilable("cannot parse method"); return; } @@ -760,7 +760,7 @@ Compile::Compile( ciEnv* ci_env, ciMethod* target, int osr_bci, gvn.set_type(root(), root()->bottom_type()); JVMState* jvms = build_start_state(start(), tf()); - if ((jvms = cg->generate(jvms)) == NULL) { + if ((jvms = cg->generate(jvms)) == nullptr) { if (!failure_reason_is(C2Compiler::retry_class_loading_during_parsing())) { record_method_not_compilable("method parse failed"); } @@ -804,7 +804,7 @@ Compile::Compile( ciEnv* ci_env, ciMethod* target, int osr_bci, // After parsing, node notes are no longer automagic. // They must be propagated by register_new_node_with_optimizer(), // clone(), or the like. - set_default_node_notes(NULL); + set_default_node_notes(nullptr); #ifndef PRODUCT if (should_print_igv(1)) { @@ -824,7 +824,7 @@ Compile::Compile( ciEnv* ci_env, ciMethod* target, int osr_bci, } else { _stress_seed = StressSeed; } - if (_log != NULL) { + if (_log != nullptr) { _log->elem("stress_test seed='%u'", _stress_seed); } } @@ -849,7 +849,7 @@ Compile::Compile( ciEnv* ci_env, ciMethod* target, int osr_bci, if (directive->DumpReplayOption) { env()->dump_replay_data(_compile_id); } - if (directive->DumpInlineOption && (ilt() != NULL)) { + if (directive->DumpInlineOption && (ilt() != nullptr)) { env()->dump_inline_data(_compile_id); } @@ -879,11 +879,11 @@ Compile::Compile( ciEnv* ci_env, : Phase(Compiler), _compile_id(0), _options(Options::for_runtime_stub()), - _method(NULL), + _method(nullptr), _entry_bci(InvocationEntryBci), _stub_function(stub_function), _stub_name(stub_name), - _stub_entry_point(NULL), + _stub_entry_point(nullptr), _max_node_limit(MaxNodeLimit), 
_post_loop_opts_phase(false), _inlining_progress(false), @@ -901,35 +901,35 @@ Compile::Compile( ciEnv* ci_env, _env(ci_env), _directive(directive), _log(ci_env->log()), - _failure_reason(NULL), - _congraph(NULL), - NOT_PRODUCT(_igv_printer(NULL) COMMA) + _failure_reason(nullptr), + _congraph(nullptr), + NOT_PRODUCT(_igv_printer(nullptr) COMMA) _dead_node_list(comp_arena()), _dead_node_count(0), _node_arena(mtCompiler), _old_arena(mtCompiler), - _mach_constant_base_node(NULL), + _mach_constant_base_node(nullptr), _Compile_types(mtCompiler), - _initial_gvn(NULL), - _for_igvn(NULL), + _initial_gvn(nullptr), + _for_igvn(nullptr), _number_of_mh_late_inlines(0), _print_inlining_stream(new (mtCompiler) stringStream()), - _print_inlining_list(NULL), + _print_inlining_list(nullptr), _print_inlining_idx(0), - _print_inlining_output(NULL), - _replay_inline_data(NULL), + _print_inlining_output(nullptr), + _replay_inline_data(nullptr), _java_calls(0), _inner_loops(0), _interpreter_frame_size(0), - _output(NULL), + _output(nullptr), #ifndef PRODUCT _in_dump_cnt(0), #endif _allowed_reasons(0) { C = this; - TraceTime t1(NULL, &_t_totalCompilation, CITime, false); - TraceTime t2(NULL, &_t_stubCompilation, CITime, false); + TraceTime t1(nullptr, &_t_totalCompilation, CITime, false); + TraceTime t2(nullptr, &_t_stubCompilation, CITime, false); #ifndef PRODUCT set_print_assembly(PrintFrameConverterAssembly); @@ -965,34 +965,34 @@ Compile::Compile( ciEnv* ci_env, void Compile::Init(bool aliasing) { _do_aliasing = aliasing; _unique = 0; - _regalloc = NULL; + _regalloc = nullptr; - _tf = NULL; // filled in later - _top = NULL; // cached later - _matcher = NULL; // filled in later - _cfg = NULL; // filled in later + _tf = nullptr; // filled in later + _top = nullptr; // cached later + _matcher = nullptr; // filled in later + _cfg = nullptr; // filled in later IA32_ONLY( set_24_bit_selection_and_mode(true, false); ) - _node_note_array = NULL; - _default_node_notes = NULL; - DEBUG_ONLY( 
_modified_nodes = NULL; ) // Used in Optimize() + _node_note_array = nullptr; + _default_node_notes = nullptr; + DEBUG_ONLY( _modified_nodes = nullptr; ) // Used in Optimize() - _immutable_memory = NULL; // filled in at first inquiry + _immutable_memory = nullptr; // filled in at first inquiry #ifdef ASSERT _phase_optimize_finished = false; _exception_backedge = false; - _type_verify = NULL; + _type_verify = nullptr; #endif // Globally visible Nodes - // First set TOP to NULL to give safe behavior during creation of RootNode - set_cached_top_node(NULL); + // First set TOP to null to give safe behavior during creation of RootNode + set_cached_top_node(nullptr); set_root(new RootNode()); // Now that you have a Root to point to, create the real TOP set_cached_top_node( new ConNode(Type::TOP) ); - set_recent_alloc(NULL, NULL); + set_recent_alloc(nullptr, nullptr); // Create Debug Information Recorder to record scopes, oopmaps, etc. env()->set_oop_recorder(new OopRecorder(env()->arena())); @@ -1038,7 +1038,7 @@ void Compile::Init(bool aliasing) { _max_node_limit = _directive->MaxNodeLimitOption; #if INCLUDE_RTM_OPT - if (UseRTMLocking && has_method() && (method()->method_data_or_null() != NULL)) { + if (UseRTMLocking && has_method() && (method()->method_data_or_null() != nullptr)) { int rtm_state = method()->method_data()->rtm_state(); if (method_has_option(CompileCommand::NoRTMLockEliding) || ((rtm_state & NoRTM) != 0)) { // Don't generate RTM lock eliding code. @@ -1058,7 +1058,7 @@ void Compile::Init(bool aliasing) { } if (debug_info()->recording_non_safepoints()) { set_node_note_array(new(comp_arena()) GrowableArray - (comp_arena(), 8, 0, NULL)); + (comp_arena(), 8, 0, nullptr)); set_default_node_notes(Node_Notes::make(this)); } @@ -1071,14 +1071,14 @@ void Compile::Init(bool aliasing) { for (int i = 0; i < grow_ats; i++) _alias_types[i] = &ats[i]; } // Initialize the first few types. 
- _alias_types[AliasIdxTop]->Init(AliasIdxTop, NULL); + _alias_types[AliasIdxTop]->Init(AliasIdxTop, nullptr); _alias_types[AliasIdxBot]->Init(AliasIdxBot, TypePtr::BOTTOM); _alias_types[AliasIdxRaw]->Init(AliasIdxRaw, TypeRawPtr::BOTTOM); _num_alias_types = AliasIdxRaw+1; // Zero out the alias type cache. Copy::zero_to_bytes(_alias_cache, sizeof(_alias_cache)); - // A NULL adr_type hits in the cache right away. Preload the right answer. - probe_alias_cache(NULL)->_index = AliasIdxTop; + // A null adr_type hits in the cache right away. Preload the right answer. + probe_alias_cache(nullptr)->_index = AliasIdxTop; } //---------------------------init_start---------------------------------------- @@ -1103,13 +1103,13 @@ StartNode* Compile::start() const { } } fatal("Did not find Start node!"); - return NULL; + return nullptr; } //-------------------------------immutable_memory------------------------------------- // Access immutable memory Node* Compile::immutable_memory() { - if (_immutable_memory != NULL) { + if (_immutable_memory != nullptr) { return _immutable_memory; } StartNode* s = start(); @@ -1121,20 +1121,20 @@ Node* Compile::immutable_memory() { } } ShouldNotReachHere(); - return NULL; + return nullptr; } //----------------------set_cached_top_node------------------------------------ // Install the cached top node, and make sure Node::is_top works correctly. void Compile::set_cached_top_node(Node* tn) { - if (tn != NULL) verify_top(tn); + if (tn != nullptr) verify_top(tn); Node* old_top = _top; _top = tn; // Calling Node::setup_is_top allows the nodes the chance to adjust // their _out arrays. 
- if (_top != NULL) _top->setup_is_top(); - if (old_top != NULL) old_top->setup_is_top(); - assert(_top == NULL || top()->is_top(), ""); + if (_top != nullptr) _top->setup_is_top(); + if (old_top != nullptr) old_top->setup_is_top(); + assert(_top == nullptr || top()->is_top(), ""); } #ifdef ASSERT @@ -1147,8 +1147,8 @@ uint Compile::count_live_nodes_by_graph_walk() { void Compile::print_missing_nodes() { - // Return if CompileLog is NULL and PrintIdealNodeCount is false. - if ((_log == NULL) && (! PrintIdealNodeCount)) { + // Return if CompileLog is null and PrintIdealNodeCount is false. + if ((_log == nullptr) && (! PrintIdealNodeCount)) { return; } @@ -1166,7 +1166,7 @@ void Compile::print_missing_nodes() { uint l_nodes_by_walk = useful.size(); if (l_nodes != l_nodes_by_walk) { - if (_log != NULL) { + if (_log != nullptr) { _log->begin_head("mismatched_nodes count='%d'", abs((int) (l_nodes - l_nodes_by_walk))); _log->stamp(); _log->end_head(); @@ -1176,7 +1176,7 @@ void Compile::print_missing_nodes() { for (int i = 0; i < last_idx; i++) { if (useful_member_set.test(i)) { if (_dead_node_list.test(i)) { - if (_log != NULL) { + if (_log != nullptr) { _log->elem("mismatched_node_info node_idx='%d' type='both live and dead'", i); } if (PrintIdealNodeCount) { @@ -1187,7 +1187,7 @@ void Compile::print_missing_nodes() { } } else if (! 
_dead_node_list.test(i)) { - if (_log != NULL) { + if (_log != nullptr) { _log->elem("mismatched_node_info node_idx='%d' type='neither live nor dead'", i); } if (PrintIdealNodeCount) { @@ -1196,19 +1196,19 @@ void Compile::print_missing_nodes() { } } } - if (_log != NULL) { + if (_log != nullptr) { _log->tail("mismatched_nodes"); } } } void Compile::record_modified_node(Node* n) { - if (_modified_nodes != NULL && !_inlining_incrementally && !n->is_Con()) { + if (_modified_nodes != nullptr && !_inlining_incrementally && !n->is_Con()) { _modified_nodes->push(n); } } void Compile::remove_modified_node(Node* n) { - if (_modified_nodes != NULL) { + if (_modified_nodes != nullptr) { _modified_nodes->remove(n); } } @@ -1216,10 +1216,10 @@ void Compile::remove_modified_node(Node* n) { #ifndef PRODUCT void Compile::verify_top(Node* tn) const { - if (tn != NULL) { + if (tn != nullptr) { assert(tn->is_Con(), "top node must be a constant"); assert(((ConNode*)tn)->type() == Type::TOP, "top node must have correct type"); - assert(tn->in(0) != NULL, "must have live top node"); + assert(tn->in(0) != nullptr, "must have live top node"); } } #endif @@ -1228,7 +1228,7 @@ void Compile::verify_top(Node* tn) const { ///-------------------Managing Per-Node Debug & Profile Info------------------- void Compile::grow_node_notes(GrowableArray* arr, int grow_by) { - guarantee(arr != NULL, ""); + guarantee(arr != nullptr, ""); int num_blocks = arr->length(); if (grow_by < num_blocks) grow_by = num_blocks; int num_notes = grow_by * _node_notes_block_size; @@ -1243,27 +1243,27 @@ void Compile::grow_node_notes(GrowableArray* arr, int grow_by) { } bool Compile::copy_node_notes_to(Node* dest, Node* source) { - if (source == NULL || dest == NULL) return false; + if (source == nullptr || dest == nullptr) return false; if (dest->is_Con()) return false; // Do not push debug info onto constants. 
#ifdef ASSERT // Leave a bread crumb trail pointing to the original node: - if (dest != NULL && dest != source && dest->debug_orig() == NULL) { + if (dest != nullptr && dest != source && dest->debug_orig() == nullptr) { dest->set_debug_orig(source); } #endif - if (node_note_array() == NULL) + if (node_note_array() == nullptr) return false; // Not collecting any notes now. // This is a copy onto a pre-existing node, which may already have notes. // If both nodes have notes, do not overwrite any pre-existing notes. Node_Notes* source_notes = node_notes_at(source->_idx); - if (source_notes == NULL || source_notes->is_clear()) return false; + if (source_notes == nullptr || source_notes->is_clear()) return false; Node_Notes* dest_notes = node_notes_at(dest->_idx); - if (dest_notes == NULL || dest_notes->is_clear()) { + if (dest_notes == nullptr || dest_notes->is_clear()) { return set_node_notes_at(dest->_idx, source_notes); } @@ -1297,7 +1297,7 @@ const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const { TypePtr::PTR ptr = tj->ptr(); // Known instance (scalarizable allocation) alias only with itself. - bool is_known_inst = tj->isa_oopptr() != NULL && + bool is_known_inst = tj->isa_oopptr() != nullptr && tj->is_oopptr()->is_known_instance(); // Process weird unsafe references. @@ -1368,11 +1368,11 @@ const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const { // Arrays of known objects become arrays of unknown objects. 
if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) { const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size()); - tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset); + tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,nullptr,false,offset); } if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) { const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size()); - tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset); + tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,nullptr,false,offset); } // Arrays of bytes and of booleans both use 'bastore' and 'baload' so // cannot be distinguished by bytecode alone. @@ -1384,7 +1384,7 @@ const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const { // During the 2nd round of IterGVN, NotNull castings are removed. // Make sure the Bottom and NotNull variants alias the same. // Also, make sure exact and non-exact variants alias the same. - if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != NULL) { + if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != nullptr) { tj = ta = ta-> remove_speculative()-> cast_to_ptr_type(TypePtr::BotPTR)-> @@ -1421,7 +1421,7 @@ const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const { cast_to_ptr_type(TypePtr::BotPTR)-> cast_to_exactness(false); } - if (to->speculative() != NULL) { + if (to->speculative() != nullptr) { tj = to = to->remove_speculative(); } // Canonicalize the holder of this field @@ -1429,13 +1429,13 @@ const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const { // First handle header references such as a LoadKlassNode, even if the // object's klass is unloaded at compile time (4965979). 
if (!is_known_inst) { // Do it only for non-instance types - tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, offset); + tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, nullptr, offset); } } else if (offset < 0 || offset >= ik->layout_helper_size_in_bytes()) { // Static fields are in the space above the normal instance // fields in the java.lang.Class instance. if (ik != ciEnv::current()->Class_klass()) { - to = NULL; + to = nullptr; tj = TypeOopPtr::BOTTOM; offset = tj->offset(); } @@ -1444,9 +1444,9 @@ const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const { assert(offset < canonical_holder->layout_helper_size_in_bytes(), ""); if (!ik->equals(canonical_holder) || tj->offset() != offset) { if( is_known_inst ) { - tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, NULL, offset, to->instance_id()); + tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, nullptr, offset, to->instance_id()); } else { - tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, offset); + tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, nullptr, offset); } } } @@ -1523,11 +1523,11 @@ void Compile::AliasType::Init(int i, const TypePtr* at) { assert(AliasIdxTop <= i && i < Compile::current()->_max_alias_types, "Invalid alias index"); _index = i; _adr_type = at; - _field = NULL; - _element = NULL; + _field = nullptr; + _element = nullptr; _is_rewritable = true; // default - const TypeOopPtr *atoop = (at != NULL) ? at->isa_oopptr() : NULL; - if (atoop != NULL && atoop->is_known_instance()) { + const TypeOopPtr *atoop = (at != nullptr) ? 
at->isa_oopptr() : nullptr; + if (atoop != nullptr && atoop->is_known_instance()) { const TypeOopPtr *gt = atoop->cast_to_instance_id(TypeOopPtr::InstanceBot); _general_index = Compile::current()->get_alias_index(gt); } else { @@ -1536,10 +1536,10 @@ void Compile::AliasType::Init(int i, const TypePtr* at) { } BasicType Compile::AliasType::basic_type() const { - if (element() != NULL) { + if (element() != nullptr) { const Type* element = adr_type()->is_aryptr()->elem(); return element->isa_narrowoop() ? T_OBJECT : element->array_element_basic_type(); - } if (field() != NULL) { + } if (field() != nullptr) { return field()->layout_type(); } else { return T_ILLEGAL; // unknown @@ -1560,7 +1560,7 @@ void Compile::AliasType::print_on(outputStream* st) { st->print(" in "); adr_type()->dump_on(st); const TypeOopPtr* tjp = adr_type()->isa_oopptr(); - if (field() != NULL && tjp) { + if (field() != nullptr && tjp) { if (tjp->is_instptr()->instance_klass() != field()->holder() || tjp->offset() != field()->offset_in_bytes()) { st->print(" != "); @@ -1614,7 +1614,7 @@ Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_cr } // Handle special cases. - if (adr_type == NULL) return alias_type(AliasIdxTop); + if (adr_type == nullptr) return alias_type(AliasIdxTop); if (adr_type == TypePtr::BOTTOM) return alias_type(AliasIdxBot); // Do it the slow way. @@ -1647,7 +1647,7 @@ Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_cr } if (idx == AliasIdxTop) { - if (no_create) return NULL; + if (no_create) return nullptr; // Grow the array if necessary. if (_num_alias_types == _max_alias_types) grow_alias_types(); // Add a new alias type. 
@@ -1690,7 +1690,7 @@ Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_cr const TypeInstPtr* tinst = flat->isa_instptr(); if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) { ciField* field; - if (tinst->const_oop() != NULL && + if (tinst->const_oop() != nullptr && tinst->instance_klass() == ciEnv::current()->Class_klass() && tinst->offset() >= (tinst->instance_klass()->layout_helper_size_in_bytes())) { // static field @@ -1700,13 +1700,13 @@ Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_cr ciInstanceKlass *k = tinst->instance_klass(); field = k->get_field_by_offset(tinst->offset(), false); } - assert(field == NULL || - original_field == NULL || + assert(field == nullptr || + original_field == nullptr || (field->holder() == original_field->holder() && field->offset() == original_field->offset() && field->is_static() == original_field->is_static()), "wrong field?"); // Set field() and is_rewritable() attributes. - if (field != NULL) alias_type(idx)->set_field(field); + if (field != nullptr) alias_type(idx)->set_field(field); } } @@ -1717,7 +1717,7 @@ Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_cr // Might as well try to fill the cache for the flattened version, too. AliasCacheEntry* face = probe_alias_cache(flat); - if (face->_adr_type == NULL) { + if (face->_adr_type == nullptr) { face->_adr_type = flat; face->_index = idx; assert(alias_type(flat) == alias_type(idx), "flat type must work too"); @@ -1747,17 +1747,17 @@ bool Compile::have_alias_type(const TypePtr* adr_type) { } // Handle special cases. 
- if (adr_type == NULL) return true; + if (adr_type == nullptr) return true; if (adr_type == TypePtr::BOTTOM) return true; - return find_alias_type(adr_type, true, NULL) != NULL; + return find_alias_type(adr_type, true, nullptr) != nullptr; } //-----------------------------must_alias-------------------------------------- // True if all values of the given address type are in the given alias category. bool Compile::must_alias(const TypePtr* adr_type, int alias_idx) { if (alias_idx == AliasIdxBot) return true; // the universal category - if (adr_type == NULL) return true; // NULL serves as TypePtr::TOP + if (adr_type == nullptr) return true; // null serves as TypePtr::TOP if (alias_idx == AliasIdxTop) return false; // the empty category if (adr_type->base() == Type::AnyPtr) return false; // TypePtr::BOTTOM or its twins @@ -1775,7 +1775,7 @@ bool Compile::must_alias(const TypePtr* adr_type, int alias_idx) { // True if any values of the given address type are in the given alias category. bool Compile::can_alias(const TypePtr* adr_type, int alias_idx) { if (alias_idx == AliasIdxTop) return false; // the empty category - if (adr_type == NULL) return false; // NULL serves as TypePtr::TOP + if (adr_type == nullptr) return false; // null serves as TypePtr::TOP // Known instance doesn't alias with bottom memory if (alias_idx == AliasIdxBot) return !adr_type->is_known_instance(); // the universal category if (adr_type->base() == Type::AnyPtr) return !C->get_adr_type(alias_idx)->is_known_instance(); // TypePtr::BOTTOM or its twins @@ -2070,7 +2070,7 @@ void Compile::inline_incrementally(PhaseIterGVN& igvn) { if (live_nodes() > (uint)LiveNodeCountInliningCutoff) { bool do_print_inlining = print_inlining() || print_intrinsics(); - if (do_print_inlining || log() != NULL) { + if (do_print_inlining || log() != nullptr) { // Print inlining message for candidates that we couldn't inline for lack of space. 
for (int i = 0; i < _late_inlines.length(); i++) { CallGenerator* cg = _late_inlines.at(i); @@ -2123,10 +2123,10 @@ void Compile::inline_incrementally(PhaseIterGVN& igvn) { void Compile::process_late_inline_calls_no_inline(PhaseIterGVN& igvn) { // "inlining_incrementally() == false" is used to signal that no inlining is allowed // (see LateInlineVirtualCallGenerator::do_late_inline_check() for details). - // Tracking and verification of modified nodes is disabled by setting "_modified_nodes == NULL" + // Tracking and verification of modified nodes is disabled by setting "_modified_nodes == nullptr" // as if "inlining_incrementally() == true" were set. assert(inlining_incrementally() == false, "not allowed"); - assert(_modified_nodes == NULL, "not allowed"); + assert(_modified_nodes == nullptr, "not allowed"); assert(_late_inlines.length() > 0, "sanity"); while (_late_inlines.length() > 0) { @@ -2161,10 +2161,10 @@ bool Compile::optimize_loops(PhaseIterGVN& igvn, LoopOptsMode mode) { // useful. 
void Compile::remove_root_to_sfpts_edges(PhaseIterGVN& igvn) { Node *r = root(); - if (r != NULL) { + if (r != nullptr) { for (uint i = r->req(); i < r->len(); ++i) { Node *n = r->in(i); - if (n != NULL && n->is_SafePoint()) { + if (n != nullptr && n->is_SafePoint()) { r->rm_prec(i); if (n->outcnt() == 0) { igvn.remove_dead_node(n); @@ -2306,7 +2306,7 @@ void Compile::Optimize() { if (failing()) return; - if (congraph() != NULL && macro_count() > 0) { + if (congraph() != nullptr && macro_count() > 0) { TracePhase tp("macroEliminate", &timers[_t_macroEliminate]); PhaseMacroExpand mexp(igvn); mexp.eliminate_macro_nodes(); @@ -2420,7 +2420,7 @@ void Compile::Optimize() { igvn.optimize(); } - DEBUG_ONLY( _modified_nodes = NULL; ) + DEBUG_ONLY( _modified_nodes = nullptr; ) assert(igvn._worklist.size() == 0, "not empty"); @@ -2602,7 +2602,7 @@ Node* Compile::xform_to_MacroLogicV(PhaseIterGVN& igvn, uint func = compute_truth_table(partition, inputs); Node* pn = partition.at(partition.size() - 1); - Node* mask = pn->is_predicated_vector() ? pn->in(pn->req()-1) : NULL; + Node* mask = pn->is_predicated_vector() ? pn->in(pn->req()-1) : nullptr; return igvn.transform(MacroLogicVNode::make(igvn, in1, in2, in3, mask, func, vt)); } @@ -2648,7 +2648,7 @@ uint Compile::eval_macro_logic_op(uint func, uint in1 , uint in2, uint in3) { } static uint eval_operand(Node* n, ResourceHashtable& eval_map) { - assert(n != NULL, ""); + assert(n != nullptr, ""); assert(eval_map.contains(n), "absent"); return *(eval_map.get(n)); } @@ -2764,9 +2764,9 @@ bool Compile::compute_logic_cone(Node* n, Unique_Node_List& partition, Unique_No bool left_child_predicated = n->in(1)->is_predicated_vector(); bool right_child_predicated = n->in(2)->is_predicated_vector(); - Node* parent_pred = parent_is_predicated ? n->in(n->req()-1) : NULL; - Node* left_child_pred = left_child_predicated ? n->in(1)->in(n->in(1)->req()-1) : NULL; - Node* right_child_pred = right_child_predicated ? 
n->in(1)->in(n->in(1)->req()-1) : NULL; + Node* parent_pred = parent_is_predicated ? n->in(n->req()-1) : nullptr; + Node* left_child_pred = left_child_predicated ? n->in(1)->in(n->in(1)->req()-1) : nullptr; + Node* right_child_pred = right_child_predicated ? n->in(1)->in(n->in(1)->req()-1) : nullptr; do { if (pack_left_child && left_child_LOP && @@ -2833,8 +2833,8 @@ void Compile::process_logic_cone_root(PhaseIterGVN &igvn, Node *n, VectorSet &vi if (compute_logic_cone(n, partition, inputs)) { const TypeVect* vt = n->bottom_type()->is_vect(); Node* pn = partition.at(partition.size() - 1); - Node* mask = pn->is_predicated_vector() ? pn->in(pn->req()-1) : NULL; - if (mask == NULL || + Node* mask = pn->is_predicated_vector() ? pn->in(pn->req()-1) : nullptr; + if (mask == nullptr || Matcher::match_rule_supported_vector_masked(Op_MacroLogicV, vt->length(), vt->element_basic_type())) { Node* macro_logic = xform_to_MacroLogicV(igvn, vt, partition, inputs); #ifdef ASSERT @@ -3036,7 +3036,7 @@ void Compile::eliminate_redundant_card_marks(Node* n) { // Already converted to precedence edge for (uint i = mem->req(); i < mem->len(); i++) { // Accumulate any precedence edges - if (mem->in(i) != NULL) { + if (mem->in(i) != nullptr) { n->add_prec(mem->in(i)); } } @@ -3089,7 +3089,7 @@ void Compile::final_graph_reshaping_impl(Node *n, Final_Reshape_Counts& frc, Uni #ifdef ASSERT if( n->is_Mem() ) { int alias_idx = get_alias_index(n->as_Mem()->adr_type()); - assert( n->in(0) != NULL || alias_idx != Compile::AliasIdxRaw || + assert( n->in(0) != nullptr || alias_idx != Compile::AliasIdxRaw || // oop will be recorded in oop map if load crosses safepoint n->is_Load() && (n->as_Load()->bottom_type()->isa_oopptr() || LoadNode::is_immutable_value(n->in(MemNode::Address))), @@ -3279,12 +3279,12 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f // Some platforms can use the constant pool to load ConP. 
// Do this transformation here since IGVN will convert ConN back to ConP. const Type* t = addp->bottom_type(); - bool is_oop = t->isa_oopptr() != NULL; - bool is_klass = t->isa_klassptr() != NULL; + bool is_oop = t->isa_oopptr() != nullptr; + bool is_klass = t->isa_klassptr() != nullptr; if ((is_oop && Matcher::const_oop_prefer_decode() ) || (is_klass && Matcher::const_klass_prefer_decode())) { - Node* nn = NULL; + Node* nn = nullptr; int op = is_oop ? Op_ConN : Op_ConNKlass; @@ -3293,13 +3293,13 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f uint cnt = r->outcnt(); for (uint i = 0; i < cnt; i++) { Node* m = r->raw_out(i); - if (m!= NULL && m->Opcode() == op && + if (m!= nullptr && m->Opcode() == op && m->bottom_type()->make_ptr() == t) { nn = m; break; } } - if (nn != NULL) { + if (nn != nullptr) { // Decode a narrow oop to match address // [R12 + narrow_oop_reg<<3 + offset] if (is_oop) { @@ -3316,7 +3316,7 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f #ifdef ASSERT for (uint j = 0; j < out_i->outcnt(); ++j) { Node *out_j = out_i->raw_out(j); - assert(out_j == NULL || !out_j->is_AddP() || out_j->in(AddPNode::Base) != addp, + assert(out_j == nullptr || !out_j->is_AddP() || out_j->in(AddPNode::Base) != addp, "more than 2 AddP nodes in a chain (out_j %u)", out_j->_idx); } #endif @@ -3340,7 +3340,7 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f // (if control is set already) on memory operations. Some CastPP // nodes don't have a control (don't carry a dependency): skip // those. 
- if (n->in(0) != NULL) { + if (n->in(0) != nullptr) { ResourceMark rm; Unique_Node_List wq; wq.push(n); @@ -3375,20 +3375,20 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f // // x86, ARM and friends can handle 2 adds in addressing mode // and Matcher can fold a DecodeN node into address by using - // a narrow oop directly and do implicit NULL check in address: + // a narrow oop directly and do implicit null check in address: // // [R12 + narrow_oop_reg<<3 + offset] // NullCheck narrow_oop_reg // // On other platforms (Sparc) we have to keep new DecodeN node and - // use it to do implicit NULL check in address: + // use it to do implicit null check in address: // // decode_not_null narrow_oop_reg, base_reg // [base_reg + offset] // NullCheck base_reg // // Pin the new DecodeN node to non-null path on these platform (Sparc) - // to keep the information to which NULL check the new DecodeN node + // to keep the information to which null check the new DecodeN node // corresponds to use it as value in implicit_null_check(). // new_in1->set_req(0, n->in(0)); @@ -3419,7 +3419,7 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f } assert(in1->is_DecodeNarrowPtr(), "sanity"); - Node* new_in2 = NULL; + Node* new_in2 = nullptr; if (in2->is_DecodeNarrowPtr()) { assert(in2->Opcode() == in1->Opcode(), "must be same node type"); new_in2 = in2->in(1); @@ -3434,24 +3434,24 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f new_in2 = ConNode::make(TypeNarrowOop::NULL_PTR); // // This transformation together with CastPP transformation above - // will generated code for implicit NULL checks for compressed oops. + // will generated code for implicit null checks for compressed oops. 
// // The original code after Optimize() // // LoadN memory, narrow_oop_reg // decode narrow_oop_reg, base_reg - // CmpP base_reg, NULL + // CmpP base_reg, nullptr // CastPP base_reg // NotNull // Load [base_reg + offset], val_reg // // after these transformations will be // // LoadN memory, narrow_oop_reg - // CmpN narrow_oop_reg, NULL + // CmpN narrow_oop_reg, nullptr // decode_not_null narrow_oop_reg, base_reg // Load [base_reg + offset], val_reg // - // and the uncommon path (== NULL) will use narrow_oop_reg directly + // and the uncommon path (== nullptr) will use narrow_oop_reg directly // since narrow oops can be used in debug info now (see the code in // final_graph_reshaping_walk()). // @@ -3475,7 +3475,7 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f new_in2 = ConNode::make(t->make_narrowklass()); } } - if (new_in2 != NULL) { + if (new_in2 != nullptr) { Node* cmpN = new CmpNNode(in1->in(1), new_in2); n->subsume_by(cmpN, this); if (in1->outcnt() == 0) { @@ -3493,7 +3493,7 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f assert(!n->in(1)->is_EncodeNarrowPtr(), "should be optimized out"); // DecodeN could be pinned when it can't be fold into // an address expression, see the code for Op_CastPP above. - assert(n->in(0) == NULL || (UseCompressedOops && !Matcher::narrow_oop_use_complex_address()), "no control"); + assert(n->in(0) == nullptr || (UseCompressedOops && !Matcher::narrow_oop_use_complex_address()), "no control"); break; case Op_EncodeP: @@ -3528,7 +3528,7 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f // then they will hang around and should just be replaced with // the original one. Merge them. 
Node* non_io_proj = proj->in(0)->as_Multi()->proj_out_or_null(proj->_con, false /*is_io_use*/); - if (non_io_proj != NULL) { + if (non_io_proj != nullptr) { proj->subsume_by(non_io_proj , this); } } @@ -3541,15 +3541,15 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f // The EncodeP optimization may create Phi with the same edges // for all paths. It is not handled well by Register Allocator. Node* unique_in = n->in(1); - assert(unique_in != NULL, ""); + assert(unique_in != nullptr, ""); uint cnt = n->req(); for (uint i = 2; i < cnt; i++) { Node* m = n->in(i); - assert(m != NULL, ""); + assert(m != nullptr, ""); if (unique_in != m) - unique_in = NULL; + unique_in = nullptr; } - if (unique_in != NULL) { + if (unique_in != nullptr) { n->subsume_by(unique_in, this); } } @@ -3709,13 +3709,13 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f Node* in2 = n->in(2); juint mask = (n->bottom_type() == TypeInt::INT) ? (BitsPerInt - 1) : (BitsPerLong - 1); const TypeInt* t = in2->find_int_type(); - if (t != NULL && t->is_con()) { + if (t != nullptr && t->is_con()) { juint shift = t->get_con(); if (shift > mask) { // Unsigned cmp n->set_req(2, ConNode::make(TypeInt::make(shift & mask))); } } else { - if (t == NULL || t->_lo < 0 || t->_hi > (int)mask) { + if (t == nullptr || t->_lo < 0 || t->_hi > (int)mask) { Node* shift = new AndINode(in2, ConNode::make(TypeInt::make(mask))); n->set_req(2, shift); } @@ -3769,7 +3769,7 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f for(;;) { // Loop over all nodes with identical inputs edges as m Node* k = m->find_similar(m->Opcode()); - if (k == NULL) { + if (k == nullptr) { break; } // Push their uses so we get a chance to remove node made @@ -3826,8 +3826,8 @@ void Compile::final_graph_reshaping_walk(Node_Stack& nstack, Node* root, Final_R // Place all non-visited non-null inputs onto stack Node* m = n->in(i); ++i; - if (m != NULL && 
!frc._visited.test_set(m->_idx)) { - if (m->is_SafePoint() && m->as_SafePoint()->jvms() != NULL) { + if (m != nullptr && !frc._visited.test_set(m->_idx)) { + if (m->is_SafePoint() && m->as_SafePoint()->jvms() != nullptr) { // compute worst case interpreter size in case of a deoptimization update_interpreter_frame_size(m->as_SafePoint()->jvms()->interpreter_frame_size()); @@ -3861,7 +3861,7 @@ void Compile::final_graph_reshaping_walk(Node_Stack& nstack, Node* root, Final_R while (sfpt.size() > 0) { n = sfpt.pop(); JVMState *jvms = n->as_SafePoint()->jvms(); - assert(jvms != NULL, "sanity"); + assert(jvms != nullptr, "sanity"); int start = jvms->debug_start(); int end = n->req(); bool is_uncommon = (n->is_CallStaticJava() && @@ -3932,7 +3932,7 @@ bool Compile::final_graph_reshaping() { // be freely moved to the least frequent code path by gcm. assert(OptimizeExpensiveOps || expensive_count() == 0, "optimization off but list non empty?"); for (int i = 0; i < expensive_count(); i++) { - _expensive_nodes.at(i)->set_req(0, NULL); + _expensive_nodes.at(i)->set_req(0, nullptr); } Final_Reshape_Counts frc; @@ -4025,7 +4025,7 @@ bool Compile::final_graph_reshaping() { if (m->outcnt() == 0 && m != top()) { for (uint j = 0; j < m->req(); j++) { Node* in = m->in(j); - if (in != NULL) { + if (in != nullptr) { dead_nodes.push(in); } } @@ -4063,7 +4063,7 @@ bool Compile::too_many_traps(ciMethod* method, // because of a transient condition during start-up in the interpreter. return false; } - ciMethod* m = Deoptimization::reason_is_speculate(reason) ? this->method() : NULL; + ciMethod* m = Deoptimization::reason_is_speculate(reason) ? this->method() : nullptr; if (md->has_trap_at(bci, m, reason) != 0) { // Assume PerBytecodeTrapLimit==0, for a more conservative heuristic. // Also, if there are multiple reasons, or if there is no per-BCI record, @@ -4086,7 +4086,7 @@ bool Compile::too_many_traps(Deoptimization::DeoptReason reason, // Too many traps globally. 
// Note that we use cumulative trap_count, not just md->trap_count. if (log()) { - int mcount = (logmd == NULL)? -1: (int)logmd->trap_count(reason); + int mcount = (logmd == nullptr)? -1: (int)logmd->trap_count(reason); log()->elem("observe trap='%s' count='0' mcount='%d' ccount='%d'", Deoptimization::trap_reason_name(reason), mcount, trap_count(reason)); @@ -4117,7 +4117,7 @@ bool Compile::too_many_recompiles(ciMethod* method, uint m_cutoff = (uint) PerMethodRecompilationCutoff / 2 + 1; // not zero Deoptimization::DeoptReason per_bc_reason = Deoptimization::reason_recorded_per_bytecode_if_any(reason); - ciMethod* m = Deoptimization::reason_is_speculate(reason) ? this->method() : NULL; + ciMethod* m = Deoptimization::reason_is_speculate(reason) ? this->method() : nullptr; if ((per_bc_reason == Deoptimization::Reason_none || md->has_trap_at(bci, m, reason) != 0) // The trap frequency measure we care about is the recompile count: @@ -4221,10 +4221,10 @@ void Compile::verify_bidirectional_edges(Unique_Node_List &visited) { uint length = n->len(); for (uint i = 0; i < length; i++) { Node* in = n->in(i); - if (in != NULL && !visited.member(in)) { + if (in != nullptr && !visited.member(in)) { nstack.push(in); // Put it on stack } - if (in != NULL && !in->is_top()) { + if (in != nullptr && !in->is_top()) { // Count instances of `next` int cnt = 0; for (uint idx = 0; idx < in->_outcnt; idx++) { @@ -4241,7 +4241,7 @@ void Compile::verify_bidirectional_edges(Unique_Node_List &visited) { } } assert(cnt == 0, "Mismatched edge count."); - } else if (in == NULL) { + } else if (in == nullptr) { assert(i == 0 || i >= n->req() || n->is_Region() || n->is_Phi() || n->is_ArrayCopy() || (n->is_Unlock() && i == (n->req() - 1)) || @@ -4300,10 +4300,10 @@ void Compile::verify_graph_edges(bool no_dead_code) { // behavior, the Compile's failure reason is quietly copied up to the ciEnv // by the logic in C2Compiler. 
void Compile::record_failure(const char* reason) { - if (log() != NULL) { + if (log() != nullptr) { log()->elem("failure reason='%s' phase='compile'", reason); } - if (_failure_reason == NULL) { + if (_failure_reason == nullptr) { // Record the first failure reason. _failure_reason = reason; } @@ -4311,7 +4311,7 @@ void Compile::record_failure(const char* reason) { if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) { C->print_method(PHASE_FAILURE, 1); } - _root = NULL; // flush the graph, too + _root = nullptr; // flush the graph, too } Compile::TracePhase::TracePhase(const char* name, elapsedTimer* accumulator) @@ -4322,10 +4322,10 @@ Compile::TracePhase::TracePhase(const char* name, elapsedTimer* accumulator) C = Compile::current(); _log = C->log(); } else { - C = NULL; - _log = NULL; + C = nullptr; + _log = nullptr; } - if (_log != NULL) { + if (_log != nullptr) { _log->begin_head("phase name='%s' nodes='%d' live='%d'", _phase_name, C->unique(), C->live_nodes()); _log->stamp(); _log->end_head(); @@ -4338,7 +4338,7 @@ Compile::TracePhase::~TracePhase() { if (_dolog) { _log = C->log(); } else { - _log = NULL; + _log = nullptr; } #ifdef ASSERT @@ -4352,7 +4352,7 @@ Compile::TracePhase::~TracePhase() { } #endif - if (_log != NULL) { + if (_log != nullptr) { _log->done("phase name='%s' nodes='%d' live='%d'", _phase_name, C->unique(), C->live_nodes()); } } @@ -4415,7 +4415,7 @@ Node* Compile::conv_I2X_index(PhaseGVN* phase, Node* idx, const TypeInt* sizetyp // number. (The prior range check has ensured this.) // This assertion is used by ConvI2LNode::Ideal. 
int index_max = max_jint - 1; // array size is max_jint, index is one less - if (sizetype != NULL) index_max = sizetype->_hi - 1; + if (sizetype != nullptr) index_max = sizetype->_hi - 1; const TypeInt* iidxtype = TypeInt::make(0, index_max, Type::WidenMax); idx = constrained_convI2L(phase, idx, iidxtype, ctrl); #endif @@ -4424,7 +4424,7 @@ Node* Compile::conv_I2X_index(PhaseGVN* phase, Node* idx, const TypeInt* sizetyp // Convert integer value to a narrowed long type dependent on ctrl (for example, a range check) Node* Compile::constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl, bool carry_dependency) { - if (ctrl != NULL) { + if (ctrl != nullptr) { // Express control dependency by a CastII node with a narrow type. value = new CastIINode(value, itype, carry_dependency ? ConstraintCastNode::StrongDependency : ConstraintCastNode::RegularDependency, true /* range check dependency */); // Make the CastII node dependent on the control input to prevent the narrowed ConvI2L @@ -4486,14 +4486,14 @@ void Compile::print_inlining_update(CallGenerator* cg) { if (print_inlining() || print_intrinsics()) { if (cg->is_late_inline()) { if (print_inlining_current()->cg() != cg && - (print_inlining_current()->cg() != NULL || + (print_inlining_current()->cg() != nullptr || print_inlining_current()->ss()->size() != 0)) { print_inlining_push(); } print_inlining_commit(); print_inlining_current()->set_cg(cg); } else { - if (print_inlining_current()->cg() != NULL) { + if (print_inlining_current()->cg() != nullptr) { print_inlining_push(); } print_inlining_commit(); @@ -4535,16 +4535,16 @@ void Compile::process_print_inlining() { if (print_inlining() || print_intrinsics()) { ResourceMark rm; stringStream ss; - assert(_print_inlining_list != NULL, "process_print_inlining should be called only once."); + assert(_print_inlining_list != nullptr, "process_print_inlining should be called only once."); for (int i = 0; i < _print_inlining_list->length(); i++) { 
PrintInliningBuffer* pib = _print_inlining_list->at(i); ss.print("%s", pib->ss()->freeze()); delete pib; - DEBUG_ONLY(_print_inlining_list->at_put(i, NULL)); + DEBUG_ONLY(_print_inlining_list->at_put(i, nullptr)); } // Reset _print_inlining_list, it only contains destructed objects. // It is on the arena, so it will be freed when the arena is reset. - _print_inlining_list = NULL; + _print_inlining_list = nullptr; // _print_inlining_stream won't be used anymore, either. print_inlining_reset(); size_t end = ss.size(); @@ -4555,17 +4555,17 @@ void Compile::process_print_inlining() { } void Compile::dump_print_inlining() { - if (_print_inlining_output != NULL) { + if (_print_inlining_output != nullptr) { tty->print_raw(_print_inlining_output); } } void Compile::log_late_inline(CallGenerator* cg) { - if (log() != NULL) { + if (log() != nullptr) { log()->head("late_inline method='%d' inline_id='" JLONG_FORMAT "'", log()->identify(cg->method()), cg->unique_id()); JVMState* p = cg->call_node()->jvms(); - while (p != NULL) { + while (p != nullptr) { log()->elem("jvms bci='%d' method='%d'", p->bci(), log()->identify(p->method())); p = p->caller(); } @@ -4575,13 +4575,13 @@ void Compile::log_late_inline(CallGenerator* cg) { void Compile::log_late_inline_failure(CallGenerator* cg, const char* msg) { log_late_inline(cg); - if (log() != NULL) { + if (log() != nullptr) { log()->inline_fail(msg); } } void Compile::log_inline_id(CallGenerator* cg) { - if (log() != NULL) { + if (log() != nullptr) { // The LogCompilation tool needs a unique way to identify late // inline call sites. This id must be unique for this call site in // this compilation. 
Try to have it unique across compilations as @@ -4596,7 +4596,7 @@ void Compile::log_inline_id(CallGenerator* cg) { } void Compile::log_inline_failure(const char* msg) { - if (C->log() != NULL) { + if (C->log() != nullptr) { C->log()->inline_fail(msg); } } @@ -4606,7 +4606,7 @@ void Compile::log_inline_failure(const char* msg) { // Don't change thread state and acquire any locks. void Compile::dump_inline_data(outputStream* out) { InlineTree* inl_tree = ilt(); - if (inl_tree != NULL) { + if (inl_tree != nullptr) { out->print(" inline %d", inl_tree->count()); inl_tree->dump_replay_data(out); } @@ -4616,7 +4616,7 @@ void Compile::dump_inline_data_reduced(outputStream* out) { assert(ReplayReduce, ""); InlineTree* inl_tree = ilt(); - if (inl_tree == NULL) { + if (inl_tree == nullptr) { return; } // Enable iterative replay file reduction @@ -4736,7 +4736,7 @@ void Compile::cleanup_expensive_nodes(PhaseIterGVN &igvn) { identical = 0; } else { Node* n = _expensive_nodes.at(i); - igvn.replace_input_of(n, 0, NULL); + igvn.replace_input_of(n, 0, nullptr); igvn.hash_insert(n); modified = true; } @@ -4745,7 +4745,7 @@ void Compile::cleanup_expensive_nodes(PhaseIterGVN &igvn) { _expensive_nodes.at_put(j++, _expensive_nodes.at(i)); } else if (_expensive_nodes.length() >= 1) { Node* n = _expensive_nodes.at(i); - igvn.replace_input_of(n, 0, NULL); + igvn.replace_input_of(n, 0, nullptr); igvn.hash_insert(n); modified = true; } @@ -4764,7 +4764,7 @@ void Compile::add_expensive_node(Node * n) { } else { // Clear control input and let IGVN optimize expensive nodes if // OptimizeExpensiveOps is off. 
- n->set_req(0, NULL); + n->set_req(0, nullptr); } } @@ -4912,7 +4912,7 @@ void Compile::remove_speculative_types(PhaseIterGVN &igvn) { for (uint next = 0; next < worklist.size(); ++next) { Node *n = worklist.at(next); const Type* t = igvn.type_or_null(n); - assert((t == NULL) || (t == t->remove_speculative()), "no more speculative types"); + assert((t == nullptr) || (t == t->remove_speculative()), "no more speculative types"); if (n->is_Type()) { t = n->as_Type()->type(); assert(t == t->remove_speculative(), "no more speculative types"); @@ -5074,7 +5074,7 @@ void Compile::print_method(CompilerPhaseType cpt, int level, Node* n) { // Only used from CompileWrapper void Compile::begin_method() { #ifndef PRODUCT - if (_method != NULL && should_print_igv(1)) { + if (_method != nullptr && should_print_igv(1)) { _igv_printer->begin_method(); } #endif @@ -5089,7 +5089,7 @@ void Compile::end_method() { } #ifndef PRODUCT - if (_method != NULL && should_print_igv(1)) { + if (_method != nullptr && should_print_igv(1)) { _igv_printer->end_method(); } #endif @@ -5122,8 +5122,8 @@ bool Compile::should_print_igv(int level) { } #ifndef PRODUCT -IdealGraphPrinter* Compile::_debug_file_printer = NULL; -IdealGraphPrinter* Compile::_debug_network_printer = NULL; +IdealGraphPrinter* Compile::_debug_file_printer = nullptr; +IdealGraphPrinter* Compile::_debug_network_printer = nullptr; // Called from debugger. Prints method to the default file with the default phase name. // This works regardless of any Ideal Graph Visualizer flags set or not. 
@@ -5175,7 +5175,7 @@ void igv_append(const char* phase_name) { void Compile::igv_print_method_to_file(const char* phase_name, bool append) { const char* file_name = "custom_debug.xml"; - if (_debug_file_printer == NULL) { + if (_debug_file_printer == nullptr) { _debug_file_printer = new IdealGraphPrinter(C, file_name, append); } else { _debug_file_printer->update_compiled_method(C->method()); @@ -5185,7 +5185,7 @@ void Compile::igv_print_method_to_file(const char* phase_name, bool append) { } void Compile::igv_print_method_to_network(const char* phase_name) { - if (_debug_network_printer == NULL) { + if (_debug_network_printer == nullptr) { _debug_network_printer = new IdealGraphPrinter(C); } else { _debug_network_printer->update_compiled_method(C->method()); @@ -5196,10 +5196,10 @@ void Compile::igv_print_method_to_network(const char* phase_name) { #endif Node* Compile::narrow_value(BasicType bt, Node* value, const Type* type, PhaseGVN* phase, bool transform_res) { - if (type != NULL && phase->type(value)->higher_equal(type)) { + if (type != nullptr && phase->type(value)->higher_equal(type)) { return value; } - Node* result = NULL; + Node* result = nullptr; if (bt == T_BYTE) { result = phase->transform(new LShiftINode(value, phase->intcon(24))); result = new RShiftINode(result, phase->intcon(24)); diff --git a/src/hotspot/share/opto/compile.hpp b/src/hotspot/share/opto/compile.hpp index 665d5861ee0..98690431e14 100644 --- a/src/hotspot/share/opto/compile.hpp +++ b/src/hotspot/share/opto/compile.hpp @@ -146,7 +146,7 @@ class CloneMap { void* _2p(node_idx_t key) const { return (void*)(intptr_t)key; } // 2 conversion functions to make gcc happy node_idx_t _2_node_idx_t(const void* k) const { return (node_idx_t)(intptr_t)k; } Dict* dict() const { return _dict; } - void insert(node_idx_t key, uint64_t val) { assert(_dict->operator[](_2p(key)) == NULL, "key existed"); _dict->Insert(_2p(key), (void*)val); } + void insert(node_idx_t key, uint64_t val) { 
assert(_dict->operator[](_2p(key)) == nullptr, "key existed"); _dict->Insert(_2p(key), (void*)val); } void insert(node_idx_t key, NodeCloneInfo& ci) { insert(key, ci.get()); } void remove(node_idx_t key) { _dict->Delete(_2p(key)); } uint64_t value(node_idx_t key) const { return (uint64_t)_dict->operator[](_2p(key)); } @@ -216,7 +216,7 @@ class Compile : public Phase { AliasIdxRaw = 3 // hard-wired index for TypeRawPtr::BOTTOM }; - // Variant of TraceTime(NULL, &_t_accumulator, CITime); + // Variant of TraceTime(nullptr, &_t_accumulator, CITime); // Integrated with logging. If logging is turned on, and CITimeVerbose is true, // then brackets are put into the log, with time stamps and node counts. // (The time collection itself is always conditionalized on CITime.) @@ -265,7 +265,7 @@ class Compile : public Phase { } } void set_element(const Type* e) { - assert(_element == NULL, ""); + assert(_element == nullptr, ""); _element = e; } @@ -291,9 +291,9 @@ class Compile : public Phase { int _entry_bci; // entry bci for osr methods. const TypeFunc* _tf; // My kind of signature InlineTree* _ilt; // Ditto (temporary). - address _stub_function; // VM entry for stub being compiled, or NULL - const char* _stub_name; // Name of stub or adapter being compiled, or NULL - address _stub_entry_point; // Compile code entry for generated stub, or NULL + address _stub_function; // VM entry for stub being compiled, or null + const char* _stub_name; // Name of stub or adapter being compiled, or null + address _stub_entry_point; // Compile code entry for generated stub, or null // Control of this compilation. 
int _max_inline_size; // Max inline size for this compilation @@ -376,7 +376,7 @@ class Compile : public Phase { debug_only(static int _debug_idx;) // Monotonic counter (not reset), use -XX:BreakAtNode= Arena _node_arena; // Arena for new-space Nodes Arena _old_arena; // Arena for old-space Nodes, lifetime during xform - RootNode* _root; // Unique root of compilation, or NULL after bail-out. + RootNode* _root; // Unique root of compilation, or null after bail-out. Node* _top; // Unique top node. (Reset by various phases.) Node* _immutable_memory; // Initial memory state @@ -437,7 +437,7 @@ class Compile : public Phase { public: PrintInliningBuffer() - : _cg(NULL), _ss(default_stream_buffer_size) {} + : _cg(nullptr), _ss(default_stream_buffer_size) {} stringStream* ss() { return &_ss; } CallGenerator* cg() { return _cg; } @@ -484,7 +484,7 @@ class Compile : public Phase { void print_inlining_assert_ready(); void print_inlining_reset(); - void print_inlining(ciMethod* method, int inline_level, int bci, const char* msg = NULL) { + void print_inlining(ciMethod* method, int inline_level, int bci, const char* msg = nullptr) { stringStream ss; CompileTask::print_inlining_inner(&ss, method, inline_level, bci, msg); print_inlining_stream()->print("%s", ss.freeze()); @@ -554,9 +554,9 @@ class Compile : public Phase { ciMethod* method() const { return _method; } int entry_bci() const { return _entry_bci; } bool is_osr_compilation() const { return _entry_bci != InvocationEntryBci; } - bool is_method_compilation() const { return (_method != NULL && !_method->flags().is_native()); } - const TypeFunc* tf() const { assert(_tf!=NULL, ""); return _tf; } - void init_tf(const TypeFunc* tf) { assert(_tf==NULL, ""); _tf = tf; } + bool is_method_compilation() const { return (_method != nullptr && !_method->flags().is_native()); } + const TypeFunc* tf() const { assert(_tf!=nullptr, ""); return _tf; } + void init_tf(const TypeFunc* tf) { assert(_tf==nullptr, ""); _tf = tf; } InlineTree* 
ilt() const { return _ilt; } address stub_function() const { return _stub_function; } const char* stub_name() const { return _stub_name; } @@ -633,7 +633,7 @@ class Compile : public Phase { // check the CompilerOracle for special behaviours for this compile bool method_has_option(enum CompileCommand option) { - return method() != NULL && method()->has_option(option); + return method() != nullptr && method()->has_option(option); } #ifndef PRODUCT @@ -754,11 +754,11 @@ class Compile : public Phase { Arena* comp_arena() { return &_comp_arena; } ciEnv* env() const { return _env; } CompileLog* log() const { return _log; } - bool failing() const { return _env->failing() || _failure_reason != NULL; } + bool failing() const { return _env->failing() || _failure_reason != nullptr; } const char* failure_reason() const { return (_env->failing()) ? _env->failure_reason() : _failure_reason; } bool failure_reason_is(const char* r) const { - return (r == _failure_reason) || (r != NULL && _failure_reason != NULL && strcmp(r, _failure_reason) == 0); + return (r == _failure_reason) || (r != nullptr && _failure_reason != nullptr && strcmp(r, _failure_reason) == 0); } void record_failure(const char* reason); @@ -820,7 +820,7 @@ class Compile : public Phase { DEBUG_ONLY( Unique_Node_List* modified_nodes() const { return _modified_nodes; } ) MachConstantBaseNode* mach_constant_base_node(); - bool has_mach_constant_base_node() const { return _mach_constant_base_node != NULL; } + bool has_mach_constant_base_node() const { return _mach_constant_base_node != nullptr; } // Generated by adlc, true if CallNode requires MachConstantBase. bool needs_deep_clone_jvms(); @@ -864,16 +864,16 @@ class Compile : public Phase { void set_type_last_size(size_t sz) { _type_last_size = sz; } const TypeFunc* last_tf(ciMethod* m) { - return (m == _last_tf_m) ? _last_tf : NULL; + return (m == _last_tf_m) ? 
_last_tf : nullptr; } void set_last_tf(ciMethod* m, const TypeFunc* tf) { - assert(m != NULL || tf == NULL, ""); + assert(m != nullptr || tf == nullptr, ""); _last_tf_m = m; _last_tf = tf; } AliasType* alias_type(int idx) { assert(idx < num_alias_types(), "oob"); return _alias_types[idx]; } - AliasType* alias_type(const TypePtr* adr_type, ciField* field = NULL) { return find_alias_type(adr_type, false, field); } + AliasType* alias_type(const TypePtr* adr_type, ciField* field = nullptr) { return find_alias_type(adr_type, false, field); } bool have_alias_type(const TypePtr* adr_type); AliasType* alias_type(ciField* field); @@ -889,7 +889,7 @@ class Compile : public Phase { // Decide how to build a call. // The profile factor is a discount to apply to this site's interp. profile. CallGenerator* call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch, - JVMState* jvms, bool allow_inline, float profile_factor, ciKlass* speculative_receiver_type = NULL, + JVMState* jvms, bool allow_inline, float profile_factor, ciKlass* speculative_receiver_type = nullptr, bool allow_intrinsics = true); bool should_delay_inlining(ciMethod* call_method, JVMState* jvms) { return should_delay_string_inlining(call_method, jvms) || @@ -919,7 +919,7 @@ class Compile : public Phase { // PerMethodTrapLimit was exceeded for all inlined methods seen so far. bool too_many_traps(Deoptimization::DeoptReason reason, // Privately used parameter for logging: - ciMethodData* logmd = NULL); + ciMethodData* logmd = nullptr); // Report if there were too many recompiles at a method and bci. bool too_many_recompiles(ciMethod* method, int bci, Deoptimization::DeoptReason reason); // Report if there were too many traps or recompiles at a method and bci. @@ -1061,7 +1061,7 @@ class Compile : public Phase { }; // Are we compiling a method? - bool has_method() { return method() != NULL; } + bool has_method() { return method() != nullptr; } // Maybe print some information about this compile. 
void print_compile_messages(); @@ -1180,7 +1180,7 @@ class Compile : public Phase { static Node* conv_I2X_index(PhaseGVN* phase, Node* offset, const TypeInt* sizetype, // Optional control dependency (for example, on range check) - Node* ctrl = NULL); + Node* ctrl = nullptr); // Convert integer value to a narrowed long type dependent on ctrl (for example, a range check) static Node* constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl, bool carry_dependency = false); diff --git a/src/hotspot/share/opto/connode.cpp b/src/hotspot/share/opto/connode.cpp index 1bf7aaaf796..00049c6f19e 100644 --- a/src/hotspot/share/opto/connode.cpp +++ b/src/hotspot/share/opto/connode.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -62,6 +62,6 @@ ConNode *ConNode::make(const Type *t) { // or else TypeOopPtr::NULL_PTR. 
Then set Type::_basic_type[AnyPtr] = T_ILLEGAL default: ShouldNotReachHere(); - return NULL; + return nullptr; } } diff --git a/src/hotspot/share/opto/connode.hpp b/src/hotspot/share/opto/connode.hpp index 317ac18ff84..618326ec527 100644 --- a/src/hotspot/share/opto/connode.hpp +++ b/src/hotspot/share/opto/connode.hpp @@ -70,15 +70,16 @@ public: // Simple pointer constants class ConPNode : public ConNode { public: - ConPNode( const TypePtr *t ) : ConNode(t) {} + ConPNode(const TypePtr *t) : ConNode(t) {} virtual int Opcode() const; // Factory methods: static ConPNode* make(address con) { - if (con == NULL) - return new ConPNode( TypePtr::NULL_PTR ) ; - else - return new ConPNode( TypeRawPtr::make(con) ); + if (con == nullptr) { + return new ConPNode(TypePtr::NULL_PTR); + } else { + return new ConPNode(TypeRawPtr::make(con)); + } } }; diff --git a/src/hotspot/share/opto/constantTable.cpp b/src/hotspot/share/opto/constantTable.cpp index d5b20c3334a..354fe27d30b 100644 --- a/src/hotspot/share/opto/constantTable.cpp +++ b/src/hotspot/share/opto/constantTable.cpp @@ -148,7 +148,7 @@ bool ConstantTable::emit(CodeBuffer& cb) const { MacroAssembler _masm(&cb); for (int i = 0; i < _constants.length(); i++) { Constant con = _constants.at(i); - address constant_addr = NULL; + address constant_addr = nullptr; if (con.is_array()) { constant_addr = _masm.array_constant(con.type(), con.get_array(), con.alignment()); } else { @@ -176,17 +176,17 @@ bool ConstantTable::emit(CodeBuffer& cb) const { // filled in later in fill_jump_table. 
address dummy = (address) n; constant_addr = _masm.address_constant(dummy); - if (constant_addr == NULL) { + if (constant_addr == nullptr) { return false; } assert((constant_addr - _masm.code()->consts()->start()) == con.offset(), "must be: %d == %d", (int)(constant_addr - _masm.code()->consts()->start()), (int)(con.offset())); // Expand jump-table - address last_addr = NULL; + address last_addr = nullptr; for (uint j = 1; j < n->outcnt(); j++) { last_addr = _masm.address_constant(dummy + j); - if (last_addr == NULL) { + if (last_addr == nullptr) { return false; } } @@ -211,7 +211,7 @@ bool ConstantTable::emit(CodeBuffer& cb) const { } } - if (constant_addr == NULL) { + if (constant_addr == nullptr) { return false; } assert((constant_addr - _masm.code()->consts()->start()) == con.offset(), diff --git a/src/hotspot/share/opto/convertnode.cpp b/src/hotspot/share/opto/convertnode.cpp index 38c539ee3ab..b276a4d1611 100644 --- a/src/hotspot/share/opto/convertnode.cpp +++ b/src/hotspot/share/opto/convertnode.cpp @@ -49,7 +49,7 @@ const Type* Conv2BNode::Value(PhaseGVN* phase) const { if( t == TypeInt::ZERO ) return TypeInt::ZERO; if( t == TypePtr::NULL_PTR ) return TypeInt::ZERO; const TypePtr *tp = t->isa_ptr(); - if( tp != NULL ) { + if(tp != nullptr) { if( tp->ptr() == TypePtr::AnyNull ) return Type::TOP; if( tp->ptr() == TypePtr::Constant) return TypeInt::ONE; if (tp->ptr() == TypePtr::NotNull) return TypeInt::ONE; @@ -85,7 +85,7 @@ Node *ConvD2FNode::Ideal(PhaseGVN *phase, bool can_reshape) { } } } - return NULL; + return nullptr; } //------------------------------Identity--------------------------------------- @@ -112,7 +112,7 @@ Node *ConvD2INode::Ideal(PhaseGVN *phase, bool can_reshape) { set_req(1, in(1)->in(1)); return this; } - return NULL; + return nullptr; } //------------------------------Identity--------------------------------------- @@ -148,7 +148,7 @@ Node *ConvD2LNode::Ideal(PhaseGVN *phase, bool can_reshape) { set_req(1, in(1)->in(1)); return this; } 
- return NULL; + return nullptr; } //============================================================================= @@ -199,7 +199,7 @@ Node *ConvF2INode::Ideal(PhaseGVN *phase, bool can_reshape) { set_req(1, in(1)->in(1)); return this; } - return NULL; + return nullptr; } //============================================================================= @@ -228,7 +228,7 @@ Node *ConvF2LNode::Ideal(PhaseGVN *phase, bool can_reshape) { set_req(1, in(1)->in(1)); return this; } - return NULL; + return nullptr; } //============================================================================= @@ -293,7 +293,7 @@ const Type* ConvI2LNode::Value(PhaseGVN* phase) const { // Do NOT remove this node's type assertion until no more loop ops can happen. if (phase->C->post_loop_opts_phase()) { const TypeInt* in_type = phase->type(in(1))->isa_int(); - if (in_type != NULL && + if (in_type != nullptr && (in_type->_lo != this_type->_lo || in_type->_hi != this_type->_hi)) { // Although this WORSENS the type, it increases GVN opportunities, @@ -571,7 +571,7 @@ static Node* find_or_make_convI2L(PhaseIterGVN* igvn, Node* parent, const TypeLong* type) { Node* n = new ConvI2LNode(parent, type); Node* existing = igvn->hash_find_insert(n); - if (existing != NULL) { + if (existing != nullptr) { n->destruct(igvn); return existing; } @@ -636,14 +636,14 @@ Node *ConvI2LNode::Ideal(PhaseGVN *phase, bool can_reshape) { // Addressing arithmetic will not absorb it as part of a 64-bit AddL. PhaseIterGVN* igvn = phase->is_IterGVN(); Node* z = in(1); - const TypeInteger* rx = NULL; - const TypeInteger* ry = NULL; + const TypeInteger* rx = nullptr; + const TypeInteger* ry = nullptr; if (Compile::push_thru_add(phase, z, this_type, rx, ry, T_INT, T_LONG)) { - if (igvn == NULL) { + if (igvn == nullptr) { // Postpone this optimization to iterative GVN, where we can handle deep // AddI chains without an exponential number of recursive Ideal() calls. 
phase->record_for_igvn(this); - return NULL; + return nullptr; } int op = z->Opcode(); Node* x = z->in(1); @@ -659,7 +659,7 @@ Node *ConvI2LNode::Ideal(PhaseGVN *phase, bool can_reshape) { } #endif //_LP64 - return NULL; + return nullptr; } //============================================================================= @@ -724,13 +724,13 @@ Node *ConvL2INode::Ideal(PhaseGVN *phase, bool can_reshape) { if( andl_op == Op_AddL ) { // Don't do this for nodes which have more than one user since // we'll end up computing the long add anyway. - if (andl->outcnt() > 1) return NULL; + if (andl->outcnt() > 1) return nullptr; Node* x = andl->in(1); Node* y = andl->in(2); assert( x != andl && y != andl, "dead loop in ConvL2INode::Ideal" ); - if (phase->type(x) == Type::TOP) return NULL; - if (phase->type(y) == Type::TOP) return NULL; + if (phase->type(x) == Type::TOP) return nullptr; + if (phase->type(y) == Type::TOP) return nullptr; Node *add1 = phase->transform(new ConvL2INode(x)); Node *add2 = phase->transform(new ConvL2INode(y)); return new AddINode(add1,add2); @@ -739,7 +739,7 @@ Node *ConvL2INode::Ideal(PhaseGVN *phase, bool can_reshape) { // Disable optimization: LoadL->ConvL2I ==> LoadI. // It causes problems (sizes of Load and Store nodes do not match) // in objects initialization code and Escape Analysis. - return NULL; + return nullptr; } diff --git a/src/hotspot/share/opto/divnode.cpp b/src/hotspot/share/opto/divnode.cpp index c76721e5466..9c6c6ed25de 100644 --- a/src/hotspot/share/opto/divnode.cpp +++ b/src/hotspot/share/opto/divnode.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -89,7 +89,7 @@ static bool magic_int_divide_constants(jint d, jint &M, jint &s) { //--------------------------transform_int_divide------------------------------- // Convert a division by constant divisor into an alternate Ideal graph. -// Return NULL if no transformation occurs. +// Return null if no transformation occurs. static Node *transform_int_divide( PhaseGVN *phase, Node *dividend, jint divisor ) { // Check for invalid divisors @@ -101,7 +101,7 @@ static Node *transform_int_divide( PhaseGVN *phase, Node *dividend, jint divisor const int N = 32; // Result - Node *q = NULL; + Node *q = nullptr; if (d == 1) { // division by +/- 1 @@ -334,7 +334,7 @@ static Node* long_by_long_mulhi(PhaseGVN* phase, Node* dividend, jlong magic_con //--------------------------transform_long_divide------------------------------ // Convert a division by constant divisor into an alternate Ideal graph. -// Return NULL if no transformation occurs. +// Return null if no transformation occurs. static Node *transform_long_divide( PhaseGVN *phase, Node *dividend, jlong divisor ) { // Check for invalid divisors assert( divisor != 0L && divisor != min_jlong, @@ -345,7 +345,7 @@ static Node *transform_long_divide( PhaseGVN *phase, Node *dividend, jlong divis const int N = 64; // Result - Node *q = NULL; + Node *q = nullptr; if (d == 1) { // division by +/- 1 @@ -460,29 +460,29 @@ Node* DivINode::Identity(PhaseGVN* phase) { Node *DivINode::Ideal(PhaseGVN *phase, bool can_reshape) { if (in(0) && remove_dead_region(phase, can_reshape)) return this; // Don't bother trying to transform a dead node - if( in(0) && in(0)->is_top() ) return NULL; + if( in(0) && in(0)->is_top() ) return nullptr; const Type *t = phase->type( in(2) ); - if( t == TypeInt::ONE ) // Identity? - return NULL; // Skip it + if( t == TypeInt::ONE ) // Identity? 
+ return nullptr; // Skip it const TypeInt *ti = t->isa_int(); - if( !ti ) return NULL; + if( !ti ) return nullptr; // Check for useless control input // Check for excluding div-zero case if (in(0) && (ti->_hi < 0 || ti->_lo > 0)) { - set_req(0, NULL); // Yank control input + set_req(0, nullptr); // Yank control input return this; } - if( !ti->is_con() ) return NULL; + if( !ti->is_con() ) return nullptr; jint i = ti->get_con(); // Get divisor - if (i == 0) return NULL; // Dividing by zero constant does not idealize + if (i == 0) return nullptr; // Dividing by zero constant does not idealize // Dividing by MININT does not optimize as a power-of-2 shift. - if( i == min_jint ) return NULL; + if( i == min_jint ) return nullptr; return transform_int_divide( phase, in(1), i ); } @@ -566,29 +566,29 @@ Node* DivLNode::Identity(PhaseGVN* phase) { Node *DivLNode::Ideal( PhaseGVN *phase, bool can_reshape) { if (in(0) && remove_dead_region(phase, can_reshape)) return this; // Don't bother trying to transform a dead node - if( in(0) && in(0)->is_top() ) return NULL; + if( in(0) && in(0)->is_top() ) return nullptr; const Type *t = phase->type( in(2) ); if( t == TypeLong::ONE ) // Identity? - return NULL; // Skip it + return nullptr; // Skip it const TypeLong *tl = t->isa_long(); - if( !tl ) return NULL; + if( !tl ) return nullptr; // Check for useless control input // Check for excluding div-zero case if (in(0) && (tl->_hi < 0 || tl->_lo > 0)) { - set_req(0, NULL); // Yank control input + set_req(0, nullptr); // Yank control input return this; } - if( !tl->is_con() ) return NULL; + if( !tl->is_con() ) return nullptr; jlong l = tl->get_con(); // Get divisor - if (l == 0) return NULL; // Dividing by zero constant does not idealize + if (l == 0) return nullptr; // Dividing by zero constant does not idealize // Dividing by MINLONG does not optimize as a power-of-2 shift. 
- if( l == min_jlong ) return NULL; + if( l == min_jlong ) return nullptr; return transform_long_divide( phase, in(1), l ); } @@ -717,28 +717,28 @@ Node* DivFNode::Identity(PhaseGVN* phase) { Node *DivFNode::Ideal(PhaseGVN *phase, bool can_reshape) { if (in(0) && remove_dead_region(phase, can_reshape)) return this; // Don't bother trying to transform a dead node - if( in(0) && in(0)->is_top() ) return NULL; + if( in(0) && in(0)->is_top() ) return nullptr; const Type *t2 = phase->type( in(2) ); if( t2 == TypeF::ONE ) // Identity? - return NULL; // Skip it + return nullptr; // Skip it const TypeF *tf = t2->isa_float_constant(); - if( !tf ) return NULL; - if( tf->base() != Type::FloatCon ) return NULL; + if( !tf ) return nullptr; + if( tf->base() != Type::FloatCon ) return nullptr; // Check for out of range values - if( tf->is_nan() || !tf->is_finite() ) return NULL; + if( tf->is_nan() || !tf->is_finite() ) return nullptr; // Get the value float f = tf->getf(); int exp; // Only for special case of dividing by a power of 2 - if( frexp((double)f, &exp) != 0.5 ) return NULL; + if( frexp((double)f, &exp) != 0.5 ) return nullptr; // Limit the range of acceptable exponents - if( exp < -126 || exp > 126 ) return NULL; + if( exp < -126 || exp > 126 ) return nullptr; // Compute the reciprocal float reciprocal = ((float)1.0) / f; @@ -809,28 +809,28 @@ Node* DivDNode::Identity(PhaseGVN* phase) { Node *DivDNode::Ideal(PhaseGVN *phase, bool can_reshape) { if (in(0) && remove_dead_region(phase, can_reshape)) return this; // Don't bother trying to transform a dead node - if( in(0) && in(0)->is_top() ) return NULL; + if( in(0) && in(0)->is_top() ) return nullptr; const Type *t2 = phase->type( in(2) ); if( t2 == TypeD::ONE ) // Identity? 
- return NULL; // Skip it + return nullptr; // Skip it const TypeD *td = t2->isa_double_constant(); - if( !td ) return NULL; - if( td->base() != Type::DoubleCon ) return NULL; + if( !td ) return nullptr; + if( td->base() != Type::DoubleCon ) return nullptr; // Check for out of range values - if( td->is_nan() || !td->is_finite() ) return NULL; + if( td->is_nan() || !td->is_finite() ) return nullptr; // Get the value double d = td->getd(); int exp; // Only for special case of dividing by a power of 2 - if( frexp(d, &exp) != 0.5 ) return NULL; + if( frexp(d, &exp) != 0.5 ) return nullptr; // Limit the range of acceptable exponents - if( exp < -1021 || exp > 1022 ) return NULL; + if( exp < -1021 || exp > 1022 ) return nullptr; // Compute the reciprocal double reciprocal = 1.0 / d; @@ -876,7 +876,7 @@ const Type* UDivINode::Value(PhaseGVN* phase) const { Node *UDivINode::Ideal(PhaseGVN *phase, bool can_reshape) { // Check for dead control input if (in(0) && remove_dead_region(phase, can_reshape)) return this; - return NULL; + return nullptr; } @@ -915,7 +915,7 @@ const Type* UDivLNode::Value(PhaseGVN* phase) const { Node *UDivLNode::Ideal(PhaseGVN *phase, bool can_reshape) { // Check for dead control input if (in(0) && remove_dead_region(phase, can_reshape)) return this; - return NULL; + return nullptr; } @@ -925,22 +925,22 @@ Node *ModINode::Ideal(PhaseGVN *phase, bool can_reshape) { // Check for dead control input if( in(0) && remove_dead_region(phase, can_reshape) ) return this; // Don't bother trying to transform a dead node - if( in(0) && in(0)->is_top() ) return NULL; + if( in(0) && in(0)->is_top() ) return nullptr; // Get the modulus const Type *t = phase->type( in(2) ); - if( t == Type::TOP ) return NULL; + if( t == Type::TOP ) return nullptr; const TypeInt *ti = t->is_int(); // Check for useless control input // Check for excluding mod-zero case if (in(0) && (ti->_hi < 0 || ti->_lo > 0)) { - set_req(0, NULL); // Yank control input + set_req(0, nullptr); // Yank 
control input return this; } // See if we are MOD'ing by 2^k or 2^k-1. - if( !ti->is_con() ) return NULL; + if( !ti->is_con() ) return nullptr; jint con = ti->get_con(); Node *hook = new Node(1); @@ -993,7 +993,7 @@ Node *ModINode::Ideal(PhaseGVN *phase, bool can_reshape) { // into a long multiply/int multiply/subtract case // Cannot handle mod 0, and min_jint isn't handled by the transform - if( con == 0 || con == min_jint ) return NULL; + if( con == 0 || con == min_jint ) return nullptr; // Get the absolute value of the constant; at this point, we can use this jint pos_con = (con >= 0) ? con : -con; @@ -1020,11 +1020,11 @@ Node *ModINode::Ideal(PhaseGVN *phase, bool can_reshape) { // Divide using the transform from DivI to MulL Node *result = transform_int_divide( phase, in(1), pos_con ); - if (result != NULL) { + if (result != nullptr) { Node *divide = phase->transform(result); // Re-multiply, using a shift if this is a power of two - Node *mult = NULL; + Node *mult = nullptr; if( log2_con >= 0 ) mult = phase->transform( new LShiftINode( divide, phase->intcon( log2_con ) ) ); @@ -1088,7 +1088,7 @@ const Type* ModINode::Value(PhaseGVN* phase) const { Node *UModINode::Ideal(PhaseGVN *phase, bool can_reshape) { // Check for dead control input if( in(0) && remove_dead_region(phase, can_reshape) ) return this; - return NULL; + return nullptr; } //============================================================================= @@ -1097,22 +1097,22 @@ Node *ModLNode::Ideal(PhaseGVN *phase, bool can_reshape) { // Check for dead control input if( in(0) && remove_dead_region(phase, can_reshape) ) return this; // Don't bother trying to transform a dead node - if( in(0) && in(0)->is_top() ) return NULL; + if( in(0) && in(0)->is_top() ) return nullptr; // Get the modulus const Type *t = phase->type( in(2) ); - if( t == Type::TOP ) return NULL; + if( t == Type::TOP ) return nullptr; const TypeLong *tl = t->is_long(); // Check for useless control input // Check for excluding 
mod-zero case if (in(0) && (tl->_hi < 0 || tl->_lo > 0)) { - set_req(0, NULL); // Yank control input + set_req(0, nullptr); // Yank control input return this; } // See if we are MOD'ing by 2^k or 2^k-1. - if( !tl->is_con() ) return NULL; + if( !tl->is_con() ) return nullptr; jlong con = tl->get_con(); Node *hook = new Node(1); @@ -1167,7 +1167,7 @@ Node *ModLNode::Ideal(PhaseGVN *phase, bool can_reshape) { // into a long multiply/int multiply/subtract case // Cannot handle mod 0, and min_jlong isn't handled by the transform - if( con == 0 || con == min_jlong ) return NULL; + if( con == 0 || con == min_jlong ) return nullptr; // Get the absolute value of the constant; at this point, we can use this jlong pos_con = (con >= 0) ? con : -con; @@ -1194,11 +1194,11 @@ Node *ModLNode::Ideal(PhaseGVN *phase, bool can_reshape) { // Divide using the transform from DivL to MulL Node *result = transform_long_divide( phase, in(1), pos_con ); - if (result != NULL) { + if (result != nullptr) { Node *divide = phase->transform(result); // Re-multiply, using a shift if this is a power of two - Node *mult = NULL; + Node *mult = nullptr; if( log2_con >= 0 ) mult = phase->transform( new LShiftLNode( divide, phase->intcon( log2_con ) ) ); @@ -1306,7 +1306,7 @@ const Type* ModFNode::Value(PhaseGVN* phase) const { Node *UModLNode::Ideal(PhaseGVN *phase, bool can_reshape) { // Check for dead control input if( in(0) && remove_dead_region(phase, can_reshape) ) return this; - return NULL; + return nullptr; } diff --git a/src/hotspot/share/opto/divnode.hpp b/src/hotspot/share/opto/divnode.hpp index d993a2d0bb5..12368642fb2 100644 --- a/src/hotspot/share/opto/divnode.hpp +++ b/src/hotspot/share/opto/divnode.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -196,7 +196,7 @@ public: }; virtual int Opcode() const; virtual Node* Identity(PhaseGVN* phase) { return this; } - virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) { return NULL; } + virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) { return nullptr; } virtual const Type* Value(PhaseGVN* phase) const { return bottom_type(); } virtual uint hash() const { return Node::hash(); } virtual bool is_CFG() const { return false; } diff --git a/src/hotspot/share/opto/doCall.cpp b/src/hotspot/share/opto/doCall.cpp index 5ad9765b965..70ad7d24e39 100644 --- a/src/hotspot/share/opto/doCall.cpp +++ b/src/hotspot/share/opto/doCall.cpp @@ -71,7 +71,7 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool JVMState* jvms, bool allow_inline, float prof_factor, ciKlass* speculative_receiver_type, bool allow_intrinsics) { - assert(callee != NULL, "failed method resolution"); + assert(callee != nullptr, "failed method resolution"); ciMethod* caller = jvms->method(); int bci = jvms->bci(); @@ -103,7 +103,7 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool } CompileLog* log = this->log(); - if (log != NULL) { + if (log != nullptr) { int rid = (receiver_count >= 0)? log->identify(profile.receiver(0)): -1; int r2id = (rid != -1 && profile.has_receiver(1))? log->identify(profile.receiver(1)):-1; log->begin_elem("call method='%d' count='%d' prof_factor='%f'", @@ -125,15 +125,15 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool // Special case the handling of certain common, profitable library // methods. If these methods are replaced with specialized code, // then we return it as the inlined version of the call. 
- CallGenerator* cg_intrinsic = NULL; + CallGenerator* cg_intrinsic = nullptr; if (allow_inline && allow_intrinsics) { CallGenerator* cg = find_intrinsic(callee, call_does_dispatch); - if (cg != NULL) { + if (cg != nullptr) { if (cg->is_predicated()) { // Code without intrinsic but, hopefully, inlined. CallGenerator* inline_cg = this->call_generator(callee, vtable_index, call_does_dispatch, jvms, allow_inline, prof_factor, speculative_receiver_type, false); - if (inline_cg != NULL) { + if (inline_cg != nullptr) { cg = CallGenerator::for_predicated_intrinsic(cg, inline_cg); } } @@ -143,7 +143,7 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool // We will retry the intrinsic if nothing had claimed it afterwards. if (cg->does_virtual_dispatch()) { cg_intrinsic = cg; - cg = NULL; + cg = nullptr; } else if (IncrementalInline && should_delay_vector_inlining(callee, jvms)) { return CallGenerator::for_late_inline(callee, cg); } else { @@ -181,13 +181,13 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool // sometimes has a broader type. Similar scenario is possible with // default methods when type system loses information about implemented // interfaces. - if (cg != NULL && is_virtual_or_interface && !callee->is_static()) { + if (cg != nullptr && is_virtual_or_interface && !callee->is_static()) { CallGenerator* trap_cg = CallGenerator::for_uncommon_trap(callee, Deoptimization::Reason_receiver_constraint, Deoptimization::Action_none); cg = CallGenerator::for_guarded_call(callee->holder(), trap_cg, cg); } - if (cg != NULL) { + if (cg != nullptr) { // Delay the inlining of this method to give us the // opportunity to perform some high level optimizations // first. @@ -210,10 +210,10 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool if (call_does_dispatch && site_count > 0 && UseTypeProfile) { // The major receiver's count >= TypeProfileMajorReceiverPercent of site_count. 
bool have_major_receiver = profile.has_receiver(0) && (100.*profile.receiver_prob(0) >= (float)TypeProfileMajorReceiverPercent); - ciMethod* receiver_method = NULL; + ciMethod* receiver_method = nullptr; int morphism = profile.morphism(); - if (speculative_receiver_type != NULL) { + if (speculative_receiver_type != nullptr) { if (!too_many_traps_or_recompiles(caller, bci, Deoptimization::Reason_speculate_class_check)) { // We have a speculative type, we should be able to resolve // the call. We do that before looking at the profiling at @@ -221,18 +221,18 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool // a speculative type should help us avoid. receiver_method = callee->resolve_invoke(jvms->method()->holder(), speculative_receiver_type); - if (receiver_method == NULL) { - speculative_receiver_type = NULL; + if (receiver_method == nullptr) { + speculative_receiver_type = nullptr; } else { morphism = 1; } } else { // speculation failed before. Use profiling at the call // (could allow bimorphic inlining for instance). - speculative_receiver_type = NULL; + speculative_receiver_type = nullptr; } } - if (receiver_method == NULL && + if (receiver_method == nullptr && (have_major_receiver || morphism == 1 || (morphism == 2 && UseBimorphicInlining))) { // receiver_method = profile.method(); @@ -240,33 +240,33 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool receiver_method = callee->resolve_invoke(jvms->method()->holder(), profile.receiver(0)); } - if (receiver_method != NULL) { + if (receiver_method != nullptr) { // The single majority receiver sufficiently outweighs the minority. CallGenerator* hit_cg = this->call_generator(receiver_method, vtable_index, !call_does_dispatch, jvms, allow_inline, prof_factor); - if (hit_cg != NULL) { + if (hit_cg != nullptr) { // Look up second receiver. 
- CallGenerator* next_hit_cg = NULL; - ciMethod* next_receiver_method = NULL; + CallGenerator* next_hit_cg = nullptr; + ciMethod* next_receiver_method = nullptr; if (morphism == 2 && UseBimorphicInlining) { next_receiver_method = callee->resolve_invoke(jvms->method()->holder(), profile.receiver(1)); - if (next_receiver_method != NULL) { + if (next_receiver_method != nullptr) { next_hit_cg = this->call_generator(next_receiver_method, vtable_index, !call_does_dispatch, jvms, allow_inline, prof_factor); - if (next_hit_cg != NULL && !next_hit_cg->is_inline() && + if (next_hit_cg != nullptr && !next_hit_cg->is_inline() && have_major_receiver && UseOnlyInlinedBimorphic) { // Skip if we can't inline second receiver's method - next_hit_cg = NULL; + next_hit_cg = nullptr; } } } CallGenerator* miss_cg; Deoptimization::DeoptReason reason = (morphism == 2 ? Deoptimization::Reason_bimorphic - : Deoptimization::reason_class_check(speculative_receiver_type != NULL)); - if ((morphism == 1 || (morphism == 2 && next_hit_cg != NULL)) && + : Deoptimization::reason_class_check(speculative_receiver_type != nullptr)); + if ((morphism == 1 || (morphism == 2 && next_hit_cg != nullptr)) && !too_many_traps_or_recompiles(caller, bci, reason) ) { // Generate uncommon trap for class check failure path @@ -279,20 +279,20 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool miss_cg = (IncrementalInlineVirtual ? 
CallGenerator::for_late_inline_virtual(callee, vtable_index, prof_factor) : CallGenerator::for_virtual_call(callee, vtable_index)); } - if (miss_cg != NULL) { - if (next_hit_cg != NULL) { - assert(speculative_receiver_type == NULL, "shouldn't end up here if we used speculation"); + if (miss_cg != nullptr) { + if (next_hit_cg != nullptr) { + assert(speculative_receiver_type == nullptr, "shouldn't end up here if we used speculation"); trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1)); // We don't need to record dependency on a receiver here and below. // Whenever we inline, the dependency is added by Parse::Parse(). miss_cg = CallGenerator::for_predicted_call(profile.receiver(1), miss_cg, next_hit_cg, PROB_MAX); } - if (miss_cg != NULL) { - ciKlass* k = speculative_receiver_type != NULL ? speculative_receiver_type : profile.receiver(0); + if (miss_cg != nullptr) { + ciKlass* k = speculative_receiver_type != nullptr ? speculative_receiver_type : profile.receiver(0); trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), receiver_method, k, site_count, receiver_count); - float hit_prob = speculative_receiver_type != NULL ? 1.0 : profile.receiver_prob(0); + float hit_prob = speculative_receiver_type != nullptr ? 
1.0 : profile.receiver_prob(0); CallGenerator* cg = CallGenerator::for_predicted_call(k, miss_cg, hit_cg, hit_prob); - if (cg != NULL) return cg; + if (cg != nullptr) return cg; } } } @@ -318,13 +318,13 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool caller->get_declared_method_holder_at_bci(bci)->as_instance_klass(); ciInstanceKlass* singleton = declared_interface->unique_implementor(); - if (singleton != NULL) { + if (singleton != nullptr) { assert(singleton != declared_interface, "not a unique implementor"); ciMethod* cha_monomorphic_target = callee->find_monomorphic_target(caller->holder(), declared_interface, singleton); - if (cha_monomorphic_target != NULL && + if (cha_monomorphic_target != nullptr && cha_monomorphic_target->holder() != env()->Object_klass()) { // subtype check against Object is useless ciKlass* holder = cha_monomorphic_target->holder(); @@ -338,7 +338,7 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool ciKlass* constraint = (holder->is_subclass_of(singleton) ? holder : singleton); // avoid upcasts CallGenerator* cg = CallGenerator::for_guarded_call(constraint, miss_cg, hit_cg); - if (hit_cg != NULL && cg != NULL) { + if (hit_cg != nullptr && cg != nullptr) { dependencies()->assert_unique_implementor(declared_interface, singleton); dependencies()->assert_unique_concrete_method(declared_interface, cha_monomorphic_target, declared_interface, callee); return cg; @@ -349,7 +349,7 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool // Nothing claimed the intrinsic, we go with straight-forward inlining // for already discovered intrinsic. 
- if (allow_intrinsics && cg_intrinsic != NULL) { + if (allow_intrinsics && cg_intrinsic != nullptr) { assert(cg_intrinsic->does_virtual_dispatch(), "sanity"); return cg_intrinsic; } @@ -373,7 +373,7 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool CallGenerator* cg = CallGenerator::for_direct_call(callee, should_delay_inlining(callee, jvms)); // For optimized virtual calls assert at runtime that receiver object // is a subtype of the method holder. - if (cg != NULL && is_virtual_or_interface && !callee->is_static()) { + if (cg != nullptr && is_virtual_or_interface && !callee->is_static()) { CallGenerator* trap_cg = CallGenerator::for_uncommon_trap(callee, Deoptimization::Reason_receiver_constraint, Deoptimization::Action_none); cg = CallGenerator::for_guarded_call(callee->holder(), trap_cg, cg); @@ -419,7 +419,7 @@ bool Compile::should_delay_string_inlining(ciMethod* call_method, JVMState* jvms if (receiver->is_Proj() && receiver->in(0)->is_CallStaticJava()) { CallStaticJavaNode* csj = receiver->in(0)->as_CallStaticJava(); ciMethod* m = csj->method(); - if (m != NULL && + if (m != nullptr && (m->intrinsic_id() == vmIntrinsics::_StringBuffer_toString || m->intrinsic_id() == vmIntrinsics::_StringBuilder_toString)) // Delay String.(new SB()) @@ -509,12 +509,12 @@ void Parse::do_call() { // Find target being called bool will_link; - ciSignature* declared_signature = NULL; + ciSignature* declared_signature = nullptr; ciMethod* orig_callee = iter().get_method(will_link, &declared_signature); // callee in the bytecode ciInstanceKlass* holder_klass = orig_callee->holder(); ciKlass* holder = iter().get_declared_method_holder(); ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder); - assert(declared_signature != NULL, "cannot be null"); + assert(declared_signature != nullptr, "cannot be null"); JFR_ONLY(Jfr::on_resolution(this, holder, orig_callee);) // Bump max node limit for JSR292 users @@ -535,7 +535,7 
@@ void Parse::do_call() { //assert((bc_callee->is_static() || is_invokedynamic) == !has_receiver , "must match bc"); // XXX invokehandle (cur_bc_raw) // Note: this takes into account invokeinterface of methods declared in java/lang/Object, // which should be invokevirtuals but according to the VM spec may be invokeinterfaces - assert(holder_klass->is_interface() || holder_klass->super() == NULL || (bc() != Bytecodes::_invokeinterface), "must match bc"); + assert(holder_klass->is_interface() || holder_klass->super() == nullptr || (bc() != Bytecodes::_invokeinterface), "must match bc"); // Note: In the absence of miranda methods, an abstract class K can perform // an invokevirtual directly on an interface method I.m if K implements I. @@ -565,7 +565,7 @@ void Parse::do_call() { bool call_does_dispatch = false; // Speculative type of the receiver if any - ciKlass* speculative_receiver_type = NULL; + ciKlass* speculative_receiver_type = nullptr; if (is_virtual_or_interface) { Node* receiver_node = stack(sp() - nargs); const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr(); @@ -579,11 +579,11 @@ void Parse::do_call() { callee = C->optimize_virtual_call(method(), klass, holder, orig_callee, receiver_type, is_virtual, call_does_dispatch, vtable_index); // out-parameters - speculative_receiver_type = receiver_type != NULL ? receiver_type->speculative_type() : NULL; + speculative_receiver_type = receiver_type != nullptr ? receiver_type->speculative_type() : nullptr; } // Additional receiver subtype checks for interface calls via invokespecial or invokeinterface. 
- ciKlass* receiver_constraint = NULL; + ciKlass* receiver_constraint = nullptr; if (iter().cur_bc_raw() == Bytecodes::_invokespecial && !orig_callee->is_object_initializer()) { ciInstanceKlass* calling_klass = method()->holder(); ciInstanceKlass* sender_klass = calling_klass; @@ -595,12 +595,12 @@ void Parse::do_call() { receiver_constraint = holder; } - if (receiver_constraint != NULL) { + if (receiver_constraint != nullptr) { Node* receiver_node = stack(sp() - nargs); Node* cls_node = makecon(TypeKlassPtr::make(receiver_constraint, Type::trust_interfaces)); - Node* bad_type_ctrl = NULL; + Node* bad_type_ctrl = nullptr; Node* casted_receiver = gen_checkcast(receiver_node, cls_node, &bad_type_ctrl); - if (bad_type_ctrl != NULL) { + if (bad_type_ctrl != nullptr) { PreserveJVMState pjvms(this); set_control(bad_type_ctrl); uncommon_trap(Deoptimization::Reason_class_check, @@ -628,7 +628,7 @@ void Parse::do_call() { CallGenerator* cg = C->call_generator(callee, vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type); // NOTE: Don't use orig_callee and callee after this point! Use cg->method() instead. - orig_callee = callee = NULL; + orig_callee = callee = nullptr; // --------------------- // Round double arguments before call @@ -650,17 +650,17 @@ void Parse::do_call() { assert(jvms_in_sync(), "jvms must carry full info into CG"); // save across call, for a subsequent cast_not_null. - Node* receiver = has_receiver ? argument(0) : NULL; + Node* receiver = has_receiver ? 
argument(0) : nullptr; // The extra CheckCastPPs for speculative types mess with PhaseStringOpts - if (receiver != NULL && !call_does_dispatch && !cg->is_string_late_inline()) { + if (receiver != nullptr && !call_does_dispatch && !cg->is_string_late_inline()) { // Feed profiling data for a single receiver to the type system so // it can propagate it as a speculative type receiver = record_profiled_receiver_for_speculation(receiver); } JVMState* new_jvms = cg->generate(jvms); - if (new_jvms == NULL) { + if (new_jvms == nullptr) { // When inlining attempt fails (e.g., too many arguments), // it may contaminate the current compile state, making it // impossible to pull back and try again. Once we call @@ -674,7 +674,7 @@ void Parse::do_call() { // get a normal java call that may inline in that case cg = C->call_generator(cg->method(), vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type, /* allow_intrinsics= */ false); new_jvms = cg->generate(jvms); - if (new_jvms == NULL) { + if (new_jvms == nullptr) { guarantee(failing(), "call failed to generate: calls should work"); return; } @@ -700,7 +700,7 @@ void Parse::do_call() { if (!stopped()) { // This was some sort of virtual call, which did a null check for us. // Now we can assert receiver-not-null, on the normal return path. 
- if (receiver != NULL && cg->is_virtual()) { + if (receiver != nullptr && cg->is_virtual()) { Node* cast = cast_not_null(receiver); // %%% assert(receiver == cast, "should already have cast the receiver"); } @@ -726,7 +726,7 @@ void Parse::do_call() { if (ctype->is_loaded()) { const TypeOopPtr* arg_type = TypeOopPtr::make_from_klass(rtype->as_klass()); const Type* sig_type = TypeOopPtr::make_from_klass(ctype->as_klass()); - if (arg_type != NULL && !arg_type->higher_equal(sig_type)) { + if (arg_type != nullptr && !arg_type->higher_equal(sig_type)) { Node* retnode = pop(); Node* cast_obj = _gvn.transform(new CheckCastPPNode(control(), retnode, sig_type)); push(cast_obj); @@ -759,7 +759,7 @@ void Parse::do_call() { method()->print_name(); tty->print_cr(" asserting nullness of result at bci: %d", bci()); cg->method()->print_name(); tty->cr(); } - if (C->log() != NULL) { + if (C->log() != nullptr) { C->log()->elem("assert_null reason='return' klass='%d'", C->log()->identify(rtype)); } @@ -791,7 +791,7 @@ void Parse::catch_call_exceptions(ciExceptionHandlerStream& handlers) { // Add a CatchNode. GrowableArray* bcis = new (C->node_arena()) GrowableArray(C->node_arena(), 8, 0, -1); - GrowableArray* extypes = new (C->node_arena()) GrowableArray(C->node_arena(), 8, 0, NULL); + GrowableArray* extypes = new (C->node_arena()) GrowableArray(C->node_arena(), 8, 0, nullptr); GrowableArray* saw_unloaded = new (C->node_arena()) GrowableArray(C->node_arena(), 8, 0, 0); bool default_handler = false; @@ -901,7 +901,7 @@ void Parse::catch_call_exceptions(ciExceptionHandlerStream& handlers) { // So we insert a RethrowCall and all the logic that goes with it. void Parse::catch_inline_exceptions(SafePointNode* ex_map) { // Caller is responsible for saving away the map for normal control flow! 
- assert(stopped(), "call set_map(NULL) first"); + assert(stopped(), "call set_map(nullptr) first"); assert(method()->has_exception_handlers(), "don't come here w/o work to do"); Node* ex_node = saved_ex_oop(ex_map); @@ -910,8 +910,8 @@ void Parse::catch_inline_exceptions(SafePointNode* ex_map) { return; } const TypeInstPtr* ex_type = _gvn.type(ex_node)->isa_instptr(); - NOT_PRODUCT(if (ex_type==NULL) tty->print_cr("*** Exception not InstPtr")); - if (ex_type == NULL) + NOT_PRODUCT(if (ex_type==nullptr) tty->print_cr("*** Exception not InstPtr")); + if (ex_type == nullptr) ex_type = TypeOopPtr::make_from_klass(env()->Throwable_klass())->is_instptr(); // determine potential exception handlers @@ -924,10 +924,10 @@ void Parse::catch_inline_exceptions(SafePointNode* ex_map) { ex_node = use_exception_state(ex_map); // Get the exception oop klass from its header - Node* ex_klass_node = NULL; + Node* ex_klass_node = nullptr; if (has_ex_handler() && !ex_type->klass_is_exact()) { Node* p = basic_plus_adr( ex_node, ex_node, oopDesc::klass_offset_in_bytes()); - ex_klass_node = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT)); + ex_klass_node = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT)); // Compute the exception klass a little more cleverly. // Obvious solution is to simple do a LoadKlass from the 'ex_node'. @@ -939,13 +939,13 @@ void Parse::catch_inline_exceptions(SafePointNode* ex_map) { ex_klass_node = new PhiNode(ex_node->in(0), TypeInstKlassPtr::OBJECT); for (uint i = 1; i < ex_node->req(); i++) { Node* ex_in = ex_node->in(i); - if (ex_in == top() || ex_in == NULL) { + if (ex_in == top() || ex_in == nullptr) { // This path was not taken. 
ex_klass_node->init_req(i, top()); continue; } Node* p = basic_plus_adr(ex_in, ex_in, oopDesc::klass_offset_in_bytes()); - Node* k = _gvn.transform( LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT)); + Node* k = _gvn.transform( LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT)); ex_klass_node->init_req( i, k ); } ex_klass_node = _gvn.transform(ex_klass_node); @@ -1030,7 +1030,7 @@ void Parse::catch_inline_exceptions(SafePointNode* ex_map) { make_runtime_call(RC_NO_LEAF | RC_MUST_THROW, OptoRuntime::rethrow_Type(), OptoRuntime::rethrow_stub(), - NULL, NULL, + nullptr, nullptr, ex_node); // Rethrow is a pure call, no side effects, only a result. @@ -1093,7 +1093,7 @@ ciMethod* Compile::optimize_virtual_call(ciMethod* caller, ciInstanceKlass* klas receiver_type, check_access); // Have the call been sufficiently improved such that it is no longer a virtual? - if (optimized_virtual_method != NULL) { + if (optimized_virtual_method != nullptr) { callee = optimized_virtual_method; call_does_dispatch = false; } else if (!UseInlineCaches && is_virtual && callee->is_loaded()) { @@ -1117,8 +1117,8 @@ ciMethod* Compile::optimize_inlining(ciMethod* caller, ciInstanceKlass* klass, c return callee; } - if (receiver_type == NULL) { - return NULL; // no receiver type info + if (receiver_type == nullptr) { + return nullptr; // no receiver type info } // Attempt to improve the receiver @@ -1134,7 +1134,7 @@ ciMethod* Compile::optimize_inlining(ciMethod* caller, ciInstanceKlass* klass, c // All other interesting cases are instance klasses. 
if (!receiver_type->isa_instptr()) { - return NULL; + return nullptr; } ciInstanceKlass* receiver_klass = receiver_type->is_instptr()->instance_klass(); @@ -1150,7 +1150,7 @@ ciMethod* Compile::optimize_inlining(ciMethod* caller, ciInstanceKlass* klass, c ciInstanceKlass* calling_klass = caller->holder(); ciMethod* cha_monomorphic_target = callee->find_monomorphic_target(calling_klass, klass, actual_receiver, check_access); - if (cha_monomorphic_target != NULL) { + if (cha_monomorphic_target != nullptr) { // Hardwiring a virtual. assert(!callee->can_be_statically_bound(), "should have been handled earlier"); assert(!cha_monomorphic_target->is_abstract(), ""); @@ -1171,10 +1171,10 @@ ciMethod* Compile::optimize_inlining(ciMethod* caller, ciInstanceKlass* klass, c // In case of evolution, there is a dependence on every inlined method, since each // such method can be changed when its class is redefined. ciMethod* exact_method = callee->resolve_invoke(calling_klass, actual_receiver); - if (exact_method != NULL) { + if (exact_method != nullptr) { return exact_method; } } - return NULL; + return nullptr; } diff --git a/src/hotspot/share/opto/domgraph.cpp b/src/hotspot/share/opto/domgraph.cpp index 0159d9bdc42..bd7b69fdf5b 100644 --- a/src/hotspot/share/opto/domgraph.cpp +++ b/src/hotspot/share/opto/domgraph.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -129,12 +129,12 @@ void PhaseCFG::build_dominator_tree() { Tarjan *w = &tarjan[i]; if( w->_dom != &tarjan[w->_semi] ) w->_dom = w->_dom->_dom; - w->_dom_next = w->_dom_child = NULL; // Initialize for building tree later + w->_dom_next = w->_dom_child = nullptr; // Initialize for building tree later } // No immediate dominator for the root Tarjan *w = &tarjan[get_root_block()->_pre_order]; - w->_dom = NULL; - w->_dom_next = w->_dom_child = NULL; // Initialize for building tree later + w->_dom = nullptr; + w->_dom_next = w->_dom_child = nullptr; // Initialize for building tree later // Convert the dominator tree array into my kind of graph for(uint i = 1; i <= number_of_blocks(); i++){ // For all Tarjan vertices @@ -145,7 +145,7 @@ void PhaseCFG::build_dominator_tree() { t->_dom_next = tdom->_dom_child; // Make me a sibling of parent's child tdom->_dom_child = t; // Make me a child of my parent } else - t->_block->_idom = NULL; // Root + t->_block->_idom = nullptr; // Root } w->setdepth(number_of_blocks() + 1); // Set depth in dominator tree @@ -175,12 +175,12 @@ class Block_Stack { t->_block = b; // Save actual block t->_semi = pre_order; // Block to DFS map t->_label = t; // DFS to vertex map - t->_ancestor = NULL; // Fast LINK & EVAL setup + t->_ancestor = nullptr; // Fast LINK & EVAL setup t->_child = &_tarjan[0]; // Sentenial t->_size = 1; - t->_bucket = NULL; + t->_bucket = nullptr; if (pre_order == 1) - t->_parent = NULL; // first block doesn't have parent + t->_parent = nullptr; // first block doesn't have parent else { // Save parent (current top block on stack) in DFS t->_parent = &_tarjan[_stack_top->block->_pre_order]; @@ -341,11 +341,11 @@ void Tarjan::setdepth( uint stack_size ) { t->_block->_dom_depth = depth; // Set depth in dominator tree Tarjan *dom_child = t->_dom_child; t = t->_dom_next; // next tarjan - if (dom_child != NULL) { + if (dom_child != nullptr) { *top = 
dom_child; // save child on stack ++top; } - } while (t != NULL); + } while (t != nullptr); } while (next < last); } while (last < top); } @@ -395,7 +395,7 @@ void PhaseIdealLoop::Dominators() { // Initialize _control field for fast reference int i; for( i= C->unique()-1; i>=0; i-- ) - ntarjan[i]._control = NULL; + ntarjan[i]._control = nullptr; // Store the DFS order for the main loop const uint fill_value = max_juint; @@ -413,12 +413,12 @@ void PhaseIdealLoop::Dominators() { for( i = dfsnum-1; i>1; i-- ) { // For all nodes in reverse DFS order NTarjan *w = &ntarjan[i]; // Get Node from DFS - assert(w->_control != NULL,"bad DFS walk"); + assert(w->_control != nullptr,"bad DFS walk"); // Step 2: Node *whead = w->_control; for( uint j=0; j < whead->req(); j++ ) { // For each predecessor - if( whead->in(j) == NULL || !whead->in(j)->is_CFG() ) + if( whead->in(j) == nullptr || !whead->in(j)->is_CFG() ) continue; // Only process control nodes uint b = dfsorder[whead->in(j)->_idx]; if(b == fill_value) continue; @@ -468,28 +468,28 @@ void PhaseIdealLoop::Dominators() { // Step 4: for( i=2; i < dfsnum; i++ ) { // DFS order NTarjan *w = &ntarjan[i]; - assert(w->_control != NULL,"Bad DFS walk"); + assert(w->_control != nullptr,"Bad DFS walk"); if( w->_dom != &ntarjan[w->_semi] ) w->_dom = w->_dom->_dom; - w->_dom_next = w->_dom_child = NULL; // Initialize for building tree later + w->_dom_next = w->_dom_child = nullptr; // Initialize for building tree later } // No immediate dominator for the root NTarjan *w = &ntarjan[dfsorder[C->root()->_idx]]; - w->_dom = NULL; - w->_parent = NULL; - w->_dom_next = w->_dom_child = NULL; // Initialize for building tree later + w->_dom = nullptr; + w->_parent = nullptr; + w->_dom_next = w->_dom_child = nullptr; // Initialize for building tree later // Convert the dominator tree array into my kind of graph for( i=1; i_control != NULL,"Bad DFS walk"); + assert(t->_control != nullptr,"Bad DFS walk"); NTarjan *tdom = t->_dom; // Handy access to 
immediate dominator if( tdom ) { // Root has no immediate dominator _idom[t->_control->_idx] = tdom->_control; // Set immediate dominator t->_dom_next = tdom->_dom_child; // Make me a sibling of parent's child tdom->_dom_child = t; // Make me a child of my parent } else - _idom[C->root()->_idx] = NULL; // Root + _idom[C->root()->_idx] = nullptr; // Root } w->setdepth( C->unique()+1, _dom_depth ); // Set depth in dominator tree // Pick up the 'top' node as well @@ -525,10 +525,10 @@ int NTarjan::DFS( NTarjan *ntarjan, VectorSet &visited, PhaseIdealLoop *pil, uin dfsorder[b->_idx] = dfsnum; // Save DFS order info w->_semi = dfsnum; // Node to DFS map w->_label = w; // DFS to vertex map - w->_ancestor = NULL; // Fast LINK & EVAL setup + w->_ancestor = nullptr; // Fast LINK & EVAL setup w->_child = &ntarjan[0]; // Sentinel w->_size = 1; - w->_bucket = NULL; + w->_bucket = nullptr; // Need DEF-USE info for this pass for ( int i = b->outcnt(); i-- > 0; ) { // Put on stack backwards @@ -604,11 +604,11 @@ void NTarjan::setdepth( uint stack_size, uint *dom_depth ) { dom_depth[t->_control->_idx] = depth; // Set depth in dominator tree NTarjan *dom_child = t->_dom_child; t = t->_dom_next; // next tarjan - if (dom_child != NULL) { + if (dom_child != nullptr) { *top = dom_child; // save child on stack ++top; } - } while (t != NULL); + } while (t != nullptr); } while (next < last); } while (last < top); } @@ -628,13 +628,13 @@ void NTarjan::dump(int offset) const { for(i = offset; i >0; i--) // Use indenting for tree structure tty->print(" "); tty->print("DFS Parent: "); - if(_parent != NULL) + if(_parent != nullptr) _parent->_control->dump(); // Parent in DFS tty->print("\n"); for(i = offset; i >0; i--) // Use indenting for tree structure tty->print(" "); tty->print("Dom Parent: "); - if(_dom != NULL) + if(_dom != nullptr) _dom->_control->dump(); // Parent in Dominator Tree tty->print("\n"); diff --git a/src/hotspot/share/opto/escape.cpp b/src/hotspot/share/opto/escape.cpp 
index 6e5a811ce6d..c9a3492150b 100644 --- a/src/hotspot/share/opto/escape.cpp +++ b/src/hotspot/share/opto/escape.cpp @@ -42,7 +42,7 @@ #include "utilities/macros.hpp" ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) : - _nodes(C->comp_arena(), C->unique(), C->unique(), NULL), + _nodes(C->comp_arena(), C->unique(), C->unique(), nullptr), _in_worklist(C->comp_arena()), _next_pidx(0), _collecting(true), @@ -56,7 +56,7 @@ ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation // Add unknown java object. add_java_object(C->top(), PointsToNode::GlobalEscape); phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject(); - // Add ConP(#NULL) and ConN(#NULL) nodes. + // Add ConP and ConN null oop nodes Node* oop_null = igvn->zerocon(T_OBJECT); assert(oop_null->_idx < nodes_size(), "should be created already"); add_java_object(oop_null, PointsToNode::NoEscape); @@ -95,12 +95,12 @@ void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) { Compile::TracePhase tp("escapeAnalysis", &Phase::timers[Phase::_t_escapeAnalysis]); ResourceMark rm; - // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction + // Add ConP and ConN null oop nodes before ConnectionGraph construction // to create space for them in ConnectionGraph::_nodes[]. Node* oop_null = igvn->zerocon(T_OBJECT); Node* noop_null = igvn->zerocon(T_NARROWOOP); int invocation = 0; - if (C->congraph() != NULL) { + if (C->congraph() != nullptr) { invocation = C->congraph()->_invocation + 1; } ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn, invocation); @@ -139,9 +139,9 @@ bool ConnectionGraph::compute_escape() { { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]); // 1. Populate Connection Graph (CG) with PointsTo nodes. 
- ideal_nodes.map(C->live_nodes(), NULL); // preallocate space + ideal_nodes.map(C->live_nodes(), nullptr); // preallocate space // Initialize worklist - if (C->root() != NULL) { + if (C->root() != nullptr) { ideal_nodes.push(C->root()); } // Processed ideal nodes are unique on ideal_nodes list @@ -156,7 +156,7 @@ bool ConnectionGraph::compute_escape() { // only once per ideal node since ideal_nodes is Unique_Node list. add_node_to_connection_graph(n, &delayed_worklist); PointsToNode* ptn = ptnode_adr(n->_idx); - if (ptn != NULL && ptn != phantom_obj) { + if (ptn != nullptr && ptn != phantom_obj) { ptnodes_worklist.append(ptn); if (ptn->is_JavaObject()) { java_objects_worklist.append(ptn->as_JavaObject()); @@ -404,7 +404,7 @@ bool ConnectionGraph::compute_escape() { // Returns true if there is an object in the scope of sfn that does not escape globally. bool ConnectionGraph::has_ea_local_in_scope(SafePointNode* sfn) { Compile* C = _compile; - for (JVMState* jvms = sfn->jvms(); jvms != NULL; jvms = jvms->caller()) { + for (JVMState* jvms = sfn->jvms(); jvms != nullptr; jvms = jvms->caller()) { if (C->env()->should_retain_local_variables() || C->env()->jvmti_can_walk_any_space() || DeoptimizeObjectsALot) { // Jvmti agents can access locals. Must provide info about local objects at runtime. @@ -422,7 +422,7 @@ bool ConnectionGraph::has_ea_local_in_scope(SafePointNode* sfn) { int num_mon = jvms->nof_monitors(); for (int idx = 0; idx < num_mon; idx++) { Node* m = sfn->monitor_obj(jvms, idx); - if (m != NULL && not_global_escape(m)) { + if (m != nullptr && not_global_escape(m)) { return true; } } @@ -434,7 +434,7 @@ bool ConnectionGraph::has_ea_local_in_scope(SafePointNode* sfn) { // Returns true if at least one of the arguments to the call is an object // that does not escape globally. 
bool ConnectionGraph::has_arg_escape(CallJavaNode* call) { - if (call->method() != NULL) { + if (call->method() != nullptr) { uint max_idx = TypeFunc::Parms + call->method()->arg_size(); for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) { Node* p = call->in(idx); @@ -444,14 +444,14 @@ bool ConnectionGraph::has_arg_escape(CallJavaNode* call) { } } else { const char* name = call->as_CallStaticJava()->_name; - assert(name != NULL, "no name"); + assert(name != nullptr, "no name"); // no arg escapes through uncommon traps if (strcmp(name, "uncommon_trap") != 0) { // process_call_arguments() assumes that all arguments escape globally const TypeTuple* d = call->tf()->domain(); for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { const Type* at = d->field_at(i); - if (at->isa_oopptr() != NULL) { + if (at->isa_oopptr() != nullptr) { return true; } } @@ -467,13 +467,13 @@ void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because // ThreadLocal has RawPtr type. const Type* t = _igvn->type(n); - if (t->make_ptr() != NULL) { + if (t->make_ptr() != nullptr) { Node* adr = n->in(MemNode::Address); #ifdef ASSERT if (!adr->is_AddP()) { assert(_igvn->type(adr)->isa_rawptr(), "sanity"); } else { - assert((ptnode_adr(adr->_idx) == NULL || + assert((ptnode_adr(adr->_idx) == nullptr || ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity"); } #endif @@ -489,7 +489,7 @@ void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *de PhaseGVN* igvn = _igvn; uint n_idx = n->_idx; PointsToNode* n_ptn = ptnode_adr(n_idx); - if (n_ptn != NULL) { + if (n_ptn != nullptr) { return; // No need to redefine PointsTo node during first iteration. 
} int opcode = n->Opcode(); @@ -510,7 +510,7 @@ void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *de } else { if (n->is_CallStaticJava()) { const char* name = n->as_CallStaticJava()->_name; - if (name != NULL && strcmp(name, "uncommon_trap") == 0) { + if (name != nullptr && strcmp(name, "uncommon_trap") == 0) { return; // Skip uncommon traps } } @@ -518,7 +518,7 @@ void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *de delayed_worklist->push(n); // Check if a call returns an object. if ((n->as_Call()->returns_pointer() && - n->as_Call()->proj_out_or_null(TypeFunc::Parms) != NULL) || + n->as_Call()->proj_out_or_null(TypeFunc::Parms) != nullptr) || (n->is_CallStaticJava() && n->as_CallStaticJava()->is_boxing_method())) { add_call_node(n->as_Call()); @@ -541,7 +541,7 @@ void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *de // Graph because such fields are not used for oop loads and stores. int offset = address_offset(n, igvn); add_field(n, PointsToNode::NoEscape, offset); - if (ptn_base == NULL) { + if (ptn_base == nullptr) { delayed_worklist->push(n); // Process it later. } else { n_ptn = ptnode_adr(n_idx); @@ -613,7 +613,7 @@ void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *de // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because // ThreadLocal has RawPtr type. const Type* t = n->as_Phi()->type(); - if (t->make_ptr() != NULL) { + if (t->make_ptr() != nullptr) { add_local_var(n, PointsToNode::NoEscape); // Do not add edges during first iteration because some could be // not defined yet. 
@@ -709,7 +709,7 @@ void ConnectionGraph::add_final_edges(Node *n) { return; } assert(n->is_Store() || n->is_LoadStore() || - (n_ptn != NULL) && (n_ptn->ideal_node() != NULL), + (n_ptn != nullptr) && (n_ptn->ideal_node() != nullptr), "node should be registered already"); int opcode = n->Opcode(); bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode); @@ -720,7 +720,7 @@ void ConnectionGraph::add_final_edges(Node *n) { case Op_AddP: { Node* base = get_addp_base(n); PointsToNode* ptn_base = ptnode_adr(base->_idx); - assert(ptn_base != NULL, "field's base should be registered"); + assert(ptn_base != nullptr, "field's base should be registered"); add_base(n_ptn->as_Field(), ptn_base); break; } @@ -730,21 +730,21 @@ void ConnectionGraph::add_final_edges(Node *n) { case Op_DecodeN: case Op_EncodePKlass: case Op_DecodeNKlass: { - add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), NULL); + add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr); break; } case Op_CMoveP: { for (uint i = CMoveNode::IfFalse; i < n->req(); i++) { Node* in = n->in(i); - if (in == NULL) { - continue; // ignore NULL + if (in == nullptr) { + continue; // ignore null } Node* uncast_in = in->uncast(); if (uncast_in->is_top() || uncast_in == n) { continue; // ignore top or inputs which go back this node } PointsToNode* ptn = ptnode_adr(in->_idx); - assert(ptn != NULL, "node should be registered"); + assert(ptn != nullptr, "node should be registered"); add_edge(n_ptn, ptn); } break; @@ -753,25 +753,25 @@ void ConnectionGraph::add_final_edges(Node *n) { case Op_LoadN: { // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because // ThreadLocal has RawPtr type. 
- assert(_igvn->type(n)->make_ptr() != NULL, "Unexpected node type"); - add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), NULL); + assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type"); + add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr); break; } case Op_Phi: { // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because // ThreadLocal has RawPtr type. - assert(n->as_Phi()->type()->make_ptr() != NULL, "Unexpected node type"); + assert(n->as_Phi()->type()->make_ptr() != nullptr, "Unexpected node type"); for (uint i = 1; i < n->req(); i++) { Node* in = n->in(i); - if (in == NULL) { - continue; // ignore NULL + if (in == nullptr) { + continue; // ignore null } Node* uncast_in = in->uncast(); if (uncast_in->is_top() || uncast_in == n) { continue; // ignore top or inputs which go back this node } PointsToNode* ptn = ptnode_adr(in->_idx); - assert(ptn != NULL, "node should be registered"); + assert(ptn != nullptr, "node should be registered"); add_edge(n_ptn, ptn); } break; @@ -780,7 +780,7 @@ void ConnectionGraph::add_final_edges(Node *n) { // we are only interested in the oop result projection from a call assert(n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() && n->in(0)->as_Call()->returns_pointer(), "Unexpected node type"); - add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL); + add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), nullptr); break; } case Op_Rethrow: // Exception object escapes @@ -788,15 +788,15 @@ void ConnectionGraph::add_final_edges(Node *n) { assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(), "Unexpected node type"); // Treat Return value as LocalVar with GlobalEscape escape state. 
- add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), NULL); + add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), nullptr); break; } case Op_CompareAndExchangeP: case Op_CompareAndExchangeN: case Op_GetAndSetP: case Op_GetAndSetN:{ - assert(_igvn->type(n)->make_ptr() != NULL, "Unexpected node type"); - add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), NULL); + assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type"); + add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr); // fall-through } case Op_CompareAndSwapP: @@ -827,12 +827,12 @@ void ConnectionGraph::add_final_edges(Node *n) { const Type* at = _igvn->type(adr); if (!adr->is_top() && at->isa_ptr()) { assert(at == Type::TOP || at == TypePtr::NULL_PTR || - at->isa_ptr() != NULL, "expecting a pointer"); + at->isa_ptr() != nullptr, "expecting a pointer"); if (adr->is_AddP()) { adr = get_addp_base(adr); } PointsToNode* ptn = ptnode_adr(adr->_idx); - assert(ptn != NULL, "node should be registered"); + assert(ptn != nullptr, "node should be registered"); add_edge(n_ptn, ptn); } } @@ -874,7 +874,7 @@ void ConnectionGraph::add_to_congraph_unsafe_access(Node* n, uint opcode, Unique Node* adr = n->in(MemNode::Address); const Type* adr_type = _igvn->type(adr); adr_type = adr_type->make_ptr(); - if (adr_type == NULL) { + if (adr_type == nullptr) { return; // skip dead nodes } if (adr_type->isa_oopptr() @@ -912,9 +912,9 @@ bool ConnectionGraph::add_final_edges_unsafe_access(Node* n, uint opcode) { const Type *adr_type = _igvn->type(adr); adr_type = adr_type->make_ptr(); #ifdef ASSERT - if (adr_type == NULL) { + if (adr_type == nullptr) { n->dump(1); - assert(adr_type != NULL, "dead node should not be on list"); + assert(adr_type != nullptr, "dead node should not be on list"); return true; } #endif @@ -925,22 +925,22 @@ bool ConnectionGraph::add_final_edges_unsafe_access(Node* n, uint opcode) { && 
is_captured_store_address(adr))) { // Point Address to Value PointsToNode* adr_ptn = ptnode_adr(adr->_idx); - assert(adr_ptn != NULL && + assert(adr_ptn != nullptr && adr_ptn->as_Field()->is_oop(), "node should be registered"); Node* val = n->in(MemNode::ValueIn); PointsToNode* ptn = ptnode_adr(val->_idx); - assert(ptn != NULL, "node should be registered"); + assert(ptn != nullptr, "node should be registered"); add_edge(adr_ptn, ptn); return true; } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) { // Stored value escapes in unsafe access. Node* val = n->in(MemNode::ValueIn); PointsToNode* ptn = ptnode_adr(val->_idx); - assert(ptn != NULL, "node should be registered"); + assert(ptn != nullptr, "node should be registered"); set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address")); // Add edge to object for unsafe access with offset. PointsToNode* adr_ptn = ptnode_adr(adr->_idx); - assert(adr_ptn != NULL, "node should be registered"); + assert(adr_ptn != nullptr, "node should be registered"); if (adr_ptn->is_Field()) { assert(adr_ptn->as_Field()->is_oop(), "should be oop field"); add_edge(adr_ptn, ptn); @@ -960,7 +960,7 @@ void ConnectionGraph::add_call_node(CallNode* call) { if (call->is_Allocate()) { Node* k = call->in(AllocateNode::KlassNode); const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr(); - assert(kt != NULL, "TypeKlassPtr required."); + assert(kt != nullptr, "TypeKlassPtr required."); PointsToNode::EscapeState es = PointsToNode::NoEscape; bool scalar_replaceable = true; NOT_PRODUCT(const char* nsr_reason = ""); @@ -1029,7 +1029,7 @@ void ConnectionGraph::add_call_node(CallNode* call) { // For a static call, we know exactly what method is being called. // Use bytecode estimator to record whether the call's return value escapes. 
ciMethod* meth = call->as_CallJava()->method(); - if (meth == NULL) { + if (meth == nullptr) { const char* name = call->as_CallStaticJava()->_name; assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check"); // Returns a newly allocated non-escaped object. @@ -1062,7 +1062,7 @@ void ConnectionGraph::add_call_node(CallNode* call) { const TypeTuple* d = call->tf()->domain(); bool ret_arg = false; for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { - if (d->field_at(i)->isa_ptr() != NULL && + if (d->field_at(i)->isa_ptr() != nullptr && call_analyzer->is_arg_returned(i - TypeFunc::Parms)) { ret_arg = true; break; @@ -1112,7 +1112,7 @@ void ConnectionGraph::process_call_arguments(CallNode *call) { for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { const Type* at = d->field_at(i); Node *arg = call->in(i); - if (arg == NULL) { + if (arg == nullptr) { continue; } const Type *aat = _igvn->type(arg); @@ -1131,14 +1131,14 @@ void ConnectionGraph::process_call_arguments(CallNode *call) { arg = get_addp_base(arg); } PointsToNode* arg_ptn = ptnode_adr(arg->_idx); - assert(arg_ptn != NULL, "should be registered"); + assert(arg_ptn != nullptr, "should be registered"); PointsToNode::EscapeState arg_esc = arg_ptn->escape_state(); if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) { assert(aat == Type::TOP || aat == TypePtr::NULL_PTR || - aat->isa_ptr() != NULL, "expecting an Ptr"); + aat->isa_ptr() != nullptr, "expecting an Ptr"); bool arg_has_oops = aat->isa_oopptr() && (aat->isa_instptr() || - (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != NULL))); + (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != nullptr))); if (i == TypeFunc::Parms) { src_has_oops = arg_has_oops; } @@ -1155,7 +1155,7 @@ void ConnectionGraph::process_call_arguments(CallNode *call) { #ifdef ASSERT if (!(is_arraycopy || 
BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) || - (call->as_CallLeaf()->_name != NULL && + (call->as_CallLeaf()->_name != nullptr && (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 || strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 || strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 || @@ -1220,7 +1220,7 @@ void ConnectionGraph::process_call_arguments(CallNode *call) { src = get_addp_base(src); } PointsToNode* src_ptn = ptnode_adr(src->_idx); - assert(src_ptn != NULL, "should be registered"); + assert(src_ptn != nullptr, "should be registered"); if (arg_ptn != src_ptn) { // Special arraycopy edge: // A destination object's field can't have the source object @@ -1239,15 +1239,15 @@ void ConnectionGraph::process_call_arguments(CallNode *call) { // Use bytecode estimator to record the call's escape affects #ifdef ASSERT const char* name = call->as_CallStaticJava()->_name; - assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only"); + assert((name == nullptr || strcmp(name, "uncommon_trap") != 0), "normal calls only"); #endif ciMethod* meth = call->as_CallJava()->method(); - if ((meth != NULL) && meth->is_boxing_method()) { + if ((meth != nullptr) && meth->is_boxing_method()) { break; // Boxing methods do not modify any oops. } - BCEscapeAnalyzer* call_analyzer = (meth !=NULL) ? meth->get_bcea() : NULL; + BCEscapeAnalyzer* call_analyzer = (meth !=nullptr) ? 
meth->get_bcea() : nullptr; // fall-through if not a Java method or no analyzer information - if (call_analyzer != NULL) { + if (call_analyzer != nullptr) { PointsToNode* call_ptn = ptnode_adr(call->_idx); const TypeTuple* d = call->tf()->domain(); for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { @@ -1255,16 +1255,16 @@ void ConnectionGraph::process_call_arguments(CallNode *call) { int k = i - TypeFunc::Parms; Node* arg = call->in(i); PointsToNode* arg_ptn = ptnode_adr(arg->_idx); - if (at->isa_ptr() != NULL && + if (at->isa_ptr() != nullptr && call_analyzer->is_arg_returned(k)) { // The call returns arguments. - if (call_ptn != NULL) { // Is call's result used? + if (call_ptn != nullptr) { // Is call's result used? assert(call_ptn->is_LocalVar(), "node should be registered"); - assert(arg_ptn != NULL, "node should be registered"); + assert(arg_ptn != nullptr, "node should be registered"); add_edge(call_ptn, arg_ptn); } } - if (at->isa_oopptr() != NULL && + if (at->isa_oopptr() != nullptr && arg_ptn->escape_state() < PointsToNode::GlobalEscape) { if (!call_analyzer->is_arg_stack(k)) { // The argument global escapes @@ -1278,7 +1278,7 @@ void ConnectionGraph::process_call_arguments(CallNode *call) { } } } - if (call_ptn != NULL && call_ptn->is_LocalVar()) { + if (call_ptn != nullptr && call_ptn->is_LocalVar()) { // The call returns arguments. 
assert(call_ptn->edge_count() > 0, "sanity"); if (!call_analyzer->is_return_local()) { @@ -1296,12 +1296,12 @@ void ConnectionGraph::process_call_arguments(CallNode *call) { const TypeTuple* d = call->tf()->domain(); for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { const Type* at = d->field_at(i); - if (at->isa_oopptr() != NULL) { + if (at->isa_oopptr() != nullptr) { Node* arg = call->in(i); if (arg->is_AddP()) { arg = get_addp_base(arg); } - assert(ptnode_adr(arg->_idx) != NULL, "should be defined already"); + assert(ptnode_adr(arg->_idx) != nullptr, "should be defined already"); set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call))); } } @@ -1413,7 +1413,7 @@ bool ConnectionGraph::complete_connection_graph( // Bailout if passed limits. if ((iterations >= GRAPH_BUILD_ITER_LIMIT) || timeout) { Compile* C = _compile; - if (C->log() != NULL) { + if (C->log() != nullptr) { C->log()->begin_elem("connectionGraph_bailout reason='reached "); C->log()->text("%s", timeout ? "time" : "iterations"); C->log()->end_elem(" limit'"); @@ -1427,7 +1427,7 @@ bool ConnectionGraph::complete_connection_graph( #undef GRAPH_BUILD_ITER_LIMIT - // Find fields initialized by NULL for non-escaping Allocations. + // Find fields initialized by null for non-escaping Allocations. int non_escaped_length = non_escaped_allocs_worklist.length(); for (int next = 0; next < non_escaped_length; next++) { JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next); @@ -1435,8 +1435,8 @@ bool ConnectionGraph::complete_connection_graph( assert(es <= PointsToNode::ArgEscape, "sanity"); if (es == PointsToNode::NoEscape) { if (find_init_values_null(ptn, _igvn) > 0) { - // Adding references to NULL object does not change escape states - // since it does not escape. Also no fields are added to NULL object. + // Adding references to null object does not change escape states + // since it does not escape. Also no fields are added to null object. 
add_java_object_edges(null_obj, false); } } @@ -1446,7 +1446,7 @@ bool ConnectionGraph::complete_connection_graph( // seen by an other thread. Mark it so that when it is // expanded no MemBarStoreStore is added. InitializeNode* ini = n->as_Allocate()->initialization(); - if (ini != NULL) + if (ini != nullptr) ini->set_does_not_escape(); } } @@ -1562,7 +1562,7 @@ int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_w } assert(!use->is_JavaObject(), "sanity"); if (use->is_Arraycopy()) { - if (jobj == null_obj) { // NULL object does not have field edges + if (jobj == null_obj) { // null object does not have field edges continue; } // Added edge from Arraycopy node to arraycopy's source java object @@ -1583,7 +1583,7 @@ int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_w for (EdgeIterator i(use); i.has_next(); i.next()) { PointsToNode* e = i.get(); if (e->is_Arraycopy()) { - if (jobj == null_obj) { // NULL object does not have field edges + if (jobj == null_obj) { // null object does not have field edges continue; } // Add edge from arraycopy's destination java object to Arraycopy node. @@ -1658,7 +1658,7 @@ void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* bas if (// Skip phantom_object since it is only used to indicate that // this field's content globally escapes. (base != phantom_obj) && - // NULL object node does not have fields. + // null object node does not have fields. 
(base != null_obj)) { for (EdgeIterator i(base); i.has_next(); i.next()) { PointsToNode* f = i.get(); @@ -1692,7 +1692,7 @@ int ConnectionGraph::find_field_value(FieldNode* field) { if (base->ideal_node()->is_Allocate()) { return 0; } - assert(base == null_obj, "only NULL ptr base expected here"); + assert(base == null_obj, "only null ptr base expected here"); } } if (add_edge(field, phantom_obj)) { @@ -1715,7 +1715,7 @@ int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) { } assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity"); #ifdef ASSERT - if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == NULL) { + if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == nullptr) { const char* name = alloc->as_CallStaticJava()->_name; assert(strncmp(name, "_multianewarray", 15) == 0, "sanity"); } @@ -1749,8 +1749,8 @@ int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseTransform* int new_edges = 0; // Check if an oop field's initializing value is recorded and add - // a corresponding NULL if field's value if it is not recorded. - // Connection Graph does not record a default initialization by NULL + // a corresponding null if field's value if it is not recorded. + // Connection Graph does not record a default initialization by null // captured by Initialize node. // for (EdgeIterator i(pta); i.has_next(); i.next()) { @@ -1762,7 +1762,7 @@ int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseTransform* if (offset == Type::OffsetBot) { if (!visited_bottom_offset) { // OffsetBot is used to reference array's element, - // always add reference to NULL to all Field nodes since we don't + // always add reference to null to all Field nodes since we don't // known which element is referenced. 
if (add_edge(field, null_obj)) { // New edge was added @@ -1785,23 +1785,23 @@ int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseTransform* } if (!offsets_worklist.contains(offset)) { offsets_worklist.append(offset); - Node* value = NULL; - if (ini != NULL) { + Node* value = nullptr; + if (ini != nullptr) { // StoreP::memory_type() == T_ADDRESS BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS; Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase); // Make sure initializing store has the same type as this AddP. // This AddP may reference non existing field because it is on a // dead branch of bimorphic call which is not eliminated yet. - if (store != NULL && store->is_Store() && + if (store != nullptr && store->is_Store() && store->as_Store()->memory_type() == ft) { value = store->in(MemNode::ValueIn); #ifdef ASSERT if (VerifyConnectionGraph) { // Verify that AddP already points to all objects the value points to. PointsToNode* val = ptnode_adr(value->_idx); - assert((val != NULL), "should be processed already"); - PointsToNode* missed_obj = NULL; + assert((val != nullptr), "should be processed already"); + PointsToNode* missed_obj = nullptr; if (val->is_JavaObject()) { if (!field->points_to(val->as_JavaObject())) { missed_obj = val; @@ -1823,7 +1823,7 @@ int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseTransform* } } } - if (missed_obj != NULL) { + if (missed_obj != nullptr) { tty->print_cr("----------field---------------------------------"); field->dump(); tty->print_cr("----------missed referernce to object-----------"); @@ -1841,12 +1841,12 @@ int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseTransform* // by Initialize node. // // Need to check for dependent loads to separate such stores from - // stores which follow loads. For now, add initial value NULL so + // stores which follow loads. 
For now, add initial value null so // that compare pointers optimization works correctly. } } - if (value == NULL) { - // A field's initializing value was not recorded. Add NULL. + if (value == nullptr) { + // A field's initializing value was not recorded. Add null. if (add_edge(field, null_obj)) { // New edge was added new_edges++; @@ -2160,7 +2160,7 @@ const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* n) { assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity"); // Check simple cases first. - if (jobj1 != NULL) { + if (jobj1 != nullptr) { if (jobj1->escape_state() == PointsToNode::NoEscape) { if (jobj1 == jobj2) { // Comparing the same not escaping object. @@ -2174,7 +2174,7 @@ const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* n) { } } } - if (jobj2 != NULL) { + if (jobj2 != nullptr) { if (jobj2->escape_state() == PointsToNode::NoEscape) { Node* obj = jobj2->ideal_node(); // Comparing not escaping allocation. @@ -2184,8 +2184,8 @@ const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* n) { } } } - if (jobj1 != NULL && jobj1 != phantom_obj && - jobj2 != NULL && jobj2 != phantom_obj && + if (jobj1 != nullptr && jobj1 != phantom_obj && + jobj2 != nullptr && jobj2 != phantom_obj && jobj1->ideal_node()->is_Con() && jobj2->ideal_node()->is_Con()) { // Klass or String constants compare. 
Need to be careful with @@ -2234,7 +2234,7 @@ const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* n) { void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) { PointsToNode* ptadr = _nodes.at(n->_idx); - if (ptadr != NULL) { + if (ptadr != nullptr) { assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity"); return; } @@ -2245,7 +2245,7 @@ void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) { void ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) { PointsToNode* ptadr = _nodes.at(n->_idx); - if (ptadr != NULL) { + if (ptadr != nullptr) { assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity"); return; } @@ -2256,7 +2256,7 @@ void ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) { void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) { PointsToNode* ptadr = _nodes.at(n->_idx); - if (ptadr != NULL) { + if (ptadr != nullptr) { assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity"); return; } @@ -2273,9 +2273,9 @@ void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offse void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es, PointsToNode* src, PointsToNode* dst) { assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar"); - assert((src != null_obj) && (dst != null_obj), "not for ConP NULL"); + assert((src != null_obj) && (dst != null_obj), "not for ConP null"); PointsToNode* ptadr = _nodes.at(n->_idx); - if (ptadr != NULL) { + if (ptadr != nullptr) { assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity"); return; } @@ -2297,16 +2297,16 @@ bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) { // Check only oop fields. 
if (!adr_type->isa_aryptr() || adr_type->isa_aryptr()->elem() == Type::BOTTOM || - adr_type->isa_aryptr()->elem()->make_oopptr() != NULL) { + adr_type->isa_aryptr()->elem()->make_oopptr() != nullptr) { // OffsetBot is used to reference array's element. Ignore first AddP. - if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) { + if (find_second_addp(n, n->in(AddPNode::Base)) == nullptr) { bt = T_OBJECT; } } } else if (offset != oopDesc::klass_offset_in_bytes()) { if (adr_type->isa_instptr()) { ciField* field = _compile->alias_type(adr_type->isa_instptr())->field(); - if (field != NULL) { + if (field != nullptr) { bt = field->layout_type(); } else { // Check for unsafe oop field access @@ -2321,7 +2321,7 @@ bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) { } else if (adr_type->isa_aryptr()) { if (offset == arrayOopDesc::length_offset_in_bytes()) { // Ignore array length load. - } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) { + } else if (find_second_addp(n, n->in(AddPNode::Base)) != nullptr) { // Ignore first AddP. } else { const Type* elemtype = adr_type->isa_aryptr()->elem(); @@ -2341,31 +2341,31 @@ bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) { return (is_reference_type(bt) || bt == T_NARROWOOP); } -// Returns unique pointed java object or NULL. +// Returns unique pointed java object or null. JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) { assert(!_collecting, "should not call when constructed graph"); // If the node was created after the escape computation we can't answer. uint idx = n->_idx; if (idx >= nodes_size()) { - return NULL; + return nullptr; } PointsToNode* ptn = ptnode_adr(idx); - if (ptn == NULL) { - return NULL; + if (ptn == nullptr) { + return nullptr; } if (ptn->is_JavaObject()) { return ptn->as_JavaObject(); } assert(ptn->is_LocalVar(), "sanity"); // Check all java objects it points to. 
- JavaObjectNode* jobj = NULL; + JavaObjectNode* jobj = nullptr; for (EdgeIterator i(ptn); i.has_next(); i.next()) { PointsToNode* e = i.get(); if (e->is_JavaObject()) { - if (jobj == NULL) { + if (jobj == nullptr) { jobj = e->as_JavaObject(); } else if (jobj != e) { - return NULL; + return nullptr; } } } @@ -2406,7 +2406,7 @@ bool ConnectionGraph::not_global_escape(Node *n) { return false; } PointsToNode* ptn = ptnode_adr(idx); - if (ptn == NULL) { + if (ptn == nullptr) { return false; // not in congraph (e.g. ConI) } PointsToNode::EscapeState es = ptn->escape_state(); @@ -2480,7 +2480,7 @@ bool FieldNode::has_base(JavaObjectNode* jobj) const { bool ConnectionGraph::is_captured_store_address(Node* addp) { // Handle simple case first. - assert(_igvn->type(addp)->isa_oopptr() == NULL, "should be raw access"); + assert(_igvn->type(addp)->isa_oopptr() == nullptr, "should be raw access"); if (addp->in(AddPNode::Address)->is_Proj() && addp->in(AddPNode::Address)->in(0)->is_Allocate()) { return true; } else if (addp->in(AddPNode::Address)->is_Phi()) { @@ -2500,7 +2500,7 @@ bool ConnectionGraph::is_captured_store_address(Node* addp) { int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) { const Type *adr_type = phase->type(adr); - if (adr->is_AddP() && adr_type->isa_oopptr() == NULL && is_captured_store_address(adr)) { + if (adr->is_AddP() && adr_type->isa_oopptr() == nullptr && is_captured_store_address(adr)) { // We are computing a raw address for a store captured by an Initialize // compute an appropriate address type. AddP cases #3 and #5 (see below). 
int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); @@ -2510,7 +2510,7 @@ int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) { return offs; } const TypePtr *t_ptr = adr_type->isa_ptr(); - assert(t_ptr != NULL, "must be a pointer type"); + assert(t_ptr != nullptr, "must be a pointer type"); return t_ptr->offset(); } @@ -2602,7 +2602,7 @@ Node* ConnectionGraph::get_addp_base(Node *addp) { int opcode = uncast_base->Opcode(); assert(opcode == Op_ConP || opcode == Op_ThreadLocal || opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() || - (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) || + (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != nullptr)) || is_captured_store_address(addp), "sanity"); } } @@ -2646,7 +2646,7 @@ Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) { // return addp2; } - return NULL; + return nullptr; } // @@ -2656,9 +2656,9 @@ Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) { bool ConnectionGraph::split_AddP(Node *addp, Node *base) { PhaseGVN* igvn = _igvn; const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr(); - assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr"); + assert(base_t != nullptr && base_t->is_known_instance(), "expecting instance oopptr"); const TypeOopPtr *t = igvn->type(addp)->isa_oopptr(); - if (t == NULL) { + if (t == nullptr) { // We are computing a raw address for a store captured by an Initialize // compute an appropriate address type (cases #3 and #5). 
assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer"); @@ -2716,7 +2716,7 @@ bool ConnectionGraph::split_AddP(Node *addp, Node *base) { // AddP case #4 (adr is array's element offset AddP node) #ifdef ASSERT const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr(); - assert(adr->is_AddP() && atype != NULL && + assert(adr->is_AddP() && atype != nullptr && atype->instance_id() == inst_id, "array's element offset should be processed first"); #endif } @@ -2744,12 +2744,12 @@ PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, Gro } // Have we recently created a Phi for this alias index? PhiNode *result = get_map_phi(orig_phi->_idx); - if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) { + if (result != nullptr && C->get_alias_index(result->adr_type()) == alias_idx) { return result; } // Previous check may fail when the same wide memory Phi was split into Phis // for different memory slices. Search all Phis for this region. - if (result != NULL) { + if (result != nullptr) { Node* region = orig_phi->in(0); for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) { Node* phi = region->fast_out(i); @@ -2767,11 +2767,11 @@ PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, Gro // to the Compile object, and the C2Compiler will see it and retry. C->record_failure(_invocation > 0 ? 
C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis()); } - return NULL; + return nullptr; } orig_phi_worklist.append_if_missing(orig_phi); const TypePtr *atype = C->get_adr_type(alias_idx); - result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype); + result = PhiNode::make(orig_phi->in(0), nullptr, Type::MEMORY, atype); C->copy_node_notes_to(result, orig_phi); igvn->set_type(result, result->bottom_type()); record_for_optimizer(result); @@ -2801,7 +2801,7 @@ PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, Gro while(!finished) { while (idx < phi->req()) { Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist); - if (mem != NULL && mem->is_Phi()) { + if (mem != nullptr && mem->is_Phi()) { PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created); if (new_phi_created) { // found an phi for which we created a new split, push current one on worklist and begin @@ -2817,20 +2817,20 @@ PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, Gro } } if (C->failing()) { - return NULL; + return nullptr; } result->set_req(idx++, mem); } #ifdef ASSERT // verify that the new Phi has an input for each input of the original assert( phi->req() == result->req(), "must have same number of inputs."); - assert( result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match"); + assert( result->in(0) != nullptr && result->in(0) == phi->in(0), "regions must match"); #endif // Check if all new phi's inputs have specified alias index. // Otherwise use old phi. 
for (uint i = 1; i < phi->req(); i++) { Node* in = result->in(i); - assert((phi->in(i) == NULL) == (in == NULL), "inputs must correspond."); + assert((phi->in(i) == nullptr) == (in == nullptr), "inputs must correspond."); } // we have finished processing a Phi, see if there are any more to do finished = (phi_list.length() == 0 ); @@ -2870,7 +2870,7 @@ void ConnectionGraph::move_inst_mem(Node* n, GrowableArray &orig_phi Compile* C = _compile; PhaseGVN* igvn = _igvn; const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr(); - assert(tp != NULL, "ptr type"); + assert(tp != nullptr, "ptr type"); int alias_idx = C->get_alias_index(tp); int general_idx = C->get_general_index(alias_idx); @@ -2899,7 +2899,7 @@ void ConnectionGraph::move_inst_mem(Node* n, GrowableArray &orig_phi continue; } tp = use->as_MemBar()->adr_type()->isa_ptr(); - if ((tp != NULL && C->get_alias_index(tp) == alias_idx) || + if ((tp != nullptr && C->get_alias_index(tp) == alias_idx) || alias_idx == general_idx) { continue; // Nothing to do } @@ -2920,14 +2920,14 @@ void ConnectionGraph::move_inst_mem(Node* n, GrowableArray &orig_phi } // Memory nodes should have new memory input. tp = igvn->type(use->in(MemNode::Address))->isa_ptr(); - assert(tp != NULL, "ptr type"); + assert(tp != nullptr, "ptr type"); int idx = C->get_alias_index(tp); - assert(get_map(use->_idx) != NULL || idx == alias_idx, + assert(get_map(use->_idx) != nullptr || idx == alias_idx, "Following memory nodes should have new memory input or be on the same memory slice"); } else if (use->is_Phi()) { // Phi nodes should be split and moved already. tp = use->as_Phi()->adr_type()->isa_ptr(); - assert(tp != NULL, "ptr type"); + assert(tp != nullptr, "ptr type"); int idx = C->get_alias_index(tp); assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice"); } else { @@ -2943,15 +2943,15 @@ void ConnectionGraph::move_inst_mem(Node* n, GrowableArray &orig_phi // is the specified alias index. 
// Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray &orig_phis) { - if (orig_mem == NULL) { + if (orig_mem == nullptr) { return orig_mem; } Compile* C = _compile; PhaseGVN* igvn = _igvn; const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr(); - bool is_instance = (toop != NULL) && toop->is_known_instance(); + bool is_instance = (toop != nullptr) && toop->is_known_instance(); Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory); - Node *prev = NULL; + Node *prev = nullptr; Node *result = orig_mem; while (prev != result) { prev = result; @@ -2963,12 +2963,12 @@ Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArra if (at == Type::TOP) { break; // Dead } - assert (at->isa_ptr() != NULL, "pointer type required."); + assert (at->isa_ptr() != nullptr, "pointer type required."); int idx = C->get_alias_index(at->is_ptr()); if (idx == alias_idx) { break; // Found } - if (!is_instance && (at->isa_oopptr() == NULL || + if (!is_instance && (at->isa_oopptr() == nullptr || !at->is_oopptr()->is_known_instance())) { break; // Do not skip store to general memory slice. } @@ -2992,7 +2992,7 @@ Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArra AllocateNode* alloc = proj_in->as_Initialize()->allocation(); // Stop if this is the initialization for the object instance which // which contains this memory slice, otherwise skip over it. 
- if (alloc == NULL || alloc->_idx != (uint)toop->instance_id()) { + if (alloc == nullptr || alloc->_idx != (uint)toop->instance_id()) { result = proj_in->in(TypeFunc::Memory); } } else if (proj_in->is_MemBar()) { @@ -3018,14 +3018,14 @@ Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArra result = mmem->memory_at(C->get_general_index(alias_idx)); result = find_inst_mem(result, alias_idx, orig_phis); if (C->failing()) { - return NULL; + return nullptr; } mmem->set_memory_at(alias_idx, result); } } else if (result->is_Phi() && C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) { Node *un = result->as_Phi()->unique_input(igvn); - if (un != NULL) { + if (un != nullptr) { orig_phis.append_if_missing(result->as_Phi()); result = un; } else { @@ -3040,7 +3040,7 @@ Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArra // Otherwise skip it (the call updated 'result' value). } else if (result->Opcode() == Op_SCMemProj) { Node* mem = result->in(0); - Node* adr = NULL; + Node* adr = nullptr; if (mem->is_LoadStore()) { adr = mem->in(MemNode::Address); } else { @@ -3050,7 +3050,7 @@ Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArra } const Type *at = igvn->type(adr); if (at != Type::TOP) { - assert(at->isa_ptr() != NULL, "pointer type required."); + assert(at->isa_ptr() != nullptr, "pointer type required."); int idx = C->get_alias_index(at->is_ptr()); if (idx == alias_idx) { // Assert in debug mode @@ -3063,7 +3063,7 @@ Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArra Node* adr = result->in(3); // Memory edge corresponds to destination array const Type *at = igvn->type(adr); if (at != Type::TOP) { - assert(at->isa_ptr() != NULL, "pointer type required."); + assert(at->isa_ptr() != nullptr, "pointer type required."); int idx = C->get_alias_index(at->is_ptr()); if (idx == alias_idx) { // Assert in debug mode @@ -3213,7 +3213,7 @@ void 
ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist, } // Find CheckCastPP for the allocate or for the return value of a call n = alloc->result_cast(); - if (n == NULL) { // No uses except Initialize node + if (n == nullptr) { // No uses except Initialize node if (alloc->is_Allocate()) { // Set the scalar_replaceable flag for allocation // so it could be eliminated if it has no uses. @@ -3238,7 +3238,7 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist, if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL && (alloc->is_AllocateArray() || igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeInstKlassPtr::OBJECT)) { - Node *cast2 = NULL; + Node *cast2 = nullptr; for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { Node *use = n->fast_out(i); if (use->is_CheckCastPP()) { @@ -3246,7 +3246,7 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist, break; } } - if (cast2 != NULL) { + if (cast2 != nullptr) { n = cast2; } else { // Non-scalar replaceable if the allocation type is unknown statically @@ -3257,7 +3257,7 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist, } const TypeOopPtr *t = igvn->type(n)->isa_oopptr(); - if (t == NULL) { + if (t == nullptr) { continue; // not a TypeOopPtr } if (!t->klass_is_exact()) { @@ -3301,7 +3301,7 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist, "only AddP nodes are Field edges in CG"); if (use->outcnt() > 0) { // Don't process dead nodes Node* addp2 = find_second_addp(use, use->in(AddPNode::Base)); - if (addp2 != NULL) { + if (addp2 != nullptr) { assert(alloc->is_AllocateArray(),"array allocation was expected"); alloc_worklist.append_if_missing(addp2); } @@ -3313,12 +3313,12 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist, // the users of the raw allocation result and push AddP users // on alloc_worklist. 
Node *raw_result = alloc->proj_out_or_null(TypeFunc::Parms); - assert (raw_result != NULL, "must have an allocation result"); + assert (raw_result != nullptr, "must have an allocation result"); for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) { Node *use = raw_result->fast_out(i); if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes Node* addp2 = find_second_addp(use, raw_result); - if (addp2 != NULL) { + if (addp2 != nullptr) { assert(alloc->is_AllocateArray(),"array allocation was expected"); alloc_worklist.append_if_missing(addp2); } @@ -3330,11 +3330,11 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist, } } else if (n->is_AddP()) { JavaObjectNode* jobj = unique_java_object(get_addp_base(n)); - if (jobj == NULL || jobj == phantom_obj) { + if (jobj == nullptr || jobj == phantom_obj) { #ifdef ASSERT ptnode_adr(get_addp_base(n)->_idx)->dump(); ptnode_adr(n->_idx)->dump(); - assert(jobj != NULL && jobj != phantom_obj, "escaped allocation"); + assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation"); #endif _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis()); return; @@ -3351,10 +3351,10 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist, continue; // already processed } JavaObjectNode* jobj = unique_java_object(n); - if (jobj == NULL || jobj == phantom_obj) { + if (jobj == nullptr || jobj == phantom_obj) { #ifdef ASSERT ptnode_adr(n->_idx)->dump(); - assert(jobj != NULL && jobj != phantom_obj, "escaped allocation"); + assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation"); #endif _compile->record_failure(_invocation > 0 ? 
C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis()); return; @@ -3362,7 +3362,7 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist, Node *val = get_map(jobj->idx()); // CheckCastPP node TypeNode *tn = n->as_Type(); const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr(); - assert(tinst != NULL && tinst->is_known_instance() && + assert(tinst != nullptr && tinst->is_known_instance() && tinst->instance_id() == jobj->idx() , "instance type expected."); const Type *tn_type = igvn->type(tn); @@ -3372,7 +3372,7 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist, } else { tn_t = tn_type->isa_oopptr(); } - if (tn_t != NULL && tinst->maybe_java_subtype_of(tn_t)) { + if (tn_t != nullptr && tinst->maybe_java_subtype_of(tn_t)) { if (tn_type->isa_narrowoop()) { tn_type = tinst->make_narrowoop(); } else { @@ -3385,7 +3385,7 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist, record_for_optimizer(n); } else { assert(tn_type == TypePtr::NULL_PTR || - tn_t != NULL && !tinst->maybe_java_subtype_of(tn_t), + tn_t != nullptr && !tinst->maybe_java_subtype_of(tn_t), "unexpected type"); continue; // Skip dead path with different type } @@ -3407,7 +3407,7 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist, } } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes Node* addp2 = find_second_addp(use, n); - if (addp2 != NULL) { + if (addp2 != nullptr) { alloc_worklist.append_if_missing(addp2); } alloc_worklist.append_if_missing(use); @@ -3469,9 +3469,9 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist, dest = get_addp_base(dest); } JavaObjectNode* jobj = unique_java_object(dest); - if (jobj != NULL) { + if (jobj != nullptr) { Node *base = get_map(jobj->idx()); - if (base != NULL) { + if (base != nullptr) { const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr(); ac->_dest_type = base_t; } @@ -3481,9 +3481,9 @@ void 
ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist, src = get_addp_base(src); } jobj = unique_java_object(src); - if (jobj != NULL) { + if (jobj != nullptr) { Node* base = get_map(jobj->idx()); - if (base != NULL) { + if (base != nullptr) { const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr(); ac->_src_type = base_t; } @@ -3509,14 +3509,14 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist, } else if (n->is_MemBar()) { // Initialize, MemBar nodes // we don't need to do anything, but the users must be pushed n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory); - if (n == NULL) { + if (n == nullptr) { continue; } } else if (n->Opcode() == Op_StrCompressedCopy || n->Opcode() == Op_EncodeISOArray) { // get the memory projection n = n->find_out_with(Op_SCMemProj); - assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required"); + assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required"); } else { assert(n->is_Mem(), "memory node required."); Node *addr = n->in(MemNode::Address); @@ -3524,7 +3524,7 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist, if (addr_t == Type::TOP) { continue; } - assert (addr_t->isa_ptr() != NULL, "pointer type required."); + assert (addr_t->isa_ptr() != nullptr, "pointer type required."); int alias_idx = _compile->get_alias_index(addr_t->is_ptr()); assert ((uint)alias_idx < new_index_end, "wrong alias index"); Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis); @@ -3541,7 +3541,7 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist, } else if (n->is_LoadStore()) { // get the memory projection n = n->find_out_with(Op_SCMemProj); - assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required"); + assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required"); } } // push user on appropriate worklist @@ -3602,8 +3602,8 @@ void 
ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist, uint nslices = MIN2(nmm->req(), new_index_start); for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) { Node* mem = nmm->in(i); - Node* cur = NULL; - if (mem == NULL || mem->is_top()) { + Node* cur = nullptr; + if (mem == nullptr || mem->is_top()) { continue; } // First, update mergemem by moving memory nodes to corresponding slices @@ -3611,10 +3611,10 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist, while (mem->is_Mem()) { const Type *at = igvn->type(mem->in(MemNode::Address)); if (at != Type::TOP) { - assert (at->isa_ptr() != NULL, "pointer type required."); + assert (at->isa_ptr() != nullptr, "pointer type required."); uint idx = (uint)_compile->get_alias_index(at->is_ptr()); if (idx == i) { - if (cur == NULL) { + if (cur == nullptr) { cur = mem; } } else { @@ -3625,7 +3625,7 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist, } mem = mem->in(MemNode::Memory); } - nmm->set_memory_at(i, (cur != NULL) ? cur : mem); + nmm->set_memory_at(i, (cur != nullptr) ? cur : mem); // Find any instance of the current type if we haven't encountered // already a memory slice of the instance along the memory chain. for (uint ni = new_index_start; ni < new_index_end; ni++) { @@ -3695,7 +3695,7 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist, for (uint i = 0; i < ideal_nodes.size(); i++) { Node* n = ideal_nodes.at(i); Node* nmem = get_map(n->_idx); - assert(nmem != NULL, "sanity"); + assert(nmem != nullptr, "sanity"); if (n->is_Mem()) { #if 0 // ifdef ASSERT Node* old_mem = n->in(MemNode::Memory); @@ -3795,7 +3795,7 @@ void PointsToNode::dump(bool print_state, outputStream* out, bool newline) const out->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : ""); } out->print(" ]] "); - if (_node == NULL) { + if (_node == nullptr) { out->print("%s", newline ? "\n" : ""); } else { _node->dump(newline ? 
"\n" : "", false, out); @@ -3807,7 +3807,7 @@ void ConnectionGraph::dump(GrowableArray& ptnodes_worklist) { int ptnodes_length = ptnodes_worklist.length(); for (int i = 0; i < ptnodes_length; i++) { PointsToNode *ptn = ptnodes_worklist.at(i); - if (ptn == NULL || !ptn->is_JavaObject()) { + if (ptn == nullptr || !ptn->is_JavaObject()) { continue; } PointsToNode::EscapeState es = ptn->escape_state(); diff --git a/src/hotspot/share/opto/escape.hpp b/src/hotspot/share/opto/escape.hpp index 27ab19f9b42..a334a1d9ee0 100644 --- a/src/hotspot/share/opto/escape.hpp +++ b/src/hotspot/share/opto/escape.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -295,7 +295,7 @@ public: inline PointsToIterator(const PointsToNode* n, int cnt) : node(n), cnt(cnt), i(0) { } inline bool has_next() const { return i < cnt; } inline void next() { i++; } - PointsToNode* get() const { ShouldNotCallThis(); return NULL; } + PointsToNode* get() const { ShouldNotCallThis(); return nullptr; } }; class EdgeIterator: public PointsToIterator { @@ -432,7 +432,7 @@ private: // Set the escape state of an object and its fields. void set_escape_state(PointsToNode* ptn, PointsToNode::EscapeState esc NOT_PRODUCT(COMMA const char* reason)) { - // Don't change non-escaping state of NULL pointer. + // Don't change non-escaping state of null pointer. if (ptn != null_obj) { if (ptn->escape_state() < esc) { NOT_PRODUCT(trace_es_update_helper(ptn, esc, false, reason)); @@ -446,7 +446,7 @@ private: } void set_fields_escape_state(PointsToNode* ptn, PointsToNode::EscapeState esc NOT_PRODUCT(COMMA const char* reason)) { - // Don't change non-escaping state of NULL pointer. + // Don't change non-escaping state of null pointer. 
if (ptn != null_obj) { if (ptn->fields_escape_state() < esc) { NOT_PRODUCT(trace_es_update_helper(ptn, esc, true, reason)); @@ -472,7 +472,7 @@ private: // Optimize objects compare. const TypeInt* optimize_ptr_compare(Node* n); - // Returns unique corresponding java object or NULL. + // Returns unique corresponding java object or null. JavaObjectNode* unique_java_object(Node *n); // Add an edge of the specified type pointing to the specified target. @@ -510,7 +510,7 @@ private: if (is_new) { // New edge? assert(!_verify, "graph is incomplete"); if (to == null_obj) { - return is_new; // Don't add fields to NULL pointer. + return is_new; // Don't add fields to null pointer. } if (to->is_JavaObject()) { is_new = to->add_edge(from); @@ -564,7 +564,7 @@ private: PhiNode* get_map_phi(int idx) { Node* phi = _node_map[idx]; - return (phi == NULL) ? NULL : phi->as_Phi(); + return (phi == nullptr) ? nullptr : phi->as_Phi(); } // Returns true if there is an object in the scope of sfn that does not escape globally. @@ -617,21 +617,21 @@ public: void add_local_var_and_edge(Node* n, PointsToNode::EscapeState es, Node* to, Unique_Node_List *delayed_worklist) { PointsToNode* ptn = ptnode_adr(to->_idx); - if (delayed_worklist != NULL) { // First iteration of CG construction + if (delayed_worklist != nullptr) { // First iteration of CG construction add_local_var(n, es); - if (ptn == NULL) { + if (ptn == nullptr) { delayed_worklist->push(n); return; // Process it later. } } else { - assert(ptn != NULL, "node should be registered"); + assert(ptn != nullptr, "node should be registered"); } add_edge(ptnode_adr(n->_idx), ptn); } // Map ideal node to existing PointsTo node (usually phantom_object). 
void map_ideal_node(Node *n, PointsToNode* ptn) { - assert(ptn != NULL, "only existing PointsTo node"); + assert(ptn != nullptr, "only existing PointsTo node"); _nodes.at_put(n->_idx, ptn); } @@ -649,8 +649,8 @@ public: }; inline PointsToNode::PointsToNode(ConnectionGraph *CG, Node* n, EscapeState es, NodeType type): - _edges(CG->_compile->comp_arena(), 2, 0, NULL), - _uses (CG->_compile->comp_arena(), 2, 0, NULL), + _edges(CG->_compile->comp_arena(), 2, 0, nullptr), + _uses (CG->_compile->comp_arena(), 2, 0, nullptr), _type((u1)type), _flags(ScalarReplaceable), _escape((u1)es), @@ -658,12 +658,12 @@ inline PointsToNode::PointsToNode(ConnectionGraph *CG, Node* n, EscapeState es, _node(n), _idx(n->_idx), _pidx(CG->next_pidx()) { - assert(n != NULL && es != UnknownEscape, "sanity"); + assert(n != nullptr && es != UnknownEscape, "sanity"); } inline FieldNode::FieldNode(ConnectionGraph *CG, Node* n, EscapeState es, int offs, bool is_oop): PointsToNode(CG, n, es, Field), - _bases(CG->_compile->comp_arena(), 2, 0, NULL), + _bases(CG->_compile->comp_arena(), 2, 0, nullptr), _offset(offs), _is_oop(is_oop), _has_unknown_base(false) { } diff --git a/src/hotspot/share/opto/gcm.cpp b/src/hotspot/share/opto/gcm.cpp index 3654b9ff768..95bb4f11472 100644 --- a/src/hotspot/share/opto/gcm.cpp +++ b/src/hotspot/share/opto/gcm.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -61,7 +61,7 @@ void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) { if (use->is_Proj()) { Block* buse = get_block_for_node(use); if (buse != b) { // In wrong block? 
- if (buse != NULL) { + if (buse != nullptr) { buse->find_remove(use); // Remove from wrong block } map_node_to_block(use, b); @@ -77,9 +77,9 @@ void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) { // the projection will be in a predecessor block. void PhaseCFG::replace_block_proj_ctrl( Node *n ) { const Node *in0 = n->in(0); - assert(in0 != NULL, "Only control-dependent"); + assert(in0 != nullptr, "Only control-dependent"); const Node *p = in0->is_block_proj(); - if (p != NULL && p != n) { // Control from a block projection? + if (p != nullptr && p != n) { // Control from a block projection? assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here"); // Find trailing Region Block *pb = get_block_for_node(in0); // Block-projection already has basic block @@ -109,7 +109,7 @@ bool PhaseCFG::is_dominator(Node* dom_node, Node* node) { } Block* d = find_block_for_node(dom_node); Block* n = find_block_for_node(node); - assert(n != NULL && d != NULL, "blocks must exist"); + assert(n != nullptr && d != nullptr, "blocks must exist"); if (d == n) { if (dom_node->is_block_start()) { @@ -212,15 +212,15 @@ void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) { // removed in final_graph_reshaping), fix the control of the // node to cover the precedence edges and remove the // dependencies. - Node* n = NULL; + Node* n = nullptr; for (uint i = node->len()-1; i >= node->req(); i--) { Node* m = node->in(i); - if (m == NULL) continue; + if (m == nullptr) continue; // Only process precedence edges that are CFG nodes. 
Safepoints and control projections can be in the middle of a block if (is_CFG(m)) { node->rm_prec(i); - if (n == NULL) { + if (n == nullptr) { n = m; } else { assert(is_dominator(n, m) || is_dominator(m, n), "one must dominate the other"); @@ -231,7 +231,7 @@ void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) { assert(node->as_Mach()->ideal_Opcode() == Op_StoreCM, "must be StoreCM node"); } } - if (n != NULL) { + if (n != nullptr) { assert(node->in(0), "control should have been set"); assert(is_dominator(n, node->in(0)) || is_dominator(node->in(0), n), "one must dominate the other"); if (!is_dominator(n, node->in(0))) { @@ -239,9 +239,9 @@ void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) { } } - // process all inputs that are non NULL + // process all inputs that are non null for (int i = node->req()-1; i >= 0; --i) { - if (node->in(i) != NULL) { + if (node->in(i) != nullptr) { spstack.push(node->in(i)); } } @@ -254,10 +254,10 @@ void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) { // Check this by by seeing that it is dominated by b1, the deepest // input observed until b2. static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) { - if (b1 == NULL) return; + if (b1 == nullptr) return; assert(b1->_dom_depth < b2->_dom_depth, "sanity"); Block* tmp = b2; - while (tmp != b1 && tmp != NULL) { + while (tmp != b1 && tmp != nullptr) { tmp = tmp->_idom; } if (tmp != b1) { @@ -265,7 +265,7 @@ static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) { tty->print_cr("!!! Unschedulable graph !!!"); for (uint j=0; jlen(); j++) { // For all inputs Node* inn = n->in(j); // Get input - if (inn == NULL) continue; // Ignore NULL, missing inputs + if (inn == nullptr) continue; // Ignore null, missing inputs Block* inb = cfg->get_block_for_node(inn); tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order, inb->_idom ? 
inb->_idom->_pre_order : 0, inb->_dom_depth); @@ -280,13 +280,13 @@ static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) { static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) { // Find the last input dominated by all other inputs. - Block* deepb = NULL; // Deepest block so far + Block* deepb = nullptr; // Deepest block so far int deepb_dom_depth = 0; for (uint k = 0; k < n->len(); k++) { // For all inputs Node* inn = n->in(k); // Get input - if (inn == NULL) continue; // Ignore NULL, missing inputs + if (inn == nullptr) continue; // Ignore null, missing inputs Block* inb = cfg->get_block_for_node(inn); - assert(inb != NULL, "must already have scheduled this input"); + assert(inb != nullptr, "must already have scheduled this input"); if (deepb_dom_depth < (int) inb->_dom_depth) { // The new inb must be dominated by the previous deepb. // The various inputs must be linearly ordered in the dom @@ -296,7 +296,7 @@ static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) { deepb_dom_depth = deepb->_dom_depth; } } - assert(deepb != NULL, "must be at least one input to n"); + assert(deepb != nullptr, "must be at least one input to n"); return deepb; } @@ -325,7 +325,7 @@ bool PhaseCFG::schedule_early(VectorSet &visited, Node_Stack &roots) { // to root and nodes that use is_block_proj() nodes should be attached // to the region that starts their block. const Node* control_input = parent_node->in(0); - if (control_input != NULL) { + if (control_input != nullptr) { replace_block_proj_ctrl(parent_node); } else { // Is a constant with NO inputs? 
@@ -345,7 +345,7 @@ bool PhaseCFG::schedule_early(VectorSet &visited, Node_Stack &roots) { while (input_index < parent_node->len()) { Node* in = parent_node->in(input_index++); - if (in == NULL) { + if (in == nullptr) { continue; } @@ -401,10 +401,10 @@ bool PhaseCFG::schedule_early(VectorSet &visited, Node_Stack &roots) { //------------------------------dom_lca---------------------------------------- // Find least common ancestor in dominator tree // LCA is a current notion of LCA, to be raised above 'this'. -// As a convenient boundary condition, return 'this' if LCA is NULL. +// As a convenient boundary condition, return 'this' if LCA is null. // Find the LCA of those two nodes. Block* Block::dom_lca(Block* LCA) { - if (LCA == NULL || LCA == this) return this; + if (LCA == nullptr || LCA == this) return this; Block* anc = this; while (anc->_dom_depth > LCA->_dom_depth) @@ -428,7 +428,7 @@ Block* Block::dom_lca(Block* LCA) { // the LCA only with the phi input paths which actually use this def. 
static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, const PhaseCFG* cfg) { Block* buse = cfg->get_block_for_node(use); - if (buse == NULL) return LCA; // Unused killing Projs have no use block + if (buse == nullptr) return LCA; // Unused killing Projs have no use block if (!use->is_Phi()) return buse->dom_lca(LCA); uint pmax = use->req(); // Number of Phi inputs // Why does not this loop just break after finding the matching input to @@ -507,9 +507,9 @@ static Block* memory_early_block(Node* load, Block* early, const PhaseCFG* cfg) Node* mem_inputs[4]; int mem_inputs_length = 0; - if (base != NULL) mem_inputs[mem_inputs_length++] = base; - if (index != NULL) mem_inputs[mem_inputs_length++] = index; - if (store != NULL) mem_inputs[mem_inputs_length++] = store; + if (base != nullptr) mem_inputs[mem_inputs_length++] = base; + if (index != nullptr) mem_inputs[mem_inputs_length++] = index; + if (store != nullptr) mem_inputs[mem_inputs_length++] = store; // In the comparison below, add one to account for the control input, // which may be null, but always takes up a spot in the in array. @@ -519,9 +519,9 @@ static Block* memory_early_block(Node* load, Block* early, const PhaseCFG* cfg) // from the early block of only the address portion of the instruction, // and ignore other blocks that may have factored into the wider // schedule_early calculation. 
- if (load->in(0) != NULL) mem_inputs[mem_inputs_length++] = load->in(0); + if (load->in(0) != nullptr) mem_inputs[mem_inputs_length++] = load->in(0); - Block* deepb = NULL; // Deepest block so far + Block* deepb = nullptr; // Deepest block so far int deepb_dom_depth = 0; for (int i = 0; i < mem_inputs_length; i++) { Block* inb = cfg->get_block_for_node(mem_inputs[i]); @@ -554,9 +554,9 @@ bool PhaseCFG::unrelated_load_in_store_null_block(Node* store, Node* load) { Node* end = store_block->end(); if (end->is_MachNullCheck() && (end->in(1) == store) && store_block->dominates(load_block)) { Node* if_true = end->find_out_with(Op_IfTrue); - assert(if_true != NULL, "null check without null projection"); + assert(if_true != nullptr, "null check without null projection"); Node* null_block_region = if_true->find_out_with(Op_Region); - assert(null_block_region != NULL, "null check without null region"); + assert(null_block_region != nullptr, "null check without null region"); return get_block_for_node(null_block_region) == load_block; } return false; @@ -580,7 +580,7 @@ bool PhaseCFG::unrelated_load_in_store_null_block(Node* store, Node* load) { // above the LCA, if it is not the early block. Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) { assert(load->needs_anti_dependence_check(), "must be a load of some sort"); - assert(LCA != NULL, ""); + assert(LCA != nullptr, ""); DEBUG_ONLY(Block* LCA_orig = LCA); // Compute the alias index. Loads and stores with different alias indices @@ -650,7 +650,7 @@ Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) { Node* initial_mem = load->in(MemNode::Memory); worklist_store.push(initial_mem); worklist_visited.push(initial_mem); - worklist_mem.push(NULL); + worklist_mem.push(nullptr); while (worklist_store.size() > 0) { // Examine a nearby store to see if it might interfere with our load. 
Node* mem = worklist_mem.pop(); @@ -665,7 +665,7 @@ Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) { ) { mem = store; // It's not a possibly interfering store. if (store == initial_mem) - initial_mem = NULL; // only process initial memory once + initial_mem = nullptr; // only process initial memory once for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) { store = mem->fast_out(i); @@ -708,7 +708,7 @@ Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) { MachSafePointNode* ms = (MachSafePointNode*) mstore; assert(ms->is_MachCallJava(), ""); MachCallJavaNode* mcj = (MachCallJavaNode*) ms; - if (mcj->_method == NULL) { + if (mcj->_method == nullptr) { // These runtime calls do not write to Java visible memory // (other than Raw) and so do not require anti-dependence edges. continue; @@ -737,7 +737,7 @@ Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) { // earliest legal block for 'load'. In the latter case, // immediately insert an anti-dependence edge. Block* store_block = get_block_for_node(store); - assert(store_block != NULL, "unused killing projections skipped above"); + assert(store_block != nullptr, "unused killing projections skipped above"); if (store->is_Phi()) { // Loop-phis need to raise load before input. (Other phis are treated @@ -887,9 +887,9 @@ Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, // Iterator for the Node_Backward_Iterator Node *Node_Backward_Iterator::next() { - // If the _stack is empty, then just return NULL: finished. + // If the _stack is empty, then just return null: finished. if ( !_stack.size() ) - return NULL; + return nullptr; // I visit unvisited not-anti-dependence users first, then anti-dependent // children next. I iterate backwards to support removal of nodes. 
@@ -911,7 +911,7 @@ Node *Node_Backward_Iterator::next() { uint src_rpo = _cfg.get_block_for_node(src)->_rpo; // Schedule all nodes in a post-order visit - Node *unvisited = NULL; // Unvisited anti-dependent Node, if any + Node *unvisited = nullptr; // Unvisited anti-dependent Node, if any // Scan for unvisited nodes while (idx > 0) { @@ -1180,7 +1180,7 @@ Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) { // Do not hoist (to cover latency) instructions which target a // single register. Hoisting stretches the live range of the // single register and may force spilling. - MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL; + MachNode* mach = self->is_Mach() ? self->as_Mach() : nullptr; if (mach && mach->out_RegMask().is_bound1() && mach->out_RegMask().is_NotEmpty()) in_latency = true; @@ -1206,10 +1206,10 @@ Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) { while (LCA != early) { LCA = LCA->_idom; // Follow up the dominator tree - if (LCA == NULL) { + if (LCA == nullptr) { // Bailout without retry assert(false, "graph should be schedulable"); - C->record_method_not_compilable("late schedule failed: LCA == NULL"); + C->record_method_not_compilable("late schedule failed: LCA is null"); return least; } @@ -1314,7 +1314,7 @@ void PhaseCFG::schedule_late(VectorSet &visited, Node_Stack &stack) { } #endif - MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL; + MachNode* mach = self->is_Mach() ? self->as_Mach() : nullptr; if (mach) { switch (mach->ideal_Opcode()) { case Op_CreateEx: @@ -1326,7 +1326,7 @@ void PhaseCFG::schedule_late(VectorSet &visited, Node_Stack &stack) { // Don't move CheckCastPP nodes away from their input, if the input // is a rawptr (5071820). 
Node *def = self->in(1); - if (def != NULL && def->bottom_type()->base() == Type::RawPtr) { + if (def != nullptr && def->bottom_type()->base() == Type::RawPtr) { early->add_inst(self); #ifdef ASSERT _raw_oops.push(def); @@ -1384,20 +1384,20 @@ void PhaseCFG::schedule_late(VectorSet &visited, Node_Stack &stack) { } // Gather LCA of all uses - Block *LCA = NULL; + Block *LCA = nullptr; { for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) { // For all uses, find LCA Node* use = self->fast_out(i); LCA = raise_LCA_above_use(LCA, use, self, this); } - guarantee(LCA != NULL, "There must be a LCA"); + guarantee(LCA != nullptr, "There must be a LCA"); } // (Hide defs of imax, i from rest of block.) // Place temps in the block of their use. This isn't a // requirement for correctness but it reduces useless // interference between temps and other nodes. - if (mach != NULL && mach->is_MachTemp()) { + if (mach != nullptr && mach->is_MachTemp()) { map_node_to_block(self, LCA); LCA->add_inst(self); continue; @@ -1432,7 +1432,7 @@ void PhaseCFG::schedule_late(VectorSet &visited, Node_Stack &stack) { while (LCA->_loop->depth() > early->_loop->depth()) { LCA = LCA->_idom; } - assert(LCA != NULL, "a valid LCA must exist"); + assert(LCA != nullptr, "a valid LCA must exist"); verify_memory_writer_placement(LCA, self); } @@ -1445,10 +1445,10 @@ void PhaseCFG::schedule_late(VectorSet &visited, Node_Stack &stack) { // allocatable (hoisting can make a value live longer, leading to // anti and output dependency problems which are normally resolved // by the register allocator giving everyone a different register). - if (mach != NULL && must_clone[mach->ideal_Opcode()]) + if (mach != nullptr && must_clone[mach->ideal_Opcode()]) try_to_hoist = false; - Block* late = NULL; + Block* late = nullptr; if (try_to_hoist) { // Now find the block with the least execution frequency. 
// Start at the latest schedule and work up to the earliest schedule @@ -1528,8 +1528,8 @@ void PhaseCFG::global_code_motion() { } #endif - // Detect implicit-null-check opportunities. Basically, find NULL checks - // with suitable memory ops nearby. Use the memory op to do the NULL check. + // Detect implicit-null-check opportunities. Basically, find null checks + // with suitable memory ops nearby. Use the memory op to do the null check. // I can generate a memory op if there is not one nearby. if (C->is_method_compilation()) { // By reversing the loop direction we get a very minor gain on mpegaudio. @@ -1549,7 +1549,7 @@ void PhaseCFG::global_code_motion() { } bool block_size_threshold_ok = false; - intptr_t *recalc_pressure_nodes = NULL; + intptr_t *recalc_pressure_nodes = nullptr; if (OptoRegScheduling) { for (uint i = 0; i < number_of_blocks(); i++) { Block* block = get_block(i); @@ -1602,11 +1602,11 @@ void PhaseCFG::global_code_motion() { if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) { C->record_method_not_compilable("local schedule failed"); } - _regalloc = NULL; + _regalloc = nullptr; return; } } - _regalloc = NULL; + _regalloc = nullptr; // If we inserted any instructions between a Call and his CatchNode, // clone the instructions on all paths below the Catch. @@ -1747,7 +1747,7 @@ CFGLoop* PhaseCFG::create_loop_tree() { for (uint i = 0; i < number_of_blocks(); i++) { Block* block = get_block(i); // Check that _loop field are clear...we could clear them if not. - assert(block->_loop == NULL, "clear _loop expected"); + assert(block->_loop == nullptr, "clear _loop expected"); // Sanity check that the RPO numbering is reflected in the _blocks array. 
// It doesn't have to be for the loop tree to be built, but if it is not, // then the blocks have been reordered since dom graph building...which @@ -1780,7 +1780,7 @@ CFGLoop* PhaseCFG::create_loop_tree() { assert(worklist.size() == 0, "nonempty worklist"); CFGLoop* nloop = new CFGLoop(idct++); - assert(loop_head->_loop == NULL, "just checking"); + assert(loop_head->_loop == nullptr, "just checking"); loop_head->_loop = nloop; // Add to nloop so push_pred() will skip over inner loops nloop->add_member(loop_head); @@ -1803,7 +1803,7 @@ CFGLoop* PhaseCFG::create_loop_tree() { for (uint i = 0; i < number_of_blocks(); i++) { Block* block = get_block(i); CFGLoop* lp = block->_loop; - if (lp == NULL) { + if (lp == nullptr) { // Not assigned to a loop. Add it to the method's pseudo loop. block->_loop = root_loop; lp = root_loop; @@ -1812,7 +1812,7 @@ CFGLoop* PhaseCFG::create_loop_tree() { lp->add_member(block); } if (lp != root_loop) { - if (lp->parent() == NULL) { + if (lp->parent() == nullptr) { // Not a nested loop. Make it a child of the method's pseudo loop. root_loop->add_nested_loop(lp); } @@ -1831,7 +1831,7 @@ void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg) Node* pred_n = blk->pred(i); Block* pred = cfg->get_block_for_node(pred_n); CFGLoop *pred_loop = pred->_loop; - if (pred_loop == NULL) { + if (pred_loop == nullptr) { // Filter out blocks for non-single-entry loops. // For all reasonable loops, the head occurs before the tail in RPO. if (pred->_rpo > head()->_rpo) { @@ -1840,11 +1840,11 @@ void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg) } } else if (pred_loop != this) { // Nested loop. 
- while (pred_loop->_parent != NULL && pred_loop->_parent != this) { + while (pred_loop->_parent != nullptr && pred_loop->_parent != this) { pred_loop = pred_loop->_parent; } // Make pred's loop be a child - if (pred_loop->_parent == NULL) { + if (pred_loop->_parent == nullptr) { add_nested_loop(pred_loop); // Continue with loop entry predecessor. Block* pred_head = pred_loop->head(); @@ -1852,7 +1852,7 @@ void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg) assert(pred_head != head(), "loop head in only one loop"); push_pred(pred_head, LoopNode::EntryControl, worklist, cfg); } else { - assert(pred_loop->_parent == this && _parent == NULL, "just checking"); + assert(pred_loop->_parent == this && _parent == nullptr, "just checking"); } } } @@ -1860,14 +1860,14 @@ void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg) //------------------------------add_nested_loop-------------------------------- // Make cl a child of the current loop in the loop tree. 
void CFGLoop::add_nested_loop(CFGLoop* cl) { - assert(_parent == NULL, "no parent yet"); + assert(_parent == nullptr, "no parent yet"); assert(cl != this, "not my own parent"); cl->_parent = this; CFGLoop* ch = _child; - if (ch == NULL) { + if (ch == nullptr) { _child = cl; } else { - while (ch->_sibling != NULL) { ch = ch->_sibling; } + while (ch->_sibling != nullptr) { ch = ch->_sibling; } ch->_sibling = cl; } } @@ -1878,7 +1878,7 @@ void CFGLoop::add_nested_loop(CFGLoop* cl) { void CFGLoop::compute_loop_depth(int depth) { _depth = depth; CFGLoop* ch = _child; - while (ch != NULL) { + while (ch != nullptr) { ch->compute_loop_depth(depth + 1); ch = ch->_sibling; } @@ -1897,7 +1897,7 @@ void CFGLoop::compute_freq() { // Nested loops first CFGLoop* ch = _child; - while (ch != NULL) { + while (ch != nullptr) { ch->compute_freq(); ch = ch->_sibling; } @@ -2227,7 +2227,7 @@ void CFGLoop::scale_freq() { s->_freq = block_freq; } CFGLoop* ch = _child; - while (ch != NULL) { + while (ch != nullptr) { ch->scale_freq(); ch = ch->_sibling; } @@ -2235,7 +2235,7 @@ void CFGLoop::scale_freq() { // Frequency of outer loop double CFGLoop::outer_loop_freq() const { - if (_child != NULL) { + if (_child != nullptr) { return _child->_freq; } return _freq; @@ -2245,8 +2245,8 @@ double CFGLoop::outer_loop_freq() const { //------------------------------dump_tree-------------------------------------- void CFGLoop::dump_tree() const { dump(); - if (_child != NULL) _child->dump_tree(); - if (_sibling != NULL) _sibling->dump_tree(); + if (_child != nullptr) _child->dump_tree(); + if (_sibling != nullptr) _sibling->dump_tree(); } //------------------------------dump------------------------------------------- diff --git a/src/hotspot/share/opto/generateOptoStub.cpp b/src/hotspot/share/opto/generateOptoStub.cpp index c5dfd1eeb40..49a4d6998b5 100644 --- a/src/hotspot/share/opto/generateOptoStub.cpp +++ b/src/hotspot/share/opto/generateOptoStub.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2021, 
Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -96,7 +96,7 @@ void GraphKit::gen_stub(address C_function, // Drop in the last_Java_sp. last_Java_fp is not touched. // Always do this after the other "last_Java_frame" fields are set since - // as soon as last_Java_sp != NULL the has_last_Java_frame is true and + // as soon as last_Java_sp != nullptr the has_last_Java_frame is true and // users will look at the other fields. // Node *adr_sp = basic_plus_adr(top(), thread, in_bytes(JavaThread::last_Java_sp_offset())); @@ -234,7 +234,7 @@ void GraphKit::gen_stub(address C_function, // Runtime call returning oop in TLS? Fetch it out if( pass_tls ) { Node* adr = basic_plus_adr(top(), thread, in_bytes(JavaThread::vm_result_offset())); - Node* vm_result = make_load(NULL, adr, TypeOopPtr::BOTTOM, T_OBJECT, NoAlias, MemNode::unordered); + Node* vm_result = make_load(nullptr, adr, TypeOopPtr::BOTTOM, T_OBJECT, NoAlias, MemNode::unordered); map()->set_req(TypeFunc::Parms, vm_result); // vm_result passed as result // clear thread-local-storage(tls) store_to_memory(control(), adr, null(), T_ADDRESS, NoAlias, MemNode::unordered); @@ -243,7 +243,7 @@ void GraphKit::gen_stub(address C_function, //----------------------------- // check exception Node* adr = basic_plus_adr(top(), thread, in_bytes(Thread::pending_exception_offset())); - Node* pending = make_load(NULL, adr, TypeOopPtr::BOTTOM, T_OBJECT, NoAlias, MemNode::unordered); + Node* pending = make_load(nullptr, adr, TypeOopPtr::BOTTOM, T_OBJECT, NoAlias, MemNode::unordered); Node* exit_memory = reset_memory(); @@ -254,7 +254,7 @@ void GraphKit::gen_stub(address C_function, Node* if_null = _gvn.transform( new IfFalseNode(iff) ); Node* if_not_null = _gvn.transform( new IfTrueNode(iff) ); - assert 
(StubRoutines::forward_exception_entry() != NULL, "must be generated before"); + assert (StubRoutines::forward_exception_entry() != nullptr, "must be generated before"); Node *exc_target = makecon(TypeRawPtr::make( StubRoutines::forward_exception_entry() )); Node *to_exc = new TailCallNode(if_not_null, i_o(), @@ -267,7 +267,7 @@ void GraphKit::gen_stub(address C_function, //----------------------------- // If this is a normal subroutine return, issue the return and be done. - Node *ret = NULL; + Node *ret = nullptr; switch( is_fancy_jump ) { case 0: // Make a return instruction // Return to caller, free any space for return address diff --git a/src/hotspot/share/opto/graphKit.cpp b/src/hotspot/share/opto/graphKit.cpp index b9bf66ba8ab..4ef3a7409dc 100644 --- a/src/hotspot/share/opto/graphKit.cpp +++ b/src/hotspot/share/opto/graphKit.cpp @@ -60,7 +60,7 @@ GraphKit::GraphKit(JVMState* jvms) _barrier_set(BarrierSet::barrier_set()->barrier_set_c2()) { _exceptions = jvms->map()->next_exception(); - if (_exceptions != NULL) jvms->map()->set_next_exception(NULL); + if (_exceptions != nullptr) jvms->map()->set_next_exception(nullptr); set_jvms(jvms); } @@ -71,8 +71,8 @@ GraphKit::GraphKit() _gvn(*C->initial_gvn()), _barrier_set(BarrierSet::barrier_set()->barrier_set_c2()) { - _exceptions = NULL; - set_map(NULL); + _exceptions = nullptr; + set_map(nullptr); debug_only(_sp = -99); debug_only(set_bci(-99)); } @@ -119,7 +119,7 @@ JVMState* GraphKit::sync_jvms_for_reexecute() { #ifdef ASSERT bool GraphKit::jvms_in_sync() const { Parse* parse = is_Parse(); - if (parse == NULL) { + if (parse == nullptr) { if (bci() != jvms()->bci()) return false; if (sp() != (int)jvms()->sp()) return false; return true; @@ -139,33 +139,33 @@ bool GraphKit::jvms_in_sync() const { // Such merge points must never "escape" into the parser at large, // until they have been handed to gvn.transform. 
static bool is_hidden_merge(Node* reg) { - if (reg == NULL) return false; + if (reg == nullptr) return false; if (reg->is_Phi()) { reg = reg->in(0); - if (reg == NULL) return false; + if (reg == nullptr) return false; } - return reg->is_Region() && reg->in(0) != NULL && reg->in(0)->is_Root(); + return reg->is_Region() && reg->in(0) != nullptr && reg->in(0)->is_Root(); } void GraphKit::verify_map() const { - if (map() == NULL) return; // null map is OK + if (map() == nullptr) return; // null map is OK assert(map()->req() <= jvms()->endoff(), "no extra garbage on map"); assert(!map()->has_exceptions(), "call add_exception_states_from 1st"); assert(!is_hidden_merge(control()), "call use_exception_state, not set_map"); } void GraphKit::verify_exception_state(SafePointNode* ex_map) { - assert(ex_map->next_exception() == NULL, "not already part of a chain"); + assert(ex_map->next_exception() == nullptr, "not already part of a chain"); assert(has_saved_ex_oop(ex_map), "every exception state has an ex_oop"); } #endif //---------------------------stop_and_kill_map--------------------------------- -// Set _map to NULL, signalling a stop to further bytecode execution. +// Set _map to null, signalling a stop to further bytecode execution. // First smash the current map's control to a constant, to mark it dead. void GraphKit::stop_and_kill_map() { SafePointNode* dead_map = stop(); - if (dead_map != NULL) { + if (dead_map != nullptr) { dead_map->disconnect_inputs(C); // Mark the map as killed. assert(dead_map->is_killed(), "must be so marked"); } @@ -173,9 +173,9 @@ void GraphKit::stop_and_kill_map() { //--------------------------------stopped-------------------------------------- -// Tell if _map is NULL, or control is top. +// Tell if _map is null, or control is top. 
bool GraphKit::stopped() { - if (map() == NULL) return true; + if (map() == nullptr) return true; else if (control() == top()) return true; else return false; } @@ -184,7 +184,7 @@ bool GraphKit::stopped() { //-----------------------------has_ex_handler---------------------------------- // Tell if this method or any caller method has exception handlers. bool GraphKit::has_ex_handler() { - for (JVMState* jvmsp = jvms(); jvmsp != NULL; jvmsp = jvmsp->caller()) { + for (JVMState* jvmsp = jvms(); jvmsp != nullptr; jvmsp = jvmsp->caller()) { if (jvmsp->has_method() && jvmsp->method()->has_exception_handlers()) { return true; } @@ -240,7 +240,7 @@ SafePointNode* GraphKit::make_exception_state(Node* ex_oop) { //--------------------------add_exception_state-------------------------------- // Add an exception to my list of exceptions. void GraphKit::add_exception_state(SafePointNode* ex_map) { - if (ex_map == NULL || ex_map->control() == top()) { + if (ex_map == nullptr || ex_map->control() == top()) { return; } #ifdef ASSERT @@ -259,7 +259,7 @@ void GraphKit::add_exception_state(SafePointNode* ex_map) { return; } assert(ex_type->isa_instptr(), "exception must be an instance"); - for (SafePointNode* e2 = _exceptions; e2 != NULL; e2 = e2->next_exception()) { + for (SafePointNode* e2 = _exceptions; e2 != nullptr; e2 = e2->next_exception()) { const Type* ex_type2 = _gvn.type(saved_ex_oop(e2)); // We check sp also because call bytecodes can generate exceptions // both before and after arguments are popped! 
@@ -277,11 +277,11 @@ void GraphKit::add_exception_state(SafePointNode* ex_map) { //-----------------------add_exception_states_from----------------------------- void GraphKit::add_exception_states_from(JVMState* jvms) { SafePointNode* ex_map = jvms->map()->next_exception(); - if (ex_map != NULL) { - jvms->map()->set_next_exception(NULL); - for (SafePointNode* next_map; ex_map != NULL; ex_map = next_map) { + if (ex_map != nullptr) { + jvms->map()->set_next_exception(nullptr); + for (SafePointNode* next_map; ex_map != nullptr; ex_map = next_map) { next_map = ex_map->next_exception(); - ex_map->set_next_exception(NULL); + ex_map->set_next_exception(nullptr); add_exception_state(ex_map); } } @@ -289,18 +289,18 @@ void GraphKit::add_exception_states_from(JVMState* jvms) { //-----------------------transfer_exceptions_into_jvms------------------------- JVMState* GraphKit::transfer_exceptions_into_jvms() { - if (map() == NULL) { + if (map() == nullptr) { // We need a JVMS to carry the exceptions, but the map has gone away. // Create a scratch JVMS, cloned from any of the exception states... 
if (has_exceptions()) { _map = _exceptions; _map = clone_map(); - _map->set_next_exception(NULL); + _map->set_next_exception(nullptr); clear_saved_ex_oop(_map); debug_only(verify_map()); } else { // ...or created from scratch - JVMState* jvms = new (C) JVMState(_method, NULL); + JVMState* jvms = new (C) JVMState(_method, nullptr); jvms->set_bci(_bci); jvms->set_sp(_sp); jvms->set_map(new SafePointNode(TypeFunc::Parms, jvms)); @@ -315,7 +315,7 @@ JVMState* GraphKit::transfer_exceptions_into_jvms() { JVMState* jvms = sync_jvms(); assert(!jvms->map()->has_exceptions(), "no exceptions on this map yet"); jvms->map()->set_next_exception(_exceptions); - _exceptions = NULL; // done with this set of exceptions + _exceptions = nullptr; // done with this set of exceptions return jvms; } @@ -337,7 +337,7 @@ static inline void add_one_req(Node* dstphi, Node* src) { // This helper function combines exception states by building phis on a // specially marked state-merging region. These regions and phis are // untransformed, and can build up gradually. The region is marked by -// having a control input of its exception map, rather than NULL. Such +// having a control input of its exception map, rather than null. Such // regions do not appear except in this function, and in use_exception_state. void GraphKit::combine_exception_states(SafePointNode* ex_map, SafePointNode* phi_map) { if (failing()) return; // dying anyway... @@ -489,7 +489,7 @@ Node* GraphKit::use_exception_state(SafePointNode* phi_map) { Bytecodes::Code GraphKit::java_bc() const { ciMethod* method = this->method(); int bci = this->bci(); - if (method != NULL && bci != InvocationEntryBci) + if (method != nullptr && bci != InvocationEntryBci) return method->java_code_at_bci(bci); else return Bytecodes::_illegal; @@ -519,7 +519,7 @@ void GraphKit::uncommon_trap_if_should_post_on_exceptions(Deoptimization::DeoptR // Do not try anything fancy if we're notifying the VM on every throw. // Cf. 
case Bytecodes::_athrow in parse2.cpp. uncommon_trap(reason, Deoptimization::Action_none, - (ciKlass*)NULL, (char*)NULL, must_throw); + (ciKlass*)nullptr, (char*)nullptr, must_throw); } } @@ -566,7 +566,7 @@ void GraphKit::builtin_throw(Deoptimization::DeoptReason reason) { // for its backtrace. // Fixing this remaining case of 4292742 requires some flavor of // escape analysis. Leave that for the future. - ciInstance* ex_obj = NULL; + ciInstance* ex_obj = nullptr; switch (reason) { case Deoptimization::Reason_null_check: ex_obj = env()->NullPointerException_instance(); @@ -587,7 +587,7 @@ void GraphKit::builtin_throw(Deoptimization::DeoptReason reason) { break; } if (failing()) { stop(); return; } // exception allocation might fail - if (ex_obj != NULL) { + if (ex_obj != nullptr) { if (env()->jvmti_can_post_on_exceptions()) { // check if we must post exception events, take uncommon trap if so uncommon_trap_if_should_post_on_exceptions(reason, must_throw); @@ -596,7 +596,7 @@ void GraphKit::builtin_throw(Deoptimization::DeoptReason reason) { } // Cheat with a preallocated exception object. - if (C->log() != NULL) + if (C->log() != nullptr) C->log()->elem("hot_throw preallocated='1' reason='%s'", Deoptimization::trap_reason_name(reason)); const TypeInstPtr* ex_con = TypeInstPtr::make(ex_obj); @@ -632,13 +632,13 @@ void GraphKit::builtin_throw(Deoptimization::DeoptReason reason) { // Usual case: Bail to interpreter. // Reserve the right to recompile if we haven't seen anything yet. - ciMethod* m = Deoptimization::reason_is_speculate(reason) ? C->method() : NULL; + ciMethod* m = Deoptimization::reason_is_speculate(reason) ? C->method() : nullptr; Deoptimization::DeoptAction action = Deoptimization::Action_maybe_recompile; if (treat_throw_as_hot && (method()->method_data()->trap_recompiled_at(bci(), m) || C->too_many_traps(reason))) { // We cannot afford to take more traps here. Suffer in the interpreter. 
- if (C->log() != NULL) + if (C->log() != nullptr) C->log()->elem("hot_throw preallocated='0' reason='%s' mcount='%d'", Deoptimization::trap_reason_name(reason), C->trap_count(reason)); @@ -650,7 +650,7 @@ void GraphKit::builtin_throw(Deoptimization::DeoptReason reason) { // allocation time and code size, by drastically reducing the number // of in-edges on the call to the uncommon trap. - uncommon_trap(reason, action, (ciKlass*)NULL, (char*)NULL, must_throw); + uncommon_trap(reason, action, (ciKlass*)nullptr, (char*)nullptr, must_throw); } @@ -660,11 +660,11 @@ PreserveJVMState::PreserveJVMState(GraphKit* kit, bool clone_map) { _kit = kit; _map = kit->map(); // preserve the map _sp = kit->sp(); - kit->set_map(clone_map ? kit->clone_map() : NULL); + kit->set_map(clone_map ? kit->clone_map() : nullptr); #ifdef ASSERT _bci = kit->bci(); Parse* parser = kit->is_Parse(); - int block = (parser == NULL || parser->block() == NULL) ? -1 : parser->block()->rpo(); + int block = (parser == nullptr || parser->block() == nullptr) ? -1 : parser->block()->rpo(); _block = block; #endif } @@ -673,7 +673,7 @@ PreserveJVMState::~PreserveJVMState() { #ifdef ASSERT assert(kit->bci() == _bci, "bci must not shift"); Parse* parser = kit->is_Parse(); - int block = (parser == NULL || parser->block() == NULL) ? -1 : parser->block()->rpo(); + int block = (parser == nullptr || parser->block() == nullptr) ? -1 : parser->block()->rpo(); assert(block == _block, "block must not shift"); #endif kit->set_map(_map); @@ -718,7 +718,7 @@ PreserveReexecuteState::~PreserveReexecuteState() { // function eventually and do it all there. 
SafePointNode* GraphKit::clone_map() { - if (map() == NULL) return NULL; + if (map() == nullptr) return nullptr; // Clone the memory edge first Node* mem = MergeMemNode::make(map()->memory()); @@ -763,7 +763,7 @@ void GraphKit::destruct_map_clone(SafePointNode* sfp) { void GraphKit::set_map_clone(SafePointNode* m) { _map = m; _map = clone_map(); - _map->set_next_exception(NULL); + _map->set_next_exception(nullptr); debug_only(verify_map()); } @@ -786,7 +786,7 @@ void GraphKit::kill_dead_locals() { // bci can be -1 (InvocationEntryBci). We return the entry // liveness for the method. - if (method() == NULL || method()->code_size() == 0) { + if (method() == nullptr || method()->code_size() == 0) { // We are building a graph for a call to a native method. // All locals are live. return; @@ -814,14 +814,14 @@ void GraphKit::kill_dead_locals() { // Return true if all dead locals are set to top in the map. // Used to assert "clean" debug info at various points. bool GraphKit::dead_locals_are_killed() { - if (method() == NULL || method()->code_size() == 0) { + if (method() == nullptr || method()->code_size() == 0) { // No locals need to be dead, so all is as it should be. return true; } // Make sure somebody called kill_dead_locals upstream. 
ResourceMark rm; - for (JVMState* jvms = this->jvms(); jvms != NULL; jvms = jvms->caller()) { + for (JVMState* jvms = this->jvms(); jvms != nullptr; jvms = jvms->caller()) { if (jvms->loc_size() == 0) continue; // no locals to consult SafePointNode* map = jvms->map(); ciMethod* method = jvms->method(); @@ -854,7 +854,7 @@ bool GraphKit::dead_locals_are_killed() { static bool should_reexecute_implied_by_bytecode(JVMState *jvms, bool is_anewarray) { ciMethod* cur_method = jvms->method(); int cur_bci = jvms->bci(); - if (cur_method != NULL && cur_bci != InvocationEntryBci) { + if (cur_method != nullptr && cur_bci != InvocationEntryBci) { Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci); return Interpreter::bytecode_should_reexecute(code) || (is_anewarray && code == Bytecodes::_multianewarray); @@ -948,7 +948,7 @@ void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) { // Loop over the map input edges associated with jvms, add them // to the call node, & reset all offsets to match call node array. 
- for (JVMState* in_jvms = youngest_jvms; in_jvms != NULL; ) { + for (JVMState* in_jvms = youngest_jvms; in_jvms != nullptr; ) { uint debug_end = debug_ptr; uint debug_start = debug_ptr - in_jvms->debug_size(); debug_ptr = debug_start; // back up the ptr @@ -1097,9 +1097,9 @@ bool GraphKit::compute_stack_effects(int& inputs, int& depth) { case Bytecodes::_invokeinterface: { bool ignored_will_link; - ciSignature* declared_signature = NULL; + ciSignature* declared_signature = nullptr; ciMethod* ignored_callee = method()->get_method_at_bci(bci(), ignored_will_link, &declared_signature); - assert(declared_signature != NULL, "cannot be null"); + assert(declared_signature != nullptr, "cannot be null"); inputs = declared_signature->arg_size_for_bc(code); int size = declared_signature->return_type()->size(); depth = size - inputs; @@ -1197,9 +1197,9 @@ Node* GraphKit::ConvL2I(Node* offset) { Node* GraphKit::load_object_klass(Node* obj) { // Special-case a fresh allocation to avoid building nodes: Node* akls = AllocateNode::Ideal_klass(obj, &_gvn); - if (akls != NULL) return akls; + if (akls != nullptr) return akls; Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes()); - return _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS)); + return _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), k_adr, TypeInstPtr::KLASS)); } //-------------------------load_array_length----------------------------------- @@ -1207,7 +1207,7 @@ Node* GraphKit::load_array_length(Node* array) { // Special-case a fresh allocation to avoid building nodes: AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(array, &_gvn); Node *alen; - if (alloc == NULL) { + if (alloc == nullptr) { Node *r_adr = basic_plus_adr(array, arrayOopDesc::length_offset_in_bytes()); alen = _gvn.transform( new LoadRangeNode(0, immutable_memory(), r_adr, TypeInt::POS)); } else { @@ -1237,8 +1237,8 @@ Node* 
GraphKit::array_ideal_length(AllocateArrayNode* alloc, } //------------------------------do_null_check---------------------------------- -// Helper function to do a NULL pointer check. Returned value is -// the incoming address with NULL casted away. You are allowed to use the +// Helper function to do a null pointer check. Returned value is +// the incoming address with null casted away. You are allowed to use the // not-null value only if you are control dependent on the test. #ifndef PRODUCT extern int explicit_null_checks_inserted, @@ -1249,12 +1249,12 @@ Node* GraphKit::null_check_common(Node* value, BasicType type, bool assert_null, Node* *null_control, bool speculative) { - assert(!assert_null || null_control == NULL, "not both at once"); + assert(!assert_null || null_control == nullptr, "not both at once"); if (stopped()) return top(); NOT_PRODUCT(explicit_null_checks_inserted++); - // Construct NULL check - Node *chk = NULL; + // Construct null check + Node *chk = nullptr; switch(type) { case T_LONG : chk = new CmpLNode(value, _gvn.zerocon(T_LONG)); break; case T_INT : chk = new CmpINode(value, _gvn.intcon(0)); break; @@ -1264,9 +1264,9 @@ Node* GraphKit::null_check_common(Node* value, BasicType type, const Type *t = _gvn.type( value ); const TypeOopPtr* tp = t->isa_oopptr(); - if (tp != NULL && !tp->is_loaded() + if (tp != nullptr && !tp->is_loaded() // Only for do_null_check, not any of its siblings: - && !assert_null && null_control == NULL) { + && !assert_null && null_control == nullptr) { // Usually, any field access or invocation on an unloaded oop type // will simply fail to link, since the statically linked class is // likely also to be unloaded. However, in -Xcomp mode, sometimes @@ -1296,8 +1296,8 @@ Node* GraphKit::null_check_common(Node* value, BasicType type, return value; // Elided null assert quickly! } } else { - // See if mixing in the NULL pointer changes type. 
- // If so, then the NULL pointer was not allowed in the original + // See if mixing in the null pointer changes type. + // If so, then the null pointer was not allowed in the original // type. In other words, "value" was not-null. if (t->meet(TypePtr::NULL_PTR) != t->remove_speculative()) { // same as: if (!TypePtr::NULL_PTR->higher_equal(t)) ... @@ -1312,7 +1312,7 @@ Node* GraphKit::null_check_common(Node* value, BasicType type, default: fatal("unexpected type: %s", type2name(type)); } - assert(chk != NULL, "sanity check"); + assert(chk != nullptr, "sanity check"); chk = _gvn.transform(chk); BoolTest::mask btest = assert_null ? BoolTest::eq : BoolTest::ne; @@ -1345,7 +1345,7 @@ Node* GraphKit::null_check_common(Node* value, BasicType type, return res; } cfg = IfNode::up_one_dom(cfg, /*linear_only=*/ true); - if (cfg == NULL) break; // Quit at region nodes + if (cfg == nullptr) break; // Quit at region nodes depth++; } } @@ -1370,18 +1370,18 @@ Node* GraphKit::null_check_common(Node* value, BasicType type, // To cause an implicit null check, we set the not-null probability // to the maximum (PROB_MAX). For an explicit check the probability // is set to a smaller value. 
- if (null_control != NULL || too_many_traps(reason)) { + if (null_control != nullptr || too_many_traps(reason)) { // probability is less likely ok_prob = PROB_LIKELY_MAG(3); } else if (!assert_null && (ImplicitNullCheckThreshold > 0) && - method() != NULL && + method() != nullptr && (method()->method_data()->trap_count(reason) >= (uint)ImplicitNullCheckThreshold)) { ok_prob = PROB_LIKELY_MAG(3); } - if (null_control != NULL) { + if (null_control != nullptr) { IfNode* iff = create_and_map_if(control(), tst, ok_prob, COUNT_UNKNOWN); Node* null_true = _gvn.transform( new IfFalseNode(iff)); set_control( _gvn.transform( new IfTrueNode(iff))); @@ -1400,7 +1400,7 @@ Node* GraphKit::null_check_common(Node* value, BasicType type, } else if (assert_null) { uncommon_trap(reason, Deoptimization::Action_make_not_entrant, - NULL, "assert_null"); + nullptr, "assert_null"); } else { replace_in_map(value, zerocon(type)); builtin_throw(reason); @@ -1422,7 +1422,7 @@ Node* GraphKit::null_check_common(Node* value, BasicType type, // (If there is a null_control, a non-null value may come back to haunt us.) if (type == T_OBJECT) { Node* cast = cast_not_null(value, false); - if (null_control == NULL || (*null_control) == top()) + if (null_control == nullptr || (*null_control) == top()) replace_in_map(value, cast); value = cast; } @@ -1519,7 +1519,7 @@ Node* GraphKit::memory(uint alias_idx) { Node* GraphKit::reset_memory() { Node* mem = map()->memory(); // do not use this node for any more parsing! 
- debug_only( map()->set_memory((Node*)NULL) ); + debug_only( map()->set_memory((Node*)nullptr) ); return _gvn.transform( mem ); } @@ -1555,7 +1555,7 @@ Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, bool unsafe, uint8_t barrier_data) { assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" ); - const TypePtr* adr_type = NULL; // debug-mode-only argument + const TypePtr* adr_type = nullptr; // debug-mode-only argument debug_only(adr_type = C->get_adr_type(adr_idx)); Node* mem = memory(adr_idx); Node* ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, require_atomic_access, unaligned, mismatched, unsafe, barrier_data); @@ -1576,7 +1576,7 @@ Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt, bool unsafe, int barrier_data) { assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" ); - const TypePtr* adr_type = NULL; + const TypePtr* adr_type = nullptr; debug_only(adr_type = C->get_adr_type(adr_idx)); Node *mem = memory(adr_idx); Node* st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo, require_atomic_access); @@ -1607,7 +1607,7 @@ Node* GraphKit::access_store_at(Node* obj, const Type* val_type, BasicType bt, DecoratorSet decorators) { - // Transformation of a value which could be NULL pointer (CastPP #NULL) + // Transformation of a value which could be null pointer (CastPP #null) // could be delayed during Parse (for example, in adjust_map_after_if()). // Execute transformation here to avoid barrier generation in such case. if (_gvn.type(val) == TypePtr::NULL_PTR) { @@ -1618,7 +1618,7 @@ Node* GraphKit::access_store_at(Node* obj, return top(); // Dead path ? 
} - assert(val != NULL, "not dead path"); + assert(val != nullptr, "not dead path"); C2AccessValuePtr addr(adr, adr_type); C2AccessValue value(val, val_type); @@ -1658,7 +1658,7 @@ Node* GraphKit::access_load(Node* adr, // actual address to load val at } C2AccessValuePtr addr(adr, adr->bottom_type()->is_ptr()); - C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, NULL, addr); + C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, nullptr, addr); if (access.is_raw()) { return _barrier_set->BarrierSetC2::load_at(access, val_type); } else { @@ -1823,7 +1823,7 @@ Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_p // Capture the return value, if any. Node* ret; - if (call->method() == NULL || + if (call->method() == nullptr || call->method()->return_type()->basic_type() == T_VOID) ret = top(); else ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms)); @@ -1856,7 +1856,7 @@ Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_p Node* GraphKit::set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem) { // Set fixed predefined input arguments Node* memory = reset_memory(); - Node* m = narrow_mem == NULL ? memory : narrow_mem; + Node* m = narrow_mem == nullptr ? memory : narrow_mem; call->init_req( TypeFunc::Control, control() ); call->init_req( TypeFunc::I_O, top() ); // does no i/o call->init_req( TypeFunc::Memory, m ); // may gc ptrs @@ -1867,9 +1867,9 @@ Node* GraphKit::set_predefined_input_for_runtime_call(SafePointNode* call, Node* //-------------------set_predefined_output_for_runtime_call-------------------- // Set control and memory (not i_o) from the call. -// If keep_mem is not NULL, use it for the output state, +// If keep_mem is not null, use it for the output state, // except for the RawPtr output of the call, if hook_mem is TypeRawPtr::BOTTOM. -// If hook_mem is NULL, this call produces no memory effects at all. 
+// If hook_mem is null, this call produces no memory effects at all. // If hook_mem is a Java-visible memory slice (such as arraycopy operands), // then only that memory slice is taken from the call. // In the last case, we must put an appropriate memory barrier before @@ -1883,7 +1883,7 @@ void GraphKit::set_predefined_output_for_runtime_call(Node* call, if (keep_mem) { // First clone the existing memory state set_all_memory(keep_mem); - if (hook_mem != NULL) { + if (hook_mem != nullptr) { // Make memory for the call Node* mem = _gvn.transform( new ProjNode(call, TypeFunc::Memory) ); // Set the RawPtr memory state only. This covers all the heap top/GC stuff @@ -1897,7 +1897,7 @@ void GraphKit::set_predefined_output_for_runtime_call(Node* call, assert(C->alias_type(call->adr_type()) == C->alias_type(hook_mem), "call node must be constructed correctly"); } else { - assert(hook_mem == NULL, ""); + assert(hook_mem == nullptr, ""); // This is not a "slow path" call; all memory comes from the call. set_all_memory_call(call); } @@ -1918,7 +1918,7 @@ static void add_mergemem_users_to_worklist(Unique_Node_List& wl, Node* mem) { // Replace the call with the current state of the kit. 
void GraphKit::replace_call(CallNode* call, Node* result, bool do_replaced_nodes) { - JVMState* ejvms = NULL; + JVMState* ejvms = nullptr; if (has_exceptions()) { ejvms = transfer_exceptions_into_jvms(); } @@ -1940,10 +1940,10 @@ void GraphKit::replace_call(CallNode* call, Node* result, bool do_replaced_nodes Node* final_io = final_state->in(TypeFunc::I_O); // Replace all the old call edges with the edges from the inlining result - if (callprojs.fallthrough_catchproj != NULL) { + if (callprojs.fallthrough_catchproj != nullptr) { C->gvn_replace_by(callprojs.fallthrough_catchproj, final_ctl); } - if (callprojs.fallthrough_memproj != NULL) { + if (callprojs.fallthrough_memproj != nullptr) { if (final_mem->is_MergeMem()) { // Parser's exits MergeMem was not transformed but may be optimized final_mem = _gvn.transform(final_mem); @@ -1951,28 +1951,28 @@ void GraphKit::replace_call(CallNode* call, Node* result, bool do_replaced_nodes C->gvn_replace_by(callprojs.fallthrough_memproj, final_mem); add_mergemem_users_to_worklist(wl, final_mem); } - if (callprojs.fallthrough_ioproj != NULL) { + if (callprojs.fallthrough_ioproj != nullptr) { C->gvn_replace_by(callprojs.fallthrough_ioproj, final_io); } // Replace the result with the new result if it exists and is used - if (callprojs.resproj != NULL && result != NULL) { + if (callprojs.resproj != nullptr && result != nullptr) { C->gvn_replace_by(callprojs.resproj, result); } - if (ejvms == NULL) { + if (ejvms == nullptr) { // No exception edges to simply kill off those paths - if (callprojs.catchall_catchproj != NULL) { + if (callprojs.catchall_catchproj != nullptr) { C->gvn_replace_by(callprojs.catchall_catchproj, C->top()); } - if (callprojs.catchall_memproj != NULL) { + if (callprojs.catchall_memproj != nullptr) { C->gvn_replace_by(callprojs.catchall_memproj, C->top()); } - if (callprojs.catchall_ioproj != NULL) { + if (callprojs.catchall_ioproj != nullptr) { C->gvn_replace_by(callprojs.catchall_ioproj, C->top()); } // Replace 
the old exception object with top - if (callprojs.exobj != NULL) { + if (callprojs.exobj != nullptr) { C->gvn_replace_by(callprojs.exobj, C->top()); } } else { @@ -1984,21 +1984,21 @@ void GraphKit::replace_call(CallNode* call, Node* result, bool do_replaced_nodes Node* ex_oop = ekit.use_exception_state(ex_map); - if (callprojs.catchall_catchproj != NULL) { + if (callprojs.catchall_catchproj != nullptr) { C->gvn_replace_by(callprojs.catchall_catchproj, ekit.control()); ex_ctl = ekit.control(); } - if (callprojs.catchall_memproj != NULL) { + if (callprojs.catchall_memproj != nullptr) { Node* ex_mem = ekit.reset_memory(); C->gvn_replace_by(callprojs.catchall_memproj, ex_mem); add_mergemem_users_to_worklist(wl, ex_mem); } - if (callprojs.catchall_ioproj != NULL) { + if (callprojs.catchall_ioproj != nullptr) { C->gvn_replace_by(callprojs.catchall_ioproj, ekit.i_o()); } // Replace the old exception object with the newly created one - if (callprojs.exobj != NULL) { + if (callprojs.exobj != nullptr) { C->gvn_replace_by(callprojs.exobj, ex_oop); } } @@ -2013,7 +2013,7 @@ void GraphKit::replace_call(CallNode* call, Node* result, bool do_replaced_nodes _gvn.transform(wl.pop()); } - if (callprojs.fallthrough_catchproj != NULL && !final_ctl->is_top() && do_replaced_nodes) { + if (callprojs.fallthrough_catchproj != nullptr && !final_ctl->is_top() && do_replaced_nodes) { replaced_nodes.apply(C, final_ctl); } if (!ex_ctl->is_top() && do_replaced_nodes) { @@ -2048,7 +2048,7 @@ Node* GraphKit::uncommon_trap(int trap_request, bool must_throw, bool keep_exact_action) { if (failing()) stop(); - if (stopped()) return NULL; // trap reachable? + if (stopped()) return nullptr; // trap reachable? // Note: If ProfileTraps is true, and if a deopt. actually // occurs here, the runtime will make sure an MDO exists. 
There is @@ -2081,7 +2081,7 @@ Node* GraphKit::uncommon_trap(int trap_request, Deoptimization::trap_request_index(trap_request) < 0 && too_many_recompiles(reason)) { // This BCI is causing too many recompilations. - if (C->log() != NULL) { + if (C->log() != nullptr) { C->log()->elem("observe that='trap_action_change' reason='%s' from='%s' to='none'", Deoptimization::trap_reason_name(reason), Deoptimization::trap_action_name(action)); @@ -2113,20 +2113,20 @@ Node* GraphKit::uncommon_trap(int trap_request, } CompileLog* log = C->log(); - if (log != NULL) { - int kid = (klass == NULL)? -1: log->identify(klass); + if (log != nullptr) { + int kid = (klass == nullptr)? -1: log->identify(klass); log->begin_elem("uncommon_trap bci='%d'", bci()); char buf[100]; log->print(" %s", Deoptimization::format_trap_request(buf, sizeof(buf), trap_request)); if (kid >= 0) log->print(" klass='%d'", kid); - if (comment != NULL) log->print(" comment='%s'", comment); + if (comment != nullptr) log->print(" comment='%s'", comment); log->end_elem(); } // Make sure any guarding test views this path as very unlikely Node *i0 = control()->in(0); - if (i0 != NULL && i0->is_If()) { // Found a guarding if test? + if (i0 != nullptr && i0->is_If()) { // Found a guarding if test? IfNode *iff = i0->as_If(); float f = iff->_prob; // Get prob if (control()->Opcode() == Op_IfTrue) { @@ -2143,7 +2143,7 @@ Node* GraphKit::uncommon_trap(int trap_request, // Now insert the uncommon trap subroutine call address call_addr = SharedRuntime::uncommon_trap_blob()->entry_point(); - const TypePtr* no_memory_effects = NULL; + const TypePtr* no_memory_effects = nullptr; // Pass the index of the class to be loaded Node* call = make_runtime_call(RC_NO_LEAF | RC_UNCOMMON | (must_throw ? 
RC_MUST_THROW : 0), @@ -2178,14 +2178,14 @@ Node* GraphKit::just_allocated_object(Node* current_control) { // Object:: is invoked after allocation, most of invoke nodes // will be reduced, but a region node is kept in parse time, we check // the pattern and skip the region node if it degraded to a copy. - if (ctrl != NULL && ctrl->is_Region() && ctrl->req() == 2 && + if (ctrl != nullptr && ctrl->is_Region() && ctrl->req() == 2 && ctrl->as_Region()->is_copy()) { ctrl = ctrl->as_Region()->is_copy(); } if (C->recent_alloc_ctl() == ctrl) { return C->recent_alloc_obj(); } - return NULL; + return nullptr; } @@ -2224,7 +2224,7 @@ Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls, Prof } else { assert(ptr_kind == ProfileNeverNull, "nothing else is an improvement"); const TypePtr* ptr = TypePtr::NOTNULL; - if (speculative != NULL) { + if (speculative != nullptr) { speculative = speculative->cast_to_ptr_type(ptr->ptr())->is_ptr(); } else { speculative = ptr; @@ -2268,7 +2268,7 @@ Node* GraphKit::record_profiled_receiver_for_speculation(Node* n) { java_bc() == Bytecodes::_aastore) && method()->method_data()->is_mature()) { ciProfileData* data = method()->method_data()->bci_to_data(bci()); - if (data != NULL) { + if (data != nullptr) { if (!data->as_BitData()->null_seen()) { ptr_kind = ProfileNeverNull; } else { @@ -2277,7 +2277,7 @@ Node* GraphKit::record_profiled_receiver_for_speculation(Node* n) { uint i = 0; for (; i < call->row_limit(); i++) { ciKlass* receiver = call->receiver(i); - if (receiver != NULL) { + if (receiver != nullptr) { break; } } @@ -2306,7 +2306,7 @@ void GraphKit::record_profiled_arguments_for_speculation(ciMethod* dest_method, const Type *targ = tf->domain()->field_at(j + TypeFunc::Parms); if (is_reference_type(targ->basic_type())) { ProfilePtrKind ptr_kind = ProfileMaybeNull; - ciKlass* better_type = NULL; + ciKlass* better_type = nullptr; if (method()->argument_profiled_type(bci(), i, better_type, ptr_kind)) { 
record_profile_for_speculation(argument(j), better_type, ptr_kind); } @@ -2326,7 +2326,7 @@ void GraphKit::record_profiled_parameters_for_speculation() { for (int i = 0, j = 0; i < method()->arg_size() ; i++) { if (_gvn.type(local(i))->isa_oopptr()) { ProfilePtrKind ptr_kind = ProfileMaybeNull; - ciKlass* better_type = NULL; + ciKlass* better_type = nullptr; if (method()->parameter_profiled_type(j, better_type, ptr_kind)) { record_profile_for_speculation(local(i), better_type, ptr_kind); } @@ -2344,7 +2344,7 @@ void GraphKit::record_profiled_return_for_speculation() { return; } ProfilePtrKind ptr_kind = ProfileMaybeNull; - ciKlass* better_type = NULL; + ciKlass* better_type = nullptr; if (method()->return_profiled_type(bci(), better_type, ptr_kind)) { // If profiling reports a single type for the return value, // feed it to the type system so it can propagate it as a @@ -2403,7 +2403,7 @@ Node* GraphKit::dprecision_rounding(Node *n) { // Generate a fast path/slow path idiom. Graph looks like: // [foo] indicates that 'foo' is a parameter // -// [in] NULL +// [in] null // \ / // CmpP // Bool ne @@ -2441,23 +2441,23 @@ Node* GraphKit::null_check_oop(Node* value, Node* *null_control, bool never_see_null, bool safe_for_replace, bool speculative) { - // Initial NULL check taken path + // Initial null check taken path (*null_control) = top(); Node* cast = null_check_common(value, T_OBJECT, false, null_control, speculative); // Generate uncommon_trap: if (never_see_null && (*null_control) != top()) { // If we see an unexpected null at a check-cast we record it and force a - // recompile; the offending check-cast will be compiled to handle NULLs. + // recompile; the offending check-cast will be compiled to handle nulls. // If we see more than one offending BCI, then all checkcasts in the - // method will be compiled to handle NULLs. + // method will be compiled to handle nulls. 
PreserveJVMState pjvms(this); set_control(*null_control); replace_in_map(value, null()); Deoptimization::DeoptReason reason = Deoptimization::reason_null_check(speculative); uncommon_trap(reason, Deoptimization::Action_make_not_entrant); - (*null_control) = top(); // NULL path is dead + (*null_control) = top(); // null path is dead } if ((*null_control) == top() && safe_for_replace) { replace_in_map(value, cast); @@ -2488,17 +2488,17 @@ Node* GraphKit::make_runtime_call(int flags, const char* call_name, const TypePtr* adr_type, // The following parms are all optional. - // The first NULL ends the list. + // The first null ends the list. Node* parm0, Node* parm1, Node* parm2, Node* parm3, Node* parm4, Node* parm5, Node* parm6, Node* parm7) { - assert(call_addr != NULL, "must not call NULL targets"); + assert(call_addr != nullptr, "must not call null targets"); // Slow-path call bool is_leaf = !(flags & RC_NO_LEAF); bool has_io = (!is_leaf && !(flags & RC_NO_IO)); - if (call_name == NULL) { + if (call_name == nullptr) { assert(!is_leaf, "must supply name for leaf"); call_name = OptoRuntime::stub_name(call_addr); } @@ -2521,7 +2521,7 @@ Node* GraphKit::make_runtime_call(int flags, bool wide_in = !(flags & RC_NARROW_MEM); bool wide_out = (C->get_alias_index(adr_type) == Compile::AliasIdxBot); - Node* prev_mem = NULL; + Node* prev_mem = nullptr; if (wide_in) { prev_mem = set_predefined_input_for_runtime_call(call); } else { @@ -2530,17 +2530,17 @@ Node* GraphKit::make_runtime_call(int flags, prev_mem = set_predefined_input_for_runtime_call(call, narrow_mem); } - // Hook each parm in order. Stop looking at the first NULL. 
- if (parm0 != NULL) { call->init_req(TypeFunc::Parms+0, parm0); - if (parm1 != NULL) { call->init_req(TypeFunc::Parms+1, parm1); - if (parm2 != NULL) { call->init_req(TypeFunc::Parms+2, parm2); - if (parm3 != NULL) { call->init_req(TypeFunc::Parms+3, parm3); - if (parm4 != NULL) { call->init_req(TypeFunc::Parms+4, parm4); - if (parm5 != NULL) { call->init_req(TypeFunc::Parms+5, parm5); - if (parm6 != NULL) { call->init_req(TypeFunc::Parms+6, parm6); - if (parm7 != NULL) { call->init_req(TypeFunc::Parms+7, parm7); + // Hook each parm in order. Stop looking at the first null. + if (parm0 != nullptr) { call->init_req(TypeFunc::Parms+0, parm0); + if (parm1 != nullptr) { call->init_req(TypeFunc::Parms+1, parm1); + if (parm2 != nullptr) { call->init_req(TypeFunc::Parms+2, parm2); + if (parm3 != nullptr) { call->init_req(TypeFunc::Parms+3, parm3); + if (parm4 != nullptr) { call->init_req(TypeFunc::Parms+4, parm4); + if (parm5 != nullptr) { call->init_req(TypeFunc::Parms+5, parm5); + if (parm6 != nullptr) { call->init_req(TypeFunc::Parms+6, parm6); + if (parm7 != nullptr) { call->init_req(TypeFunc::Parms+7, parm7); /* close each nested if ===> */ } } } } } } } } - assert(call->in(call->req()-1) != NULL, "must initialize all parms"); + assert(call->in(call->req()-1) != nullptr, "must initialize all parms"); if (!is_leaf) { // Non-leaves can block and take safepoints: @@ -2600,7 +2600,7 @@ void GraphKit::merge_memory(Node* new_mem, Node* region, int new_path) { if (mms.is_empty()) { // clone base memory Phi's inputs for this memory slice assert(old_slice == mms.base_memory(), "sanity"); - phi = PhiNode::make(region, NULL, Type::MEMORY, mms.adr_type(C)); + phi = PhiNode::make(region, nullptr, Type::MEMORY, mms.adr_type(C)); _gvn.set_type(phi, Type::MEMORY); for (uint i = 1; i < phi->req(); i++) { phi->init_req(i, old_slice->in(i)); @@ -2655,7 +2655,7 @@ void GraphKit::make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool sep } static IfNode* 
gen_subtype_check_compare(Node* ctrl, Node* in1, Node* in2, BoolTest::mask test, float p, PhaseGVN& gvn, BasicType bt) { - Node* cmp = NULL; + Node* cmp = nullptr; switch(bt) { case T_INT: cmp = new CmpINode(in1, in2); break; case T_ADDRESS: cmp = new CmpPNode(in1, in2); break; @@ -2732,7 +2732,7 @@ Node* Phase::gen_subtype_check(Node* subklass, Node* superklass, Node** ctrl, No // First load the super-klass's check-offset Node *p1 = gvn.transform(new AddPNode(superklass, superklass, gvn.MakeConX(in_bytes(Klass::super_check_offset_offset())))); Node* m = C->immutable_memory(); - Node *chk_off = gvn.transform(new LoadINode(NULL, m, p1, gvn.type(p1)->is_ptr(), TypeInt::INT, MemNode::unordered)); + Node *chk_off = gvn.transform(new LoadINode(nullptr, m, p1, gvn.type(p1)->is_ptr(), TypeInt::INT, MemNode::unordered)); int cacheoff_con = in_bytes(Klass::secondary_super_cache_offset()); bool might_be_cache = (gvn.find_int_con(chk_off, cacheoff_con) == cacheoff_con); @@ -2740,8 +2740,8 @@ Node* Phase::gen_subtype_check(Node* subklass, Node* superklass, Node** ctrl, No // the secondary superclass list, or a failing value with a sentinel offset // if the super-klass is an interface or exceptionally deep in the Java // hierarchy and we have to scan the secondary superclass list the hard way. - // Worst-case type is a little odd: NULL is allowed as a result (usually - // klass loads can never produce a NULL). + // Worst-case type is a little odd: null is allowed as a result (usually + // klass loads can never produce a null). Node *chk_off_X = chk_off; #ifdef _LP64 chk_off_X = gvn.transform(new ConvI2LNode(chk_off_X)); @@ -2756,10 +2756,10 @@ Node* Phase::gen_subtype_check(Node* subklass, Node* superklass, Node** ctrl, No // incorrect/missed optimization of the following Load. 
// - it's a cache so, worse case, not reading the latest value // wouldn't cause incorrect execution - if (might_be_cache && mem != NULL) { + if (might_be_cache && mem != nullptr) { kmem = mem->is_MergeMem() ? mem->as_MergeMem()->memory_at(C->get_alias_index(gvn.type(p2)->is_ptr())) : mem; } - Node *nkls = gvn.transform(LoadKlassNode::make(gvn, NULL, kmem, p2, gvn.type(p2)->is_ptr(), TypeInstKlassPtr::OBJECT_OR_NULL)); + Node *nkls = gvn.transform(LoadKlassNode::make(gvn, nullptr, kmem, p2, gvn.type(p2)->is_ptr(), TypeInstKlassPtr::OBJECT_OR_NULL)); // Compile speed common case: ARE a subtype and we canNOT fail if( superklass == nkls ) @@ -2919,7 +2919,7 @@ Node* GraphKit::subtype_check_receiver(Node* receiver, ciKlass* klass, //------------------------------seems_never_null------------------------------- // Use null_seen information if it is available from the profile. // If we see an unexpected null at a type check we record it and force a -// recompile; the offending check will be recompiled to handle NULLs. +// recompile; the offending check will be recompiled to handle nulls. // If we see several offending BCIs, then all checks in the // method will be recompiled. bool GraphKit::seems_never_null(Node* obj, ciProfileData* data, bool& speculating) { @@ -2932,7 +2932,7 @@ bool GraphKit::seems_never_null(Node* obj, ciProfileData* data, bool& speculatin if (speculating) { return true; } - if (data == NULL) + if (data == nullptr) // Edge case: no mature data. Be optimistic here. return true; // If the profile has not seen a null, assume it won't happen. 
@@ -2948,7 +2948,7 @@ bool GraphKit::seems_never_null(Node* obj, ciProfileData* data, bool& speculatin void GraphKit::guard_klass_being_initialized(Node* klass) { int init_state_off = in_bytes(InstanceKlass::init_state_offset()); Node* adr = basic_plus_adr(top(), klass, init_state_off); - Node* init_state = LoadNode::make(_gvn, NULL, immutable_memory(), adr, + Node* init_state = LoadNode::make(_gvn, nullptr, immutable_memory(), adr, adr->bottom_type()->is_ptr(), TypeInt::BYTE, T_BYTE, MemNode::unordered); init_state = _gvn.transform(init_state); @@ -2967,7 +2967,7 @@ void GraphKit::guard_init_thread(Node* klass) { int init_thread_off = in_bytes(InstanceKlass::init_thread_offset()); Node* adr = basic_plus_adr(top(), klass, init_thread_off); - Node* init_thread = LoadNode::make(_gvn, NULL, immutable_memory(), adr, + Node* init_thread = LoadNode::make(_gvn, nullptr, immutable_memory(), adr, adr->bottom_type()->is_ptr(), TypePtr::NOTNULL, T_ADDRESS, MemNode::unordered); init_thread = _gvn.transform(init_thread); @@ -2995,7 +2995,7 @@ void GraphKit::clinit_barrier(ciInstanceKlass* ik, ciMethod* context) { } else { uncommon_trap(Deoptimization::Reason_uninitialized, Deoptimization::Action_reinterpret, - NULL); + nullptr); } } @@ -3006,21 +3006,21 @@ Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj, const TypeKlassPtr* require_klass, ciKlass* spec_klass, bool safe_for_replace) { - if (!UseTypeProfile || !TypeProfileCasts) return NULL; + if (!UseTypeProfile || !TypeProfileCasts) return nullptr; - Deoptimization::DeoptReason reason = Deoptimization::reason_class_check(spec_klass != NULL); + Deoptimization::DeoptReason reason = Deoptimization::reason_class_check(spec_klass != nullptr); // Make sure we haven't already deoptimized from this tactic. if (too_many_traps_or_recompiles(reason)) - return NULL; + return nullptr; // (No, this isn't a call, but it's enough like a virtual call // to use the same ciMethod accessor to get the profile info...) 
// If we have a speculative type use it instead of profiling (which // may not help us) - ciKlass* exact_kls = spec_klass == NULL ? profile_has_unique_klass() : spec_klass; - if (exact_kls != NULL) {// no cast failures here - if (require_klass == NULL || + ciKlass* exact_kls = spec_klass == nullptr ? profile_has_unique_klass() : spec_klass; + if (exact_kls != nullptr) {// no cast failures here + if (require_klass == nullptr || C->static_subtype_check(require_klass, TypeKlassPtr::make(exact_kls, Type::trust_interfaces)) == Compile::SSC_always_true) { // If we narrow the type to match what the type profile sees or // the speculative type, we can then remove the rest of the @@ -3043,7 +3043,7 @@ Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj, // assert(ssc == Compile::SSC_always_true)... except maybe the profile lied to us. } - return NULL; + return nullptr; } /** @@ -3061,14 +3061,14 @@ Node* GraphKit::maybe_cast_profiled_obj(Node* obj, return obj; } - // type == NULL if profiling tells us this object is always null - if (type != NULL) { + // type is null if profiling tells us this object is always null + if (type != nullptr) { Deoptimization::DeoptReason class_reason = Deoptimization::Reason_speculate_class_check; Deoptimization::DeoptReason null_reason = Deoptimization::Reason_speculate_null_check; if (!too_many_traps_or_recompiles(null_reason) && !too_many_traps_or_recompiles(class_reason)) { - Node* not_null_obj = NULL; + Node* not_null_obj = nullptr; // not_null is true if we know the object is not null and // there's no need for a null check if (!not_null) { @@ -3116,7 +3116,7 @@ Node* GraphKit::gen_instanceof(Node* obj, Node* superklass, bool safe_for_replac Node* phi = new PhiNode(region, TypeInt::BOOL); C->set_has_split_ifs(true); // Has chance for split-if optimization - ciProfileData* data = NULL; + ciProfileData* data = nullptr; if (java_bc() == Bytecodes::_instanceof) { // Only for the bytecode data = 
method()->method_data()->bci_to_data(bci()); } @@ -3129,7 +3129,7 @@ Node* GraphKit::gen_instanceof(Node* obj, Node* superklass, bool safe_for_replac Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null); // If not_null_obj is dead, only null-path is taken - if (stopped()) { // Doing instance-of on a NULL? + if (stopped()) { // Doing instance-of on a null? set_control(null_ctl); return intcon(0); } @@ -3159,13 +3159,13 @@ Node* GraphKit::gen_instanceof(Node* obj, Node* superklass, bool safe_for_replac // We may not have profiling here or it may not help us. If we // have a speculative type use it to perform an exact cast. ciKlass* spec_obj_type = obj_type->speculative_type(); - if (spec_obj_type != NULL || (ProfileDynamicTypes && data != NULL)) { - Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, NULL, spec_obj_type, safe_for_replace); + if (spec_obj_type != nullptr || (ProfileDynamicTypes && data != nullptr)) { + Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, nullptr, spec_obj_type, safe_for_replace); if (stopped()) { // Profile disagrees with this path. set_control(null_ctl); // Null is the only remaining possibility. return intcon(0); } - if (cast_obj != NULL) { + if (cast_obj != nullptr) { not_null_obj = cast_obj; } } @@ -3218,7 +3218,7 @@ Node* GraphKit::gen_checkcast(Node *obj, Node* superklass, // for example, in some objArray manipulations, such as a[i]=a[j].) 
if (tk->singleton()) { const TypeOopPtr* objtp = _gvn.type(obj)->isa_oopptr(); - if (objtp != NULL) { + if (objtp != nullptr) { switch (C->static_subtype_check(tk, objtp->as_klass_type())) { case Compile::SSC_always_true: // If we know the type check always succeed then we don't use @@ -3244,9 +3244,9 @@ Node* GraphKit::gen_checkcast(Node *obj, Node* superklass, } } - ciProfileData* data = NULL; + ciProfileData* data = nullptr; bool safe_for_replace = false; - if (failure_control == NULL) { // use MDO in regular case only + if (failure_control == nullptr) { // use MDO in regular case only assert(java_bc() == Bytecodes::_aastore || java_bc() == Bytecodes::_checkcast, "interpreter profiles type checks only for these BCs"); @@ -3262,7 +3262,7 @@ Node* GraphKit::gen_checkcast(Node *obj, Node* superklass, // Use null-cast information if it is available bool speculative_not_null = false; - bool never_see_null = ((failure_control == NULL) // regular case only + bool never_see_null = ((failure_control == nullptr) // regular case only && seems_never_null(obj, data, speculative_not_null)); // Null check; get casted pointer; set region slot 3 @@ -3270,7 +3270,7 @@ Node* GraphKit::gen_checkcast(Node *obj, Node* superklass, Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null); // If not_null_obj is dead, only null-path is taken - if (stopped()) { // Doing instance-of on a NULL? + if (stopped()) { // Doing instance-of on a null? 
set_control(null_ctl); return null(); } @@ -3284,7 +3284,7 @@ Node* GraphKit::gen_checkcast(Node *obj, Node* superklass, phi ->del_req(_null_path); } - Node* cast_obj = NULL; + Node* cast_obj = nullptr; if (tk->klass_is_exact()) { // The following optimization tries to statically cast the speculative type of the object // (for example obtained during profiling) to the type of the superklass and then do a @@ -3294,10 +3294,10 @@ Node* GraphKit::gen_checkcast(Node *obj, Node* superklass, // We may not have profiling here or it may not help us. If we have // a speculative type use it to perform an exact cast. ciKlass* spec_obj_type = obj_type->speculative_type(); - if (spec_obj_type != NULL || data != NULL) { + if (spec_obj_type != nullptr || data != nullptr) { cast_obj = maybe_cast_profiled_receiver(not_null_obj, tk, spec_obj_type, safe_for_replace); - if (cast_obj != NULL) { - if (failure_control != NULL) // failure is now impossible + if (cast_obj != nullptr) { + if (failure_control != nullptr) // failure is now impossible (*failure_control) = top(); // adjust the type of the phi to the exact klass: phi->raise_bottom_type(_gvn.type(cast_obj)->meet_speculative(TypePtr::NULL_PTR)); @@ -3305,14 +3305,14 @@ Node* GraphKit::gen_checkcast(Node *obj, Node* superklass, } } - if (cast_obj == NULL) { + if (cast_obj == nullptr) { // Generate the subtype check Node* not_subtype_ctrl = gen_subtype_check(not_null_obj, superklass ); // Plug in success path into the merge cast_obj = _gvn.transform(new CheckCastPPNode(control(), not_null_obj, toop)); // Failure path ends in uncommon trap (or may be dead - failure impossible) - if (failure_control == NULL) { + if (failure_control == nullptr) { if (not_subtype_ctrl != top()) { // If failure is possible PreserveJVMState pjvms(this); set_control(not_subtype_ctrl); @@ -3329,7 +3329,7 @@ Node* GraphKit::gen_checkcast(Node *obj, Node* superklass, region->init_req(_obj_path, control()); phi ->init_req(_obj_path, cast_obj); - // A merge of 
NULL or Casted-NotNull obj + // A merge of null or Casted-NotNull obj Node* res = _gvn.transform(phi); // Note I do NOT always 'replace_in_map(obj,result)' here. @@ -3413,9 +3413,9 @@ FastLockNode* GraphKit::shared_lock(Node* obj) { assert(SynchronizationEntryBCI == InvocationEntryBci, ""); if( !GenerateSynchronizationCode ) - return NULL; // Not locking things? + return nullptr; // Not locking things? if (stopped()) // Dead monitor? - return NULL; + return nullptr; assert(dead_locals_are_killed(), "should kill locals before sync. point"); @@ -3514,13 +3514,13 @@ void GraphKit::shared_unlock(Node* box, Node* obj) { //-------------------------------get_layout_helper----------------------------- // If the given klass is a constant or known to be an array, // fetch the constant layout helper value into constant_value -// and return (Node*)NULL. Otherwise, load the non-constant +// and return null. Otherwise, load the non-constant // layout helper value, and return the node which represents it. // This two-faced routine is useful because allocation sites // almost always feature constant types. 
Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) { const TypeKlassPtr* inst_klass = _gvn.type(klass_node)->isa_klassptr(); - if (!StressReflectiveCode && inst_klass != NULL) { + if (!StressReflectiveCode && inst_klass != nullptr) { bool xklass = inst_klass->klass_is_exact(); if (xklass || inst_klass->isa_aryklassptr()) { jint lhelper; @@ -3535,13 +3535,13 @@ Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) { } if (lhelper != Klass::_lh_neutral_value) { constant_value = lhelper; - return (Node*) NULL; + return (Node*) nullptr; } } } constant_value = Klass::_lh_neutral_value; // put in a known value Node* lhp = basic_plus_adr(klass_node, klass_node, in_bytes(Klass::layout_helper_offset())); - return make_load(NULL, lhp, TypeInt::INT, T_INT, MemNode::unordered); + return make_load(nullptr, lhp, TypeInt::INT, T_INT, MemNode::unordered); } // We just put in an allocate/initialize with a big raw-memory effect. @@ -3661,13 +3661,13 @@ Node* GraphKit::new_instance(Node* klass_node, // The layout_helper also encodes (in a low bit) the need for a slow path. jint layout_con = Klass::_lh_neutral_value; Node* layout_val = get_layout_helper(klass_node, layout_con); - int layout_is_con = (layout_val == NULL); + int layout_is_con = (layout_val == nullptr); - if (extra_slow_test == NULL) extra_slow_test = intcon(0); + if (extra_slow_test == nullptr) extra_slow_test = intcon(0); // Generate the initial go-slow test. It's either ALWAYS (return a - // Node for 1) or NEVER (return a NULL) or perhaps (in the reflective + // Node for 1) or NEVER (return a null) or perhaps (in the reflective // case) a computed value derived from the layout_helper. 
- Node* initial_slow_test = NULL; + Node* initial_slow_test = nullptr; if (layout_is_con) { assert(!StressReflectiveCode, "stress mode does not use these paths"); bool must_go_slow = Klass::layout_helper_needs_slow_path(layout_con); @@ -3686,7 +3686,7 @@ Node* GraphKit::new_instance(Node* klass_node, // Find the size in bytes. This is easy; it's the layout_helper. // The size value must be valid even if the slow path is taken. - Node* size = NULL; + Node* size = nullptr; if (layout_is_con) { size = MakeConX(Klass::layout_helper_size_in_bytes(layout_con)); } else { // reflective case @@ -3698,7 +3698,7 @@ Node* GraphKit::new_instance(Node* klass_node, Node* mask = MakeConX(~ (intptr_t)right_n_bits(LogBytesPerLong)); size = _gvn.transform( new AndXNode(size, mask) ); } - if (return_size_val != NULL) { + if (return_size_val != nullptr) { (*return_size_val) = size; } @@ -3735,7 +3735,7 @@ Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable) bool deoptimize_on_exception) { jint layout_con = Klass::_lh_neutral_value; Node* layout_val = get_layout_helper(klass_node, layout_con); - int layout_is_con = (layout_val == NULL); + int layout_is_con = (layout_val == nullptr); if (!layout_is_con && !StressReflectiveCode && !too_many_traps(Deoptimization::Reason_class_check)) { @@ -3750,7 +3750,7 @@ Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable) uncommon_trap(Deoptimization::Reason_class_check, Deoptimization::Action_maybe_recompile); } - layout_val = NULL; + layout_val = nullptr; layout_is_con = true; } @@ -3777,7 +3777,7 @@ Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable) // and align_to(x, y) == ((x + y-1) & ~(y-1)) // The rounding mask is strength-reduced, if possible. int round_mask = MinObjAlignmentInBytes - 1; - Node* header_size = NULL; + Node* header_size = nullptr; int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE); // (T_BYTE has the weakest alignment and size restrictions...) 
if (layout_is_con) { @@ -3799,7 +3799,7 @@ Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable) header_size = _gvn.transform( new AddINode(hsize, mask) ); } - Node* elem_shift = NULL; + Node* elem_shift = nullptr; if (layout_is_con) { int eshift = Klass::layout_helper_log2_element_size(layout_con); if (eshift != 0) @@ -3816,7 +3816,7 @@ Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable) Node* headerx = ConvI2X(header_size); #ifdef _LP64 { const TypeInt* tilen = _gvn.find_int_type(length); - if (tilen != NULL && tilen->_lo < 0) { + if (tilen != nullptr && tilen->_lo < 0) { // Add a manual constraint to a positive range. Cf. array_element_address. jint size_max = fast_size_limit; if (size_max > tilen->_hi) size_max = tilen->_hi; @@ -3853,7 +3853,7 @@ Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable) // places, one where the length is sharply limited, and the other // after a successful allocation. Node* abody = lengthx; - if (elem_shift != NULL) + if (elem_shift != nullptr) abody = _gvn.transform( new LShiftXNode(lengthx, elem_shift) ); Node* size = _gvn.transform( new AddXNode(headerx, abody) ); if (round_mask != 0) { @@ -3862,7 +3862,7 @@ Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable) } // else if round_mask == 0, the size computation is self-rounding - if (return_size_val != NULL) { + if (return_size_val != nullptr) { // This is the size (*return_size_val) = size; } @@ -3901,7 +3901,7 @@ Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable) // (This happens via a non-constant argument to inline_native_newArray.) // In any case, the value of klass_node provides the desired array type. 
const TypeInt* length_type = _gvn.find_int_type(length); - if (ary_type->isa_aryptr() && length_type != NULL) { + if (ary_type->isa_aryptr() && length_type != nullptr) { // Try to get a better type than POS for the size ary_type = ary_type->is_aryptr()->cast_to_size(length_type); } @@ -3918,8 +3918,8 @@ Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable) //---------------------------Ideal_allocation---------------------------------- // Given an oop pointer or raw pointer, see if it feeds from an AllocateNode. AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase) { - if (ptr == NULL) { // reduce dumb test in callers - return NULL; + if (ptr == nullptr) { // reduce dumb test in callers + return nullptr; } BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); @@ -3927,27 +3927,27 @@ AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase) { if (ptr->is_CheckCastPP()) { // strip only one raw-to-oop cast ptr = ptr->in(1); - if (ptr == NULL) return NULL; + if (ptr == nullptr) return nullptr; } - // Return NULL for allocations with several casts: + // Return null for allocations with several casts: // j.l.reflect.Array.newInstance(jobject, jint) // Object.clone() // to keep more precise type from last cast. if (ptr->is_Proj()) { Node* allo = ptr->in(0); - if (allo != NULL && allo->is_Allocate()) { + if (allo != nullptr && allo->is_Allocate()) { return allo->as_Allocate(); } } // Report failure to match. - return NULL; + return nullptr; } // Fancy version which also strips off an offset (and reports it to caller). 
AllocateNode* AllocateNode::Ideal_allocation(Node* ptr, PhaseTransform* phase, intptr_t& offset) { Node* base = AddPNode::Ideal_base_and_offset(ptr, phase, offset); - if (base == NULL) return NULL; + if (base == nullptr) return nullptr; return Ideal_allocation(base, phase); } @@ -3960,13 +3960,13 @@ AllocateNode* InitializeNode::allocation() { return alloc->as_Allocate(); } } - return NULL; + return nullptr; } // Trace Allocate -> Proj[Parm] -> Initialize InitializeNode* AllocateNode::initialization() { ProjNode* rawoop = proj_out_or_null(AllocateNode::RawAddress); - if (rawoop == NULL) return NULL; + if (rawoop == nullptr) return nullptr; for (DUIterator_Fast imax, i = rawoop->fast_outs(imax); i < imax; i++) { Node* init = rawoop->fast_out(i); if (init->is_Initialize()) { @@ -3974,7 +3974,7 @@ InitializeNode* AllocateNode::initialization() { return init->as_Initialize(); } } - return NULL; + return nullptr; } //----------------------------- loop predicates --------------------------- @@ -4049,7 +4049,7 @@ Node* GraphKit::load_String_length(Node* str, bool set_ctrl) { Node* GraphKit::load_String_value(Node* str, bool set_ctrl) { int value_offset = java_lang_String::value_offset(); const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(), - false, NULL, 0); + false, nullptr, 0); const TypePtr* value_field_type = string_type->add_offset(value_offset); const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull, TypeAry::make(TypeInt::BYTE, TypeInt::POS), @@ -4066,7 +4066,7 @@ Node* GraphKit::load_String_coder(Node* str, bool set_ctrl) { } int coder_offset = java_lang_String::coder_offset(); const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(), - false, NULL, 0); + false, nullptr, 0); const TypePtr* coder_field_type = string_type->add_offset(coder_offset); Node* p = basic_plus_adr(str, str, coder_offset); @@ -4078,7 +4078,7 @@ Node* GraphKit::load_String_coder(Node* str, bool set_ctrl) 
{ void GraphKit::store_String_value(Node* str, Node* value) { int value_offset = java_lang_String::value_offset(); const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(), - false, NULL, 0); + false, nullptr, 0); const TypePtr* value_field_type = string_type->add_offset(value_offset); access_store_at(str, basic_plus_adr(str, value_offset), value_field_type, @@ -4088,7 +4088,7 @@ void GraphKit::store_String_value(Node* str, Node* value) { void GraphKit::store_String_coder(Node* str, Node* value) { int coder_offset = java_lang_String::coder_offset(); const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(), - false, NULL, 0); + false, nullptr, 0); const TypePtr* coder_field_type = string_type->add_offset(coder_offset); access_store_at(str, basic_plus_adr(str, coder_offset), coder_field_type, @@ -4189,19 +4189,19 @@ void GraphKit::inflate_string_slow(Node* src, Node* dst, Node* start, Node* coun Node* GraphKit::make_constant_from_field(ciField* field, Node* obj) { if (!field->is_constant()) { - return NULL; // Field not marked as constant. + return nullptr; // Field not marked as constant. 
} - ciInstance* holder = NULL; + ciInstance* holder = nullptr; if (!field->is_static()) { ciObject* const_oop = obj->bottom_type()->is_oopptr()->const_oop(); - if (const_oop != NULL && const_oop->is_instance()) { + if (const_oop != nullptr && const_oop->is_instance()) { holder = const_oop->as_instance(); } } const Type* con_type = Type::make_constant_from_field(field, holder, field->layout_type(), /*is_unsigned_load=*/false); - if (con_type != NULL) { + if (con_type != nullptr) { return makecon(con_type); } - return NULL; + return nullptr; } diff --git a/src/hotspot/share/opto/graphKit.hpp b/src/hotspot/share/opto/graphKit.hpp index 731223f491e..fe731fc35ca 100644 --- a/src/hotspot/share/opto/graphKit.hpp +++ b/src/hotspot/share/opto/graphKit.hpp @@ -72,7 +72,7 @@ class GraphKit : public Phase { private: SafePointNode* map_not_null() const { - assert(_map != NULL, "must call stopped() to test for reset compiler map"); + assert(_map != nullptr, "must call stopped() to test for reset compiler map"); return _map; } @@ -86,8 +86,8 @@ class GraphKit : public Phase { } #endif - virtual Parse* is_Parse() const { return NULL; } - virtual LibraryCallKit* is_LibraryCallKit() const { return NULL; } + virtual Parse* is_Parse() const { return nullptr; } + virtual LibraryCallKit* is_LibraryCallKit() const { return nullptr; } ciEnv* env() const { return _env; } PhaseGVN& gvn() const { return _gvn; } @@ -132,7 +132,7 @@ class GraphKit : public Phase { // See layout accessors in class JVMState. SafePointNode* map() const { return _map; } - bool has_exceptions() const { return _exceptions != NULL; } + bool has_exceptions() const { return _exceptions != nullptr; } JVMState* jvms() const { return map_not_null()->_jvms; } int sp() const { return _sp; } int bci() const { return _bci; } @@ -143,7 +143,7 @@ class GraphKit : public Phase { assert(jvms == this->jvms(), "sanity"); _sp = jvms->sp(); _bci = jvms->bci(); - _method = jvms->has_method() ? 
jvms->method() : NULL; } + _method = jvms->has_method() ? jvms->method() : nullptr; } void set_map(SafePointNode* m) { _map = m; debug_only(verify_map()); } void set_sp(int sp) { assert(sp >= 0, "sp must be non-negative: %d", sp); _sp = sp; } void clean_stack(int from_sp); // clear garbage beyond from_sp to top @@ -182,14 +182,14 @@ class GraphKit : public Phase { // Tell if the compilation is failing. bool failing() const { return C->failing(); } - // Set _map to NULL, signalling a stop to further bytecode execution. + // Set _map to null, signalling a stop to further bytecode execution. // Preserve the map intact for future use, and return it back to the caller. - SafePointNode* stop() { SafePointNode* m = map(); set_map(NULL); return m; } + SafePointNode* stop() { SafePointNode* m = map(); set_map(nullptr); return m; } - // Stop, but first smash the map's inputs to NULL, to mark it dead. + // Stop, but first smash the map's inputs to null, to mark it dead. void stop_and_kill_map(); - // Tell if _map is NULL, or control is top. + // Tell if _map is null, or control is top. bool stopped(); // Tell if this method or any caller method has exception handlers. @@ -221,9 +221,9 @@ class GraphKit : public Phase { // Detach and return an exception state. SafePointNode* pop_exception_state() { SafePointNode* ex_map = _exceptions; - if (ex_map != NULL) { + if (ex_map != nullptr) { _exceptions = ex_map->next_exception(); - ex_map->set_next_exception(NULL); + ex_map->set_next_exception(nullptr); debug_only(verify_exception_state(ex_map)); } return ex_map; @@ -246,10 +246,10 @@ class GraphKit : public Phase { // Combine all exceptions of any sort whatever into a single master state. 
SafePointNode* combine_and_pop_all_exception_states() { - if (_exceptions == NULL) return NULL; + if (_exceptions == nullptr) return nullptr; SafePointNode* phi_map = pop_exception_state(); SafePointNode* ex_map; - while ((ex_map = pop_exception_state()) != NULL) { + while ((ex_map = pop_exception_state()) != nullptr) { combine_exception_states(ex_map, phi_map); } return phi_map; @@ -353,16 +353,16 @@ class GraphKit : public Phase { bool replace_length_in_map); - // Helper function to do a NULL pointer check or ZERO check based on type. + // Helper function to do a null pointer check or ZERO check based on type. // Throw an exception if a given value is null. // Return the value cast to not-null. // Be clever about equivalent dominating null checks. Node* null_check_common(Node* value, BasicType type, bool assert_null = false, - Node* *null_control = NULL, + Node* *null_control = nullptr, bool speculative = false); Node* null_check(Node* value, BasicType type = T_OBJECT) { - return null_check_common(value, type, false, NULL, !_gvn.type(value)->speculative_maybe_null()); + return null_check_common(value, type, false, nullptr, !_gvn.type(value)->speculative_maybe_null()); } Node* null_check_receiver() { assert(argument(0)->bottom_type()->isa_ptr(), "must be"); @@ -381,7 +381,7 @@ class GraphKit : public Phase { // Throw an uncommon trap if a given value is __not__ null. // Return the value cast to null, and be clever about dominating checks. 
Node* null_assert(Node* value, BasicType type = T_OBJECT) { - return null_check_common(value, type, true, NULL, _gvn.type(value)->speculative_always_null()); + return null_check_common(value, type, true, nullptr, _gvn.type(value)->speculative_always_null()); } // Check if value is null and abort if it is @@ -414,7 +414,7 @@ class GraphKit : public Phase { profile.morphism() == 1) { return profile.receiver(0); } - return NULL; + return nullptr; } // record type from profiling with the type system @@ -479,7 +479,7 @@ class GraphKit : public Phase { int n_size = type2size[n_type]; if (n_size == 1) return pop(); else if (n_size == 2) return pop_pair(); - else return NULL; + else return nullptr; } Node* control() const { return map_not_null()->control(); } @@ -549,7 +549,7 @@ class GraphKit : public Phase { bool require_atomic_access = false, bool unaligned = false, bool mismatched = false, bool unsafe = false, uint8_t barrier_data = 0) { // This version computes alias_index from an address type - assert(adr_type != NULL, "use other make_load factory"); + assert(adr_type != nullptr, "use other make_load factory"); return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type), mo, control_dependency, require_atomic_access, unaligned, mismatched, unsafe, barrier_data); @@ -577,7 +577,7 @@ class GraphKit : public Phase { bool unsafe = false, int barrier_data = 0) { // This version computes alias_index from an address type - assert(adr_type != NULL, "use other store_to_memory factory"); + assert(adr_type != nullptr, "use other store_to_memory factory"); return store_to_memory(ctl, adr, val, bt, C->get_alias_index(adr_type), mo, require_atomic_access, @@ -660,9 +660,9 @@ class GraphKit : public Phase { // Return addressing for an array element. 
Node* array_element_address(Node* ary, Node* idx, BasicType elembt, // Optional constraint on the array size: - const TypeInt* sizetype = NULL, + const TypeInt* sizetype = nullptr, // Optional control dependency (for example, on range check) - Node* ctrl = NULL); + Node* ctrl = nullptr); // Return a load of array element at idx. Node* load_array_element(Node* ary, Node* idx, const TypeAryPtr* arytype, bool set_ctrl); @@ -717,12 +717,12 @@ class GraphKit : public Phase { // Similar to set_edges_for_java_call, but simplified for runtime calls. void set_predefined_output_for_runtime_call(Node* call) { - set_predefined_output_for_runtime_call(call, NULL, NULL); + set_predefined_output_for_runtime_call(call, nullptr, nullptr); } void set_predefined_output_for_runtime_call(Node* call, Node* keep_mem, const TypePtr* hook_mem); - Node* set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem = NULL); + Node* set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem = nullptr); // Replace the call with the current state of the kit. Requires // that the call was generated with separate io_projs so that @@ -738,13 +738,13 @@ class GraphKit : public Phase { // The optional reason is debug information written to the compile log. // Optional must_throw is the same as with add_safepoint_edges. Node* uncommon_trap(int trap_request, - ciKlass* klass = NULL, const char* reason_string = NULL, + ciKlass* klass = nullptr, const char* reason_string = nullptr, bool must_throw = false, bool keep_exact_action = false); // Shorthand, to avoid saying "Deoptimization::" so many times. 
Node* uncommon_trap(Deoptimization::DeoptReason reason, Deoptimization::DeoptAction action, - ciKlass* klass = NULL, const char* reason_string = NULL, + ciKlass* klass = nullptr, const char* reason_string = nullptr, bool must_throw = false, bool keep_exact_action = false) { return uncommon_trap(Deoptimization::make_trap_request(reason, action), klass, reason_string, must_throw, keep_exact_action); @@ -753,7 +753,7 @@ class GraphKit : public Phase { // Bail out to the interpreter and keep exact action (avoid switching to Action_none). Node* uncommon_trap_exact(Deoptimization::DeoptReason reason, Deoptimization::DeoptAction action, - ciKlass* klass = NULL, const char* reason_string = NULL, + ciKlass* klass = nullptr, const char* reason_string = nullptr, bool must_throw = false) { return uncommon_trap(Deoptimization::make_trap_request(reason, action), klass, reason_string, must_throw, /*keep_exact_action=*/true); @@ -800,11 +800,11 @@ class GraphKit : public Phase { Node* make_runtime_call(int flags, const TypeFunc* call_type, address call_addr, const char* call_name, - const TypePtr* adr_type, // NULL if no memory effects - Node* parm0 = NULL, Node* parm1 = NULL, - Node* parm2 = NULL, Node* parm3 = NULL, - Node* parm4 = NULL, Node* parm5 = NULL, - Node* parm6 = NULL, Node* parm7 = NULL); + const TypePtr* adr_type, // null if no memory effects + Node* parm0 = nullptr, Node* parm1 = nullptr, + Node* parm2 = nullptr, Node* parm3 = nullptr, + Node* parm4 = nullptr, Node* parm5 = nullptr, + Node* parm6 = nullptr, Node* parm7 = nullptr); Node* sign_extend_byte(Node* in); Node* sign_extend_short(Node* in); @@ -826,8 +826,8 @@ class GraphKit : public Phase { // Helper functions to build synchronizations int next_monitor(); - Node* insert_mem_bar(int opcode, Node* precedent = NULL); - Node* insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent = NULL); + Node* insert_mem_bar(int opcode, Node* precedent = nullptr); + Node* insert_mem_bar_volatile(int opcode, int 
alias_idx, Node* precedent = nullptr); // Optional 'precedent' is appended as an extra edge, to force ordering. FastLockNode* shared_lock(Node* obj); void shared_unlock(Node* box, Node* obj); @@ -842,7 +842,7 @@ class GraphKit : public Phase { // Generate a check-cast idiom. Used by both the check-cast bytecode // and the array-store bytecode Node* gen_checkcast( Node *subobj, Node* superkls, - Node* *failure_control = NULL ); + Node* *failure_control = nullptr ); Node* gen_subtype_check(Node* obj, Node* superklass); @@ -862,11 +862,11 @@ class GraphKit : public Phase { bool deoptimize_on_exception=false); Node* get_layout_helper(Node* klass_node, jint& constant_value); Node* new_instance(Node* klass_node, - Node* slow_test = NULL, - Node* *return_size_val = NULL, + Node* slow_test = nullptr, + Node* *return_size_val = nullptr, bool deoptimize_on_exception = false); Node* new_array(Node* klass_node, Node* count_val, int nargs, - Node* *return_size_val = NULL, + Node* *return_size_val = nullptr, bool deoptimize_on_exception = false); // java.lang.String helpers diff --git a/src/hotspot/share/opto/idealGraphPrinter.cpp b/src/hotspot/share/opto/idealGraphPrinter.cpp index d15e1f0034b..45eaecda441 100644 --- a/src/hotspot/share/opto/idealGraphPrinter.cpp +++ b/src/hotspot/share/opto/idealGraphPrinter.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2007, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -79,10 +79,10 @@ int IdealGraphPrinter::_file_count = 0; IdealGraphPrinter *IdealGraphPrinter::printer() { JavaThread *thread = JavaThread::current(); - if (!thread->is_Compiler_thread()) return NULL; + if (!thread->is_Compiler_thread()) return nullptr; CompilerThread *compiler_thread = (CompilerThread *)thread; - if (compiler_thread->ideal_graph_printer() == NULL) { + if (compiler_thread->ideal_graph_printer() == nullptr) { IdealGraphPrinter *printer = new IdealGraphPrinter(); compiler_thread->set_ideal_graph_printer(printer); } @@ -98,15 +98,15 @@ void IdealGraphPrinter::clean_up() { if (printer) { delete printer; } - c->set_ideal_graph_printer(NULL); + c->set_ideal_graph_printer(nullptr); } } IdealGraphPrinter* debug_file_printer = Compile::debug_file_printer(); - if (debug_file_printer != NULL) { + if (debug_file_printer != nullptr) { delete debug_file_printer; } IdealGraphPrinter* debug_network_printer = Compile::debug_network_printer(); - if (debug_network_printer != NULL) { + if (debug_network_printer != nullptr) { delete debug_network_printer; } } @@ -116,11 +116,11 @@ IdealGraphPrinter::IdealGraphPrinter() { init(PrintIdealGraphFile, true, false); } -// Either print methods to the specified file 'file_name' or if NULL over the network to the IGV. If 'append' +// Either print methods to the specified file 'file_name' or if null over the network to the IGV. If 'append' // is set, the next phase is directly appended to the specified file 'file_name'. This is useful when doing // replay compilation with a tool like rr that cannot alter the current program state but only the file. 
IdealGraphPrinter::IdealGraphPrinter(Compile* compile, const char* file_name, bool append) { - assert(!append || (append && file_name != NULL), "can only use append flag when printing to file"); + assert(!append || (append && file_name != nullptr), "can only use append flag when printing to file"); init(file_name, false, append); C = compile; if (append) { @@ -138,13 +138,13 @@ void IdealGraphPrinter::init(const char* file_name, bool use_multiple_files, boo // appear in the dump. _traverse_outs = true; _should_send_method = true; - _output = NULL; + _output = nullptr; buffer[0] = 0; _depth = 0; - _current_method = NULL; - _network_stream = NULL; + _current_method = nullptr; + _network_stream = nullptr; - if (file_name != NULL) { + if (file_name != nullptr) { init_file_stream(file_name, use_multiple_files, append); } else { init_network_stream(); @@ -165,20 +165,20 @@ IdealGraphPrinter::~IdealGraphPrinter() { if(_xml) { delete _xml; - _xml = NULL; + _xml = nullptr; } if (_network_stream) { delete _network_stream; if (_network_stream == _output) { - _output = NULL; + _output = nullptr; } - _network_stream = NULL; + _network_stream = nullptr; } if (_output) { delete _output; - _output = NULL; + _output = nullptr; } } @@ -257,7 +257,7 @@ void IdealGraphPrinter::print_method(ciMethod *method, int bci, InlineTree *tree _xml->print_cr("]]>"); tail(BYTECODES_ELEMENT); - if (tree != NULL && tree->subtrees().length() > 0) { + if (tree != nullptr && tree->subtrees().length() > 0) { head(INLINE_ELEMENT); GrowableArray subtrees = tree->subtrees(); for (int i = 0; i < subtrees.length(); i++) { @@ -271,7 +271,7 @@ void IdealGraphPrinter::print_method(ciMethod *method, int bci, InlineTree *tree } void IdealGraphPrinter::print_inline_tree(InlineTree *tree) { - if (tree != NULL) { + if (tree != nullptr) { print_method(tree->method(), tree->caller_bci(), tree); } } @@ -281,7 +281,7 @@ void IdealGraphPrinter::print_inlining() { // Print inline tree if (_should_send_method) { 
InlineTree *inlineTree = C->ilt(); - if (inlineTree != NULL) { + if (inlineTree != nullptr) { print_inline_tree(inlineTree); } else { // print this method only @@ -334,7 +334,7 @@ void IdealGraphPrinter::begin_method() { // Has to be called whenever a method has finished compilation void IdealGraphPrinter::end_method() { tail(GROUP_ELEMENT); - _current_method = NULL; + _current_method = nullptr; _xml->flush(); } @@ -381,14 +381,14 @@ void IdealGraphPrinter::visit_node(Node *n, bool edges, VectorSet* temp_set) { print_prop("debug_idx", node->_debug_idx); #endif - if (C->cfg() != NULL) { + if (C->cfg() != nullptr) { Block* block = C->cfg()->get_block_for_node(node); - if (block == NULL) { + if (block == nullptr) { print_prop("block", C->cfg()->get_block(0)->_pre_order); } else { print_prop("block", block->_pre_order); if (node == block->head()) { - if (block->_idom != NULL) { + if (block->_idom != nullptr) { print_prop("idom", block->_idom->_pre_order); } print_prop("dom_depth", block->_dom_depth); @@ -427,7 +427,7 @@ void IdealGraphPrinter::visit_node(Node *n, bool edges, VectorSet* temp_set) { } Node_Notes* nn = C->node_notes_at(node->_idx); - if (nn != NULL && !nn->is_clear() && nn->jvms() != NULL) { + if (nn != nullptr && !nn->is_clear() && nn->jvms() != nullptr) { buffer[0] = 0; stringStream ss(buffer, sizeof(buffer) - 1); nn->jvms()->dump_spec(&ss); @@ -466,7 +466,7 @@ void IdealGraphPrinter::visit_node(Node *n, bool edges, VectorSet* temp_set) { print_prop("is_reduction", "true"); } - if (C->matcher() != NULL) { + if (C->matcher() != nullptr) { if (C->matcher()->is_shared(node)) { print_prop("is_shared", "true"); } else { @@ -478,7 +478,7 @@ void IdealGraphPrinter::visit_node(Node *n, bool edges, VectorSet* temp_set) { print_prop("is_dontcare", "false"); } Node* old = C->matcher()->find_old_node(node); - if (old != NULL) { + if (old != nullptr) { print_prop("old_node_idx", old->_idx); } } @@ -497,7 +497,7 @@ void IdealGraphPrinter::visit_node(Node *n, bool 
edges, VectorSet* temp_set) { stringStream s2(buffer, sizeof(buffer) - 1); node->dump_spec(&s2); - if (t != NULL && (t->isa_instptr() || t->isa_instklassptr())) { + if (t != nullptr && (t->isa_instptr() || t->isa_instklassptr())) { const TypeInstPtr *toop = t->isa_instptr(); const TypeInstKlassPtr *tkls = t->isa_instklassptr(); if (toop) { @@ -583,19 +583,19 @@ void IdealGraphPrinter::visit_node(Node *n, bool edges, VectorSet* temp_set) { } } - JVMState* caller = NULL; + JVMState* caller = nullptr; if (node->is_SafePoint()) { caller = node->as_SafePoint()->jvms(); } else { Node_Notes* notes = C->node_notes_at(node->_idx); - if (notes != NULL) { + if (notes != nullptr) { caller = notes->jvms(); } } - if (caller != NULL) { + if (caller != nullptr) { stringStream bciStream; - ciMethod* last = NULL; + ciMethod* last = nullptr; int last_bci; while(caller) { if (caller->has_method()) { @@ -606,13 +606,13 @@ void IdealGraphPrinter::visit_node(Node *n, bool edges, VectorSet* temp_set) { caller = caller->caller(); } print_prop("bci", bciStream.freeze()); - if (last != NULL && last->has_linenumber_table() && last_bci >= 0) { + if (last != nullptr && last->has_linenumber_table() && last_bci >= 0) { print_prop("line", last->line_number_from_bci(last_bci)); } } #ifdef ASSERT - if (node->debug_orig() != NULL) { + if (node->debug_orig() != nullptr) { stringStream dorigStream; node->dump_orig(&dorigStream, false); print_prop("debug_orig", dorigStream.freeze()); @@ -643,12 +643,12 @@ void IdealGraphPrinter::print_field(const Node* node) { stringStream ss(buffer, sizeof(buffer) - 1); ciField* field = get_field(node); uint depth = 0; - if (field == NULL) { + if (field == nullptr) { depth++; field = find_source_field_of_array_access(node, depth); } - if (field != NULL) { + if (field != nullptr) { // Either direct field access or array access field->print_name_on(&ss); for (uint i = 0; i < depth; i++) { @@ -665,34 +665,34 @@ void IdealGraphPrinter::print_field(const Node* node) { 
ciField* IdealGraphPrinter::get_field(const Node* node) { const TypePtr* adr_type = node->adr_type(); - Compile::AliasType* atp = NULL; + Compile::AliasType* atp = nullptr; if (C->have_alias_type(adr_type)) { atp = C->alias_type(adr_type); } - if (atp != NULL) { + if (atp != nullptr) { ciField* field = atp->field(); - if (field != NULL) { + if (field != nullptr) { // Found field associated with 'node'. return field; } } - return NULL; + return nullptr; } // Try to find the field that is associated with a memory node belonging to an array access. ciField* IdealGraphPrinter::find_source_field_of_array_access(const Node* node, uint& depth) { if (!node->is_Mem()) { // Not an array access - return NULL; + return nullptr; } do { - if (node->adr_type() != NULL && node->adr_type()->isa_aryptr()) { + if (node->adr_type() != nullptr && node->adr_type()->isa_aryptr()) { // Only process array accesses. Pattern match to find actual field source access. node = get_load_node(node); - if (node != NULL) { + if (node != nullptr) { ciField* field = get_field(node); - if (field != NULL) { + if (field != nullptr) { return field; } // Could be a multi-dimensional array. Repeat loop. @@ -704,16 +704,16 @@ ciField* IdealGraphPrinter::find_source_field_of_array_access(const Node* node, break; } while (depth < 256); // Cannot have more than 255 dimensions - return NULL; + return nullptr; } // Pattern match on the inputs of 'node' to find load node for the field access. Node* IdealGraphPrinter::get_load_node(const Node* node) { - Node* load = NULL; + Node* load = nullptr; Node* addr = node->as_Mem()->in(MemNode::Address); - if (addr != NULL && addr->is_AddP()) { + if (addr != nullptr && addr->is_AddP()) { Node* base = addr->as_AddP()->base_node(); - if (base != NULL) { + if (base != nullptr) { base = base->uncast(); if (base->is_Load()) { // Mem(AddP([ConstraintCast*](LoadP))) for non-compressed oops. 
@@ -773,7 +773,7 @@ void IdealGraphPrinter::print_method(const char *name, int level) { // Print current ideal graph void IdealGraphPrinter::print(const char *name, Node *node) { - if (!_current_method || !_should_send_method || node == NULL) return; + if (!_current_method || !_should_send_method || node == nullptr) return; // Warning, unsafe cast? _chaitin = (PhaseChaitin *)C->regalloc(); @@ -785,7 +785,7 @@ void IdealGraphPrinter::print(const char *name, Node *node) { VectorSet temp_set; head(NODES_ELEMENT); - if (C->cfg() != NULL) { + if (C->cfg() != nullptr) { // Compute the maximum estimated frequency in the current graph. _max_freq = 1.0e-6; for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) { @@ -801,7 +801,7 @@ void IdealGraphPrinter::print(const char *name, Node *node) { head(EDGES_ELEMENT); walk_nodes(node, true, &temp_set); tail(EDGES_ELEMENT); - if (C->cfg() != NULL) { + if (C->cfg() != nullptr) { head(CONTROL_FLOW_ELEMENT); for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) { Block* block = C->cfg()->get_block(i); @@ -866,7 +866,7 @@ void IdealGraphPrinter::init_network_stream() { tty->print_cr("Client available, but does not want to receive data!"); _network_stream->close(); delete _network_stream; - _network_stream = NULL; + _network_stream = nullptr; return; } _output = _network_stream; @@ -879,11 +879,11 @@ void IdealGraphPrinter::init_network_stream() { } void IdealGraphPrinter::update_compiled_method(ciMethod* current_method) { - assert(C != NULL, "must already be set"); + assert(C != nullptr, "must already be set"); if (current_method != _current_method) { // If a different method, end the old and begin with the new one. 
end_method(); - _current_method = NULL; + _current_method = nullptr; begin_method(); } } diff --git a/src/hotspot/share/opto/idealGraphPrinter.hpp b/src/hotspot/share/opto/idealGraphPrinter.hpp index 6f845d5b048..091560d9231 100644 --- a/src/hotspot/share/opto/idealGraphPrinter.hpp +++ b/src/hotspot/share/opto/idealGraphPrinter.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2007, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -122,7 +122,7 @@ class IdealGraphPrinter : public CHeapObj { ~IdealGraphPrinter(); public: - IdealGraphPrinter(Compile* compile, const char* file_name = NULL, bool append = false); + IdealGraphPrinter(Compile* compile, const char* file_name = nullptr, bool append = false); static void clean_up(); static IdealGraphPrinter *printer(); diff --git a/src/hotspot/share/opto/idealKit.cpp b/src/hotspot/share/opto/idealKit.cpp index d22ffadea08..86f2821c4d6 100644 --- a/src/hotspot/share/opto/idealKit.cpp +++ b/src/hotspot/share/opto/idealKit.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -45,9 +45,9 @@ IdealKit::IdealKit(GraphKit* gkit, bool delay_all_transforms, bool has_declarati _initial_i_o = gkit->i_o(); _delay_all_transforms = delay_all_transforms; _var_ct = 0; - _cvstate = NULL; + _cvstate = nullptr; // We can go memory state free or else we need the entire memory state - assert(_initial_memory == NULL || _initial_memory->Opcode() == Op_MergeMem, "memory must be pre-split"); + assert(_initial_memory == nullptr || _initial_memory->Opcode() == Op_MergeMem, "memory must be pre-split"); assert(!_gvn.is_IterGVN(), "IdealKit can't be used during Optimize phase"); int init_size = 5; _pending_cvstates = new (C->node_arena()) GrowableArray(C->node_arena(), init_size, 0, 0); @@ -73,11 +73,11 @@ void IdealKit::if_then(Node* left, BoolTest::mask relop, Node* right, float prob, float cnt, bool push_new_state) { assert((state() & (BlockS|LoopS|IfThenS|ElseS)), "bad state for new If"); Node* bol; - if (left->bottom_type()->isa_ptr() == NULL) { - if (left->bottom_type()->isa_int() != NULL) { + if (left->bottom_type()->isa_ptr() == nullptr) { + if (left->bottom_type()->isa_int() != nullptr) { bol = Bool(CmpI(left, right), relop); } else { - assert(left->bottom_type()->isa_long() != NULL, "what else?"); + assert(left->bottom_type()->isa_long() != nullptr, "what else?"); bol = Bool(CmpL(left, right), relop); } @@ -202,7 +202,7 @@ void IdealKit::end_loop() { // must be specified (which should be 1 less than // the number of precedessors.) 
Node* IdealKit::make_label(int goto_ct) { - assert(_cvstate != NULL, "must declare variables before labels"); + assert(_cvstate != nullptr, "must declare variables before labels"); Node* lab = new_cvstate(); int sz = 1 + goto_ct + 1 /* fall thru */; Node* reg = delay_transform(new RegionNode(sz)); @@ -228,7 +228,7 @@ void IdealKit::goto_(Node* lab, bool bind) { Node* reg = lab->in(TypeFunc::Control); // find next empty slot in region uint slot = 1; - while (slot < reg->req() && reg->in(slot) != NULL) slot++; + while (slot < reg->req() && reg->in(slot) != nullptr) slot++; assert(slot < reg->req(), "too many gotos"); // If this is last predecessor, then don't force phi creation if (slot == reg->req() - 1) bind = false; @@ -245,9 +245,9 @@ void IdealKit::goto_(Node* lab, bool bind) { // Get the current value of the var Node* m = _cvstate->in(i); // If the var went unused no need for a phi - if (m == NULL) { + if (m == nullptr) { continue; - } else if (l == NULL || m == l) { + } else if (l == nullptr || m == l) { // Only one unique value "m" is known to reach this label so a phi // is not yet necessary unless: // the label is being bound and all predecessors have not been seen, @@ -326,7 +326,7 @@ Node* IdealKit::copy_cvstate() { //-----------------------------clear----------------------------------- void IdealKit::clear(Node* m) { - for (uint i = 0; i < m->req(); i++) m->set_req(i, NULL); + for (uint i = 0; i < m->req(); i++) m->set_req(i, nullptr); } //-----------------------------IdealVariable---------------------------- @@ -356,7 +356,7 @@ Node* IdealKit::load(Node* ctl, LoadNode::ControlDependency control_dependency) { assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" ); - const TypePtr* adr_type = NULL; // debug-mode-only argument + const TypePtr* adr_type = nullptr; // debug-mode-only argument debug_only(adr_type = C->get_adr_type(adr_idx)); Node* mem = memory(adr_idx); Node* ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, 
control_dependency, require_atomic_access); @@ -368,7 +368,7 @@ Node* IdealKit::store(Node* ctl, Node* adr, Node *val, BasicType bt, MemNode::MemOrd mo, bool require_atomic_access, bool mismatched) { assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory"); - const TypePtr* adr_type = NULL; + const TypePtr* adr_type = nullptr; debug_only(adr_type = C->get_adr_type(adr_idx)); Node *mem = memory(adr_idx); Node* st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo, require_atomic_access); @@ -387,7 +387,7 @@ Node* IdealKit::storeCM(Node* ctl, Node* adr, Node *val, Node* oop_store, int oo BasicType bt, int adr_idx) { assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" ); - const TypePtr* adr_type = NULL; + const TypePtr* adr_type = nullptr; debug_only(adr_type = C->get_adr_type(adr_idx)); Node *mem = memory(adr_idx); @@ -411,11 +411,11 @@ void IdealKit::do_memory_merge(Node* merging, Node* join) { // Get the region for the join state Node* join_region = join->in(TypeFunc::Control); - assert(join_region != NULL, "join region must exist"); - if (join->in(TypeFunc::I_O) == NULL ) { + assert(join_region != nullptr, "join region must exist"); + if (join->in(TypeFunc::I_O) == nullptr ) { join->set_req(TypeFunc::I_O, merging->in(TypeFunc::I_O)); } - if (join->in(TypeFunc::Memory) == NULL ) { + if (join->in(TypeFunc::Memory) == nullptr ) { join->set_req(TypeFunc::Memory, merging->in(TypeFunc::Memory)); return; } @@ -503,10 +503,10 @@ Node* IdealKit::make_leaf_call(const TypeFunc *slow_call_type, call->init_req( TypeFunc::FramePtr, top() /* frameptr() */ ); call->init_req( TypeFunc::ReturnAdr, top() ); - if (parm0 != NULL) call->init_req(TypeFunc::Parms+0, parm0); - if (parm1 != NULL) call->init_req(TypeFunc::Parms+1, parm1); - if (parm2 != NULL) call->init_req(TypeFunc::Parms+2, parm2); - if (parm3 != NULL) call->init_req(TypeFunc::Parms+3, parm3); + if (parm0 != nullptr) call->init_req(TypeFunc::Parms+0, parm0); + if 
(parm1 != nullptr) call->init_req(TypeFunc::Parms+1, parm1); + if (parm2 != nullptr) call->init_req(TypeFunc::Parms+2, parm2); + if (parm3 != nullptr) call->init_req(TypeFunc::Parms+3, parm3); // Node *c = _gvn.transform(call); call = (CallNode *) _gvn.transform(call); @@ -524,7 +524,7 @@ Node* IdealKit::make_leaf_call(const TypeFunc *slow_call_type, assert(C->alias_type(call->adr_type()) == C->alias_type(adr_type), "call node must be constructed correctly"); - Node* res = NULL; + Node* res = nullptr; if (slow_call_type->range()->cnt() > TypeFunc::Parms) { assert(slow_call_type->range()->cnt() == TypeFunc::Parms+1, "only one return value"); res = transform(new ProjNode(call, TypeFunc::Parms)); @@ -555,10 +555,10 @@ void IdealKit::make_leaf_call_no_fp(const TypeFunc *slow_call_type, call->init_req( TypeFunc::FramePtr, top() /* frameptr() */ ); call->init_req( TypeFunc::ReturnAdr, top() ); - if (parm0 != NULL) call->init_req(TypeFunc::Parms+0, parm0); - if (parm1 != NULL) call->init_req(TypeFunc::Parms+1, parm1); - if (parm2 != NULL) call->init_req(TypeFunc::Parms+2, parm2); - if (parm3 != NULL) call->init_req(TypeFunc::Parms+3, parm3); + if (parm0 != nullptr) call->init_req(TypeFunc::Parms+0, parm0); + if (parm1 != nullptr) call->init_req(TypeFunc::Parms+1, parm1); + if (parm2 != nullptr) call->init_req(TypeFunc::Parms+2, parm2); + if (parm3 != nullptr) call->init_req(TypeFunc::Parms+3, parm3); // Node *c = _gvn.transform(call); call = (CallNode *) _gvn.transform(call); diff --git a/src/hotspot/share/opto/idealKit.hpp b/src/hotspot/share/opto/idealKit.hpp index 55d63b8720f..20acec47211 100644 --- a/src/hotspot/share/opto/idealKit.hpp +++ b/src/hotspot/share/opto/idealKit.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -159,7 +159,7 @@ class IdealKit: public StackObj { void set_i_o(Node* c) { _cvstate->set_req(TypeFunc::I_O, c); } void set(IdealVariable& v, Node* rhs) { _cvstate->set_req(first_var + v.id(), rhs); } Node* value(IdealVariable& v) { return _cvstate->in(first_var + v.id()); } - void dead(IdealVariable& v) { set(v, (Node*)NULL); } + void dead(IdealVariable& v) { set(v, (Node*)nullptr); } void if_then(Node* left, BoolTest::mask relop, Node* right, float prob = PROB_FAIR, float cnt = COUNT_UNKNOWN, bool push_new_state = true); @@ -248,9 +248,9 @@ class IdealKit: public StackObj { address slow_call, const char *leaf_name, Node* parm0, - Node* parm1 = NULL, - Node* parm2 = NULL, - Node* parm3 = NULL); + Node* parm1 = nullptr, + Node* parm2 = nullptr, + Node* parm3 = nullptr); void make_leaf_call_no_fp(const TypeFunc *slow_call_type, address slow_call, diff --git a/src/hotspot/share/opto/ifg.cpp b/src/hotspot/share/opto/ifg.cpp index cee302f6ef1..ed728342fd0 100644 --- a/src/hotspot/share/opto/ifg.cpp +++ b/src/hotspot/share/opto/ifg.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -819,7 +819,7 @@ void PhaseChaitin::adjust_high_pressure_index(Block* b, uint& block_hrp_index, P } void PhaseChaitin::print_pressure_info(Pressure& pressure, const char *str) { - if (str != NULL) { + if (str != nullptr) { tty->print_cr("# *** %s ***", str); } tty->print_cr("# start pressure is = %d", pressure.start_pressure()); diff --git a/src/hotspot/share/opto/ifnode.cpp b/src/hotspot/share/opto/ifnode.cpp index 4f258293cf2..43574d9ff0d 100644 --- a/src/hotspot/share/opto/ifnode.cpp +++ b/src/hotspot/share/opto/ifnode.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -79,28 +79,28 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) { // Look for a compare of a constant and a merged value Node *i1 = iff->in(1); - if( !i1->is_Bool() ) return NULL; + if( !i1->is_Bool() ) return nullptr; BoolNode *b = i1->as_Bool(); Node *cmp = b->in(1); - if( !cmp->is_Cmp() ) return NULL; + if( !cmp->is_Cmp() ) return nullptr; i1 = cmp->in(1); - if( i1 == NULL || !i1->is_Phi() ) return NULL; + if( i1 == nullptr || !i1->is_Phi() ) return nullptr; PhiNode *phi = i1->as_Phi(); Node *con2 = cmp->in(2); - if( !con2->is_Con() ) return NULL; + if( !con2->is_Con() ) return nullptr; // See that the merge point contains some constants - Node *con1=NULL; + Node *con1=nullptr; uint i4; for( i4 = 1; i4 < phi->req(); i4++ ) { con1 = phi->in(i4); - if( !con1 ) return NULL; // Do not optimize partially collapsed merges + if( !con1 ) return nullptr; // Do not optimize partially collapsed merges if( con1->is_Con() ) break; // Found a constant // Also allow null-vs-not-null checks const TypePtr *tp = igvn->type(con1)->isa_ptr(); if( tp && tp->_ptr 
== TypePtr::NotNull ) break; } - if( i4 >= phi->req() ) return NULL; // Found no constants + if( i4 >= phi->req() ) return nullptr; // Found no constants igvn->C->set_has_split_ifs(true); // Has chance for split-if @@ -111,18 +111,18 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) { const Type *t = cmp2->Value(igvn); // This compare is dead, so whack it! igvn->remove_dead_node(cmp2); - if( !t->singleton() ) return NULL; + if( !t->singleton() ) return nullptr; // No intervening control, like a simple Call Node* r = iff->in(0); if (!r->is_Region() || r->is_Loop() || phi->region() != r || r->as_Region()->is_copy()) { - return NULL; + return nullptr; } // No other users of the cmp/bool if (b->outcnt() != 1 || cmp->outcnt() != 1) { //tty->print_cr("many users of cmp/bool"); - return NULL; + return nullptr; } // Make sure we can determine where all the uses of merged values go @@ -139,13 +139,13 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) { tty->print_cr("Region has odd use"); u->dump(2); }*/ - return NULL; + return nullptr; } if( u != phi ) { // CNC - do not allow any other merged value //tty->print_cr("Merging another value"); //u->dump(2); - return NULL; + return nullptr; } // Make sure we can account for all Phi uses for (DUIterator_Fast kmax, k = u->fast_outs(kmax); k < kmax; k++) { @@ -157,8 +157,8 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) { // If the cast is derived from data flow edges, it may not have a control edge. // If so, it should be safe to split. But follow-up code can not deal with // this (l. 359). So skip. - if (v->in(0) == NULL) { - return NULL; + if (v->in(0) == nullptr) { + return nullptr; } if (v->in(0)->in(0) == iff) { continue; // CastPP/II of the IfNode is OK @@ -167,9 +167,9 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) { // Disabled following code because I cannot tell if exactly one // path dominates without a real dominator check. 
CNC 9/9/1999 //uint vop = v->Opcode(); - //if( vop == Op_Phi ) { // Phi from another merge point might be OK - // Node *r = v->in(0); // Get controlling point - // if( !r ) return NULL; // Degraded to a copy + //if( vop == Op_Phi ) { // Phi from another merge point might be OK + // Node *r = v->in(0); // Get controlling point + // if( !r ) return nullptr; // Degraded to a copy // // Find exactly one path in (either True or False doms, but not IFF) // int cnt = 0; // for( uint i = 1; i < r->req(); i++ ) @@ -190,7 +190,7 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) { } */ } - return NULL; + return nullptr; /* CNC - Cut out all the fancy acceptance tests // Can we clone this use when doing the transformation? @@ -198,14 +198,14 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) { if( !v->in(0) && v != cmp ) { tty->print_cr("Phi has free-floating use"); v->dump(2); - return NULL; + return nullptr; } for( uint l = 1; l < v->req(); l++ ) { if( (!v->in(l)->is_Phi() || v->in(l)->in(0) != r) && !v->in(l)->is_Con() ) { tty->print_cr("Phi has use"); v->dump(2); - return NULL; + return nullptr; } // End of if Phi-use input is neither Phi nor Constant } // End of for all inputs to Phi-use */ @@ -214,7 +214,7 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) { // Only do this if the IF node is in a sane state if (iff->outcnt() != 2) - return NULL; + return nullptr; // Got a hit! Do the Mondo Hack! // @@ -243,17 +243,17 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) { req_c++; } Node* proj = PhaseIdealLoop::find_predicate(r->in(ii)); - if (proj != NULL) { + if (proj != nullptr) { // Bail out if splitting through a region with a predicate input (could // also be a loop header before loop opts creates a LoopNode for it). - return NULL; + return nullptr; } } // If all the defs of the phi are the same constant, we already have the desired end state. // Skip the split that would create empty phi and region nodes. 
if ((r->req() - req_c) == 1) { - return NULL; + return nullptr; } // At this point we know that we can apply the split if optimization. If the region is still on the worklist, @@ -261,7 +261,7 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) { // This also avoids the creation of dead data loops when rewiring data nodes below when a region is dying. if (igvn->_worklist.member(r)) { igvn->_worklist.push(iff); // retry split if later again - return NULL; + return nullptr; } Node *region_c = new RegionNode(req_c + 1); @@ -336,17 +336,17 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) { igvn->register_new_node_with_optimizer( region_f ); igvn->hash_delete(cmp);// Remove soon-to-be-dead node from hash table. - cmp->set_req(1,NULL); // Whack the inputs to cmp because it will be dead - cmp->set_req(2,NULL); + cmp->set_req(1,nullptr); // Whack the inputs to cmp because it will be dead + cmp->set_req(2,nullptr); // Check for all uses of the Phi and give them a new home. // The 'cmp' got cloned, but CastPP/IIs need to be moved. 
- Node *phi_s = NULL; // do not construct unless needed - Node *phi_f = NULL; // do not construct unless needed + Node *phi_s = nullptr; // do not construct unless needed + Node *phi_f = nullptr; // do not construct unless needed for (DUIterator_Last i2min, i2 = phi->last_outs(i2min); i2 >= i2min; --i2) { Node* v = phi->last_out(i2);// User of the phi igvn->rehash_node_delayed(v); // Have to fixup other Phi users uint vop = v->Opcode(); - Node *proj = NULL; + Node *proj = nullptr; if( vop == Op_Phi ) { // Remote merge point Node *r = v->in(0); for (uint i3 = 1; i3 < r->req(); i3++) @@ -359,11 +359,11 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) { } else { assert( 0, "do not know how to handle this guy" ); } - guarantee(proj != NULL, "sanity"); + guarantee(proj != nullptr, "sanity"); Node *proj_path_data, *proj_path_ctrl; if( proj->Opcode() == Op_IfTrue ) { - if( phi_s == NULL ) { + if( phi_s == nullptr ) { // Only construct phi_s if needed, otherwise provides // interfering use. phi_s = PhiNode::make_blank(region_s,phi); @@ -375,7 +375,7 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) { proj_path_data = phi_s; proj_path_ctrl = region_s; } else { - if( phi_f == NULL ) { + if( phi_f == nullptr ) { // Only construct phi_f if needed, otherwise provides // interfering use. 
phi_f = PhiNode::make_blank(region_f,phi); @@ -431,7 +431,7 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) { for (DUIterator_Last lmin, l = r->last_outs(lmin); l >= lmin;) { Node* u = r->last_out(l); if( u == r ) { - r->set_req(0, NULL); + r->set_req(0, nullptr); } else { assert(u->outcnt() == 0, "only dead users"); igvn->remove_dead_node(u); @@ -452,14 +452,14 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) { // for the failed path ProjNode* IfNode::range_check_trap_proj(int& flip_test, Node*& l, Node*& r) { if (outcnt() != 2) { - return NULL; + return nullptr; } Node* b = in(1); - if (b == NULL || !b->is_Bool()) return NULL; + if (b == nullptr || !b->is_Bool()) return nullptr; BoolNode* bn = b->as_Bool(); Node* cmp = bn->in(1); - if (cmp == NULL) return NULL; - if (cmp->Opcode() != Op_CmpU) return NULL; + if (cmp == nullptr) return nullptr; + if (cmp->Opcode() != Op_CmpU) return nullptr; l = cmp->in(1); r = cmp->in(2); @@ -469,10 +469,10 @@ ProjNode* IfNode::range_check_trap_proj(int& flip_test, Node*& l, Node*& r) { r = cmp->in(1); flip_test = 2; } else if (bn->_test._test != BoolTest::lt) { - return NULL; + return nullptr; } - if (l->is_top()) return NULL; // Top input means dead test - if (r->Opcode() != Op_LoadRange && !is_RangeCheck()) return NULL; + if (l->is_top()) return nullptr; // Top input means dead test + if (r->Opcode() != Op_LoadRange && !is_RangeCheck()) return nullptr; // We have recognized one of these forms: // Flip 1: If (Bool[<] CmpU(l, LoadRange)) ... @@ -485,15 +485,15 @@ ProjNode* IfNode::range_check_trap_proj(int& flip_test, Node*& l, Node*& r) { //------------------------------is_range_check--------------------------------- // Return 0 if not a range check. Return 1 if a range check and set index and -// offset. Return 2 if we had to negate the test. Index is NULL if the check +// offset. Return 2 if we had to negate the test. Index is null if the check // is versus a constant. 
int RangeCheckNode::is_range_check(Node* &range, Node* &index, jint &offset) { int flip_test = 0; - Node* l = NULL; - Node* r = NULL; + Node* l = nullptr; + Node* r = nullptr; ProjNode* iftrap = range_check_trap_proj(flip_test, l, r); - if (iftrap == NULL) { + if (iftrap == nullptr) { return 0; } @@ -501,7 +501,7 @@ int RangeCheckNode::is_range_check(Node* &range, Node* &index, jint &offset) { // along the OOB path. Otherwise, it's possible that the user wrote // something which optimized to look like a range check but behaves // in some other way. - if (iftrap->is_uncommon_trap_proj(Deoptimization::Reason_range_check) == NULL) { + if (iftrap->is_uncommon_trap_proj(Deoptimization::Reason_range_check) == nullptr) { return 0; } @@ -518,7 +518,7 @@ int RangeCheckNode::is_range_check(Node* &range, Node* &index, jint &offset) { } } else if ((off = l->find_int_con(-1)) >= 0) { // constant offset with no variable index - ind = NULL; + ind = nullptr; } else { // variable index with no constant offset (or dead negative index) off = 0; @@ -563,7 +563,7 @@ static void adjust_check(Node* proj, Node* range, Node* index, } //------------------------------up_one_dom------------------------------------- -// Walk up the dominator tree one step. Return NULL at root or true +// Walk up the dominator tree one step. Return null at root or true // complex merges. Skips through small diamonds. Node* IfNode::up_one_dom(Node *curr, bool linear_only) { Node *dom = curr->in(0); @@ -576,10 +576,10 @@ Node* IfNode::up_one_dom(Node *curr, bool linear_only) { // Use linear_only if we are still parsing, since we cannot // trust the regions to be fully filled in. if (linear_only) - return NULL; + return nullptr; if( dom->is_Root() ) - return NULL; + return nullptr; // Else hit a Region. 
Check for a loop header if( dom->is_Loop() ) @@ -598,12 +598,12 @@ Node* IfNode::up_one_dom(Node *curr, bool linear_only) { if( din4->is_Call() && // Handle a slow-path call on either arm (din4 = din4->in(0)) ) din4 = din4->in(0); - if (din3 != NULL && din3 == din4 && din3->is_If()) // Regions not degraded to a copy + if (din3 != nullptr && din3 == din4 && din3->is_If()) // Regions not degraded to a copy return din3; // Skip around diamonds } // Give up the search at true merges - return NULL; // Dead loop? Or hit root? + return nullptr; // Dead loop? Or hit root? } @@ -620,7 +620,7 @@ const TypeInt* IfNode::filtered_int_type(PhaseGVN* gvn, Node* val, Node* if_proj const CmpNode* cmp = bol->in(1)->as_Cmp(); if (cmp->in(1) == val) { const TypeInt* cmp2_t = gvn->type(cmp->in(2))->isa_int(); - if (cmp2_t != NULL) { + if (cmp2_t != nullptr) { jint lo = cmp2_t->_lo; jint hi = cmp2_t->_hi; BoolTest::mask msk = if_proj->Opcode() == Op_IfTrue ? bol->_test._test : bol->_test.negate(); @@ -628,7 +628,7 @@ const TypeInt* IfNode::filtered_int_type(PhaseGVN* gvn, Node* val, Node* if_proj case BoolTest::ne: { // If val is compared to its lower or upper bound, we can narrow the type const TypeInt* val_t = gvn->type(val)->isa_int(); - if (val_t != NULL && !val_t->singleton() && cmp2_t->is_con()) { + if (val_t != nullptr && !val_t->singleton() && cmp2_t->is_con()) { if (val_t->_lo == lo) { return TypeInt::make(val_t->_lo + 1, val_t->_hi, val_t->_widen); } else if (val_t->_hi == hi) { @@ -636,7 +636,7 @@ const TypeInt* IfNode::filtered_int_type(PhaseGVN* gvn, Node* val, Node* if_proj } } // Can't refine type - return NULL; + return nullptr; } case BoolTest::eq: return cmp2_t; @@ -669,7 +669,7 @@ const TypeInt* IfNode::filtered_int_type(PhaseGVN* gvn, Node* val, Node* if_proj } } } - return NULL; + return nullptr; } //------------------------------fold_compares---------------------------- @@ -712,11 +712,11 @@ const TypeInt* IfNode::filtered_int_type(PhaseGVN* gvn, Node* val, Node* 
if_proj // Is the comparison for this If suitable for folding? bool IfNode::cmpi_folds(PhaseIterGVN* igvn, bool fold_ne) { - return in(1) != NULL && + return in(1) != nullptr && in(1)->is_Bool() && - in(1)->in(1) != NULL && + in(1)->in(1) != nullptr && in(1)->in(1)->Opcode() == Op_CmpI && - in(1)->in(1)->in(2) != NULL && + in(1)->in(1)->in(2) != nullptr && in(1)->in(1)->in(2) != igvn->C->top() && (in(1)->as_Bool()->_test.is_less() || in(1)->as_Bool()->_test.is_greater() || @@ -725,14 +725,14 @@ bool IfNode::cmpi_folds(PhaseIterGVN* igvn, bool fold_ne) { // Is a dominating control suitable for folding with this if? bool IfNode::is_ctrl_folds(Node* ctrl, PhaseIterGVN* igvn) { - return ctrl != NULL && + return ctrl != nullptr && ctrl->is_Proj() && - ctrl->in(0) != NULL && + ctrl->in(0) != nullptr && ctrl->in(0)->Opcode() == Op_If && ctrl->in(0)->outcnt() == 2 && ctrl->in(0)->as_If()->cmpi_folds(igvn, true) && // Must compare same value - ctrl->in(0)->in(1)->in(1)->in(1) != NULL && + ctrl->in(0)->in(1)->in(1)->in(1) != nullptr && ctrl->in(0)->in(1)->in(1)->in(1) != igvn->C->top() && ctrl->in(0)->in(1)->in(1)->in(1) == in(1)->in(1)->in(1); } @@ -741,23 +741,23 @@ bool IfNode::is_ctrl_folds(Node* ctrl, PhaseIterGVN* igvn) { bool IfNode::has_shared_region(ProjNode* proj, ProjNode*& success, ProjNode*& fail) { ProjNode* otherproj = proj->other_if_proj(); Node* otherproj_ctrl_use = otherproj->unique_ctrl_out_or_null(); - RegionNode* region = (otherproj_ctrl_use != NULL && otherproj_ctrl_use->is_Region()) ? otherproj_ctrl_use->as_Region() : NULL; - success = NULL; - fail = NULL; + RegionNode* region = (otherproj_ctrl_use != nullptr && otherproj_ctrl_use->is_Region()) ? 
otherproj_ctrl_use->as_Region() : nullptr; + success = nullptr; + fail = nullptr; - if (otherproj->outcnt() == 1 && region != NULL && !region->has_phi()) { + if (otherproj->outcnt() == 1 && region != nullptr && !region->has_phi()) { for (int i = 0; i < 2; i++) { ProjNode* proj = proj_out(i); - if (success == NULL && proj->outcnt() == 1 && proj->unique_out() == region) { + if (success == nullptr && proj->outcnt() == 1 && proj->unique_out() == region) { success = proj; - } else if (fail == NULL) { + } else if (fail == nullptr) { fail = proj; } else { - success = fail = NULL; + success = fail = nullptr; } } } - return success != NULL && fail != NULL; + return success != nullptr && fail != nullptr; } bool IfNode::is_dominator_unc(CallStaticJavaNode* dom_unc, CallStaticJavaNode* unc) { @@ -772,11 +772,11 @@ bool IfNode::is_dominator_unc(CallStaticJavaNode* dom_unc, CallStaticJavaNode* u // that the call stacks are equal for both JVMStates. JVMState* dom_caller = dom_unc->jvms()->caller(); JVMState* caller = unc->jvms()->caller(); - if ((dom_caller == NULL) != (caller == NULL)) { + if ((dom_caller == nullptr) != (caller == nullptr)) { // The current method must either be inlined into both dom_caller and // caller or must not be inlined at all (top method). Bail out otherwise. 
return false; - } else if (dom_caller != NULL && !dom_caller->same_calls_as(caller)) { + } else if (dom_caller != nullptr && !dom_caller->same_calls_as(caller)) { return false; } // Check that the bci of the dominating uncommon trap dominates the bci @@ -796,11 +796,11 @@ bool IfNode::is_dominator_unc(CallStaticJavaNode* dom_unc, CallStaticJavaNode* u ProjNode* IfNode::uncommon_trap_proj(CallStaticJavaNode*& call) const { for (int i = 0; i < 2; i++) { call = proj_out(i)->is_uncommon_trap_proj(Deoptimization::Reason_none); - if (call != NULL) { + if (call != nullptr) { return proj_out(i); } } - return NULL; + return nullptr; } // Do this If and the dominating If both branch out to an uncommon trap @@ -808,22 +808,22 @@ bool IfNode::has_only_uncommon_traps(ProjNode* proj, ProjNode*& success, ProjNod ProjNode* otherproj = proj->other_if_proj(); CallStaticJavaNode* dom_unc = otherproj->is_uncommon_trap_proj(Deoptimization::Reason_none); - if (otherproj->outcnt() == 1 && dom_unc != NULL) { + if (otherproj->outcnt() == 1 && dom_unc != nullptr) { // We need to re-execute the folded Ifs after deoptimization from the merged traps if (!dom_unc->jvms()->should_reexecute()) { return false; } - CallStaticJavaNode* unc = NULL; + CallStaticJavaNode* unc = nullptr; ProjNode* unc_proj = uncommon_trap_proj(unc); - if (unc_proj != NULL && unc_proj->outcnt() == 1) { + if (unc_proj != nullptr && unc_proj->outcnt() == 1) { if (dom_unc == unc) { // Allow the uncommon trap to be shared through a region RegionNode* r = unc->in(0)->as_Region(); if (r->outcnt() != 2 || r->req() != 3 || r->find_edge(otherproj) == -1 || r->find_edge(unc_proj) == -1) { return false; } - assert(r->has_phi() == NULL, "simple region shouldn't have a phi"); + assert(r->has_phi() == nullptr, "simple region shouldn't have a phi"); } else if (dom_unc->in(0) != otherproj || unc->in(0) != unc_proj) { return false; } @@ -893,8 +893,8 @@ bool IfNode::fold_compares_helper(ProjNode* proj, ProjNode* success, ProjNode* f // 
Figure out which of the two tests sets the upper bound and which // sets the lower bound if any. - Node* adjusted_lim = NULL; - if (lo_type != NULL && hi_type != NULL && hi_type->_lo > lo_type->_hi && + Node* adjusted_lim = nullptr; + if (lo_type != nullptr && hi_type != nullptr && hi_type->_lo > lo_type->_hi && hi_type->_hi == max_jint && lo_type->_lo == min_jint && lo_test != BoolTest::ne) { assert((dom_bool->_test.is_less() && !proj->_con) || (dom_bool->_test.is_greater() && proj->_con), "incorrect test"); @@ -939,7 +939,7 @@ bool IfNode::fold_compares_helper(ProjNode* proj, ProjNode* success, ProjNode* f } // this test was canonicalized assert(this_bool->_test.is_less() && fail->_con, "incorrect test"); - } else if (lo_type != NULL && hi_type != NULL && lo_type->_lo > hi_type->_hi && + } else if (lo_type != nullptr && hi_type != nullptr && lo_type->_lo > hi_type->_hi && lo_type->_hi == max_jint && hi_type->_lo == min_jint && lo_test != BoolTest::ne) { // this_bool = < @@ -999,9 +999,9 @@ bool IfNode::fold_compares_helper(ProjNode* proj, ProjNode* success, ProjNode* f assert(this_bool->_test.is_less() && !fail->_con, "incorrect test"); } else { const TypeInt* failtype = filtered_int_type(igvn, n, proj); - if (failtype != NULL) { + if (failtype != nullptr) { const TypeInt* type2 = filtered_int_type(igvn, n, fail); - if (type2 != NULL) { + if (type2 != nullptr) { failtype = failtype->join(type2)->is_int(); if (failtype->_lo > failtype->_hi) { // previous if determines the result of this if so @@ -1011,8 +1011,8 @@ bool IfNode::fold_compares_helper(ProjNode* proj, ProjNode* success, ProjNode* f } } } - lo = NULL; - hi = NULL; + lo = nullptr; + hi = nullptr; } if (lo && hi) { @@ -1020,7 +1020,7 @@ bool IfNode::fold_compares_helper(ProjNode* proj, ProjNode* success, ProjNode* f hook->init_req(0, lo); // Add a use to lo to prevent him from dying // Merge the two compares into a single unsigned compare by building (CmpU (n - lo) (hi - lo)) Node* adjusted_val = 
igvn->transform(new SubINode(n, lo)); - if (adjusted_lim == NULL) { + if (adjusted_lim == nullptr) { adjusted_lim = igvn->transform(new SubINode(hi, lo)); } hook->destruct(igvn); @@ -1094,10 +1094,10 @@ Node* IfNode::merge_uncommon_traps(ProjNode* proj, ProjNode* success, ProjNode* Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request); int flip_test = 0; - Node* l = NULL; - Node* r = NULL; + Node* l = nullptr; + Node* r = nullptr; - if (success->in(0)->as_If()->range_check_trap_proj(flip_test, l, r) != NULL) { + if (success->in(0)->as_If()->range_check_trap_proj(flip_test, l, r) != nullptr) { // If this looks like a range check, change the trap to // Reason_range_check so the compiler recognizes it as a range // check and applies the corresponding optimizations @@ -1152,7 +1152,7 @@ void IfNode::improve_address_types(Node* l, Node* r, ProjNode* fail, PhaseIterGV } } else if (use->is_Mem()) { Node* ctrl = use->in(0); - for (int i = 0; i < 10 && ctrl != NULL && ctrl != fail; i++) { + for (int i = 0; i < 10 && ctrl != nullptr && ctrl != fail; i++) { ctrl = up_one_dom(ctrl); } if (ctrl == fail) { @@ -1182,7 +1182,7 @@ void IfNode::improve_address_types(Node* l, Node* r, ProjNode* fail, PhaseIterGV } } } - } else if (use->in(0) == NULL && (igvn->type(use)->isa_long() || + } else if (use->in(0) == nullptr && (igvn->type(use)->isa_long() || igvn->type(use)->isa_ptr())) { stack.set_index(i+1); stack.push(use, 0); @@ -1197,16 +1197,16 @@ void IfNode::improve_address_types(Node* l, Node* r, ProjNode* fail, PhaseIterGV } bool IfNode::is_cmp_with_loadrange(ProjNode* proj) { - if (in(1) != NULL && - in(1)->in(1) != NULL && - in(1)->in(1)->in(2) != NULL) { + if (in(1) != nullptr && + in(1)->in(1) != nullptr && + in(1)->in(1)->in(2) != nullptr) { Node* other = in(1)->in(1)->in(2); if (other->Opcode() == Op_LoadRange && - ((other->in(0) != NULL && other->in(0) == proj) || - (other->in(0) == NULL && - other->in(2) != NULL && + ((other->in(0) != 
nullptr && other->in(0) == proj) || + (other->in(0) == nullptr && + other->in(2) != nullptr && other->in(2)->is_AddP() && - other->in(2)->in(1) != NULL && + other->in(2)->in(1) != nullptr && other->in(2)->in(1)->Opcode() == Op_CastPP && other->in(2)->in(1)->in(0) == proj))) { return true; @@ -1217,12 +1217,12 @@ bool IfNode::is_cmp_with_loadrange(ProjNode* proj) { bool IfNode::is_null_check(ProjNode* proj, PhaseIterGVN* igvn) { Node* other = in(1)->in(1)->in(2); - if (other->in(MemNode::Address) != NULL && - proj->in(0)->in(1) != NULL && + if (other->in(MemNode::Address) != nullptr && + proj->in(0)->in(1) != nullptr && proj->in(0)->in(1)->is_Bool() && - proj->in(0)->in(1)->in(1) != NULL && + proj->in(0)->in(1)->in(1) != nullptr && proj->in(0)->in(1)->in(1)->Opcode() == Op_CmpP && - proj->in(0)->in(1)->in(1)->in(2) != NULL && + proj->in(0)->in(1)->in(1)->in(2) != nullptr && proj->in(0)->in(1)->in(1)->in(1) == other->in(MemNode::Address)->in(AddPNode::Address)->uncast() && igvn->type(proj->in(0)->in(1)->in(1)->in(2)) == TypePtr::NULL_PTR) { return true; @@ -1233,17 +1233,17 @@ bool IfNode::is_null_check(ProjNode* proj, PhaseIterGVN* igvn) { // Check that the If that is in between the 2 integer comparisons has // no side effect bool IfNode::is_side_effect_free_test(ProjNode* proj, PhaseIterGVN* igvn) { - if (proj == NULL) { + if (proj == nullptr) { return false; } CallStaticJavaNode* unc = proj->is_uncommon_trap_if_pattern(Deoptimization::Reason_none); - if (unc != NULL && proj->outcnt() <= 2) { + if (unc != nullptr && proj->outcnt() <= 2) { if (proj->outcnt() == 1 || // Allow simple null check from LoadRange (is_cmp_with_loadrange(proj) && is_null_check(proj, igvn))) { CallStaticJavaNode* unc = proj->is_uncommon_trap_if_pattern(Deoptimization::Reason_none); CallStaticJavaNode* dom_unc = proj->in(0)->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none); - assert(dom_unc != NULL, "is_uncommon_trap_if_pattern returned NULL"); + assert(dom_unc != 
nullptr, "is_uncommon_trap_if_pattern returned null"); // reroute_side_effect_free_unc changes the state of this // uncommon trap to restart execution at the previous @@ -1298,15 +1298,15 @@ void IfNode::reroute_side_effect_free_unc(ProjNode* proj, ProjNode* dom_proj, Ph } Node* IfNode::fold_compares(PhaseIterGVN* igvn) { - if (Opcode() != Op_If) return NULL; + if (Opcode() != Op_If) return nullptr; if (cmpi_folds(igvn)) { Node* ctrl = in(0); if (is_ctrl_folds(ctrl, igvn) && ctrl->outcnt() == 1) { // A integer comparison immediately dominated by another integer // comparison - ProjNode* success = NULL; - ProjNode* fail = NULL; + ProjNode* success = nullptr; + ProjNode* fail = nullptr; ProjNode* dom_cmp = ctrl->as_Proj(); if (has_shared_region(dom_cmp, success, fail) && // Next call modifies graph so must be last @@ -1318,11 +1318,11 @@ Node* IfNode::fold_compares(PhaseIterGVN* igvn) { fold_compares_helper(dom_cmp, success, fail, igvn)) { return merge_uncommon_traps(dom_cmp, success, fail, igvn); } - return NULL; - } else if (ctrl->in(0) != NULL && - ctrl->in(0)->in(0) != NULL) { - ProjNode* success = NULL; - ProjNode* fail = NULL; + return nullptr; + } else if (ctrl->in(0) != nullptr && + ctrl->in(0)->in(0) != nullptr) { + ProjNode* success = nullptr; + ProjNode* fail = nullptr; Node* dom = ctrl->in(0)->in(0); ProjNode* dom_cmp = dom->isa_Proj(); ProjNode* other_cmp = ctrl->isa_Proj(); @@ -1339,7 +1339,7 @@ Node* IfNode::fold_compares(PhaseIterGVN* igvn) { } } } - return NULL; + return nullptr; } //------------------------------remove_useless_bool---------------------------- @@ -1348,33 +1348,33 @@ Node* IfNode::fold_compares(PhaseIterGVN* igvn) { // Replace with if( x < y ) { ... 
} static Node *remove_useless_bool(IfNode *iff, PhaseGVN *phase) { Node *i1 = iff->in(1); - if( !i1->is_Bool() ) return NULL; + if( !i1->is_Bool() ) return nullptr; BoolNode *bol = i1->as_Bool(); Node *cmp = bol->in(1); - if( cmp->Opcode() != Op_CmpI ) return NULL; + if( cmp->Opcode() != Op_CmpI ) return nullptr; // Must be comparing against a bool const Type *cmp2_t = phase->type( cmp->in(2) ); if( cmp2_t != TypeInt::ZERO && cmp2_t != TypeInt::ONE ) - return NULL; + return nullptr; // Find a prior merge point merging the boolean i1 = cmp->in(1); - if( !i1->is_Phi() ) return NULL; + if( !i1->is_Phi() ) return nullptr; PhiNode *phi = i1->as_Phi(); if( phase->type( phi ) != TypeInt::BOOL ) - return NULL; + return nullptr; // Check for diamond pattern int true_path = phi->is_diamond_phi(); - if( true_path == 0 ) return NULL; + if( true_path == 0 ) return nullptr; // Make sure that iff and the control of the phi are different. This // should really only happen for dead control flow since it requires // an illegal cycle. - if (phi->in(0)->in(1)->in(0) == iff) return NULL; + if (phi->in(0)->in(1)->in(0) == iff) return nullptr; // phi->region->if_proj->ifnode->bool->cmp BoolNode *bol2 = phi->in(0)->in(1)->in(0)->in(1)->as_Bool(); @@ -1383,19 +1383,19 @@ static Node *remove_useless_bool(IfNode *iff, PhaseGVN *phase) { // either iff2->in(1) or its complement. 
int flip = 0; if( bol->_test._test == BoolTest::ne ) flip = 1-flip; - else if( bol->_test._test != BoolTest::eq ) return NULL; + else if( bol->_test._test != BoolTest::eq ) return nullptr; if( cmp2_t == TypeInt::ZERO ) flip = 1-flip; const Type *phi1_t = phase->type( phi->in(1) ); const Type *phi2_t = phase->type( phi->in(2) ); // Check for Phi(0,1) and flip if( phi1_t == TypeInt::ZERO ) { - if( phi2_t != TypeInt::ONE ) return NULL; + if( phi2_t != TypeInt::ONE ) return nullptr; flip = 1-flip; } else { // Check for Phi(1,0) - if( phi1_t != TypeInt::ONE ) return NULL; - if( phi2_t != TypeInt::ZERO ) return NULL; + if( phi1_t != TypeInt::ONE ) return nullptr; + if( phi2_t != TypeInt::ZERO ) return nullptr; } if( true_path == 2 ) { flip = 1-flip; @@ -1419,25 +1419,25 @@ struct RangeCheck { Node* IfNode::Ideal_common(PhaseGVN *phase, bool can_reshape) { if (remove_dead_region(phase, can_reshape)) return this; // No Def-Use info? - if (!can_reshape) return NULL; + if (!can_reshape) return nullptr; // Don't bother trying to transform a dead if - if (in(0)->is_top()) return NULL; + if (in(0)->is_top()) return nullptr; // Don't bother trying to transform an if with a dead test - if (in(1)->is_top()) return NULL; + if (in(1)->is_top()) return nullptr; // Another variation of a dead test - if (in(1)->is_Con()) return NULL; + if (in(1)->is_Con()) return nullptr; // Another variation of a dead if - if (outcnt() < 2) return NULL; + if (outcnt() < 2) return nullptr; // Canonicalize the test. Node* idt_if = idealize_test(phase, this); - if (idt_if != NULL) return idt_if; + if (idt_if != nullptr) return idt_if; // Try to split the IF PhaseIterGVN *igvn = phase->is_IterGVN(); Node *s = split_if(this, igvn); - if (s != NULL) return s; + if (s != nullptr) return s; return NodeSentinel; } @@ -1457,11 +1457,11 @@ Node* IfNode::Ideal(PhaseGVN *phase, bool can_reshape) { Node* bol2 = remove_useless_bool(this, phase); if (bol2) return bol2; - if (in(0) == NULL) return NULL; // Dead loop? 
+ if (in(0) == nullptr) return nullptr; // Dead loop? PhaseIterGVN* igvn = phase->is_IterGVN(); Node* result = fold_compares(igvn); - if (result != NULL) { + if (result != nullptr) { return result; } @@ -1470,7 +1470,7 @@ Node* IfNode::Ideal(PhaseGVN *phase, bool can_reshape) { if (is_If() && in(1)->is_Bool()) { Node* cmp = in(1)->in(1); if (cmp->Opcode() == Op_CmpP && - cmp->in(2) != NULL && // make sure cmp is not already dead + cmp->in(2) != nullptr && // make sure cmp is not already dead cmp->in(2)->bottom_type() == TypePtr::NULL_PTR) { dist = 64; // Limit for null-pointer scans } @@ -1478,7 +1478,7 @@ Node* IfNode::Ideal(PhaseGVN *phase, bool can_reshape) { Node* prev_dom = search_identical(dist); - if (prev_dom != NULL) { + if (prev_dom != nullptr) { // Replace dominated IfNode return dominated_by(prev_dom, igvn); } @@ -1504,8 +1504,8 @@ Node* IfNode::dominated_by(Node* prev_dom, PhaseIterGVN *igvn) { // be skipped. For example, range check predicate has two checks // for lower and upper bounds. ProjNode* unc_proj = proj_out(1 - prev_dom->as_Proj()->_con)->as_Proj(); - if (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate) != NULL || - unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_profile_predicate) != NULL) { + if (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate) != nullptr || + unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_profile_predicate) != nullptr) { prev_dom = idom; } @@ -1560,21 +1560,21 @@ Node* IfNode::search_identical(int dist) { while (dom->Opcode() != op || // Not same opcode? dom->in(1) != in(1) || // Not same input 1? prev_dom->in(0) != dom) { // One path of test does not dominate? 
- if (dist < 0) return NULL; + if (dist < 0) return nullptr; dist--; prev_dom = dom; dom = up_one_dom(dom); - if (!dom) return NULL; + if (!dom) return nullptr; } // Check that we did not follow a loop back to ourselves if (this == dom) { - return NULL; + return nullptr; } #ifndef PRODUCT - if (dist > 2) { // Add to count of NULL checks elided + if (dist > 2) { // Add to count of null checks elided explicit_null_checks_elided++; } #endif @@ -1618,26 +1618,26 @@ Node* IfNode::simple_subsuming(PhaseIterGVN* igvn) { Node* pre = in(0); if (!pre->is_IfTrue() && !pre->is_IfFalse()) { - return NULL; + return nullptr; } Node* dom = pre->in(0); if (!dom->is_If()) { - return NULL; + return nullptr; } Node* bol = in(1); if (!bol->is_Bool()) { - return NULL; + return nullptr; } Node* cmp = in(1)->in(1); if (!cmp->is_Cmp()) { - return NULL; + return nullptr; } if (!dom->in(1)->is_Bool()) { - return NULL; + return nullptr; } if (dom->in(1)->in(1) != cmp) { // Not same cond? - return NULL; + return nullptr; } int drel = subsuming_bool_test_encode(dom->in(1)); @@ -1645,11 +1645,11 @@ Node* IfNode::simple_subsuming(PhaseIterGVN* igvn) { int bout = pre->is_IfFalse() ? 1 : 0; if (drel < 0 || trel < 0) { - return NULL; + return nullptr; } int br = s_short_circuit_map[trel][2*drel+bout]; if (br == na) { - return NULL; + return nullptr; } #ifndef PRODUCT if (TraceIterativeGVN) { @@ -1726,7 +1726,7 @@ Node* IfProjNode::Identity(PhaseGVN* phase) { // CountedLoopEndNode may be eliminated by if subsuming, replace CountedLoopNode with LoopNode to // avoid mismatching between CountedLoopNode and CountedLoopEndNode in the following optimization. 
Node* head = unique_ctrl_out_or_null(); - if (head != NULL && head->is_BaseCountedLoop() && head->in(LoopNode::LoopBackControl) == this) { + if (head != nullptr && head->is_BaseCountedLoop() && head->in(LoopNode::LoopBackControl) == this) { Node* new_head = new LoopNode(head->in(LoopNode::EntryControl), this); phase->is_IterGVN()->register_new_node_with_optimizer(new_head); phase->is_IterGVN()->replace_node(head, new_head); @@ -1751,27 +1751,27 @@ void IfNode::dump_spec(outputStream *st) const { // converted to 'ne', 'le' and 'lt' forms. IfTrue/IfFalse get swapped as // needed. static IfNode* idealize_test(PhaseGVN* phase, IfNode* iff) { - assert(iff->in(0) != NULL, "If must be live"); + assert(iff->in(0) != nullptr, "If must be live"); - if (iff->outcnt() != 2) return NULL; // Malformed projections. + if (iff->outcnt() != 2) return nullptr; // Malformed projections. Node* old_if_f = iff->proj_out(false); Node* old_if_t = iff->proj_out(true); // CountedLoopEnds want the back-control test to be TRUE, regardless of // whether they are testing a 'gt' or 'lt' condition. The 'gt' condition // happens in count-down loops - if (iff->is_BaseCountedLoopEnd()) return NULL; - if (!iff->in(1)->is_Bool()) return NULL; // Happens for partially optimized IF tests + if (iff->is_BaseCountedLoopEnd()) return nullptr; + if (!iff->in(1)->is_Bool()) return nullptr; // Happens for partially optimized IF tests BoolNode *b = iff->in(1)->as_Bool(); BoolTest bt = b->_test; // Test already in good order? if( bt.is_canonical() ) - return NULL; + return nullptr; // Flip test to be canonical. Requires flipping the IfFalse/IfTrue and // cloning the IfNode. 
Node* new_b = phase->transform( new BoolNode(b->in(1), bt.negate()) ); - if( !new_b->is_Bool() ) return NULL; + if( !new_b->is_Bool() ) return nullptr; b = new_b->as_Bool(); PhaseIterGVN *igvn = phase->is_IterGVN(); @@ -1845,7 +1845,7 @@ Node* RangeCheckNode::Ideal(PhaseGVN *phase, bool can_reshape) { for (int dist = 0; dist < 999; dist++) { // Range-Check scan limit if (dom->Opcode() == Op_RangeCheck && // Not same opcode? prev_dom->in(0) == dom) { // One path of test does dominate? - if (dom == this) return NULL; // dead loop + if (dom == this) return nullptr; // dead loop // See if this is a range check Node* index2; Node* range2; @@ -1882,18 +1882,18 @@ Node* RangeCheckNode::Ideal(PhaseGVN *phase, bool can_reshape) { // ones. Since range checks "fail" by uncommon-trapping to the // interpreter, widening a check can make us speculatively enter // the interpreter. If we see range-check deopt's, do not widen! - if (!phase->C->allow_range_check_smearing()) return NULL; + if (!phase->C->allow_range_check_smearing()) return nullptr; // Didn't find prior covering check, so cannot remove anything. if (nb_checks == 0) { - return NULL; + return nullptr; } // Constant indices only need to check the upper bound. // Non-constant indices must check both low and high. int chk0 = (nb_checks - 1) % NRC; if (index1) { if (nb_checks == 1) { - return NULL; + return nullptr; } else { // If the top range check's constant is the min or max of // all constants we widen the next one to cover the whole @@ -1914,7 +1914,7 @@ Node* RangeCheckNode::Ideal(PhaseGVN *phase, bool can_reshape) { // accesses it protects to successfully read/write out of // bounds. 
if (nb_checks == 2) { - return NULL; + return nullptr; } int chk2 = (nb_checks - 3) % NRC; RangeCheck rc2 = prev_checks[chk2]; @@ -1961,8 +1961,8 @@ Node* RangeCheckNode::Ideal(PhaseGVN *phase, bool can_reshape) { } else { prev_dom = search_identical(4); - if (prev_dom == NULL) { - return NULL; + if (prev_dom == nullptr) { + return nullptr; } } diff --git a/src/hotspot/share/opto/indexSet.cpp b/src/hotspot/share/opto/indexSet.cpp index 780d5caff90..45bc5d2e91c 100644 --- a/src/hotspot/share/opto/indexSet.cpp +++ b/src/hotspot/share/opto/indexSet.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -93,7 +93,7 @@ IndexSet::BitBlock *IndexSet::alloc_block() { #endif Compile *compile = Compile::current(); BitBlock* free_list = (BitBlock*)compile->indexSet_free_block_list(); - if (free_list == NULL) { + if (free_list == nullptr) { populate_free_list(); free_list = (BitBlock*)compile->indexSet_free_block_list(); } diff --git a/src/hotspot/share/opto/indexSet.hpp b/src/hotspot/share/opto/indexSet.hpp index 7ed34116d1c..7c1fc256cb0 100644 --- a/src/hotspot/share/opto/indexSet.hpp +++ b/src/hotspot/share/opto/indexSet.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -174,7 +174,7 @@ class IndexSet : public ResourceObj { // from a new arena. It is essential that this method is called whenever // the Arena being used for BitBlock allocation is reset. 
static void reset_memory(Compile* compile, Arena *arena) { - compile->set_indexSet_free_block_list(NULL); + compile->set_indexSet_free_block_list(nullptr); compile->set_indexSet_arena(arena); // This should probably be done in a static initializer @@ -401,7 +401,7 @@ class IndexSetIterator { // If the iterator was created from a non-const set, we replace // non-canonical empty blocks with the _empty_block pointer. If - // _set is NULL, we do no replacement. + // _set is null, we do no replacement. IndexSet *_set; // Advance to the next non-empty word and return the next @@ -418,7 +418,7 @@ class IndexSetIterator { _next_word(IndexSet::words_per_block), _next_block(0), _max_blocks(set->is_empty() ? 0 : set->_current_block_limit), - _words(NULL), + _words(nullptr), _blocks(set->_blocks), _set(set) { #ifdef ASSERT @@ -435,9 +435,9 @@ class IndexSetIterator { _next_word(IndexSet::words_per_block), _next_block(0), _max_blocks(set->is_empty() ? 0 : set->_current_block_limit), - _words(NULL), + _words(nullptr), _blocks(set->_blocks), - _set(NULL) + _set(nullptr) { #ifdef ASSERT if (CollectIndexSetStatistics) { diff --git a/src/hotspot/share/opto/intrinsicnode.cpp b/src/hotspot/share/opto/intrinsicnode.cpp index 3bc49091b43..5a1b84af3d9 100644 --- a/src/hotspot/share/opto/intrinsicnode.cpp +++ b/src/hotspot/share/opto/intrinsicnode.cpp @@ -44,7 +44,7 @@ uint StrIntrinsicNode::match_edge(uint idx) const { Node* StrIntrinsicNode::Ideal(PhaseGVN* phase, bool can_reshape) { if (remove_dead_region(phase, can_reshape)) return this; // Don't bother trying to transform a dead node - if (in(0) && in(0)->is_top()) return NULL; + if (in(0) && in(0)->is_top()) return nullptr; if (can_reshape) { Node* mem = phase->transform(in(MemNode::Memory)); @@ -56,7 +56,7 @@ Node* StrIntrinsicNode::Ideal(PhaseGVN* phase, bool can_reshape) { return this; } } - return NULL; + return nullptr; } //------------------------------Value------------------------------------------ @@ -72,7 +72,7 @@ uint 
StrIntrinsicNode::size_of() const { return sizeof(*this); } // Return a node which is more "ideal" than the current node. Strip out // control copies Node* StrCompressedCopyNode::Ideal(PhaseGVN* phase, bool can_reshape) { - return remove_dead_region(phase, can_reshape) ? this : NULL; + return remove_dead_region(phase, can_reshape) ? this : nullptr; } //============================================================================= @@ -80,7 +80,7 @@ Node* StrCompressedCopyNode::Ideal(PhaseGVN* phase, bool can_reshape) { // Return a node which is more "ideal" than the current node. Strip out // control copies Node* StrInflatedCopyNode::Ideal(PhaseGVN* phase, bool can_reshape) { - return remove_dead_region(phase, can_reshape) ? this : NULL; + return remove_dead_region(phase, can_reshape) ? this : nullptr; } uint VectorizedHashCodeNode::match_edge(uint idx) const { @@ -89,7 +89,7 @@ uint VectorizedHashCodeNode::match_edge(uint idx) const { } Node* VectorizedHashCodeNode::Ideal(PhaseGVN* phase, bool can_reshape) { - return remove_dead_region(phase, can_reshape) ? this : NULL; + return remove_dead_region(phase, can_reshape) ? this : nullptr; } const Type* VectorizedHashCodeNode::Value(PhaseGVN* phase) const { @@ -109,7 +109,7 @@ uint EncodeISOArrayNode::match_edge(uint idx) const { // Return a node which is more "ideal" than the current node. Strip out // control copies Node* EncodeISOArrayNode::Ideal(PhaseGVN* phase, bool can_reshape) { - return remove_dead_region(phase, can_reshape) ? this : NULL; + return remove_dead_region(phase, can_reshape) ? 
this : nullptr; } //------------------------------Value------------------------------------------ @@ -171,7 +171,7 @@ Node* CompressBitsNode::Ideal(PhaseGVN* phase, bool can_reshape) { return new AndLNode(compr, src->in(1)); } } - return NULL; + return nullptr; } Node* compress_expand_identity(PhaseGVN* phase, Node* n) { @@ -227,7 +227,7 @@ Node* ExpandBitsNode::Ideal(PhaseGVN* phase, bool can_reshape) { return new AndLNode(src->in(1), mask); } } - return NULL; + return nullptr; } Node* ExpandBitsNode::Identity(PhaseGVN* phase) { diff --git a/src/hotspot/share/opto/lcm.cpp b/src/hotspot/share/opto/lcm.cpp index 74819dc2196..9cfcb56d452 100644 --- a/src/hotspot/share/opto/lcm.cpp +++ b/src/hotspot/share/opto/lcm.cpp @@ -41,13 +41,13 @@ // Optimization - Graph Style // Check whether val is not-null-decoded compressed oop, -// i.e. will grab into the base of the heap if it represents NULL. +// i.e. will grab into the base of the heap if it represents null. static bool accesses_heap_base_zone(Node *val) { - if (CompressedOops::base() != NULL) { // Implies UseCompressedOops. + if (CompressedOops::base() != nullptr) { // Implies UseCompressedOops. if (val && val->is_Mach()) { if (val->as_Mach()->ideal_Opcode() == Op_DecodeN) { // This assumes all Decodes with TypePtr::NotNull are matched to nodes that - // decode NULL to point to the heap base (Decode_NN). + // decode null to point to the heap base (Decode_NN). if (val->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull) { return true; } @@ -78,8 +78,8 @@ static bool needs_explicit_null_check_for_read(Node *val) { } //------------------------------implicit_null_check---------------------------- -// Detect implicit-null-check opportunities. Basically, find NULL checks -// with suitable memory ops nearby. Use the memory op to do the NULL check. +// Detect implicit-null-check opportunities. Basically, find null checks +// with suitable memory ops nearby. Use the memory op to do the null check. 
// I can generate a memory op if there is not one nearby. // The proj is the control projection for the not-null case. // The val is the pointer being checked for nullness or @@ -150,12 +150,12 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo bool is_decoden = ((intptr_t)val) & 1; val = (Node*)(((intptr_t)val) & ~1); - assert(!is_decoden || (val->in(0) == NULL) && val->is_Mach() && + assert(!is_decoden || (val->in(0) == nullptr) && val->is_Mach() && (val->as_Mach()->ideal_Opcode() == Op_DecodeN), "sanity"); // Search the successor block for a load or store who's base value is also // the tested value. There may be several. - MachNode *best = NULL; // Best found so far + MachNode *best = nullptr; // Best found so far for (DUIterator i = val->outs(); val->has_out(i); i++) { Node *m = val->out(i); if( !m->is_Mach() ) continue; @@ -224,7 +224,7 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo Node* base; Node* index; const MachOper* oper = mach->memory_inputs(base, index); - if (oper == NULL || oper == (MachOper*)-1) { + if (oper == nullptr || oper == (MachOper*)-1) { continue; // Not an memory op; skip it } if (val == base || @@ -247,7 +247,7 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo // Check that node's control edge is not-null block's head or dominates it, // otherwise we can't hoist it because there are other control dependencies. 
Node* ctrl = mach->in(0); - if (ctrl != NULL && !(ctrl == not_null_block->head() || + if (ctrl != nullptr && !(ctrl == not_null_block->head() || get_block_for_node(ctrl)->dominates(not_null_block))) { continue; } @@ -255,9 +255,9 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo // check if the offset is not too high for implicit exception { intptr_t offset = 0; - const TypePtr *adr_type = NULL; // Do not need this return value here + const TypePtr *adr_type = nullptr; // Do not need this return value here const Node* base = mach->get_base_and_disp(offset, adr_type); - if (base == NULL || base == NodeSentinel) { + if (base == nullptr || base == NodeSentinel) { // Narrow oop address doesn't have base, only index. // Give up if offset is beyond page size or if heap base is not protected. if (val->bottom_type()->isa_narrowoop() && @@ -354,17 +354,17 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo // Make sure this memory op is not already being used for a NullCheck Node *e = mb->end(); if( e->is_MachNullCheck() && e->in(1) == mach ) - continue; // Already being used as a NULL check + continue; // Already being used as a null check // Found a candidate! Pick one with least dom depth - the highest // in the dom tree should be closest to the null check. - if (best == NULL || get_block_for_node(mach)->_dom_depth < get_block_for_node(best)->_dom_depth) { + if (best == nullptr || get_block_for_node(mach)->_dom_depth < get_block_for_node(best)->_dom_depth) { best = mach; bidx = vidx; } } // No candidate! - if (best == NULL) { + if (best == nullptr) { return; } @@ -415,9 +415,9 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo map_node_to_block(best, block); // Move the control dependence if it is pinned to not-null block. - // Don't change it in other cases: NULL or dominating control. + // Don't change it in other cases: null or dominating control. 
Node* ctrl = best->in(0); - if (ctrl != NULL && get_block_for_node(ctrl) == not_null_block) { + if (ctrl != nullptr && get_block_for_node(ctrl) == not_null_block) { // Set it to control edge of null check. best->set_req(0, proj->in(0)->in(0)); } @@ -435,10 +435,10 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo // proj==Op_True --> ne test; proj==Op_False --> eq test. // One of two graph shapes got matched: - // (IfTrue (If (Bool NE (CmpP ptr NULL)))) - // (IfFalse (If (Bool EQ (CmpP ptr NULL)))) - // NULL checks are always branch-if-eq. If we see a IfTrue projection - // then we are replacing a 'ne' test with a 'eq' NULL check test. + // (IfTrue (If (Bool NE (CmpP ptr null)))) + // (IfFalse (If (Bool EQ (CmpP ptr null)))) + // null checks are always branch-if-eq. If we see a IfTrue projection + // then we are replacing a 'ne' test with a 'eq' null check test. // We need to flip the projections to keep the same semantics. if( proj->Opcode() == Op_IfTrue ) { // Swap order of projections in basic block to swap branch targets @@ -446,11 +446,11 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo Node *tmp2 = block->get_node(block->end_idx()+2); block->map_node(tmp2, block->end_idx()+1); block->map_node(tmp1, block->end_idx()+2); - Node *tmp = new Node(C->top()); // Use not NULL input + Node *tmp = new Node(C->top()); // Use not null input tmp1->replace_by(tmp); tmp2->replace_by(tmp1); tmp->replace_by(tmp2); - tmp->destruct(NULL); + tmp->destruct(nullptr); } // Remove the existing null check; use a new implicit null check instead. 
@@ -466,7 +466,7 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo // Clean-up any dead code for (uint i3 = 0; i3 < old_tst->req(); i3++) { Node* in = old_tst->in(i3); - old_tst->set_req(i3, NULL); + old_tst->set_req(i3, nullptr); if (in->outcnt() == 0) { // Remove dead input node in->disconnect_inputs(C); @@ -525,7 +525,7 @@ Node* PhaseCFG::select( uint score = 0; // Bigger is better int idx = -1; // Index in worklist int cand_cnt = 0; // Candidate count - bool block_size_threshold_ok = (recalc_pressure_nodes != NULL) && (block->number_of_nodes() > 10); + bool block_size_threshold_ok = (recalc_pressure_nodes != nullptr) && (block->number_of_nodes() > 10); for( uint i=0; ireq(); i++) { bool lrg_ends = false; Node *src_n = n->in(i); - if (src_n == NULL) continue; + if (src_n == nullptr) continue; if (!src_n->is_Mach()) continue; uint src = _regalloc->_lrg_map.find(src_n); if (src == 0) continue; @@ -755,9 +755,9 @@ void PhaseCFG::adjust_register_pressure(Node* n, Block* block, intptr_t* recalc_ // if none, this live range ends and we can adjust register pressure if (lrg_ends) { if (finalize_mode) { - _regalloc->lower_pressure(block, 0, lrg_src, NULL, _regalloc->_sched_int_pressure, _regalloc->_sched_float_pressure); + _regalloc->lower_pressure(block, 0, lrg_src, nullptr, _regalloc->_sched_int_pressure, _regalloc->_sched_float_pressure); } else { - _regalloc->lower_pressure(block, 0, lrg_src, NULL, _regalloc->_scratch_int_pressure, _regalloc->_scratch_float_pressure); + _regalloc->lower_pressure(block, 0, lrg_src, nullptr, _regalloc->_scratch_int_pressure, _regalloc->_scratch_float_pressure); } } } @@ -805,7 +805,7 @@ void PhaseCFG::set_next_call(Block* block, Node* n, VectorSet& next_call) { // carry lots of stuff live across a call. 
void PhaseCFG::needed_for_next_call(Block* block, Node* this_call, VectorSet& next_call) { // Find the next control-defining Node in this block - Node* call = NULL; + Node* call = nullptr; for (DUIterator_Fast imax, i = this_call->fast_outs(imax); i < imax; i++) { Node* m = this_call->fast_out(i); if (get_block_for_node(m) == block && // Local-block user @@ -815,7 +815,7 @@ void PhaseCFG::needed_for_next_call(Block* block, Node* this_call, VectorSet& ne break; } } - if (call == NULL) return; // No next call (e.g., block end is near) + if (call == nullptr) return; // No next call (e.g., block end is near) // Set next-call for all inputs to this call set_next_call(block, call, next_call); } @@ -886,7 +886,7 @@ uint PhaseCFG::sched_call(Block* block, uint node_cnt, Node_List& worklist, Grow block->insert_node(proj, node_cnt++); // Select the right register save policy. - const char *save_policy = NULL; + const char *save_policy = nullptr; switch (op) { case Op_CallRuntime: case Op_CallLeaf: @@ -956,7 +956,7 @@ bool PhaseCFG::schedule_local(Block* block, GrowableArray& ready_cnt, Vecto return true; } - bool block_size_threshold_ok = (recalc_pressure_nodes != NULL) && (block->number_of_nodes() > 10); + bool block_size_threshold_ok = (recalc_pressure_nodes != nullptr) && (block->number_of_nodes() > 10); // We track the uses of local definitions as input dependences so that // we know when a given instruction is available to be scheduled. @@ -1002,7 +1002,7 @@ bool PhaseCFG::schedule_local(Block* block, GrowableArray& ready_cnt, Vecto // Check the precedence edges for (uint prec = n->req(); prec < n->len(); prec++) { Node* oop_store = n->in(prec); - if (oop_store != NULL) { + if (oop_store != nullptr) { assert(get_block_for_node(oop_store)->_dom_depth <= block->_dom_depth, "oop_store must dominate card-mark"); } } @@ -1022,7 +1022,7 @@ bool PhaseCFG::schedule_local(Block* block, GrowableArray& ready_cnt, Vecto // and the edge will be lost. 
This is why this code should be // executed only when Precedent (== TypeFunc::Parms) edge is present. Node *x = n->in(TypeFunc::Parms); - if (x != NULL && get_block_for_node(x) == block && n->find_prec_edge(x) != -1) { + if (x != nullptr && get_block_for_node(x) == block && n->find_prec_edge(x) != -1) { // Old edge to node within same block will get removed, but no precedence // edge will get added because it already exists. Update ready count. int cnt = ready_cnt.at(n->_idx); @@ -1259,7 +1259,7 @@ Node* PhaseCFG::catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block * use_blk = use_blk->_idom; // Find the successor - Node *fixup = NULL; + Node *fixup = nullptr; uint j; for( j = 0; j < def_blk->_num_succs; j++ ) @@ -1284,14 +1284,14 @@ Node* PhaseCFG::catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block * for (uint k = 1; k < use_blk->num_preds(); k++) { if (phi->in(k) != inputs[k]) { // Not a match - fixup = NULL; + fixup = nullptr; break; } } } // If an existing PhiNode was not found, make a new one. - if (fixup == NULL) { + if (fixup == nullptr) { Node *new_phi = PhiNode::make(use_blk->head(), def); use_blk->insert_node(new_phi, 1); map_node_to_block(new_phi, use_blk); diff --git a/src/hotspot/share/opto/library_call.cpp b/src/hotspot/share/opto/library_call.cpp index e82ab937ba7..ecc15a35257 100644 --- a/src/hotspot/share/opto/library_call.cpp +++ b/src/hotspot/share/opto/library_call.cpp @@ -65,7 +65,7 @@ CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) { if (!m->is_loaded()) { // Do not attempt to inline unloaded methods. - return NULL; + return nullptr; } C2Compiler* compiler = (C2Compiler*)CompileBroker::compiler(CompLevel_full_optimization); @@ -77,7 +77,7 @@ CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) { // methods access VM-internal data. 
VM_ENTRY_MARK; methodHandle mh(THREAD, m->get_Method()); - is_available = compiler != NULL && compiler->is_intrinsic_supported(mh, is_virtual) && + is_available = compiler != nullptr && compiler->is_intrinsic_supported(mh, is_virtual) && !C->directive()->is_intrinsic_disabled(mh) && !vmIntrinsics::is_disabled_by_flags(mh); @@ -91,7 +91,7 @@ CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) { vmIntrinsics::does_virtual_dispatch(id), id); } else { - return NULL; + return nullptr; } } @@ -164,7 +164,7 @@ JVMState* LibraryIntrinsic::generate(JVMState* jvms) { C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed); C->print_inlining_update(this); - return NULL; + return nullptr; } Node* LibraryIntrinsic::generate_predicate(JVMState* jvms, int predicate) { @@ -198,7 +198,7 @@ Node* LibraryIntrinsic::generate_predicate(JVMState* jvms, int predicate) { (is_virtual() ? " virtual='1'" : ""), C->unique() - nodes); } - return slow_ctl; // Could be NULL if the check folds. + return slow_ctl; // Could be null if the check folds. } // The intrinsic bailed out @@ -223,7 +223,7 @@ Node* LibraryIntrinsic::generate_predicate(JVMState* jvms, int predicate) { } } C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed); - return NULL; + return nullptr; } bool LibraryCallKit::try_to_inline(int predicate) { @@ -803,20 +803,20 @@ void LibraryCallKit::set_result(RegionNode* region, PhiNode* value) { // In all cases, GraphKit::control() is updated to the fast path. // The returned value represents the control for the slow path. // The return value is never 'top'; it is either a valid control -// or NULL if it is obvious that the slow path can never be taken. -// Also, if region and the slow control are not NULL, the slow edge +// or null if it is obvious that the slow path can never be taken. +// Also, if region and the slow control are not null, the slow edge // is appended to the region. 
Node* LibraryCallKit::generate_guard(Node* test, RegionNode* region, float true_prob) { if (stopped()) { // Already short circuited. - return NULL; + return nullptr; } // Build an if node and its projections. // If test is true we take the slow path, which we assume is uncommon. if (_gvn.type(test) == TypeInt::ZERO) { // The slow branch is never taken. No need to build this guard. - return NULL; + return nullptr; } IfNode* iff = create_and_map_if(control(), test, true_prob, COUNT_UNKNOWN); @@ -824,10 +824,10 @@ Node* LibraryCallKit::generate_guard(Node* test, RegionNode* region, float true_ Node* if_slow = _gvn.transform(new IfTrueNode(iff)); if (if_slow == top()) { // The slow branch is never taken. No need to build this guard. - return NULL; + return nullptr; } - if (region != NULL) + if (region != nullptr) region->add_req(if_slow); Node* if_fast = _gvn.transform(new IfFalseNode(iff)); @@ -846,13 +846,13 @@ inline Node* LibraryCallKit::generate_fair_guard(Node* test, RegionNode* region) inline Node* LibraryCallKit::generate_negative_guard(Node* index, RegionNode* region, Node* *pos_index) { if (stopped()) - return NULL; // already stopped + return nullptr; // already stopped if (_gvn.type(index)->higher_equal(TypeInt::POS)) // [0,maxint] - return NULL; // index is already adequately typed + return nullptr; // index is already adequately typed Node* cmp_lt = _gvn.transform(new CmpINode(index, intcon(0))); Node* bol_lt = _gvn.transform(new BoolNode(cmp_lt, BoolTest::lt)); Node* is_neg = generate_guard(bol_lt, region, PROB_MIN); - if (is_neg != NULL && pos_index != NULL) { + if (is_neg != nullptr && pos_index != nullptr) { // Emulate effect of Parse::adjust_map_after_if. 
Node* ccast = new CastIINode(index, TypeInt::POS); ccast->set_req(0, control()); @@ -880,10 +880,10 @@ inline Node* LibraryCallKit::generate_limit_guard(Node* offset, Node* array_length, RegionNode* region) { if (stopped()) - return NULL; // already stopped + return nullptr; // already stopped bool zero_offset = _gvn.type(offset) == TypeInt::ZERO; if (zero_offset && subseq_length->eqv_uncast(array_length)) - return NULL; // common case of whole-array copy + return nullptr; // common case of whole-array copy Node* last = subseq_length; if (!zero_offset) // last += offset last = _gvn.transform(new AddINode(last, offset)); @@ -931,9 +931,9 @@ Node* LibraryCallKit::current_thread_helper(Node*& tls_output, ByteSize handle_o Node* thread_obj_handle = (is_immutable - ? LoadNode::make(_gvn, NULL, immutable_memory(), p, p->bottom_type()->is_ptr(), + ? LoadNode::make(_gvn, nullptr, immutable_memory(), p, p->bottom_type()->is_ptr(), TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered) - : make_load(NULL, p, p->bottom_type()->is_ptr(), T_ADDRESS, MemNode::unordered)); + : make_load(nullptr, p, p->bottom_type()->is_ptr(), T_ADDRESS, MemNode::unordered)); thread_obj_handle = _gvn.transform(thread_obj_handle); DecoratorSet decorators = IN_NATIVE; @@ -961,7 +961,7 @@ Node* LibraryCallKit::generate_virtual_thread(Node* tls_output) { // characters (depending on 'is_byte'). cnt1 and cnt2 are pointing to Int nodes // containing the lengths of str1 and str2. Node* LibraryCallKit::make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2, StrIntrinsicNode::ArgEnc ae) { - Node* result = NULL; + Node* result = nullptr; switch (opcode) { case Op_StrIndexOf: result = new StrIndexOfNode(control(), memory(TypeAryPtr::BYTES), @@ -979,7 +979,7 @@ Node* LibraryCallKit::make_string_method_node(int opcode, Node* str1_start, Node break; default: ShouldNotReachHere(); - return NULL; + return nullptr; } // All these intrinsics have checks. 
@@ -1035,8 +1035,8 @@ bool LibraryCallKit::inline_string_equals(StrIntrinsicNode::ArgEnc ae) { // Check for arg1_cnt != arg2_cnt Node* cmp = _gvn.transform(new CmpINode(arg1_cnt, arg2_cnt)); Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne)); - Node* if_ne = generate_slow_guard(bol, NULL); - if (if_ne != NULL) { + Node* if_ne = generate_slow_guard(bol, nullptr); + if (if_ne != nullptr) { phi->init_req(2, intcon(0)); region->init_req(2, if_ne); } @@ -1189,7 +1189,7 @@ bool LibraryCallKit::inline_string_indexOf(StrIntrinsicNode::ArgEnc ae) { } Node* result = make_indexOf_node(src_start, src_count, tgt_start, tgt_count, result_rgn, result_phi, ae); - if (result != NULL) { + if (result != nullptr) { result_phi->init_req(3, result); result_rgn->init_req(3, control()); } @@ -1235,14 +1235,14 @@ bool LibraryCallKit::inline_string_indexOfI(StrIntrinsicNode::ArgEnc ae) { Node* phi = new PhiNode(region, TypeInt::INT); Node* result = make_indexOf_node(src_start, src_count, tgt_start, tgt_count, region, phi, ae); - if (result != NULL) { + if (result != nullptr) { // The result is index relative to from_index if substring was found, -1 otherwise. // Generate code which will fold into cmove. 
Node* cmp = _gvn.transform(new CmpINode(result, intcon(0))); Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::lt)); - Node* if_lt = generate_slow_guard(bol, NULL); - if (if_lt != NULL) { + Node* if_lt = generate_slow_guard(bol, nullptr); + if (if_lt != nullptr) { // result == -1 phi->init_req(3, result); region->init_req(3, if_lt); @@ -1268,8 +1268,8 @@ Node* LibraryCallKit::make_indexOf_node(Node* src_start, Node* src_count, Node* // Check for substr count > string count Node* cmp = _gvn.transform(new CmpINode(tgt_count, src_count)); Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::gt)); - Node* if_gt = generate_slow_guard(bol, NULL); - if (if_gt != NULL) { + Node* if_gt = generate_slow_guard(bol, nullptr); + if (if_gt != nullptr) { phi->init_req(1, intcon(-1)); region->init_req(1, if_gt); } @@ -1277,8 +1277,8 @@ Node* LibraryCallKit::make_indexOf_node(Node* src_start, Node* src_count, Node* // Check for substr count == 0 cmp = _gvn.transform(new CmpINode(tgt_count, intcon(0))); bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq)); - Node* if_zero = generate_slow_guard(bol, NULL); - if (if_zero != NULL) { + Node* if_zero = generate_slow_guard(bol, nullptr); + if (if_zero != nullptr) { phi->init_req(2, intcon(0)); region->init_req(2, if_zero); } @@ -1286,7 +1286,7 @@ Node* LibraryCallKit::make_indexOf_node(Node* src_start, Node* src_count, Node* if (!stopped()) { return make_string_method_node(Op_StrIndexOf, src_start, src_count, tgt_start, tgt_count, ae); } - return NULL; + return nullptr; } //-----------------------------inline_string_indexOfChar----------------------- @@ -1334,8 +1334,8 @@ bool LibraryCallKit::inline_string_indexOfChar(StrIntrinsicNode::ArgEnc ae) { Node* cmp = _gvn.transform(new CmpINode(result, intcon(0))); Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::lt)); - Node* if_lt = generate_slow_guard(bol, NULL); - if (if_lt != NULL) { + Node* if_lt = generate_slow_guard(bol, nullptr); + if (if_lt != nullptr) { // result == -1 
phi->init_req(2, result); region->init_req(2, if_lt); @@ -1410,14 +1410,14 @@ bool LibraryCallKit::inline_string_copy(bool compress) { Node* dst_start = array_element_address(dst, dst_offset, dst_elem); // 'src_start' points to src array + scaled offset // 'dst_start' points to dst array + scaled offset - Node* count = NULL; + Node* count = nullptr; if (compress) { count = compress_string(src_start, TypeAryPtr::get_array_body_type(src_elem), dst_start, length); } else { inflate_string(src_start, dst_start, TypeAryPtr::get_array_body_type(dst_elem), length); } - if (alloc != NULL) { + if (alloc != nullptr) { if (alloc->maybe_set_complete(&_gvn)) { // "You break it, you buy it." InitializeNode* init = alloc->initialization(); @@ -1460,7 +1460,7 @@ bool LibraryCallKit::inline_string_toBytesU() { Node* offset = argument(1); Node* length = argument(2); - Node* newcopy = NULL; + Node* newcopy = nullptr; // Set the original stack and the reexecute bit for the interpreter to reexecute // the bytecode that invokes StringUTF16.toBytes() if deoptimization happens. @@ -1494,7 +1494,7 @@ bool LibraryCallKit::inline_string_toBytesU() { Node* klass_node = makecon(TypeKlassPtr::make(ciTypeArrayKlass::make(T_BYTE))); newcopy = new_array(klass_node, size, 0); // no arguments to push AllocateArrayNode* alloc = tightly_coupled_allocation(newcopy); - guarantee(alloc != NULL, "created above"); + guarantee(alloc != nullptr, "created above"); // Calculate starting addresses. Node* src_start = array_element_address(value, offset, T_CHAR); @@ -1594,7 +1594,7 @@ bool LibraryCallKit::inline_string_getCharsU() { copyfunc_addr, copyfunc_name, TypeRawPtr::BOTTOM, src_start, dst_start, ConvI2X(length) XTOP); // Do not let reads from the cloned object float above the arraycopy. - if (alloc != NULL) { + if (alloc != nullptr) { if (alloc->maybe_set_complete(&_gvn)) { // "You break it, you buy it." 
InitializeNode* init = alloc->initialization(); @@ -1627,7 +1627,7 @@ bool LibraryCallKit::inline_string_getCharsU() { bool LibraryCallKit::inline_string_char_access(bool is_store) { Node* value = argument(0); Node* index = argument(1); - Node* ch = is_store ? argument(2) : NULL; + Node* ch = is_store ? argument(2) : nullptr; // This intrinsic accesses byte[] array as char[] array. Computing the offsets // correctly requires matched array shapes. @@ -1671,7 +1671,7 @@ Node* LibraryCallKit::round_double_node(Node* n) { if (Matcher::strict_fp_requires_explicit_rounding) { #ifdef IA32 if (UseSSE < 2) { - n = _gvn.transform(new RoundDoubleNode(NULL, n)); + n = _gvn.transform(new RoundDoubleNode(nullptr, n)); } #else Unimplemented(); @@ -1688,7 +1688,7 @@ Node* LibraryCallKit::round_double_node(Node* n) { // public static double Math.round(double) bool LibraryCallKit::inline_double_math(vmIntrinsics::ID id) { Node* arg = round_double_node(argument(0)); - Node* n = NULL; + Node* n = nullptr; switch (id) { case vmIntrinsics::_dabs: n = new AbsDNode( arg); break; case vmIntrinsics::_dsqrt: @@ -1712,7 +1712,7 @@ bool LibraryCallKit::inline_double_math(vmIntrinsics::ID id) { // public static long Math.abs(long) bool LibraryCallKit::inline_math(vmIntrinsics::ID id) { Node* arg = argument(0); - Node* n = NULL; + Node* n = nullptr; switch (id) { case vmIntrinsics::_fabs: n = new AbsFNode( arg); break; case vmIntrinsics::_iabs: n = new AbsINode( arg); break; @@ -1733,12 +1733,12 @@ bool LibraryCallKit::runtime_math(const TypeFunc* call_type, address funcAddr, c // Inputs Node* a = round_double_node(argument(0)); - Node* b = (call_type == OptoRuntime::Math_DD_D_Type()) ? round_double_node(argument(2)) : NULL; + Node* b = (call_type == OptoRuntime::Math_DD_D_Type()) ? 
round_double_node(argument(2)) : nullptr; - const TypePtr* no_memory_effects = NULL; + const TypePtr* no_memory_effects = nullptr; Node* trig = make_runtime_call(RC_LEAF, call_type, funcAddr, funcName, no_memory_effects, - a, top(), b, b ? top() : NULL); + a, top(), b, b ? top() : nullptr); Node* value = _gvn.transform(new ProjNode(trig, TypeFunc::Parms+0)); #ifdef ASSERT Node* value_top = _gvn.transform(new ProjNode(trig, TypeFunc::Parms+1)); @@ -1753,7 +1753,7 @@ bool LibraryCallKit::runtime_math(const TypeFunc* call_type, address funcAddr, c bool LibraryCallKit::inline_math_pow() { Node* exp = round_double_node(argument(2)); const TypeD* d = _gvn.type(exp)->isa_double_constant(); - if (d != NULL) { + if (d != nullptr) { if (d->getd() == 2.0) { // Special case: pow(x, 2.0) => x * x Node* base = round_double_node(argument(0)); @@ -1773,16 +1773,16 @@ bool LibraryCallKit::inline_math_pow() { // -0.0/+0.0 are both excluded since floating-point comparison doesn't distinguish -0.0 from +0.0. Node* test = _gvn.transform(new BoolNode(cmp, BoolTest::le)); - Node* if_pow = generate_slow_guard(test, NULL); + Node* if_pow = generate_slow_guard(test, nullptr); Node* value_sqrt = _gvn.transform(new SqrtDNode(C, control(), base)); phi->init_req(1, value_sqrt); region->init_req(1, control()); - if (if_pow != NULL) { + if (if_pow != nullptr) { set_control(if_pow); - address target = StubRoutines::dpow() != NULL ? StubRoutines::dpow() : + address target = StubRoutines::dpow() != nullptr ? StubRoutines::dpow() : CAST_FROM_FN_PTR(address, SharedRuntime::dpow); - const TypePtr* no_memory_effects = NULL; + const TypePtr* no_memory_effects = nullptr; Node* trig = make_runtime_call(RC_LEAF, OptoRuntime::Math_DD_D_Type(), target, "POW", no_memory_effects, base, top(), exp, top()); Node* value_pow = _gvn.transform(new ProjNode(trig, TypeFunc::Parms+0)); @@ -1803,7 +1803,7 @@ bool LibraryCallKit::inline_math_pow() { } } - return StubRoutines::dpow() != NULL ? 
+ return StubRoutines::dpow() != nullptr ? runtime_math(OptoRuntime::Math_DD_D_Type(), StubRoutines::dpow(), "dpow") : runtime_math(OptoRuntime::Math_DD_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dpow), "POW"); } @@ -1812,27 +1812,27 @@ bool LibraryCallKit::inline_math_pow() { bool LibraryCallKit::inline_math_native(vmIntrinsics::ID id) { switch (id) { case vmIntrinsics::_dsin: - return StubRoutines::dsin() != NULL ? + return StubRoutines::dsin() != nullptr ? runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dsin(), "dsin") : runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dsin), "SIN"); case vmIntrinsics::_dcos: - return StubRoutines::dcos() != NULL ? + return StubRoutines::dcos() != nullptr ? runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dcos(), "dcos") : runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dcos), "COS"); case vmIntrinsics::_dtan: - return StubRoutines::dtan() != NULL ? + return StubRoutines::dtan() != nullptr ? runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dtan(), "dtan") : runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dtan), "TAN"); case vmIntrinsics::_dexp: - return StubRoutines::dexp() != NULL ? + return StubRoutines::dexp() != nullptr ? runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dexp(), "dexp") : runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dexp), "EXP"); case vmIntrinsics::_dlog: - return StubRoutines::dlog() != NULL ? + return StubRoutines::dlog() != nullptr ? runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dlog(), "dlog") : runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dlog), "LOG"); case vmIntrinsics::_dlog10: - return StubRoutines::dlog10() != NULL ? + return StubRoutines::dlog10() != nullptr ? 
runtime_math(OptoRuntime::Math_D_D_Type(), StubRoutines::dlog10(), "dlog10") : runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), "LOG10"); @@ -1875,7 +1875,7 @@ bool LibraryCallKit::inline_notify(vmIntrinsics::ID id) { } else { func = OptoRuntime::monitor_notifyAll_Java(); } - Node* call = make_runtime_call(RC_NO_LEAF, ftype, func, NULL, TypeRawPtr::BOTTOM, argument(0)); + Node* call = make_runtime_call(RC_NO_LEAF, ftype, func, nullptr, TypeRawPtr::BOTTOM, argument(0)); make_slow_call_ex(call, env()->Throwable_klass(), false); return true; } @@ -1964,7 +1964,7 @@ bool LibraryCallKit::inline_math_unsignedMultiplyHigh() { Node* LibraryCallKit::generate_min_max(vmIntrinsics::ID id, Node* x0, Node* y0) { - Node* result_val = NULL; + Node* result_val = nullptr; switch (id) { case vmIntrinsics::_min: case vmIntrinsics::_min_strict: @@ -1984,12 +1984,12 @@ LibraryCallKit::generate_min_max(vmIntrinsics::ID id, Node* x0, Node* y0) { inline int LibraryCallKit::classify_unsafe_addr(Node* &base, Node* &offset, BasicType type) { const TypePtr* base_type = TypePtr::NULL_PTR; - if (base != NULL) base_type = _gvn.type(base)->isa_ptr(); - if (base_type == NULL) { + if (base != nullptr) base_type = _gvn.type(base)->isa_ptr(); + if (base_type == nullptr) { // Unknown type. return Type::AnyPtr; } else if (base_type == TypePtr::NULL_PTR) { - // Since this is a NULL+long form, we have to switch to a rawptr. + // Since this is a null+long form, we have to switch to a rawptr. base = _gvn.transform(new CastX2PNode(offset)); offset = MakeConX(0); return Type::RawPtr; @@ -2002,7 +2002,7 @@ LibraryCallKit::classify_unsafe_addr(Node* &base, Node* &offset, BasicType type) } // Offset is small => always a heap address. const TypeX* offset_type = _gvn.type(offset)->isa_intptr_t(); - if (offset_type != NULL && + if (offset_type != nullptr && base_type->offset() == 0 && // (should always be?) 
offset_type->_lo >= 0 && !MacroAssembler::needs_explicit_null_check(offset_type->_hi)) { @@ -2012,7 +2012,7 @@ LibraryCallKit::classify_unsafe_addr(Node* &base, Node* &offset, BasicType type) // heap. return Type::OopPtr; } - // Otherwise, it might either be oop+off or NULL+addr. + // Otherwise, it might either be oop+off or null+addr. return Type::AnyPtr; } else { // No information: @@ -2077,7 +2077,7 @@ Node* LibraryCallKit::make_unsafe_address(Node*& base, Node* offset, BasicType t // inline long Long.reverseBytes(long) bool LibraryCallKit::inline_number_methods(vmIntrinsics::ID id) { Node* arg = argument(0); - Node* n = NULL; + Node* n = nullptr; switch (id) { case vmIntrinsics::_numberOfLeadingZeros_i: n = new CountLeadingZerosINode( arg); break; case vmIntrinsics::_numberOfLeadingZeros_l: n = new CountLeadingZerosLNode( arg); break; @@ -2103,7 +2103,7 @@ bool LibraryCallKit::inline_number_methods(vmIntrinsics::ID id) { // inline long Long.compress(long, long) // inline long Long.expand(long, long) bool LibraryCallKit::inline_bitshuffle_methods(vmIntrinsics::ID id) { - Node* n = NULL; + Node* n = nullptr; switch (id) { case vmIntrinsics::_compress_i: n = new CompressBitsNode(argument(0), argument(1), TypeInt::INT); break; case vmIntrinsics::_expand_i: n = new ExpandBitsNode(argument(0), argument(1), TypeInt::INT); break; @@ -2121,7 +2121,7 @@ bool LibraryCallKit::inline_bitshuffle_methods(vmIntrinsics::ID id) { bool LibraryCallKit::inline_compare_unsigned(vmIntrinsics::ID id) { Node* arg1 = argument(0); Node* arg2 = (id == vmIntrinsics::_compareUnsigned_l) ? 
argument(2) : argument(1); - Node* n = NULL; + Node* n = nullptr; switch (id) { case vmIntrinsics::_compareUnsigned_i: n = new CmpU3Node(arg1, arg2); break; case vmIntrinsics::_compareUnsigned_l: n = new CmpUL3Node(arg1, arg2); break; @@ -2137,7 +2137,7 @@ bool LibraryCallKit::inline_compare_unsigned(vmIntrinsics::ID id) { // inline long Long.divideUnsigned(long, long) // inline long Long.remainderUnsigned(long, long) bool LibraryCallKit::inline_divmod_methods(vmIntrinsics::ID id) { - Node* n = NULL; + Node* n = nullptr; switch (id) { case vmIntrinsics::_divideUnsigned_i: { zero_check_int(argument(1)); @@ -2185,21 +2185,21 @@ bool LibraryCallKit::inline_divmod_methods(vmIntrinsics::ID id) { const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type) { // Attempt to infer a sharper value type from the offset and base type. - ciKlass* sharpened_klass = NULL; + ciKlass* sharpened_klass = nullptr; // See if it is an instance field, with an object type. - if (alias_type->field() != NULL) { + if (alias_type->field() != nullptr) { if (alias_type->field()->type()->is_klass()) { sharpened_klass = alias_type->field()->type()->as_klass(); } } - const TypeOopPtr* result = NULL; + const TypeOopPtr* result = nullptr; // See if it is a narrow oop array. if (adr_type->isa_aryptr()) { if (adr_type->offset() >= objArrayOopDesc::base_offset_in_bytes()) { const TypeOopPtr* elem_type = adr_type->is_aryptr()->elem()->make_oopptr(); - if (elem_type != NULL && elem_type->is_loaded()) { + if (elem_type != nullptr && elem_type->is_loaded()) { // Sharpen the value type. result = elem_type; } @@ -2208,11 +2208,11 @@ const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_ // The sharpened class might be unloaded if there is no class loader // contraint in place. 
- if (result == NULL && sharpened_klass != NULL && sharpened_klass->is_loaded()) { + if (result == nullptr && sharpened_klass != nullptr && sharpened_klass->is_loaded()) { // Sharpen the value type. result = TypeOopPtr::make_from_klass(sharpened_klass); } - if (result != NULL) { + if (result != nullptr) { #ifndef PRODUCT if (C->print_intrinsics() || C->print_inlining()) { tty->print(" from base type: "); adr_type->dump(); tty->cr(); @@ -2319,14 +2319,14 @@ bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, c heap_base_oop = base; // on-heap or mixed access } - // Can base be NULL? Otherwise, always on-heap access. + // Can base be null? Otherwise, always on-heap access. bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(base)); if (!can_access_non_heap) { decorators |= IN_HEAP; } - Node* val = is_store ? argument(4) : NULL; + Node* val = is_store ? argument(4) : nullptr; const TypePtr* adr_type = _gvn.type(adr)->isa_ptr(); if (adr_type == TypePtr::NULL_PTR) { @@ -2385,7 +2385,7 @@ bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, c if (!is_store && type == T_OBJECT) { const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type); - if (tjp != NULL) { + if (tjp != nullptr) { value_type = tjp; } } @@ -2400,21 +2400,21 @@ bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, c // from intended ones in this API. 
if (!is_store) { - Node* p = NULL; + Node* p = nullptr; // Try to constant fold a load from a constant field ciField* field = alias_type->field(); - if (heap_base_oop != top() && field != NULL && field->is_constant() && !mismatched) { + if (heap_base_oop != top() && field != nullptr && field->is_constant() && !mismatched) { // final or stable field p = make_constant_from_field(field, heap_base_oop); } - if (p == NULL) { // Could not constant fold the load + if (p == nullptr) { // Could not constant fold the load p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators); // Normalize the value returned by getBoolean in the following cases if (type == T_BOOLEAN && (mismatched || - heap_base_oop == top() || // - heap_base_oop is NULL or - (can_access_non_heap && field == NULL)) // - heap_base_oop is potentially NULL + heap_base_oop == top() || // - heap_base_oop is null or + (can_access_non_heap && field == nullptr)) // - heap_base_oop is potentially null // and the unsafe access is made to large offset // (i.e., larger than the maximum offset necessary for any // field access) @@ -2433,7 +2433,7 @@ bool LibraryCallKit::inline_unsafe_access(bool is_store, const BasicType type, c } } if (type == T_ADDRESS) { - p = gvn().transform(new CastP2XNode(NULL, p)); + p = gvn().transform(new CastP2XNode(nullptr, p)); p = ConvX2UL(p); } // The load node has the control of the preceding MemBarCPUOrder. All @@ -2568,11 +2568,11 @@ bool LibraryCallKit::inline_unsafe_load_store(const BasicType type, const LoadSt C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe". 
// Get arguments: - Node* receiver = NULL; - Node* base = NULL; - Node* offset = NULL; - Node* oldval = NULL; - Node* newval = NULL; + Node* receiver = nullptr; + Node* base = nullptr; + Node* offset = nullptr; + Node* oldval = nullptr; + Node* newval = nullptr; switch(kind) { case LS_cmp_swap: case LS_cmp_swap_weak: @@ -2590,7 +2590,7 @@ bool LibraryCallKit::inline_unsafe_load_store(const BasicType type, const LoadSt receiver = argument(0); // type: oop base = argument(1); // type: oop offset = argument(2); // type: long - oldval = NULL; + oldval = nullptr; newval = argument(4); // type: oop, int, or long break; } @@ -2633,7 +2633,7 @@ bool LibraryCallKit::inline_unsafe_load_store(const BasicType type, const LoadSt case LS_cmp_exchange: { if (type == T_OBJECT) { const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type); - if (tjp != NULL) { + if (tjp != nullptr) { value_type = tjp; } } @@ -2658,19 +2658,19 @@ bool LibraryCallKit::inline_unsafe_load_store(const BasicType type, const LoadSt if (is_reference_type(type)) { decorators |= IN_HEAP | ON_UNKNOWN_OOP_REF; - // Transformation of a value which could be NULL pointer (CastPP #NULL) + // Transformation of a value which could be null pointer (CastPP #null) // could be delayed during Parse (for example, in adjust_map_after_if()). // Execute transformation here to avoid barrier generation in such case. 
if (_gvn.type(newval) == TypePtr::NULL_PTR) newval = _gvn.makecon(TypePtr::NULL_PTR); - if (oldval != NULL && _gvn.type(oldval) == TypePtr::NULL_PTR) { + if (oldval != nullptr && _gvn.type(oldval) == TypePtr::NULL_PTR) { // Refine the value to a null constant, when it is known to be null oldval = _gvn.makecon(TypePtr::NULL_PTR); } } - Node* result = NULL; + Node* result = nullptr; switch (kind) { case LS_cmp_exchange: { result = access_atomic_cmpxchg_val_at(base, adr, adr_type, alias_idx, @@ -2736,7 +2736,7 @@ bool LibraryCallKit::klass_needs_init_guard(Node* kls) { return true; } const TypeInstKlassPtr* klsptr = kls->bottom_type()->isa_instklassptr(); - if (klsptr == NULL) { + if (klsptr == nullptr) { return true; } ciInstanceKlass* ik = klsptr->instance_klass(); @@ -2802,11 +2802,11 @@ bool LibraryCallKit::inline_unsafe_allocate() { Node* cls = null_check(argument(1)); if (stopped()) return true; - Node* kls = load_klass_from_mirror(cls, false, NULL, 0); + Node* kls = load_klass_from_mirror(cls, false, nullptr, 0); kls = null_check(kls); if (stopped()) return true; // argument was like int.class - Node* test = NULL; + Node* test = nullptr; if (LibraryCallKit::klass_needs_init_guard(kls)) { // Note: The argument might still be an illegal value like // Serializable.class or Object[].class. The runtime will handle it. @@ -2814,7 +2814,7 @@ bool LibraryCallKit::inline_unsafe_allocate() { Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset())); // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler // can generate code to load it as unsigned byte. - Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered); + Node* inst = make_load(nullptr, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered); Node* bits = intcon(InstanceKlass::fully_initialized); test = _gvn.transform(new SubINode(inst, bits)); // The 'test' is non-zero if we need to take a slow path. 
@@ -2830,7 +2830,7 @@ bool LibraryCallKit::inline_unsafe_allocate() { // these have the same type and signature bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) { const TypeFunc* tf = OptoRuntime::void_long_Type(); - const TypePtr* no_memory_effects = NULL; + const TypePtr* no_memory_effects = nullptr; Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects); Node* value = _gvn.transform(new ProjNode(time, TypeFunc::Parms+0)); #ifdef ASSERT @@ -2866,7 +2866,7 @@ bool LibraryCallKit::inline_native_classID() { IdealKit ideal(this); #define __ ideal. IdealVariable result(ideal); __ declarations_done(); - Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), + Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), basic_plus_adr(cls, java_lang_Class::klass_offset()), TypeRawPtr::BOTTOM, TypeInstKlassPtr::OBJECT_OR_NULL)); @@ -2896,7 +2896,7 @@ bool LibraryCallKit::inline_native_classID() { ideal.set(result, _gvn.transform(new URShiftLNode(kls_trace_id_raw, ideal.ConI(TRACE_ID_SHIFT)))); } __ else_(); { - Node* array_kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), + Node* array_kls = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), basic_plus_adr(cls, java_lang_Class::array_klass_offset()), TypeRawPtr::BOTTOM, TypeInstKlassPtr::OBJECT_OR_NULL)); __ if_then(array_kls, BoolTest::ne, null()); { @@ -2927,8 +2927,8 @@ bool LibraryCallKit::inline_native_classID() { * * JfrThreadLocal* const tl = Thread::jfr_thread_local() * jobject h_event_writer = tl->java_event_writer(); - * if (h_event_writer == NULL) { - * return NULL; + * if (h_event_writer == nullptr) { + * return nullptr; * } * oop threadObj = Thread::threadObj(); * oop vthread = java_lang_Thread::vthread(threadObj); @@ -3204,7 +3204,7 @@ bool LibraryCallKit::inline_native_getEventWriter() { // Result value. 
result_value->init_req(_true_path, _gvn.transform(event_writer)); // return event writer oop - result_value->init_req(_false_path, null()); // return NULL + result_value->init_req(_false_path, null()); // return null // Set output state. set_control(_gvn.transform(result_rgn)); @@ -3346,14 +3346,14 @@ void LibraryCallKit::extend_setCurrentThread(Node* jt, Node* thread) { //------------------------inline_native_currentCarrierThread------------------ bool LibraryCallKit::inline_native_currentCarrierThread() { - Node* junk = NULL; + Node* junk = nullptr; set_result(generate_current_thread(junk)); return true; } //------------------------inline_native_currentThread------------------ bool LibraryCallKit::inline_native_currentThread() { - Node* junk = NULL; + Node* junk = nullptr; set_result(generate_virtual_thread(junk)); return true; } @@ -3366,10 +3366,10 @@ bool LibraryCallKit::inline_native_setCurrentThread() { Node* thread = _gvn.transform(new ThreadLocalNode()); Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::vthread_offset())); Node* thread_obj_handle - = make_load(NULL, p, p->bottom_type()->is_ptr(), T_OBJECT, MemNode::unordered); + = make_load(nullptr, p, p->bottom_type()->is_ptr(), T_OBJECT, MemNode::unordered); thread_obj_handle = _gvn.transform(thread_obj_handle); const TypePtr *adr_type = _gvn.type(thread_obj_handle)->isa_ptr(); - access_store_at(NULL, thread_obj_handle, adr_type, arr, _gvn.type(arr), T_OBJECT, IN_NATIVE | MO_UNORDERED); + access_store_at(nullptr, thread_obj_handle, adr_type, arr, _gvn.type(arr), T_OBJECT, IN_NATIVE | MO_UNORDERED); JFR_ONLY(extend_setCurrentThread(thread, arr);) return true; } @@ -3385,9 +3385,9 @@ Node* LibraryCallKit::scopedValueCache_helper() { // We cannot use immutable_memory() because we might flip onto a // different carrier thread, at which point we'll need to use that // carrier thread's cache. 
- // return _gvn.transform(LoadNode::make(_gvn, NULL, immutable_memory(), p, p->bottom_type()->is_ptr(), + // return _gvn.transform(LoadNode::make(_gvn, nullptr, immutable_memory(), p, p->bottom_type()->is_ptr(), // TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered)); - return make_load(NULL, p, p->bottom_type()->is_ptr(), T_ADDRESS, MemNode::unordered); + return make_load(nullptr, p, p->bottom_type()->is_ptr(), T_ADDRESS, MemNode::unordered); } //------------------------inline_native_scopedValueCache------------------ @@ -3422,7 +3422,7 @@ bool LibraryCallKit::inline_native_setScopedValueCache() { // Given a klass oop, load its java mirror (a java.lang.Class oop). Node* LibraryCallKit::load_mirror_from_klass(Node* klass) { Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset())); - Node* load = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered); + Node* load = make_load(nullptr, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered); // mirror = ((OopHandle)mirror)->resolve(); return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE); } @@ -3433,19 +3433,19 @@ Node* LibraryCallKit::load_mirror_from_klass(Node* klass) { // and branch to the given path on the region. // If never_see_null, take an uncommon trap on null, so we can optimistically // compile for the non-null case. -// If the region is NULL, force never_see_null = true. +// If the region is null, force never_see_null = true. 
Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror, bool never_see_null, RegionNode* region, int null_path, int offset) { - if (region == NULL) never_see_null = true; + if (region == nullptr) never_see_null = true; Node* p = basic_plus_adr(mirror, offset); const TypeKlassPtr* kls_type = TypeInstKlassPtr::OBJECT_OR_NULL; - Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type)); + Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type)); Node* null_ctl = top(); kls = null_check_oop(kls, &null_ctl, never_see_null); - if (region != NULL) { + if (region != nullptr) { // Set region->in(null_path) if the mirror is a primitive (e.g, int.class). region->init_req(null_path, null_ctl); } else { @@ -3461,7 +3461,7 @@ Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, // Branch around if the given klass has the given modifier bit set. // Like generate_guard, adds a new path onto the region. Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset())); - Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT, MemNode::unordered); + Node* mods = make_load(nullptr, modp, TypeInt::INT, T_INT, MemNode::unordered); Node* mask = intcon(modifier_mask); Node* bits = intcon(modifier_bits); Node* mbit = _gvn.transform(new AndINode(mods, mask)); @@ -3527,7 +3527,7 @@ bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) { } const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr(); - if (mirror_con == NULL) return false; // cannot happen? + if (mirror_con == nullptr) return false; // cannot happen? 
#ifndef PRODUCT if (C->print_intrinsics() || C->print_inlining()) { @@ -3580,12 +3580,12 @@ bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) { case vmIntrinsics::_getModifiers: p = basic_plus_adr(kls, in_bytes(Klass::modifier_flags_offset())); - query_value = make_load(NULL, p, TypeInt::INT, T_INT, MemNode::unordered); + query_value = make_load(nullptr, p, TypeInt::INT, T_INT, MemNode::unordered); break; case vmIntrinsics::_isInterface: // (To verify this code sequence, check the asserts in JVM_IsInterface.) - if (generate_interface_guard(kls, region) != NULL) + if (generate_interface_guard(kls, region) != nullptr) // A guard was added. If the guard is taken, it was an interface. phi->add_req(intcon(1)); // If we fall through, it's a plain class. @@ -3594,7 +3594,7 @@ bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) { case vmIntrinsics::_isArray: // (To verify this code sequence, check the asserts in JVM_IsArrayClass.) - if (generate_array_guard(kls, region) != NULL) + if (generate_array_guard(kls, region) != nullptr) // A guard was added. If the guard is taken, it was an array. phi->add_req(intcon(1)); // If we fall through, it's a plain class. @@ -3607,7 +3607,7 @@ bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) { case vmIntrinsics::_isHidden: // (To verify this code sequence, check the asserts in JVM_IsHiddenClass.) - if (generate_hidden_class_guard(kls, region) != NULL) + if (generate_hidden_class_guard(kls, region) != nullptr) // A guard was added. If the guard is taken, it was an hidden class. phi->add_req(intcon(1)); // If we fall through, it's a plain class. @@ -3622,15 +3622,15 @@ bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) { // Arrays store an intermediate super as _super, but must report Object. // Other types can report the actual _super. // (To verify this code sequence, check the asserts in JVM_IsInterface.) 
- if (generate_interface_guard(kls, region) != NULL) + if (generate_interface_guard(kls, region) != nullptr) // A guard was added. If the guard is taken, it was an interface. phi->add_req(null()); - if (generate_array_guard(kls, region) != NULL) + if (generate_array_guard(kls, region) != nullptr) // A guard was added. If the guard is taken, it was an array. phi->add_req(makecon(TypeInstPtr::make(env()->Object_klass()->java_mirror()))); // If we fall through, it's a plain class. Get its _super. p = basic_plus_adr(kls, in_bytes(Klass::super_offset())); - kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeInstKlassPtr::OBJECT_OR_NULL)); + kls = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeInstKlassPtr::OBJECT_OR_NULL)); null_ctl = top(); kls = null_check_oop(kls, &null_ctl); if (null_ctl != top()) { @@ -3645,7 +3645,7 @@ bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) { case vmIntrinsics::_getClassAccessFlags: p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset())); - query_value = make_load(NULL, p, TypeInt::INT, T_INT, MemNode::unordered); + query_value = make_load(nullptr, p, TypeInt::INT, T_INT, MemNode::unordered); break; default: @@ -3667,10 +3667,10 @@ bool LibraryCallKit::inline_Class_cast() { Node* mirror = argument(0); // Class Node* obj = argument(1); const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr(); - if (mirror_con == NULL) { + if (mirror_con == nullptr) { return false; // dead path (mirror->is_top()). } - if (obj == NULL || obj->is_top()) { + if (obj == nullptr || obj->is_top()) { return false; // dead path } const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr(); @@ -3678,8 +3678,8 @@ bool LibraryCallKit::inline_Class_cast() { // First, see if Class.cast() can be folded statically. // java_mirror_type() returns non-null for compile-time Class constants. 
ciType* tm = mirror_con->java_mirror_type(); - if (tm != NULL && tm->is_klass() && - tp != NULL) { + if (tm != nullptr && tm->is_klass() && + tp != nullptr) { if (!tp->is_loaded()) { // Don't use intrinsic when class is not loaded. return false; @@ -3713,7 +3713,7 @@ bool LibraryCallKit::inline_Class_cast() { return true; } - // Not-subtype or the mirror's klass ptr is NULL (in case it is a primitive). + // Not-subtype or the mirror's klass ptr is null (in case it is a primitive). enum { _bad_type_path = 1, _prim_path = 2, PATH_LIMIT }; RegionNode* region = new RegionNode(PATH_LIMIT); record_for_igvn(region); @@ -3784,7 +3784,7 @@ bool LibraryCallKit::inline_native_subtype_check() { args[which_arg] = arg; Node* p = basic_plus_adr(arg, class_klass_offset); - Node* kls = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, adr_type, kls_type); + Node* kls = LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, adr_type, kls_type); klasses[which_arg] = _gvn.transform(kls); } @@ -3834,10 +3834,10 @@ bool LibraryCallKit::inline_native_subtype_check() { assert(region->req() == PATH_LIMIT, "sane region"); for (uint i = 1; i < region->req(); i++) { Node* ctl = region->in(i); - if (ctl == NULL || ctl == top()) { + if (ctl == nullptr || ctl == top()) { region->set_req(i, top()); phi ->set_req(i, top()); - } else if (phi->in(i) == NULL) { + } else if (phi->in(i) == nullptr) { phi->set_req(i, intcon(0)); // all other paths produce 'false' } } @@ -3852,7 +3852,7 @@ Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region, bool obj_array, bool not_array) { if (stopped()) { - return NULL; + return nullptr; } // If obj_array/non_array==false/false: @@ -3867,15 +3867,15 @@ Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region, // Like generate_guard, adds a new path onto the region. 
jint layout_con = 0; Node* layout_val = get_layout_helper(kls, layout_con); - if (layout_val == NULL) { + if (layout_val == nullptr) { bool query = (obj_array ? Klass::layout_helper_is_objArray(layout_con) : Klass::layout_helper_is_array(layout_con)); if (query == not_array) { - return NULL; // never a branch + return nullptr; // never a branch } else { // always a branch Node* always_branch = control(); - if (region != NULL) + if (region != nullptr) region->add_req(always_branch); set_control(top()); return always_branch; @@ -3933,7 +3933,7 @@ bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) { // ensuing call will throw an exception, or else it // will cache the array klass for next time. PreserveJVMState pjvms(this); - CallJavaNode* slow_call = NULL; + CallJavaNode* slow_call = nullptr; if (uninitialized) { // Generate optimized virtual call (holder class 'Unsafe' is final) slow_call = generate_method_call(vmIntrinsics::_allocateUninitializedArray, false, false); @@ -3985,9 +3985,9 @@ bool LibraryCallKit::inline_native_getLength() { if (stopped()) return true; // Deoptimize if it is a non-array. - Node* non_array = generate_non_array_guard(load_object_klass(array), NULL); + Node* non_array = generate_non_array_guard(load_object_klass(array), nullptr); - if (non_array != NULL) { + if (non_array != nullptr) { PreserveJVMState pjvms(this); set_control(non_array); uncommon_trap(Deoptimization::Reason_intrinsic, @@ -4018,7 +4018,7 @@ bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) { Node* end = is_copyOfRange? argument(2): argument(1); Node* array_type_mirror = is_copyOfRange? argument(3): argument(2); - Node* newcopy = NULL; + Node* newcopy = nullptr; // Set the original stack and the reexecute bit for the interpreter to reexecute // the bytecode that invokes Arrays.copyOf if deoptimization happens. 
@@ -4033,7 +4033,7 @@ bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) { Node* orig_length = load_array_length(original); - Node* klass_node = load_klass_from_mirror(array_type_mirror, false, NULL, 0); + Node* klass_node = load_klass_from_mirror(array_type_mirror, false, nullptr, 0); klass_node = null_check(klass_node); RegionNode* bailout = new RegionNode(1); @@ -4042,7 +4042,7 @@ bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) { // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc. // Bail out if that is so. Node* not_objArray = generate_non_objArray_guard(klass_node, bailout); - if (not_objArray != NULL) { + if (not_objArray != nullptr) { // Improve the klass node's type from the new optimistic assumption: ciKlass* ak = ciArrayKlass::make(env()->Object_klass()); const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/); @@ -4099,7 +4099,7 @@ bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) { int test = C->static_subtype_check(superk, subk); if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) { const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr(); - if (t_original->speculative_type() != NULL) { + if (t_original->speculative_type() != nullptr) { original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true); } } @@ -4163,7 +4163,7 @@ Node* LibraryCallKit::generate_virtual_guard(Node* obj_klass, vtable_index*vtableEntry::size_in_bytes() + vtableEntry::method_offset_in_bytes(); Node* entry_addr = basic_plus_adr(obj_klass, entry_offset); - Node* target_call = make_load(NULL, entry_addr, TypePtr::NOTNULL, T_ADDRESS, MemNode::unordered); + Node* target_call = make_load(nullptr, entry_addr, TypePtr::NOTNULL, T_ADDRESS, MemNode::unordered); // Compare the target method with the expected method (e.g., Object.hashCode). 
const TypePtr* native_call_addr = TypeMetadataPtr::make(method); @@ -4247,7 +4247,7 @@ bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) { PhiNode* result_val = new PhiNode(result_reg, TypeInt::INT); PhiNode* result_io = new PhiNode(result_reg, Type::ABIO); PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM); - Node* obj = NULL; + Node* obj = nullptr; if (!is_static) { // Check for hashing null object obj = null_check_receiver(); @@ -4291,9 +4291,9 @@ bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) { // Get the header out of the object, use LoadMarkNode when available Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes()); - // The control of the load must be NULL. Otherwise, the load can move before + // The control of the load must be null. Otherwise, the load can move before // the null check after castPP removal. - Node* no_ctrl = NULL; + Node* no_ctrl = nullptr; Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered); // Test the header to see if it is unlocked. @@ -4400,7 +4400,7 @@ bool LibraryCallKit::inline_native_Reflection_getCallerClass() { // Cf. JVM_GetCallerClass // NOTE: Start the loop at depth 1 because the current JVM state does // not include the Reflection.getCallerClass() frame. 
- for (int n = 1; caller_jvms != NULL; caller_jvms = caller_jvms->caller(), n++) { + for (int n = 1; caller_jvms != nullptr; caller_jvms = caller_jvms->caller(), n++) { ciMethod* m = caller_jvms->method(); switch (n) { case 0: @@ -4457,7 +4457,7 @@ bool LibraryCallKit::inline_native_Reflection_getCallerClass() { bool LibraryCallKit::inline_fp_conversions(vmIntrinsics::ID id) { Node* arg = argument(0); - Node* result = NULL; + Node* result = nullptr; switch (id) { case vmIntrinsics::_floatToRawIntBits: result = new MoveF2INode(arg); break; @@ -4559,7 +4559,7 @@ bool LibraryCallKit::inline_fp_conversions(vmIntrinsics::ID id) { bool LibraryCallKit::inline_fp_range_check(vmIntrinsics::ID id) { Node* arg = argument(0); - Node* result = NULL; + Node* result = nullptr; switch (id) { case vmIntrinsics::_floatIsInfinite: @@ -4597,7 +4597,7 @@ static bool has_wide_mem(PhaseGVN& gvn, Node* addr, Node* base) { return true; // mixed accesses can touch both on-heap and off-heap memory } if (in_heap) { - bool is_prim_array = (addr_t != NULL) && (addr_t->elem() != Type::BOTTOM); + bool is_prim_array = (addr_t != nullptr) && (addr_t->elem() != Type::BOTTOM); if (!is_prim_array) { // Though Unsafe.copyMemory() ensures at runtime for on-heap accesses that base is a primitive array, // there's not enough type information available to determine proper memory slice for it. @@ -4667,17 +4667,17 @@ bool LibraryCallKit::inline_unsafe_copyMemory() { //------------------------clone_coping----------------------------------- // Helper function for inline_native_clone. 
void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array) { - assert(obj_size != NULL, ""); + assert(obj_size != nullptr, ""); Node* raw_obj = alloc_obj->in(1); assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), ""); - AllocateNode* alloc = NULL; + AllocateNode* alloc = nullptr; if (ReduceBulkZeroing) { // We will be completely responsible for initializing this object - // mark Initialize node as complete. alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn); // The object was just allocated - there should be no any stores! - guarantee(alloc != NULL && alloc->maybe_set_complete(&_gvn), ""); + guarantee(alloc != nullptr && alloc->maybe_set_complete(&_gvn), ""); // Mark as complete_with_arraycopy so that on AllocateNode // expansion, we know this AllocateNode is initialized by an array // copy and a StoreStore barrier exists after the array copy. @@ -4688,7 +4688,7 @@ void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, b access_clone(obj, alloc_obj, size, is_array); // Do not let reads from the cloned object float above the arraycopy. - if (alloc != NULL) { + if (alloc != nullptr) { // Do not let stores that initialize this object be reordered with // a subsequent store that would make this object accessible by // other threads. @@ -4737,7 +4737,7 @@ bool LibraryCallKit::inline_native_clone(bool is_virtual) { // know the number and types of fields to convert the clone to // loads/stores. Maybe a speculative type can help us. 
if (!obj_type->klass_is_exact() && - obj_type->speculative_type() != NULL && + obj_type->speculative_type() != nullptr && obj_type->speculative_type()->is_instance_klass()) { ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass(); if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem && @@ -4768,21 +4768,21 @@ bool LibraryCallKit::inline_native_clone(bool is_virtual) { record_for_igvn(result_reg); Node* obj_klass = load_object_klass(obj); - Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL); - if (array_ctl != NULL) { + Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)nullptr); + if (array_ctl != nullptr) { // It's an array. PreserveJVMState pjvms(this); set_control(array_ctl); Node* obj_length = load_array_length(obj); - Node* obj_size = NULL; + Node* obj_size = nullptr; Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size, /*deoptimize_on_exception=*/true); BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, false, BarrierSetC2::Parsing)) { // If it is an oop array, it requires very special treatment, // because gc barriers are required when accessing the array. - Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL); - if (is_obja != NULL) { + Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)nullptr); + if (is_obja != nullptr) { PreserveJVMState pjvms2(this); set_control(is_obja); // Generate a direct call to the right arraycopy function(s). @@ -4848,11 +4848,11 @@ bool LibraryCallKit::inline_native_clone(bool is_virtual) { if (!stopped()) { // It's an instance, and it passed the slow-path tests. 
PreserveJVMState pjvms(this); - Node* obj_size = NULL; + Node* obj_size = nullptr; // Need to deoptimize on exception from allocation since Object.clone intrinsic // is reexecuted if deoptimization occurs and there could be problems when merging // exception state between multiple Object.clone versions (reexecute=true vs reexecute=false). - Node* alloc_obj = new_instance(obj_klass, NULL, &obj_size, /*deoptimize_on_exception=*/true); + Node* alloc_obj = new_instance(obj_klass, nullptr, &obj_size, /*deoptimize_on_exception=*/true); copy_to_clone(obj, alloc_obj, obj_size, false); @@ -4896,7 +4896,7 @@ bool LibraryCallKit::inline_native_clone(bool is_virtual) { // deoptimization, we'll reexecute the allocation and the // initialization. JVMState* LibraryCallKit::arraycopy_restore_alloc_state(AllocateArrayNode* alloc, int& saved_reexecute_sp) { - if (alloc != NULL) { + if (alloc != nullptr) { ciMethod* trap_method = alloc->jvms()->method(); int trap_bci = alloc->jvms()->bci(); @@ -4940,7 +4940,7 @@ JVMState* LibraryCallKit::arraycopy_restore_alloc_state(AllocateArrayNode* alloc } } } - return NULL; + return nullptr; } // Clone the JVMState of the array allocation and create a new safepoint with it. Re-push the array length to the stack @@ -4976,10 +4976,10 @@ SafePointNode* LibraryCallKit::create_safepoint_with_state_before_array_allocati // and the control flow is simple enough. 
void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms_before_guards, int saved_reexecute_sp, uint new_idx) { - if (saved_jvms_before_guards != NULL && !stopped()) { + if (saved_jvms_before_guards != nullptr && !stopped()) { replace_unrelated_uncommon_traps_with_alloc_state(alloc, saved_jvms_before_guards); - assert(alloc != NULL, "only with a tightly coupled allocation"); + assert(alloc != nullptr, "only with a tightly coupled allocation"); // restore JVM state to the state at the arraycopy saved_jvms_before_guards->map()->set_control(map()->control()); assert(saved_jvms_before_guards->map()->memory() == map()->memory(), "memory state changed?"); @@ -5008,13 +5008,13 @@ void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, No Node* init_control = init->proj_out(TypeFunc::Control); Node* alloc_length = alloc->Ideal_length(); #ifdef ASSERT - Node* prev_cast = NULL; + Node* prev_cast = nullptr; #endif for (uint i = 0; i < init_control->outcnt(); i++) { Node* init_out = init_control->raw_out(i); if (init_out->is_CastII() && init_out->in(TypeFunc::Control) == init_control && init_out->in(1) == alloc_length) { #ifdef ASSERT - if (prev_cast == NULL) { + if (prev_cast == nullptr) { prev_cast = init_out; } else { if (prev_cast->cmp(*init_out) == false) { @@ -5042,7 +5042,7 @@ void LibraryCallKit::arraycopy_move_allocation_here(AllocateArrayNode* alloc, No // Update memory as done in GraphKit::set_output_for_allocation() const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength)); const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type(); - if (ary_type->isa_aryptr() && length_type != NULL) { + if (ary_type->isa_aryptr() && length_type != nullptr) { ary_type = ary_type->is_aryptr()->cast_to_size(length_type); } const TypePtr* telemref = ary_type->add_offset(Type::OffsetBot); @@ -5137,10 +5137,10 @@ bool 
LibraryCallKit::inline_arraycopy() { int saved_reexecute_sp = -1; JVMState* saved_jvms_before_guards = arraycopy_restore_alloc_state(alloc, saved_reexecute_sp); // See arraycopy_restore_alloc_state() comment - // if alloc == NULL we don't have to worry about a tightly coupled allocation so we can emit all needed guards - // if saved_jvms_before_guards != NULL (then alloc != NULL) then we can handle guards and a tightly coupled allocation - // if saved_jvms_before_guards == NULL and alloc != NULL, we can't emit any guards - bool can_emit_guards = (alloc == NULL || saved_jvms_before_guards != NULL); + // if alloc == null we don't have to worry about a tightly coupled allocation so we can emit all needed guards + // if saved_jvms_before_guards is not null (then alloc is not null) then we can handle guards and a tightly coupled allocation + // if saved_jvms_before_guards is null and alloc is not null, we can't emit any guards + bool can_emit_guards = (alloc == nullptr || saved_jvms_before_guards != nullptr); // The following tests must be performed // (1) src and dest are arrays. @@ -5156,12 +5156,12 @@ bool LibraryCallKit::inline_arraycopy() { // (3) src and dest must not be null. // always do this here because we need the JVM state for uncommon traps Node* null_ctl = top(); - src = saved_jvms_before_guards != NULL ? null_check_oop(src, &null_ctl, true, true) : null_check(src, T_ARRAY); + src = saved_jvms_before_guards != nullptr ? null_check_oop(src, &null_ctl, true, true) : null_check(src, T_ARRAY); assert(null_ctl->is_top(), "no null control here"); dest = null_check(dest, T_ARRAY); if (!can_emit_guards) { - // if saved_jvms_before_guards == NULL and alloc != NULL, we don't emit any + // if saved_jvms_before_guards is null and alloc is not null, we don't emit any // guards but the arraycopy node could still take advantage of a // tightly allocated allocation. 
tightly_coupled_allocation() is // called again to make sure it takes the null check above into @@ -5179,9 +5179,9 @@ bool LibraryCallKit::inline_arraycopy() { const TypeAryPtr* top_dest = dest_type->isa_aryptr(); // Do we have the type of src? - bool has_src = (top_src != NULL && top_src->elem() != Type::BOTTOM); + bool has_src = (top_src != nullptr && top_src->elem() != Type::BOTTOM); // Do we have the type of dest? - bool has_dest = (top_dest != NULL && top_dest->elem() != Type::BOTTOM); + bool has_dest = (top_dest != nullptr && top_dest->elem() != Type::BOTTOM); // Is the type for src from speculation? bool src_spec = false; // Is the type for dest from speculation? @@ -5197,18 +5197,18 @@ bool LibraryCallKit::inline_arraycopy() { // Do we already have or could we have type information for dest bool could_have_dest = has_dest; - ciKlass* src_k = NULL; + ciKlass* src_k = nullptr; if (!has_src) { src_k = src_type->speculative_type_not_null(); - if (src_k != NULL && src_k->is_array_klass()) { + if (src_k != nullptr && src_k->is_array_klass()) { could_have_src = true; } } - ciKlass* dest_k = NULL; + ciKlass* dest_k = nullptr; if (!has_dest) { dest_k = dest_type->speculative_type_not_null(); - if (dest_k != NULL && dest_k->is_array_klass()) { + if (dest_k != nullptr && dest_k->is_array_klass()) { could_have_dest = true; } } @@ -5219,14 +5219,14 @@ bool LibraryCallKit::inline_arraycopy() { src = maybe_cast_profiled_obj(src, src_k, true); src_type = _gvn.type(src); top_src = src_type->isa_aryptr(); - has_src = (top_src != NULL && top_src->elem() != Type::BOTTOM); + has_src = (top_src != nullptr && top_src->elem() != Type::BOTTOM); src_spec = true; } if (!has_dest) { dest = maybe_cast_profiled_obj(dest, dest_k, true); dest_type = _gvn.type(dest); top_dest = dest_type->isa_aryptr(); - has_dest = (top_dest != NULL && top_dest->elem() != Type::BOTTOM); + has_dest = (top_dest != nullptr && top_dest->elem() != Type::BOTTOM); dest_spec = true; } } @@ -5247,17 +5247,17 @@ 
bool LibraryCallKit::inline_arraycopy() { bool could_have_src = src_spec; // Do we have the exact type of dest? bool could_have_dest = dest_spec; - ciKlass* src_k = NULL; - ciKlass* dest_k = NULL; + ciKlass* src_k = nullptr; + ciKlass* dest_k = nullptr; if (!src_spec) { src_k = src_type->speculative_type_not_null(); - if (src_k != NULL && src_k->is_array_klass()) { + if (src_k != nullptr && src_k->is_array_klass()) { could_have_src = true; } } if (!dest_spec) { dest_k = dest_type->speculative_type_not_null(); - if (dest_k != NULL && dest_k->is_array_klass()) { + if (dest_k != nullptr && dest_k->is_array_klass()) { could_have_dest = true; } } @@ -5275,7 +5275,7 @@ bool LibraryCallKit::inline_arraycopy() { ciMethod* trap_method = method(); int trap_bci = bci(); - if (saved_jvms_before_guards != NULL) { + if (saved_jvms_before_guards != nullptr) { trap_method = alloc->jvms()->method(); trap_bci = alloc->jvms()->bci(); } @@ -5354,7 +5354,7 @@ bool LibraryCallKit::inline_arraycopy() { return true; } - ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != NULL, negative_length_guard_generated, + ArrayCopyNode* ac = ArrayCopyNode::make(this, true, src, src_offset, dest, dest_offset, length, alloc != nullptr, negative_length_guard_generated, // Create LoadRange and LoadKlass nodes for use during macro expansion here // so the compiler has a chance to eliminate them: during macro expansion, // we have to set their control (CastPP nodes are eliminated). @@ -5381,29 +5381,29 @@ bool LibraryCallKit::inline_arraycopy() { // an allocation, with no intervening tests or other escapes for the object. 
AllocateArrayNode* LibraryCallKit::tightly_coupled_allocation(Node* ptr) { - if (stopped()) return NULL; // no fast path - if (!C->do_aliasing()) return NULL; // no MergeMems around + if (stopped()) return nullptr; // no fast path + if (!C->do_aliasing()) return nullptr; // no MergeMems around AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr, &_gvn); - if (alloc == NULL) return NULL; + if (alloc == nullptr) return nullptr; Node* rawmem = memory(Compile::AliasIdxRaw); // Is the allocation's memory state untouched? if (!(rawmem->is_Proj() && rawmem->in(0)->is_Initialize())) { // Bail out if there have been raw-memory effects since the allocation. // (Example: There might have been a call or safepoint.) - return NULL; + return nullptr; } rawmem = rawmem->in(0)->as_Initialize()->memory(Compile::AliasIdxRaw); if (!(rawmem->is_Proj() && rawmem->in(0) == alloc)) { - return NULL; + return nullptr; } // There must be no unexpected observers of this allocation. for (DUIterator_Fast imax, i = ptr->fast_outs(imax); i < imax; i++) { Node* obs = ptr->fast_out(i); if (obs != this->map()) { - return NULL; + return nullptr; } } @@ -5497,7 +5497,7 @@ bool LibraryCallKit::inline_multiplyToLen() { assert(UseMultiplyToLenIntrinsic, "not implemented on this platform"); address stubAddr = StubRoutines::multiplyToLen(); - if (stubAddr == NULL) { + if (stubAddr == nullptr) { return false; // Intrinsic's stub is not implemented on this platform } const char* stubName = "multiplyToLen"; @@ -5605,7 +5605,7 @@ bool LibraryCallKit::inline_squareToLen() { assert(UseSquareToLenIntrinsic, "not implemented on this platform"); address stubAddr = StubRoutines::squareToLen(); - if (stubAddr == NULL) { + if (stubAddr == nullptr) { return false; // Intrinsic's stub is not implemented on this platform } const char* stubName = "squareToLen"; @@ -5652,7 +5652,7 @@ bool LibraryCallKit::inline_mulAdd() { assert(UseMulAddIntrinsic, "not implemented on this platform"); address stubAddr 
= StubRoutines::mulAdd(); - if (stubAddr == NULL) { + if (stubAddr == nullptr) { return false; // Intrinsic's stub is not implemented on this platform } const char* stubName = "mulAdd"; @@ -5699,7 +5699,7 @@ bool LibraryCallKit::inline_mulAdd() { //-------------inline_montgomeryMultiply----------------------------------- bool LibraryCallKit::inline_montgomeryMultiply() { address stubAddr = StubRoutines::montgomeryMultiply(); - if (stubAddr == NULL) { + if (stubAddr == nullptr) { return false; // Intrinsic's stub is not implemented on this platform } @@ -5755,7 +5755,7 @@ bool LibraryCallKit::inline_montgomeryMultiply() { bool LibraryCallKit::inline_montgomerySquare() { address stubAddr = StubRoutines::montgomerySquare(); - if (stubAddr == NULL) { + if (stubAddr == nullptr) { return false; // Intrinsic's stub is not implemented on this platform } @@ -5805,11 +5805,11 @@ bool LibraryCallKit::inline_montgomerySquare() { } bool LibraryCallKit::inline_bigIntegerShift(bool isRightShift) { - address stubAddr = NULL; - const char* stubName = NULL; + address stubAddr = nullptr; + const char* stubName = nullptr; stubAddr = isRightShift? 
StubRoutines::bigIntegerRightShift(): StubRoutines::bigIntegerLeftShift(); - if (stubAddr == NULL) { + if (stubAddr == nullptr) { return false; // Intrinsic's stub is not implemented on this platform } @@ -5870,8 +5870,8 @@ bool LibraryCallKit::inline_vectorizedMismatch() { const TypeAryPtr* obja_t = _gvn.type(obja)->isa_aryptr(); const TypeAryPtr* objb_t = _gvn.type(objb)->isa_aryptr(); - if (obja_t == NULL || obja_t->elem() == Type::BOTTOM || - objb_t == NULL || objb_t->elem() == Type::BOTTOM || + if (obja_t == nullptr || obja_t->elem() == Type::BOTTOM || + objb_t == nullptr || objb_t->elem() == Type::BOTTOM || scale == top()) { return false; // failed input validation } @@ -5939,7 +5939,7 @@ bool LibraryCallKit::inline_vectorizedMismatch() { Node* cmp_length = _gvn.transform(new CmpINode(length, intcon(inline_limit))); Node* bol_gt = _gvn.transform(new BoolNode(cmp_length, BoolTest::gt)); - call_stub_path = generate_guard(bol_gt, NULL, PROB_MIN); + call_stub_path = generate_guard(bol_gt, nullptr, PROB_MIN); if (!stopped()) { Node* casted_length = _gvn.transform(new CastIINode(control(), length, TypeInt::make(0, inline_limit, Type::WidenMin))); @@ -5964,7 +5964,7 @@ bool LibraryCallKit::inline_vectorizedMismatch() { } } - if (call_stub_path != NULL) { + if (call_stub_path != nullptr) { set_control(call_stub_path); Node* call = make_runtime_call(RC_LEAF, @@ -6135,8 +6135,8 @@ bool LibraryCallKit::inline_updateByteBufferCRC32() { //------------------------------get_table_from_crc32c_class----------------------- Node * LibraryCallKit::get_table_from_crc32c_class(ciInstanceKlass *crc32c_class) { - Node* table = load_field_from_object(NULL, "byteTable", "[I", /*decorators*/ IN_HEAP, /*is_static*/ true, crc32c_class); - assert (table != NULL, "wrong version of java.util.zip.CRC32C"); + Node* table = load_field_from_object(nullptr, "byteTable", "[I", /*decorators*/ IN_HEAP, /*is_static*/ true, crc32c_class); + assert (table != nullptr, "wrong version of 
java.util.zip.CRC32C"); return table; } @@ -6326,8 +6326,8 @@ bool LibraryCallKit::inline_reference_get() { DecoratorSet decorators = IN_HEAP | ON_WEAK_OOP_REF; Node* result = load_field_from_object(reference_obj, "referent", "Ljava/lang/Object;", - decorators, /*is_static*/ false, NULL); - if (result == NULL) return false; + decorators, /*is_static*/ false, nullptr); + if (result == nullptr) return false; // Add memory barrier to prevent commoning reads from this field // across safepoint since GC can change its value. @@ -6349,8 +6349,8 @@ bool LibraryCallKit::inline_reference_refersTo0(bool is_phantom) { DecoratorSet decorators = IN_HEAP | AS_NO_KEEPALIVE; decorators |= (is_phantom ? ON_PHANTOM_OOP_REF : ON_WEAK_OOP_REF); Node* referent = load_field_from_object(reference_obj, "referent", "Ljava/lang/Object;", - decorators, /*is_static*/ false, NULL); - if (referent == NULL) return false; + decorators, /*is_static*/ false, nullptr); + if (referent == nullptr) return false; // Add memory barrier to prevent commoning reads from this field // across safepoint since GC can change its value. 
@@ -6381,9 +6381,9 @@ bool LibraryCallKit::inline_reference_refersTo0(bool is_phantom) { Node* LibraryCallKit::load_field_from_object(Node* fromObj, const char* fieldName, const char* fieldTypeString, DecoratorSet decorators, bool is_static, ciInstanceKlass* fromKls) { - if (fromKls == NULL) { + if (fromKls == nullptr) { const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr(); - assert(tinst != NULL, "obj is null"); + assert(tinst != nullptr, "obj is null"); assert(tinst->is_loaded(), "obj is not loaded"); fromKls = tinst->instance_klass(); } else { @@ -6393,8 +6393,8 @@ Node* LibraryCallKit::load_field_from_object(Node* fromObj, const char* fieldNam ciSymbol::make(fieldTypeString), is_static); - assert(field != NULL, "undefined field %s %s %s", fieldTypeString, fromKls->name()->as_utf8(), fieldName); - if (field == NULL) return (Node *) NULL; + assert(field != nullptr, "undefined field %s %s %s", fieldTypeString, fromKls->name()->as_utf8(), fieldName); + if (field == nullptr) return (Node *) nullptr; if (is_static) { const TypeInstPtr* tip = TypeInstPtr::make(fromKls->java_mirror()); @@ -6429,10 +6429,10 @@ Node* LibraryCallKit::load_field_from_object(Node* fromObj, const char* fieldNam Node * LibraryCallKit::field_address_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString, bool is_exact /* true */, bool is_static /* false */, - ciInstanceKlass * fromKls /* NULL */) { - if (fromKls == NULL) { + ciInstanceKlass * fromKls /* nullptr */) { + if (fromKls == nullptr) { const TypeInstPtr* tinst = _gvn.type(fromObj)->isa_instptr(); - assert(tinst != NULL, "obj is null"); + assert(tinst != nullptr, "obj is null"); assert(tinst->is_loaded(), "obj is not loaded"); assert(!is_exact || tinst->klass_is_exact(), "klass not exact"); fromKls = tinst->instance_klass(); @@ -6444,7 +6444,7 @@ Node * LibraryCallKit::field_address_from_object(Node * fromObj, const char * fi ciSymbol::make(fieldTypeString), is_static); - assert(field != NULL, 
"undefined field"); + assert(field != nullptr, "undefined field"); assert(!field->is_volatile(), "not defined for volatile fields"); if (is_static) { @@ -6463,7 +6463,7 @@ Node * LibraryCallKit::field_address_from_object(Node * fromObj, const char * fi //------------------------------inline_aescrypt_Block----------------------- bool LibraryCallKit::inline_aescrypt_Block(vmIntrinsics::ID id) { - address stubAddr = NULL; + address stubAddr = nullptr; const char *stubName; assert(UseAES, "need AES instruction support"); @@ -6479,7 +6479,7 @@ bool LibraryCallKit::inline_aescrypt_Block(vmIntrinsics::ID id) { default: break; } - if (stubAddr == NULL) return false; + if (stubAddr == nullptr) return false; Node* aescrypt_object = argument(0); Node* src = argument(1); @@ -6500,8 +6500,8 @@ bool LibraryCallKit::inline_aescrypt_Block(vmIntrinsics::ID id) { // we are just trying to get the call to be generated. Node* src_start = src; Node* dest_start = dest; - if (src_offset != NULL || dest_offset != NULL) { - assert(src_offset != NULL && dest_offset != NULL, ""); + if (src_offset != nullptr || dest_offset != nullptr) { + assert(src_offset != nullptr && dest_offset != nullptr, ""); src_start = array_element_address(src, src_offset, T_BYTE); dest_start = array_element_address(dest, dest_offset, T_BYTE); } @@ -6509,7 +6509,7 @@ bool LibraryCallKit::inline_aescrypt_Block(vmIntrinsics::ID id) { // now need to get the start of its expanded key array // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object); - if (k_start == NULL) return false; + if (k_start == nullptr) return false; // Call the stub. 
make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(), @@ -6521,8 +6521,8 @@ bool LibraryCallKit::inline_aescrypt_Block(vmIntrinsics::ID id) { //------------------------------inline_cipherBlockChaining_AESCrypt----------------------- bool LibraryCallKit::inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id) { - address stubAddr = NULL; - const char *stubName = NULL; + address stubAddr = nullptr; + const char *stubName = nullptr; assert(UseAES, "need AES instruction support"); @@ -6538,7 +6538,7 @@ bool LibraryCallKit::inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id) { default: break; } - if (stubAddr == NULL) return false; + if (stubAddr == nullptr) return false; Node* cipherBlockChaining_object = argument(0); Node* src = argument(1); @@ -6559,8 +6559,8 @@ bool LibraryCallKit::inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id) { // checks are the responsibility of the caller Node* src_start = src; Node* dest_start = dest; - if (src_offset != NULL || dest_offset != NULL) { - assert(src_offset != NULL && dest_offset != NULL, ""); + if (src_offset != nullptr || dest_offset != nullptr) { + assert(src_offset != nullptr && dest_offset != nullptr, ""); src_start = array_element_address(src, src_offset, T_BYTE); dest_start = array_element_address(dest, dest_offset, T_BYTE); } @@ -6571,11 +6571,11 @@ bool LibraryCallKit::inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id) { // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java Node* embeddedCipherObj = load_field_from_object(cipherBlockChaining_object, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;"); - if (embeddedCipherObj == NULL) return false; + if (embeddedCipherObj == nullptr) return false; // cast it to what we know it will be at runtime const TypeInstPtr* tinst = _gvn.type(cipherBlockChaining_object)->isa_instptr(); - assert(tinst != NULL, "CBC obj is null"); + assert(tinst != nullptr, "CBC obj is null"); 
assert(tinst->is_loaded(), "CBC obj is not loaded"); ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt")); assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded"); @@ -6588,11 +6588,11 @@ bool LibraryCallKit::inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id) { // we need to get the start of the aescrypt_object's expanded key array Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object); - if (k_start == NULL) return false; + if (k_start == nullptr) return false; // similarly, get the start address of the r vector Node* objRvec = load_field_from_object(cipherBlockChaining_object, "r", "[B"); - if (objRvec == NULL) return false; + if (objRvec == nullptr) return false; Node* r_start = array_element_address(objRvec, intcon(0), T_BYTE); // Call the stub, passing src_start, dest_start, k_start, r_start and src_len @@ -6609,8 +6609,8 @@ bool LibraryCallKit::inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id) { //------------------------------inline_electronicCodeBook_AESCrypt----------------------- bool LibraryCallKit::inline_electronicCodeBook_AESCrypt(vmIntrinsics::ID id) { - address stubAddr = NULL; - const char *stubName = NULL; + address stubAddr = nullptr; + const char *stubName = nullptr; assert(UseAES, "need AES instruction support"); @@ -6627,7 +6627,7 @@ bool LibraryCallKit::inline_electronicCodeBook_AESCrypt(vmIntrinsics::ID id) { break; } - if (stubAddr == NULL) return false; + if (stubAddr == nullptr) return false; Node* electronicCodeBook_object = argument(0); Node* src = argument(1); @@ -6645,8 +6645,8 @@ bool LibraryCallKit::inline_electronicCodeBook_AESCrypt(vmIntrinsics::ID id) { // checks are the responsibility of the caller Node* src_start = src; Node* dest_start = dest; - if (src_offset != NULL || dest_offset != NULL) { - assert(src_offset != NULL && dest_offset != NULL, ""); + if (src_offset != nullptr || dest_offset != nullptr) { + 
assert(src_offset != nullptr && dest_offset != nullptr, ""); src_start = array_element_address(src, src_offset, T_BYTE); dest_start = array_element_address(dest, dest_offset, T_BYTE); } @@ -6657,11 +6657,11 @@ bool LibraryCallKit::inline_electronicCodeBook_AESCrypt(vmIntrinsics::ID id) { // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java Node* embeddedCipherObj = load_field_from_object(electronicCodeBook_object, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;"); - if (embeddedCipherObj == NULL) return false; + if (embeddedCipherObj == nullptr) return false; // cast it to what we know it will be at runtime const TypeInstPtr* tinst = _gvn.type(electronicCodeBook_object)->isa_instptr(); - assert(tinst != NULL, "ECB obj is null"); + assert(tinst != nullptr, "ECB obj is null"); assert(tinst->is_loaded(), "ECB obj is not loaded"); ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt")); assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded"); @@ -6674,7 +6674,7 @@ bool LibraryCallKit::inline_electronicCodeBook_AESCrypt(vmIntrinsics::ID id) { // we need to get the start of the aescrypt_object's expanded key array Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object); - if (k_start == NULL) return false; + if (k_start == nullptr) return false; // Call the stub, passing src_start, dest_start, k_start, r_start and src_len Node* ecbCrypt = make_runtime_call(RC_LEAF | RC_NO_FP, @@ -6693,13 +6693,13 @@ bool LibraryCallKit::inline_counterMode_AESCrypt(vmIntrinsics::ID id) { assert(UseAES, "need AES instruction support"); if (!UseAESCTRIntrinsics) return false; - address stubAddr = NULL; - const char *stubName = NULL; + address stubAddr = nullptr; + const char *stubName = nullptr; if (id == vmIntrinsics::_counterMode_AESCrypt) { stubAddr = StubRoutines::counterMode_AESCrypt(); stubName = "counterMode_AESCrypt"; } - 
if (stubAddr == NULL) return false; + if (stubAddr == nullptr) return false; Node* counterMode_object = argument(0); Node* src = argument(1); @@ -6717,8 +6717,8 @@ bool LibraryCallKit::inline_counterMode_AESCrypt(vmIntrinsics::ID id) { // checks are the responsibility of the caller Node* src_start = src; Node* dest_start = dest; - if (src_offset != NULL || dest_offset != NULL) { - assert(src_offset != NULL && dest_offset != NULL, ""); + if (src_offset != nullptr || dest_offset != nullptr) { + assert(src_offset != nullptr && dest_offset != nullptr, ""); src_start = array_element_address(src, src_offset, T_BYTE); dest_start = array_element_address(dest, dest_offset, T_BYTE); } @@ -6728,10 +6728,10 @@ bool LibraryCallKit::inline_counterMode_AESCrypt(vmIntrinsics::ID id) { // so we cast it here safely. // this requires a newer class file that has this array as littleEndian ints, otherwise we revert to java Node* embeddedCipherObj = load_field_from_object(counterMode_object, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;"); - if (embeddedCipherObj == NULL) return false; + if (embeddedCipherObj == nullptr) return false; // cast it to what we know it will be at runtime const TypeInstPtr* tinst = _gvn.type(counterMode_object)->isa_instptr(); - assert(tinst != NULL, "CTR obj is null"); + assert(tinst != nullptr, "CTR obj is null"); assert(tinst->is_loaded(), "CTR obj is not loaded"); ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt")); assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded"); @@ -6742,14 +6742,14 @@ bool LibraryCallKit::inline_counterMode_AESCrypt(vmIntrinsics::ID id) { aescrypt_object = _gvn.transform(aescrypt_object); // we need to get the start of the aescrypt_object's expanded key array Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object); - if (k_start == NULL) return false; + if (k_start == nullptr) return false; // similarly, get the 
start address of the r vector Node* obj_counter = load_field_from_object(counterMode_object, "counter", "[B"); - if (obj_counter == NULL) return false; + if (obj_counter == nullptr) return false; Node* cnt_start = array_element_address(obj_counter, intcon(0), T_BYTE); Node* saved_encCounter = load_field_from_object(counterMode_object, "encryptedCounter", "[B"); - if (saved_encCounter == NULL) return false; + if (saved_encCounter == nullptr) return false; Node* saved_encCounter_start = array_element_address(saved_encCounter, intcon(0), T_BYTE); Node* used = field_address_from_object(counterMode_object, "used", "I", /*is_exact*/ false); @@ -6773,16 +6773,16 @@ Node * LibraryCallKit::get_key_start_from_aescrypt_object(Node *aescrypt_object) // However, ppc64 vncipher processes MixColumns and requires the same round keys with encryption. // The ppc64 stubs of encryption and decryption use the same round keys (sessionK[0]). Node* objSessionK = load_field_from_object(aescrypt_object, "sessionK", "[[I"); - assert (objSessionK != NULL, "wrong version of com.sun.crypto.provider.AESCrypt"); - if (objSessionK == NULL) { - return (Node *) NULL; + assert (objSessionK != nullptr, "wrong version of com.sun.crypto.provider.AESCrypt"); + if (objSessionK == nullptr) { + return (Node *) nullptr; } Node* objAESCryptKey = load_array_element(objSessionK, intcon(0), TypeAryPtr::OOPS, /* set_ctrl */ true); #else Node* objAESCryptKey = load_field_from_object(aescrypt_object, "K", "[I"); #endif // PPC64 - assert (objAESCryptKey != NULL, "wrong version of com.sun.crypto.provider.AESCrypt"); - if (objAESCryptKey == NULL) return (Node *) NULL; + assert (objAESCryptKey != nullptr, "wrong version of com.sun.crypto.provider.AESCrypt"); + if (objAESCryptKey == nullptr) return (Node *) nullptr; // now have the array, need to get the start address of the K array Node* k_start = array_element_address(objAESCryptKey, intcon(0), T_INT); @@ -6799,7 +6799,7 @@ Node * 
LibraryCallKit::get_key_start_from_aescrypt_object(Node *aescrypt_object) // note cipher==plain is more conservative than the original java code but that's OK // Node* LibraryCallKit::inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting) { - // The receiver was checked for NULL already. + // The receiver was checked for null already. Node* objCBC = argument(0); Node* src = argument(1); @@ -6812,7 +6812,7 @@ Node* LibraryCallKit::inline_cipherBlockChaining_AESCrypt_predicate(bool decrypt // AESCrypt might not be loaded yet if some other SymmetricCipher got us to this compile point // will have same classloader as CipherBlockChaining object const TypeInstPtr* tinst = _gvn.type(objCBC)->isa_instptr(); - assert(tinst != NULL, "CBCobj is null"); + assert(tinst != nullptr, "CBCobj is null"); assert(tinst->is_loaded(), "CBCobj is not loaded"); // we want to do an instanceof comparison against the AESCrypt class @@ -6834,11 +6834,11 @@ Node* LibraryCallKit::inline_cipherBlockChaining_AESCrypt_predicate(bool decrypt Node* cmp_instof = _gvn.transform(new CmpINode(instof, intcon(1))); Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne)); - Node* instof_false = generate_guard(bool_instof, NULL, PROB_MIN); + Node* instof_false = generate_guard(bool_instof, nullptr, PROB_MIN); // for encryption, we are done if (!decrypting) - return instof_false; // even if it is NULL + return instof_false; // even if it is null // for decryption, we need to add a further check to avoid // taking the intrinsic path when cipher and plain are the same @@ -6848,7 +6848,7 @@ Node* LibraryCallKit::inline_cipherBlockChaining_AESCrypt_predicate(bool decrypt Node* cmp_src_dest = _gvn.transform(new CmpPNode(src, dest)); Node* bool_src_dest = _gvn.transform(new BoolNode(cmp_src_dest, BoolTest::eq)); - Node* src_dest_conjoint = generate_guard(bool_src_dest, NULL, PROB_MIN); + Node* src_dest_conjoint = generate_guard(bool_src_dest, nullptr, PROB_MIN); region->init_req(2, 
src_dest_conjoint); record_for_igvn(region); @@ -6865,7 +6865,7 @@ Node* LibraryCallKit::inline_cipherBlockChaining_AESCrypt_predicate(bool decrypt // note cipher==plain is more conservative than the original java code but that's OK // Node* LibraryCallKit::inline_electronicCodeBook_AESCrypt_predicate(bool decrypting) { - // The receiver was checked for NULL already. + // The receiver was checked for null already. Node* objECB = argument(0); // Load embeddedCipher field of ElectronicCodeBook object. @@ -6875,7 +6875,7 @@ Node* LibraryCallKit::inline_electronicCodeBook_AESCrypt_predicate(bool decrypti // AESCrypt might not be loaded yet if some other SymmetricCipher got us to this compile point // will have same classloader as ElectronicCodeBook object const TypeInstPtr* tinst = _gvn.type(objECB)->isa_instptr(); - assert(tinst != NULL, "ECBobj is null"); + assert(tinst != nullptr, "ECBobj is null"); assert(tinst->is_loaded(), "ECBobj is not loaded"); // we want to do an instanceof comparison against the AESCrypt class @@ -6892,11 +6892,11 @@ Node* LibraryCallKit::inline_electronicCodeBook_AESCrypt_predicate(bool decrypti Node* cmp_instof = _gvn.transform(new CmpINode(instof, intcon(1))); Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne)); - Node* instof_false = generate_guard(bool_instof, NULL, PROB_MIN); + Node* instof_false = generate_guard(bool_instof, nullptr, PROB_MIN); // for encryption, we are done if (!decrypting) - return instof_false; // even if it is NULL + return instof_false; // even if it is null // for decryption, we need to add a further check to avoid // taking the intrinsic path when cipher and plain are the same @@ -6907,7 +6907,7 @@ Node* LibraryCallKit::inline_electronicCodeBook_AESCrypt_predicate(bool decrypti Node* dest = argument(4); Node* cmp_src_dest = _gvn.transform(new CmpPNode(src, dest)); Node* bool_src_dest = _gvn.transform(new BoolNode(cmp_src_dest, BoolTest::eq)); - Node* src_dest_conjoint = 
generate_guard(bool_src_dest, NULL, PROB_MIN); + Node* src_dest_conjoint = generate_guard(bool_src_dest, nullptr, PROB_MIN); region->init_req(2, src_dest_conjoint); record_for_igvn(region); @@ -6925,7 +6925,7 @@ Node* LibraryCallKit::inline_electronicCodeBook_AESCrypt_predicate(bool decrypti // Node* LibraryCallKit::inline_counterMode_AESCrypt_predicate() { - // The receiver was checked for NULL already. + // The receiver was checked for null already. Node* objCTR = argument(0); // Load embeddedCipher field of CipherBlockChaining object. @@ -6935,7 +6935,7 @@ Node* LibraryCallKit::inline_counterMode_AESCrypt_predicate() { // AESCrypt might not be loaded yet if some other SymmetricCipher got us to this compile point // will have same classloader as CipherBlockChaining object const TypeInstPtr* tinst = _gvn.type(objCTR)->isa_instptr(); - assert(tinst != NULL, "CTRobj is null"); + assert(tinst != nullptr, "CTRobj is null"); assert(tinst->is_loaded(), "CTRobj is not loaded"); // we want to do an instanceof comparison against the AESCrypt class @@ -6951,9 +6951,9 @@ Node* LibraryCallKit::inline_counterMode_AESCrypt_predicate() { Node* instof = gen_instanceof(embeddedCipherObj, makecon(TypeKlassPtr::make(instklass_AESCrypt))); Node* cmp_instof = _gvn.transform(new CmpINode(instof, intcon(1))); Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne)); - Node* instof_false = generate_guard(bool_instof, NULL, PROB_MIN); + Node* instof_false = generate_guard(bool_instof, nullptr, PROB_MIN); - return instof_false; // even if it is NULL + return instof_false; // even if it is null } //------------------------------inline_ghash_processBlocks @@ -6976,11 +6976,11 @@ bool LibraryCallKit::inline_ghash_processBlocks() { data = must_be_not_null(data, true); Node* state_start = array_element_address(state, intcon(0), T_LONG); - assert(state_start, "state is NULL"); + assert(state_start, "state is null"); Node* subkeyH_start = array_element_address(subkeyH, 
intcon(0), T_LONG); - assert(subkeyH_start, "subkeyH is NULL"); + assert(subkeyH_start, "subkeyH is null"); Node* data_start = array_element_address(data, offset, T_BYTE); - assert(data_start, "data is NULL"); + assert(data_start, "data is null"); Node* ghash = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::ghash_processBlocks_Type(), @@ -7005,9 +7005,9 @@ bool LibraryCallKit::inline_chacha20Block() { result = must_be_not_null(result, true); Node* state_start = array_element_address(state, intcon(0), T_INT); - assert(state_start, "state is NULL"); + assert(state_start, "state is null"); Node* result_start = array_element_address(result, intcon(0), T_BYTE); - assert(result_start, "result is NULL"); + assert(result_start, "result is null"); Node* cc20Blk = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::chacha20Block_Type(), @@ -7040,9 +7040,9 @@ bool LibraryCallKit::inline_base64_encodeBlock() { dest = must_be_not_null(dest, true); Node* src_start = array_element_address(src, intcon(0), T_BYTE); - assert(src_start, "source array is NULL"); + assert(src_start, "source array is null"); Node* dest_start = array_element_address(dest, intcon(0), T_BYTE); - assert(dest_start, "destination array is NULL"); + assert(dest_start, "destination array is null"); Node* base64 = make_runtime_call(RC_LEAF, OptoRuntime::base64_encodeBlock_Type(), @@ -7073,9 +7073,9 @@ bool LibraryCallKit::inline_base64_decodeBlock() { dest = must_be_not_null(dest, true); Node* src_start = array_element_address(src, intcon(0), T_BYTE); - assert(src_start, "source array is NULL"); + assert(src_start, "source array is null"); Node* dest_start = array_element_address(dest, intcon(0), T_BYTE); - assert(dest_start, "destination array is NULL"); + assert(dest_start, "destination array is null"); Node* call = make_runtime_call(RC_LEAF, OptoRuntime::base64_decodeBlock_Type(), @@ -7109,11 +7109,11 @@ bool LibraryCallKit::inline_poly1305_processBlocks() { rlimbs = must_be_not_null(rlimbs, true); Node* 
input_start = array_element_address(input, input_offset, T_BYTE); - assert(input_start, "input array is NULL"); + assert(input_start, "input array is null"); Node* acc_start = array_element_address(alimbs, intcon(0), T_LONG); - assert(acc_start, "acc array is NULL"); + assert(acc_start, "acc array is null"); Node* r_start = array_element_address(rlimbs, intcon(0), T_LONG); - assert(r_start, "r array is NULL"); + assert(r_start, "r array is null"); Node* call = make_runtime_call(RC_LEAF | RC_NO_FP, OptoRuntime::poly1305_processBlocks_Type(), @@ -7159,8 +7159,8 @@ bool LibraryCallKit::inline_digestBase_implCompress(vmIntrinsics::ID id) { // 'src_start' points to src array + offset src = must_be_not_null(src, true); Node* src_start = array_element_address(src, ofs, src_elem); - Node* state = NULL; - Node* block_size = NULL; + Node* state = nullptr; + Node* block_size = nullptr; address stubAddr; const char *stubName; @@ -7195,20 +7195,20 @@ bool LibraryCallKit::inline_digestBase_implCompress(vmIntrinsics::ID id) { stubAddr = StubRoutines::sha3_implCompress(); stubName = "sha3_implCompress"; block_size = get_block_size_from_digest_object(digestBase_obj); - if (block_size == NULL) return false; + if (block_size == nullptr) return false; break; default: fatal_unexpected_iid(id); return false; } - if (state == NULL) return false; + if (state == nullptr) return false; - assert(stubAddr != NULL, "Stub is generated"); - if (stubAddr == NULL) return false; + assert(stubAddr != nullptr, "Stub is generated"); + if (stubAddr == nullptr) return false; // Call the stub. 
Node* call; - if (block_size == NULL) { + if (block_size == nullptr) { call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::digestBase_implCompress_Type(false), stubAddr, stubName, TypePtr::BOTTOM, src_start, state); @@ -7232,7 +7232,7 @@ bool LibraryCallKit::inline_digestBase_implCompressMB(int predicate) { assert((uint)predicate < 5, "sanity"); assert(callee()->signature()->size() == 3, "digestBase_implCompressMB has 3 parameters"); - Node* digestBase_obj = argument(0); // The receiver was checked for NULL already. + Node* digestBase_obj = argument(0); // The receiver was checked for null already. Node* src = argument(1); // byte[] array Node* ofs = argument(2); // type int Node* limit = argument(3); // type int @@ -7251,9 +7251,9 @@ bool LibraryCallKit::inline_digestBase_implCompressMB(int predicate) { src = must_be_not_null(src, false); Node* src_start = array_element_address(src, ofs, src_elem); - const char* klass_digestBase_name = NULL; - const char* stub_name = NULL; - address stub_addr = NULL; + const char* klass_digestBase_name = nullptr; + const char* stub_name = nullptr; + address stub_addr = nullptr; BasicType elem_type = T_INT; switch (predicate) { @@ -7297,13 +7297,13 @@ bool LibraryCallKit::inline_digestBase_implCompressMB(int predicate) { default: fatal("unknown DigestBase intrinsic predicate: %d", predicate); } - if (klass_digestBase_name != NULL) { - assert(stub_addr != NULL, "Stub is generated"); - if (stub_addr == NULL) return false; + if (klass_digestBase_name != nullptr) { + assert(stub_addr != nullptr, "Stub is generated"); + if (stub_addr == nullptr) return false; // get DigestBase klass to lookup for SHA klass const TypeInstPtr* tinst = _gvn.type(digestBase_obj)->isa_instptr(); - assert(tinst != NULL, "digestBase_obj is not instance???"); + assert(tinst != nullptr, "digestBase_obj is not instance???"); assert(tinst->is_loaded(), "DigestBase is not loaded"); ciKlass* klass_digestBase = 
tinst->instance_klass()->find_klass(ciSymbol::make(klass_digestBase_name)); @@ -7324,17 +7324,17 @@ bool LibraryCallKit::inline_digestBase_implCompressMB(Node* digestBase_obj, ciIn digest_obj = _gvn.transform(digest_obj); Node* state = get_state_from_digest_object(digest_obj, elem_type); - if (state == NULL) return false; + if (state == nullptr) return false; - Node* block_size = NULL; + Node* block_size = nullptr; if (strcmp("sha3_implCompressMB", stubName) == 0) { block_size = get_block_size_from_digest_object(digest_obj); - if (block_size == NULL) return false; + if (block_size == nullptr) return false; } // Call the stub. Node* call; - if (block_size == NULL) { + if (block_size == nullptr) { call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::digestBase_implCompressMB_Type(false), stubAddr, stubName, TypePtr::BOTTOM, @@ -7356,12 +7356,12 @@ bool LibraryCallKit::inline_digestBase_implCompressMB(Node* digestBase_obj, ciIn //------------------------------inline_galoisCounterMode_AESCrypt----------------------- bool LibraryCallKit::inline_galoisCounterMode_AESCrypt() { assert(UseAES, "need AES instruction support"); - address stubAddr = NULL; - const char *stubName = NULL; + address stubAddr = nullptr; + const char *stubName = nullptr; stubAddr = StubRoutines::galoisCounterMode_AESCrypt(); stubName = "galoisCounterMode_AESCrypt"; - if (stubAddr == NULL) return false; + if (stubAddr == nullptr) return false; Node* in = argument(0); Node* inOfs = argument(1); @@ -7385,8 +7385,8 @@ bool LibraryCallKit::inline_galoisCounterMode_AESCrypt() { Node* in_start = in; Node* ct_start = ct; Node* out_start = out; - if (inOfs != NULL || ctOfs != NULL || outOfs != NULL) { - assert(inOfs != NULL && ctOfs != NULL && outOfs != NULL, ""); + if (inOfs != nullptr || ctOfs != nullptr || outOfs != nullptr) { + assert(inOfs != nullptr && ctOfs != nullptr && outOfs != nullptr, ""); in_start = array_element_address(in, inOfs, T_BYTE); ct_start = array_element_address(ct, ctOfs, T_BYTE); 
out_start = array_element_address(out, outOfs, T_BYTE); @@ -7401,12 +7401,12 @@ bool LibraryCallKit::inline_galoisCounterMode_AESCrypt() { Node* subkeyHtbl = load_field_from_object(ghash_object, "subkeyHtbl", "[J"); Node* state = load_field_from_object(ghash_object, "state", "[J"); - if (embeddedCipherObj == NULL || counter == NULL || subkeyHtbl == NULL || state == NULL) { + if (embeddedCipherObj == nullptr || counter == nullptr || subkeyHtbl == nullptr || state == nullptr) { return false; } // cast it to what we know it will be at runtime const TypeInstPtr* tinst = _gvn.type(gctr_object)->isa_instptr(); - assert(tinst != NULL, "GCTR obj is null"); + assert(tinst != nullptr, "GCTR obj is null"); assert(tinst->is_loaded(), "GCTR obj is not loaded"); ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt")); assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded"); @@ -7417,7 +7417,7 @@ bool LibraryCallKit::inline_galoisCounterMode_AESCrypt() { aescrypt_object = _gvn.transform(aescrypt_object); // we need to get the start of the aescrypt_object's expanded key array Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object); - if (k_start == NULL) return false; + if (k_start == nullptr) return false; // similarly, get the start address of the r vector Node* cnt_start = array_element_address(counter, intcon(0), T_BYTE); Node* state_start = array_element_address(state, intcon(0), T_LONG); @@ -7448,17 +7448,17 @@ bool LibraryCallKit::inline_galoisCounterMode_AESCrypt() { // Node* LibraryCallKit::inline_galoisCounterMode_AESCrypt_predicate() { - // The receiver was checked for NULL already. + // The receiver was checked for null already. Node* objGCTR = argument(7); // Load embeddedCipher field of GCTR object. 
Node* embeddedCipherObj = load_field_from_object(objGCTR, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;"); - assert(embeddedCipherObj != NULL, "embeddedCipherObj is null"); + assert(embeddedCipherObj != nullptr, "embeddedCipherObj is null"); // get AESCrypt klass for instanceOf check // AESCrypt might not be loaded yet if some other SymmetricCipher got us to this compile point // will have same classloader as CipherBlockChaining object const TypeInstPtr* tinst = _gvn.type(objGCTR)->isa_instptr(); - assert(tinst != NULL, "GCTR obj is null"); + assert(tinst != nullptr, "GCTR obj is null"); assert(tinst->is_loaded(), "GCTR obj is not loaded"); // we want to do an instanceof comparison against the AESCrypt class @@ -7474,9 +7474,9 @@ Node* LibraryCallKit::inline_galoisCounterMode_AESCrypt_predicate() { Node* instof = gen_instanceof(embeddedCipherObj, makecon(TypeKlassPtr::make(instklass_AESCrypt))); Node* cmp_instof = _gvn.transform(new CmpINode(instof, intcon(1))); Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne)); - Node* instof_false = generate_guard(bool_instof, NULL, PROB_MIN); + Node* instof_false = generate_guard(bool_instof, nullptr, PROB_MIN); - return instof_false; // even if it is NULL + return instof_false; // even if it is null } //------------------------------get_state_from_digest_object----------------------- @@ -7489,8 +7489,8 @@ Node * LibraryCallKit::get_state_from_digest_object(Node *digest_object, BasicTy default: ShouldNotReachHere(); } Node* digest_state = load_field_from_object(digest_object, "state", state_type); - assert (digest_state != NULL, "wrong version of sun.security.provider.MD5/SHA/SHA2/SHA5/SHA3"); - if (digest_state == NULL) return (Node *) NULL; + assert (digest_state != nullptr, "wrong version of sun.security.provider.MD5/SHA/SHA2/SHA5/SHA3"); + if (digest_state == nullptr) return (Node *) nullptr; // now have the array, need to get the start address of the state array Node* state = 
array_element_address(digest_state, intcon(0), elem_type); @@ -7500,7 +7500,7 @@ Node * LibraryCallKit::get_state_from_digest_object(Node *digest_object, BasicTy //------------------------------get_block_size_from_sha3_object---------------------------------- Node * LibraryCallKit::get_block_size_from_digest_object(Node *digest_object) { Node* block_size = load_field_from_object(digest_object, "blockSize", "I"); - assert (block_size != NULL, "sanity"); + assert (block_size != nullptr, "sanity"); return block_size; } @@ -7514,15 +7514,15 @@ Node* LibraryCallKit::inline_digestBase_implCompressMB_predicate(int predicate) "need MD5/SHA1/SHA256/SHA512/SHA3 instruction support"); assert((uint)predicate < 5, "sanity"); - // The receiver was checked for NULL already. + // The receiver was checked for null already. Node* digestBaseObj = argument(0); // get DigestBase klass for instanceOf check const TypeInstPtr* tinst = _gvn.type(digestBaseObj)->isa_instptr(); - assert(tinst != NULL, "digestBaseObj is null"); + assert(tinst != nullptr, "digestBaseObj is null"); assert(tinst->is_loaded(), "DigestBase is not loaded"); - const char* klass_name = NULL; + const char* klass_name = nullptr; switch (predicate) { case 0: if (UseMD5Intrinsics) { @@ -7558,11 +7558,11 @@ Node* LibraryCallKit::inline_digestBase_implCompressMB_predicate(int predicate) fatal("unknown SHA intrinsic predicate: %d", predicate); } - ciKlass* klass = NULL; - if (klass_name != NULL) { + ciKlass* klass = nullptr; + if (klass_name != nullptr) { klass = tinst->instance_klass()->find_klass(ciSymbol::make(klass_name)); } - if ((klass == NULL) || !klass->is_loaded()) { + if ((klass == nullptr) || !klass->is_loaded()) { // if none of MD5/SHA/SHA2/SHA5 is loaded, we never take the intrinsic fast path Node* ctrl = control(); set_control(top()); // no intrinsic path @@ -7573,17 +7573,17 @@ Node* LibraryCallKit::inline_digestBase_implCompressMB_predicate(int predicate) Node* instof = gen_instanceof(digestBaseObj, 
makecon(TypeKlassPtr::make(instklass))); Node* cmp_instof = _gvn.transform(new CmpINode(instof, intcon(1))); Node* bool_instof = _gvn.transform(new BoolNode(cmp_instof, BoolTest::ne)); - Node* instof_false = generate_guard(bool_instof, NULL, PROB_MIN); + Node* instof_false = generate_guard(bool_instof, nullptr, PROB_MIN); - return instof_false; // even if it is NULL + return instof_false; // even if it is null } //-------------inline_fma----------------------------------- bool LibraryCallKit::inline_fma(vmIntrinsics::ID id) { - Node *a = NULL; - Node *b = NULL; - Node *c = NULL; - Node* result = NULL; + Node *a = nullptr; + Node *b = nullptr; + Node *c = nullptr; + Node* result = nullptr; switch (id) { case vmIntrinsics::_fmaD: assert(callee()->signature()->size() == 6, "fma has 3 parameters of size 2 each."); @@ -7610,7 +7610,7 @@ bool LibraryCallKit::inline_fma(vmIntrinsics::ID id) { bool LibraryCallKit::inline_character_compare(vmIntrinsics::ID id) { // argument(0) is receiver Node* codePoint = argument(1); - Node* n = NULL; + Node* n = nullptr; switch (id) { case vmIntrinsics::_isDigit : @@ -7643,7 +7643,7 @@ bool LibraryCallKit::inline_fp_min_max(vmIntrinsics::ID id) { ciMethodData *md = callee()->method_data(); - if ( md != NULL && md->is_mature() && md->invocation_count() > 0 ) { + if ( md != nullptr && md->is_mature() && md->invocation_count() > 0 ) { ciCallProfile cp = caller()->call_profile_at_bci(bci()); if ( ((double)cp.count()) / ((double)md->invocation_count()) < 0.8 ) { @@ -7669,9 +7669,9 @@ bool LibraryCallKit::inline_fp_min_max(vmIntrinsics::ID id) { } */ - Node *a = NULL; - Node *b = NULL; - Node *n = NULL; + Node *a = nullptr; + Node *b = nullptr; + Node *n = nullptr; switch (id) { case vmIntrinsics::_maxF: case vmIntrinsics::_minF: @@ -7720,17 +7720,17 @@ bool LibraryCallKit::inline_fp_min_max(vmIntrinsics::ID id) { bool LibraryCallKit::inline_profileBoolean() { Node* counts = argument(1); - const TypeAryPtr* ary = NULL; - ciArray* aobj = NULL; 
+ const TypeAryPtr* ary = nullptr; + ciArray* aobj = nullptr; if (counts->is_Con() - && (ary = counts->bottom_type()->isa_aryptr()) != NULL - && (aobj = ary->const_oop()->as_array()) != NULL + && (ary = counts->bottom_type()->isa_aryptr()) != nullptr + && (aobj = ary->const_oop()->as_array()) != nullptr && (aobj->length() == 2)) { // Profile is int[2] where [0] and [1] correspond to false and true value occurrences respectively. jint false_cnt = aobj->element_value(0).as_int(); jint true_cnt = aobj->element_value(1).as_int(); - if (C->log() != NULL) { + if (C->log() != nullptr) { C->log()->elem("observe source='profileBoolean' false='%d' true='%d'", false_cnt, true_cnt); } @@ -7811,7 +7811,7 @@ bool LibraryCallKit::inline_getObjectSize() { jint layout_con = Klass::_lh_neutral_value; Node* layout_val = get_layout_helper(klass_node, layout_con); - int layout_is_con = (layout_val == NULL); + int layout_is_con = (layout_val == nullptr); if (layout_is_con) { // Layout helper is constant, can figure out things at compile time. @@ -7859,8 +7859,8 @@ bool LibraryCallKit::inline_getObjectSize() { PhiNode* result_val = new PhiNode(result_reg, TypeLong::LONG); record_for_igvn(result_reg); - Node* array_ctl = generate_array_guard(klass_node, NULL); - if (array_ctl != NULL) { + Node* array_ctl = generate_array_guard(klass_node, nullptr); + if (array_ctl != nullptr) { // Array case: size is round(header + element_size*arraylength). // Since arraylength is different for every array instance, we have to // compute the whole thing at runtime. 
diff --git a/src/hotspot/share/opto/library_call.hpp b/src/hotspot/share/opto/library_call.hpp index 97feb431dbf..dc7884cf139 100644 --- a/src/hotspot/share/opto/library_call.hpp +++ b/src/hotspot/share/opto/library_call.hpp @@ -75,7 +75,7 @@ class LibraryCallKit : public GraphKit { LibraryCallKit(JVMState* jvms, LibraryIntrinsic* intrinsic) : GraphKit(jvms), _intrinsic(intrinsic), - _result(NULL) + _result(nullptr) { // Check if this is a root compile. In that case we don't have a caller. if (!jvms->has_method()) { @@ -85,7 +85,7 @@ class LibraryCallKit : public GraphKit { // and save the stack pointer value so it can used by uncommon_trap. // We find the argument count by looking at the declared signature. bool ignored_will_link; - ciSignature* declared_signature = NULL; + ciSignature* declared_signature = nullptr; ciMethod* ignored_callee = caller()->get_method_at_bci(bci(), ignored_will_link, &declared_signature); const int nargs = declared_signature->arg_size_for_bc(caller()->java_code_at_bci(bci())); _reexecute_sp = sp() + nargs; // "push" arguments back on stack @@ -105,7 +105,7 @@ class LibraryCallKit : public GraphKit { void push_result() { // Push the result onto the stack. 
- if (!stopped() && result() != NULL) { + if (!stopped() && result() != nullptr) { BasicType bt = result()->bottom_type()->basic_type(); push_node(bt, result()); } @@ -116,7 +116,7 @@ class LibraryCallKit : public GraphKit { fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid)); } - void set_result(Node* n) { assert(_result == NULL, "only set once"); _result = n; } + void set_result(Node* n) { assert(_result == nullptr, "only set once"); _result = n; } void set_result(RegionNode* region, PhiNode* value); Node* result() { return _result; } @@ -128,7 +128,7 @@ class LibraryCallKit : public GraphKit { Node* generate_fair_guard(Node* test, RegionNode* region); Node* generate_negative_guard(Node* index, RegionNode* region, // resulting CastII of index: - Node* *pos_index = NULL); + Node* *pos_index = nullptr); Node* generate_limit_guard(Node* offset, Node* subseq_length, Node* array_length, RegionNode* region); @@ -184,8 +184,8 @@ class LibraryCallKit : public GraphKit { CallJavaNode* generate_method_call_virtual(vmIntrinsics::ID method_id) { return generate_method_call(method_id, true, false); } - Node* load_field_from_object(Node* fromObj, const char* fieldName, const char* fieldTypeString, DecoratorSet decorators = IN_HEAP, bool is_static = false, ciInstanceKlass* fromKls = NULL); - Node* field_address_from_object(Node* fromObj, const char* fieldName, const char* fieldTypeString, bool is_exact = true, bool is_static = false, ciInstanceKlass* fromKls = NULL); + Node* load_field_from_object(Node* fromObj, const char* fieldName, const char* fieldTypeString, DecoratorSet decorators = IN_HEAP, bool is_static = false, ciInstanceKlass* fromKls = nullptr); + Node* field_address_from_object(Node* fromObj, const char* fieldName, const char* fieldTypeString, bool is_exact = true, bool is_static = false, ciInstanceKlass* fromKls = nullptr); Node* make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2, 
StrIntrinsicNode::ArgEnc ae); bool inline_string_compareTo(StrIntrinsicNode::ArgEnc ae); diff --git a/src/hotspot/share/opto/live.cpp b/src/hotspot/share/opto/live.cpp index 7101994b4e3..ea1722702ce 100644 --- a/src/hotspot/share/opto/live.cpp +++ b/src/hotspot/share/opto/live.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -86,7 +86,7 @@ void PhaseLive::compute(uint maxlrg) { _deltas = NEW_RESOURCE_ARRAY(IndexSet*,_cfg.number_of_blocks()); memset(_deltas, 0, sizeof(IndexSet*)* _cfg.number_of_blocks()); - _free_IndexSet = NULL; + _free_IndexSet = nullptr; // Blocks having done pass-1 VectorSet first_pass; @@ -176,7 +176,7 @@ void PhaseLive::compute(uint maxlrg) { } } IndexSet *free = _free_IndexSet; - while (free != NULL) { + while (free != nullptr) { IndexSet *temp = free; free = free->next(); temp->clear(); @@ -223,7 +223,7 @@ void PhaseLive::freeset(Block *p) { } f->set_next(_free_IndexSet); _free_IndexSet = f; // Drop onto free list - _deltas[p->_pre_order-1] = NULL; + _deltas[p->_pre_order-1] = nullptr; } // Add a live-out value to a given blocks live-out set. If it is new, then diff --git a/src/hotspot/share/opto/live.hpp b/src/hotspot/share/opto/live.hpp index 8512a1f85f4..d5ff1570fd1 100644 --- a/src/hotspot/share/opto/live.hpp +++ b/src/hotspot/share/opto/live.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -78,7 +78,7 @@ public: // Compute liveness info void compute(uint maxlrg); // Reset arena storage - void reset() { _live = NULL; } + void reset() { _live = nullptr; } // Return the live-out set for this block IndexSet *live( const Block * b ) { return &_live[b->_pre_order-1]; } diff --git a/src/hotspot/share/opto/locknode.cpp b/src/hotspot/share/opto/locknode.cpp index ea605b7a2cd..640109e6c42 100644 --- a/src/hotspot/share/opto/locknode.cpp +++ b/src/hotspot/share/opto/locknode.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -86,7 +86,7 @@ OptoReg::Name BoxLockNode::reg(Node* box) { // Is BoxLock node used for one simple lock region (same box and obj)? bool BoxLockNode::is_simple_lock_region(LockNode** unique_lock, Node* obj, Node** bad_lock) { - LockNode* lock = NULL; + LockNode* lock = nullptr; bool has_one_lock = false; for (uint i = 0; i < this->outcnt(); i++) { Node* n = this->raw_out(i); @@ -96,19 +96,19 @@ bool BoxLockNode::is_simple_lock_region(LockNode** unique_lock, Node* obj, Node* // Check lock's box since box could be referenced by Lock's debug info. 
if (alock->box_node() == this) { if (alock->obj_node()->eqv_uncast(obj)) { - if ((unique_lock != NULL) && alock->is_Lock()) { - if (lock == NULL) { + if ((unique_lock != nullptr) && alock->is_Lock()) { + if (lock == nullptr) { lock = alock->as_Lock(); has_one_lock = true; } else if (lock != alock->as_Lock()) { has_one_lock = false; - if (bad_lock != NULL) { + if (bad_lock != nullptr) { *bad_lock = alock; } } } } else { - if (bad_lock != NULL) { + if (bad_lock != nullptr) { *bad_lock = alock; } return false; // Different objects @@ -132,7 +132,7 @@ bool BoxLockNode::is_simple_lock_region(LockNode** unique_lock, Node* obj, Node* // unlocks are reference only this one object. } #endif - if (unique_lock != NULL && has_one_lock) { + if (unique_lock != nullptr && has_one_lock) { *unique_lock = lock; } return true; diff --git a/src/hotspot/share/opto/locknode.hpp b/src/hotspot/share/opto/locknode.hpp index 1013111be53..4a74e50425f 100644 --- a/src/hotspot/share/opto/locknode.hpp +++ b/src/hotspot/share/opto/locknode.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -80,8 +80,8 @@ public: FastLockNode(Node *ctrl, Node *oop, Node *box) : CmpNode(oop,box) { init_req(0,ctrl); init_class_id(Class_FastLock); - _rtm_counters = NULL; - _stack_rtm_counters = NULL; + _rtm_counters = nullptr; + _stack_rtm_counters = nullptr; } Node* obj_node() const { return in(1); } Node* box_node() const { return in(2); } diff --git a/src/hotspot/share/opto/loopPredicate.cpp b/src/hotspot/share/opto/loopPredicate.cpp index ef691c31d95..8511b3da897 100644 --- a/src/hotspot/share/opto/loopPredicate.cpp +++ b/src/hotspot/share/opto/loopPredicate.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2022, Oracle and/or its affiliates. 
All rights reserved. + * Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -66,7 +66,7 @@ void PhaseIdealLoop::register_control(Node* n, IdealLoopTree *loop, Node* pred, } set_loop(n, loop); // When called from beautify_loops() idom is not constructed yet. - if (_idom != NULL) { + if (_idom != nullptr) { set_idom(n, pred, dom_depth(pred)); } } @@ -132,7 +132,7 @@ ProjNode* PhaseIdealLoop::create_new_if_for_predicate(ProjNode* cont_proj, Node* register_control(rgn, loop, uncommon_proj); _igvn.replace_input_of(call, 0, rgn); // When called from beautify_loops() idom is not constructed yet. - if (_idom != NULL) { + if (_idom != nullptr) { set_idom(call, rgn, dom_depth(rgn)); } // Move nodes pinned on the projection or whose control is set to @@ -146,13 +146,13 @@ ProjNode* PhaseIdealLoop::create_new_if_for_predicate(ProjNode* cont_proj, Node* } Node* entry = iff->in(0); - if (new_entry != NULL) { + if (new_entry != nullptr) { // Cloning the predicate to new location. entry = new_entry; } // Create new_iff IdealLoopTree* lp = get_loop(entry); - IfNode* new_iff = NULL; + IfNode* new_iff = nullptr; if (opcode == Op_If) { new_iff = new IfNode(entry, iff->in(1), iff->_prob, iff->_fcnt); } else { @@ -180,7 +180,7 @@ ProjNode* PhaseIdealLoop::create_new_if_for_predicate(ProjNode* cont_proj, Node* _igvn.add_input_to(rgn, if_uct); // When called from beautify_loops() idom is not constructed yet. 
- if (_idom != NULL) { + if (_idom != nullptr) { Node* ridom = idom(rgn); Node* nrdom = dom_lca_internal(ridom, new_iff); set_idom(rgn, nrdom, dom_depth(rgn)); @@ -216,10 +216,10 @@ ProjNode* PhaseIdealLoop::create_new_if_for_predicate(ProjNode* cont_proj, Node* } assert(!has_phi || rgn->req() > 3, "no phis when region is created"); - if (new_entry == NULL) { + if (new_entry == nullptr) { // Attach if_cont to iff _igvn.replace_input_of(iff, 0, if_cont); - if (_idom != NULL) { + if (_idom != nullptr) { set_idom(iff, if_cont, dom_depth(iff)); } } @@ -263,7 +263,7 @@ Node* PhaseIdealLoop::clone_nodes_with_same_ctrl(Node* node, ProjNode* old_ctrl, Dict old_new_mapping = clone_nodes(nodes_with_same_ctrl); // Cloned but not rewired, yet rewire_cloned_nodes_to_ctrl(old_ctrl, new_ctrl, nodes_with_same_ctrl, old_new_mapping); Node* clone_phi_input = static_cast(old_new_mapping[node]); - assert(clone_phi_input != NULL && clone_phi_input->_idx >= last_idx, "must exist and be a proper clone"); + assert(clone_phi_input != nullptr && clone_phi_input->_idx >= last_idx, "must exist and be a proper clone"); return clone_phi_input; } @@ -304,7 +304,7 @@ void PhaseIdealLoop::rewire_inputs_of_clones_to_clones(Node* new_ctrl, Node* clo if (!in->is_Phi()) { assert(!in->is_CFG(), "must be data node"); Node* in_clone = static_cast(old_new_mapping[in]); - if (in_clone != NULL) { + if (in_clone != nullptr) { _igvn.replace_input_of(clone, i, in_clone); set_ctrl(clone, new_ctrl); } @@ -390,7 +390,7 @@ void PhaseIdealLoop::get_skeleton_predicates(Node* predicate, Unique_Node_List& assert(rgn->is_Region() || rgn->is_Call(), "must be a region or call uct"); assert(iff->in(1)->in(1)->Opcode() == Op_Opaque1, "unexpected predicate shape"); predicate = iff->in(0); - while (predicate != NULL && predicate->is_Proj() && predicate->in(0)->is_If()) { + while (predicate != nullptr && predicate->is_Proj() && predicate->in(0)->is_If()) { iff = predicate->in(0)->as_If(); uncommon_proj = iff->proj_out(1 - 
predicate->as_Proj()->_con); if (uncommon_proj->unique_ctrl_out() != rgn) { @@ -415,8 +415,8 @@ void PhaseIdealLoop::get_skeleton_predicates(Node* predicate, Unique_Node_List& ProjNode* PhaseIdealLoop::clone_skeleton_predicate_for_unswitched_loops(Node* iff, ProjNode* predicate, Deoptimization::DeoptReason reason, ProjNode* output_proj) { - Node* bol = clone_skeleton_predicate_bool(iff, NULL, NULL, output_proj); - ProjNode* proj = create_new_if_for_predicate(output_proj, NULL, reason, iff->Opcode(), + Node* bol = clone_skeleton_predicate_bool(iff, nullptr, nullptr, output_proj); + ProjNode* proj = create_new_if_for_predicate(output_proj, nullptr, reason, iff->Opcode(), false, predicate->is_IfTrue()); _igvn.replace_input_of(proj->in(0), 1, bol); _igvn.replace_input_of(output_proj->in(0), 0, proj); @@ -432,23 +432,23 @@ void PhaseIdealLoop::clone_predicates_to_unswitched_loop(IdealLoopTree* loop, No Node* entry = head->skip_strip_mined()->in(LoopNode::EntryControl); // Search original predicates - ProjNode* limit_check_proj = NULL; + ProjNode* limit_check_proj = nullptr; limit_check_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check); - if (limit_check_proj != NULL) { + if (limit_check_proj != nullptr) { entry = skip_loop_predicates(entry); } - ProjNode* profile_predicate_proj = NULL; - ProjNode* predicate_proj = NULL; + ProjNode* profile_predicate_proj = nullptr; + ProjNode* predicate_proj = nullptr; if (UseProfiledLoopPredicate) { profile_predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_profile_predicate); - if (profile_predicate_proj != NULL) { + if (profile_predicate_proj != nullptr) { entry = skip_loop_predicates(entry); } } if (UseLoopPredicate) { predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate); } - if (predicate_proj != NULL) { // right pattern that can be used by loop predication + if (predicate_proj != nullptr) { // right pattern that can be used by loop 
predication // clone predicate iffast_pred = clone_predicate_to_unswitched_loop(predicate_proj, iffast_pred, Deoptimization::Reason_predicate,false); ifslow_pred = clone_predicate_to_unswitched_loop(predicate_proj, ifslow_pred, Deoptimization::Reason_predicate,true); @@ -457,7 +457,7 @@ void PhaseIdealLoop::clone_predicates_to_unswitched_loop(IdealLoopTree* loop, No check_created_predicate_for_unswitching(iffast_pred); check_created_predicate_for_unswitching(ifslow_pred); } - if (profile_predicate_proj != NULL) { // right pattern that can be used by loop predication + if (profile_predicate_proj != nullptr) { // right pattern that can be used by loop predication // clone predicate iffast_pred = clone_predicate_to_unswitched_loop(profile_predicate_proj, iffast_pred,Deoptimization::Reason_profile_predicate, false); ifslow_pred = clone_predicate_to_unswitched_loop(profile_predicate_proj, ifslow_pred,Deoptimization::Reason_profile_predicate, true); @@ -466,7 +466,7 @@ void PhaseIdealLoop::clone_predicates_to_unswitched_loop(IdealLoopTree* loop, No check_created_predicate_for_unswitching(iffast_pred); check_created_predicate_for_unswitching(ifslow_pred); } - if (limit_check_proj != NULL && clone_limit_check) { + if (limit_check_proj != nullptr && clone_limit_check) { // Clone loop limit check last to insert it before loop. // Don't clone a limit check which was already finalized // for this counted loop (only one limit check is needed). 
@@ -480,7 +480,7 @@ void PhaseIdealLoop::clone_predicates_to_unswitched_loop(IdealLoopTree* loop, No #ifndef PRODUCT void PhaseIdealLoop::check_created_predicate_for_unswitching(const Node* new_entry) { - assert(new_entry != NULL, "IfTrue or IfFalse after clone predicate"); + assert(new_entry != nullptr, "IfTrue or IfFalse after clone predicate"); if (TraceLoopPredicate) { tty->print("Loop Predicate cloned: "); debug_only(new_entry->in(0)->dump();); @@ -497,7 +497,7 @@ Node* PhaseIdealLoop::skip_loop_predicates(Node* entry) { Node* rgn = uncommon_proj->unique_ctrl_out(); assert(rgn->is_Region() || rgn->is_Call(), "must be a region or call uct"); entry = entry->in(0)->in(0); - while (entry != NULL && entry->is_Proj() && entry->in(0)->is_If()) { + while (entry != nullptr && entry->is_Proj() && entry->in(0)->is_If()) { uncommon_proj = entry->in(0)->as_If()->proj_out(1 - entry->as_Proj()->_con); if (uncommon_proj->unique_ctrl_out() != rgn) break; @@ -531,12 +531,12 @@ ProjNode* PhaseIdealLoop::next_predicate(ProjNode* predicate) { //--------------------------find_predicate_insertion_point------------------- // Find a good location to insert a predicate ProjNode* PhaseIdealLoop::find_predicate_insertion_point(Node* start_c, Deoptimization::DeoptReason reason) { - if (start_c == NULL || !start_c->is_Proj()) - return NULL; + if (start_c == nullptr || !start_c->is_Proj()) + return nullptr; if (start_c->as_Proj()->is_uncommon_trap_if_pattern(reason)) { return start_c->as_Proj(); } - return NULL; + return nullptr; } //--------------------------Predicates::Predicates-------------------------- @@ -564,24 +564,24 @@ PhaseIdealLoop::Predicates::Predicates(Node* entry) { //--------------------------find_predicate------------------------------------ // Find a predicate Node* PhaseIdealLoop::find_predicate(Node* entry) { - Node* predicate = NULL; + Node* predicate = nullptr; predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check); - if (predicate 
!= NULL) { // right pattern that can be used by loop predication + if (predicate != nullptr) { // right pattern that can be used by loop predication return entry; } if (UseLoopPredicate) { predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate); - if (predicate != NULL) { // right pattern that can be used by loop predication + if (predicate != nullptr) { // right pattern that can be used by loop predication return entry; } } if (UseProfiledLoopPredicate) { predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_profile_predicate); - if (predicate != NULL) { // right pattern that can be used by loop predication + if (predicate != nullptr) { // right pattern that can be used by loop predication return entry; } } - return NULL; + return nullptr; } //------------------------------Invariance----------------------------------- @@ -594,7 +594,7 @@ class Invariance : public StackObj { Node_List _old_new; // map of old to new (clone) IdealLoopTree* _lpt; PhaseIdealLoop* _phase; - Node* _data_dependency_on; // The projection into the loop on which data nodes are dependent or NULL otherwise + Node* _data_dependency_on; // The projection into the loop on which data nodes are dependent or null otherwise // Helper function to set up the invariance for invariance computation // If n is a known invariant, set up directly. Otherwise, look up the @@ -606,7 +606,7 @@ class Invariance : public StackObj { Node *n_ctrl = _phase->ctrl_or_self(n); Node *u_ctrl = _phase->ctrl_or_self(use); // self if use is a CFG if (_phase->is_dominator(n_ctrl, u_ctrl)) { - _stack.push(n, n->in(0) == NULL ? 1 : 0); + _stack.push(n, n->in(0) == nullptr ? 
1 : 0); } } } @@ -625,7 +625,7 @@ class Invariance : public StackObj { bool all_inputs_invariant = true; for (uint i = 0; i < n->req(); i++) { Node* in = n->in(i); - if (in == NULL) continue; + if (in == nullptr) continue; assert(_visited.test(in->_idx), "must have visited input"); if (!_invariant.test(in->_idx)) { // bad guy all_inputs_invariant = false; @@ -637,14 +637,14 @@ class Invariance : public StackObj { // loop, it was marked invariant but n is only invariant if // it depends only on that test. Otherwise, unless that test // is out of the loop, it's not invariant. - if (n->is_CFG() || n->depends_only_on_test() || n->in(0) == NULL || !_phase->is_member(_lpt, n->in(0))) { + if (n->is_CFG() || n->depends_only_on_test() || n->in(0) == nullptr || !_phase->is_member(_lpt, n->in(0))) { _invariant.set(n->_idx); // I am a invariant too } } } else { // process next input _stack.set_index(idx + 1); Node* m = n->in(idx); - if (m != NULL && !_visited.test_set(m->_idx)) { + if (m != nullptr && !_visited.test_set(m->_idx)) { visit(n, m); } } @@ -660,7 +660,7 @@ class Invariance : public StackObj { _old_new.map(n->_idx, n); } else { // to be cloned assert(!n->is_CFG(), "should not see CFG here"); - _stack.push(n, n->in(0) == NULL ? 1 : 0); + _stack.push(n, n->in(0) == nullptr ? 
1 : 0); } } @@ -678,13 +678,13 @@ class Invariance : public StackObj { _phase->register_new_node(n_cl, ctrl); for (uint i = 0; i < n->req(); i++) { Node* in = n_cl->in(i); - if (in == NULL) continue; + if (in == nullptr) continue; n_cl->set_req(i, _old_new[in->_idx]); } } else { // process next input _stack.set_index(idx + 1); Node* m = n->in(idx); - if (m != NULL && !_clone_visited.test_set(m->_idx)) { + if (m != nullptr && !_clone_visited.test_set(m->_idx)) { clone_visit(m); // visit the input } } @@ -697,7 +697,7 @@ class Invariance : public StackObj { _stack(area, 10 /* guess */), _clone_visited(area), _old_new(area), _lpt(lpt), _phase(lpt->_phase), - _data_dependency_on(NULL) + _data_dependency_on(nullptr) { LoopNode* head = _lpt->_head->as_Loop(); Node* entry = head->skip_strip_mined()->in(LoopNode::EntryControl); @@ -730,7 +730,7 @@ class Invariance : public StackObj { } // Did we explicitly mark some nodes non-loop-invariant? If so, return the entry node on which some data nodes - // are dependent that prevent loop predication. Otherwise, return NULL. + // are dependent that prevent loop predication. Otherwise, return null. Node* data_dependency_on() { return _data_dependency_on; } @@ -786,7 +786,7 @@ bool IdealLoopTree::is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, BasicT range = cmp->in(2); if (range->Opcode() != Op_LoadRange) { const TypeInteger* tinteger = phase->_igvn.type(range)->isa_integer(bt); - if (tinteger == NULL || tinteger->empty() || tinteger->lo_as_long() < 0) { + if (tinteger == nullptr || tinteger->empty() || tinteger->lo_as_long() < 0) { // Allow predication on positive values that aren't LoadRanges. 
// This allows optimization of loops where the length of the // array is a known value and doesn't need to be loaded back @@ -797,7 +797,7 @@ bool IdealLoopTree::is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, BasicT assert(bt == T_INT, "no LoadRange for longs"); } scale = 0; - offset = NULL; + offset = nullptr; if (!phase->is_scaled_iv_plus_offset(cmp->in(1), iv, bt, &scale, &offset)) { return false; } @@ -805,8 +805,8 @@ bool IdealLoopTree::is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, BasicT } bool IdealLoopTree::is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, Invariance& invar DEBUG_ONLY(COMMA ProjNode *predicate_proj)) const { - Node* range = NULL; - Node* offset = NULL; + Node* range = nullptr; + Node* offset = nullptr; jlong scale = 0; Node* iv = _head->as_BaseCountedLoop()->phi(); Compile* C = Compile::current(); @@ -817,12 +817,12 @@ bool IdealLoopTree::is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, Invari if (!invar.is_invariant(range)) { return false; } - if (offset != NULL) { + if (offset != nullptr) { if (!invar.is_invariant(offset)) { // offset must be invariant return false; } Node* data_dependency_on = invar.data_dependency_on(); - if (data_dependency_on != NULL && old_unique_idx < C->unique()) { + if (data_dependency_on != nullptr && old_unique_idx < C->unique()) { // 'offset' node was newly created in is_range_check_if(). Check that it does not depend on the entry projection // into the loop. If it does, we cannot perform loop predication (see Invariant::Invariant()). assert(!offset->is_CFG(), "offset must be a data node"); @@ -868,21 +868,21 @@ BoolNode* PhaseIdealLoop::rc_predicate(IdealLoopTree *loop, Node* ctrl, int scale, Node* offset, Node* init, Node* limit, jint stride, Node* range, bool upper, bool &overflow, bool negate) { - jint con_limit = (limit != NULL && limit->is_Con()) ? limit->get_int() : 0; + jint con_limit = (limit != nullptr && limit->is_Con()) ? limit->get_int() : 0; jint con_init = init->is_Con() ? 
init->get_int() : 0; jint con_offset = offset->is_Con() ? offset->get_int() : 0; - stringStream* predString = NULL; + stringStream* predString = nullptr; if (TraceLoopPredicate) { predString = new (mtCompiler) stringStream(); predString->print("rc_predicate "); } overflow = false; - Node* max_idx_expr = NULL; + Node* max_idx_expr = nullptr; const TypeInt* idx_type = TypeInt::INT; if ((stride > 0) == (scale > 0) == upper) { - guarantee(limit != NULL, "sanity"); + guarantee(limit != nullptr, "sanity"); if (TraceLoopPredicate) { if (limit->is_Con()) { predString->print("(%d ", con_limit); @@ -983,7 +983,7 @@ BoolNode* PhaseIdealLoop::rc_predicate(IdealLoopTree *loop, Node* ctrl, register_new_node(max_idx_expr, ctrl); } - CmpNode* cmp = NULL; + CmpNode* cmp = nullptr; if (overflow) { // Integer expressions may overflow, do long comparison range = new ConvI2LNode(range); @@ -1011,7 +1011,7 @@ bool PhaseIdealLoop::loop_predication_should_follow_branches(IdealLoopTree *loop return false; } - if (predicate_proj == NULL) { + if (predicate_proj == nullptr) { return false; } @@ -1019,15 +1019,15 @@ bool PhaseIdealLoop::loop_predication_should_follow_branches(IdealLoopTree *loop bool follow_branches = true; IdealLoopTree* l = loop->_child; // For leaf loops and loops with a single inner loop - while (l != NULL && follow_branches) { + while (l != nullptr && follow_branches) { IdealLoopTree* child = l; - if (child->_child != NULL && + if (child->_child != nullptr && child->_head->is_OuterStripMinedLoop()) { - assert(child->_child->_next == NULL, "only one inner loop for strip mined loop"); + assert(child->_child->_next == nullptr, "only one inner loop for strip mined loop"); assert(child->_child->_head->is_CountedLoop() && child->_child->_head->as_CountedLoop()->is_strip_mined(), "inner loop should be strip mined"); child = child->_child; } - if (child->_child != NULL || child->_irreducible) { + if (child->_child != nullptr || child->_irreducible) { follow_branches = false; } l 
= l->_next; @@ -1040,7 +1040,7 @@ bool PhaseIdealLoop::loop_predication_should_follow_branches(IdealLoopTree *loop loop_trip_cnt = head->profile_trip_cnt(); if (head->is_CountedLoop()) { CountedLoopNode* cl = head->as_CountedLoop(); - if (cl->phi() != NULL) { + if (cl->phi() != nullptr) { const TypeInt* t = _igvn.type(cl->phi())->is_int(); float worst_case_trip_cnt = ((float)t->_hi - t->_lo) / ABS(cl->stride_con()); if (worst_case_trip_cnt < loop_trip_cnt) { @@ -1202,7 +1202,7 @@ float PathFrequency::to(Node* n) { assert(con >= CatchProjNode::catch_all_index, "what else?"); _freqs.at_put_grow(c->_idx, 0, -1); } - } else if (c->unique_ctrl_out_or_null() == NULL && !c->is_If() && !c->is_Jump()) { + } else if (c->unique_ctrl_out_or_null() == nullptr && !c->is_If() && !c->is_Jump()) { ShouldNotReachHere(); } else { c = c->in(0); @@ -1261,7 +1261,7 @@ bool PhaseIdealLoop::loop_predication_impl_helper(IdealLoopTree *loop, ProjNode* CountedLoopNode *cl, ConNode* zero, Invariance& invar, Deoptimization::DeoptReason reason) { // Following are changed to nonnull when a predicate can be hoisted - ProjNode* new_predicate_proj = NULL; + ProjNode* new_predicate_proj = nullptr; IfNode* iff = proj->in(0)->as_If(); Node* test = iff->in(1); if (!test->is_Bool()){ //Conv2B, ... 
@@ -1270,7 +1270,7 @@ bool PhaseIdealLoop::loop_predication_impl_helper(IdealLoopTree *loop, ProjNode* BoolNode* bol = test->as_Bool(); if (invar.is_invariant(bol)) { // Invariant test - new_predicate_proj = create_new_if_for_predicate(predicate_proj, NULL, + new_predicate_proj = create_new_if_for_predicate(predicate_proj, nullptr, reason, iff->Opcode()); Node* ctrl = new_predicate_proj->in(0)->as_If()->in(0); @@ -1295,7 +1295,7 @@ bool PhaseIdealLoop::loop_predication_impl_helper(IdealLoopTree *loop, ProjNode* loop->dump_head(); } #endif - } else if (cl != NULL && loop->is_range_check_if(iff, this, invar DEBUG_ONLY(COMMA predicate_proj))) { + } else if (cl != nullptr && loop->is_range_check_if(iff, this, invar DEBUG_ONLY(COMMA predicate_proj))) { // Range check for counted loops const Node* cmp = bol->in(1)->as_Cmp(); Node* idx = cmp->in(1); @@ -1336,7 +1336,7 @@ bool PhaseIdealLoop::loop_predication_impl_helper(IdealLoopTree *loop, ProjNode* // Test the lower bound BoolNode* lower_bound_bol = rc_predicate(loop, ctrl, scale, offset, init, limit, stride, rng, false, overflow, negate); - ProjNode* lower_bound_proj = create_new_if_for_predicate(predicate_proj, NULL, reason, overflow ? Op_If : iff->Opcode()); + ProjNode* lower_bound_proj = create_new_if_for_predicate(predicate_proj, nullptr, reason, overflow ? Op_If : iff->Opcode()); IfNode* lower_bound_iff = lower_bound_proj->in(0)->as_If(); _igvn.hash_delete(lower_bound_iff); lower_bound_iff->set_req(1, lower_bound_bol); @@ -1345,7 +1345,7 @@ bool PhaseIdealLoop::loop_predication_impl_helper(IdealLoopTree *loop, ProjNode* // Test the upper bound BoolNode* upper_bound_bol = rc_predicate(loop, lower_bound_proj, scale, offset, init, limit, stride, rng, true, overflow, negate); - ProjNode* upper_bound_proj = create_new_if_for_predicate(predicate_proj, NULL, reason, overflow ? Op_If : iff->Opcode()); + ProjNode* upper_bound_proj = create_new_if_for_predicate(predicate_proj, nullptr, reason, overflow ? 
Op_If : iff->Opcode()); assert(upper_bound_proj->in(0)->as_If()->in(0) == lower_bound_proj, "should dominate"); IfNode* upper_bound_iff = upper_bound_proj->in(0)->as_If(); _igvn.hash_delete(upper_bound_iff); @@ -1369,7 +1369,7 @@ bool PhaseIdealLoop::loop_predication_impl_helper(IdealLoopTree *loop, ProjNode* // with uncommon trap. return false; } - assert(new_predicate_proj != NULL, "sanity"); + assert(new_predicate_proj != nullptr, "sanity"); // Success - attach condition (new_predicate_bol) to predicate if invar.map_ctrl(proj, new_predicate_proj); // so that invariance test can be appropriate @@ -1401,7 +1401,7 @@ ProjNode* PhaseIdealLoop::insert_initial_skeleton_predicate(IfNode* iff, IdealLo Node* opaque_bol = new Opaque4Node(C, bol, _igvn.intcon(1)); // This will go away once loop opts are over C->add_skeleton_predicate_opaq(opaque_bol); register_new_node(opaque_bol, upper_bound_proj); - ProjNode* new_proj = create_new_if_for_predicate(predicate_proj, NULL, reason, overflow ? Op_If : iff->Opcode()); + ProjNode* new_proj = create_new_if_for_predicate(predicate_proj, nullptr, reason, overflow ? Op_If : iff->Opcode()); _igvn.replace_input_of(new_proj->in(0), 1, opaque_bol); assert(opaque_init->outcnt() > 0, "should be used"); @@ -1423,7 +1423,7 @@ ProjNode* PhaseIdealLoop::insert_initial_skeleton_predicate(IfNode* iff, IdealLo opaque_bol = new Opaque4Node(C, bol, _igvn.intcon(1)); C->add_skeleton_predicate_opaq(opaque_bol); register_new_node(opaque_bol, new_proj); - new_proj = create_new_if_for_predicate(predicate_proj, NULL, reason, overflow ? Op_If : iff->Opcode()); + new_proj = create_new_if_for_predicate(predicate_proj, nullptr, reason, overflow ? 
Op_If : iff->Opcode()); _igvn.replace_input_of(new_proj->in(0), 1, opaque_bol); assert(max_value->outcnt() > 0, "should be used"); assert(skeleton_predicate_has_opaque(new_proj->in(0)->as_If()), "unexpected"); @@ -1451,7 +1451,7 @@ bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) { return false; } - CountedLoopNode *cl = NULL; + CountedLoopNode *cl = nullptr; if (head->is_valid_counted_loop(T_INT)) { cl = head->as_CountedLoop(); // do nothing for iteration-splitted loops @@ -1459,21 +1459,21 @@ bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) { // Avoid RCE if Counted loop's test is '!='. BoolTest::mask bt = cl->loopexit()->test_trip(); if (bt != BoolTest::lt && bt != BoolTest::gt) - cl = NULL; + cl = nullptr; } Node* entry = head->skip_strip_mined()->in(LoopNode::EntryControl); - ProjNode *loop_limit_proj = NULL; - ProjNode *predicate_proj = NULL; - ProjNode *profile_predicate_proj = NULL; + ProjNode *loop_limit_proj = nullptr; + ProjNode *predicate_proj = nullptr; + ProjNode *profile_predicate_proj = nullptr; // Loop limit check predicate should be near the loop. 
loop_limit_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check); - if (loop_limit_proj != NULL) { + if (loop_limit_proj != nullptr) { entry = skip_loop_predicates(loop_limit_proj); } bool has_profile_predicates = false; profile_predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_profile_predicate); - if (profile_predicate_proj != NULL) { + if (profile_predicate_proj != nullptr) { Node* n = skip_loop_predicates(entry); // Check if predicates were already added to the profile predicate // block @@ -1488,7 +1488,7 @@ bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) { bool follow_branches = loop_predication_should_follow_branches(loop, profile_predicate_proj, loop_trip_cnt); assert(!follow_branches || loop_trip_cnt >= 0, "negative trip count?"); - if (predicate_proj == NULL && !follow_branches) { + if (predicate_proj == nullptr && !follow_branches) { #ifndef PRODUCT if (TraceLoopPredicate) { tty->print("missing predicate:"); @@ -1537,7 +1537,7 @@ bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) { IfNode* iff = proj->in(0)->as_If(); CallStaticJavaNode* call = proj->is_uncommon_trap_if_pattern(Deoptimization::Reason_none); - if (call == NULL) { + if (call == nullptr) { if (loop->is_loop_exit(iff)) { // stop processing the remaining projs in the list because the execution of them // depends on the condition of "iff" (iff->in(1)). 
@@ -1558,7 +1558,7 @@ bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) { break; } - if (predicate_proj != NULL) { + if (predicate_proj != nullptr) { hoisted = loop_predication_impl_helper(loop, proj, predicate_proj, cl, zero, invar, Deoptimization::Reason_predicate) | hoisted; } } // end while diff --git a/src/hotspot/share/opto/loopTransform.cpp b/src/hotspot/share/opto/loopTransform.cpp index 9103da6d3fc..cb704b076a4 100644 --- a/src/hotspot/share/opto/loopTransform.cpp +++ b/src/hotspot/share/opto/loopTransform.cpp @@ -44,10 +44,10 @@ #include "runtime/stubRoutines.hpp" //------------------------------is_loop_exit----------------------------------- -// Given an IfNode, return the loop-exiting projection or NULL if both +// Given an IfNode, return the loop-exiting projection or null if both // arms remain in the loop. Node *IdealLoopTree::is_loop_exit(Node *iff) const { - if (iff->outcnt() != 2) return NULL; // Ignore partially dead tests + if (iff->outcnt() != 2) return nullptr; // Ignore partially dead tests PhaseIdealLoop *phase = _phase; // Test is an IfNode, has 2 projections. If BOTH are in the loop // we need loop unswitching instead of peeling. 
@@ -55,7 +55,7 @@ Node *IdealLoopTree::is_loop_exit(Node *iff) const { return iff->raw_out(0); if (!is_member(phase->get_loop(iff->raw_out(1)))) return iff->raw_out(1); - return NULL; + return nullptr; } @@ -73,19 +73,19 @@ void IdealLoopTree::record_for_igvn() { if (_head->is_CountedLoop() && _head->as_Loop()->is_strip_mined()) { CountedLoopNode* l = _head->as_CountedLoop(); Node* outer_loop = l->outer_loop(); - assert(outer_loop != NULL, "missing piece of strip mined loop"); + assert(outer_loop != nullptr, "missing piece of strip mined loop"); _phase->_igvn._worklist.push(outer_loop); Node* outer_loop_tail = l->outer_loop_tail(); - assert(outer_loop_tail != NULL, "missing piece of strip mined loop"); + assert(outer_loop_tail != nullptr, "missing piece of strip mined loop"); _phase->_igvn._worklist.push(outer_loop_tail); Node* outer_loop_end = l->outer_loop_end(); - assert(outer_loop_end != NULL, "missing piece of strip mined loop"); + assert(outer_loop_end != nullptr, "missing piece of strip mined loop"); _phase->_igvn._worklist.push(outer_loop_end); Node* outer_safepoint = l->outer_safepoint(); - assert(outer_safepoint != NULL, "missing piece of strip mined loop"); + assert(outer_safepoint != nullptr, "missing piece of strip mined loop"); _phase->_igvn._worklist.push(outer_safepoint); Node* cle_out = _head->as_CountedLoop()->loopexit()->proj_out(false); - assert(cle_out != NULL, "missing piece of strip mined loop"); + assert(cle_out != nullptr, "missing piece of strip mined loop"); _phase->_igvn._worklist.push(cle_out); } } @@ -115,7 +115,7 @@ void IdealLoopTree::compute_trip_count(PhaseIdealLoop* phase) { Node* init_n = cl->init_trip(); Node* limit_n = cl->limit(); - if (init_n != NULL && limit_n != NULL) { + if (init_n != nullptr && limit_n != nullptr) { // Use longs to avoid integer overflow. 
int stride_con = cl->stride_con(); const TypeInt* init_type = phase->_igvn.type(init_n)->is_int(); @@ -208,7 +208,7 @@ void IdealLoopTree::compute_profile_trip_cnt(PhaseIdealLoop *phase) { // Now compute a loop exit count float loop_exit_cnt = 0.0f; - if (_child == NULL) { + if (_child == nullptr) { for (uint i = 0; i < _body.size(); i++) { Node *n = _body[i]; loop_exit_cnt += compute_profile_trip_cnt_helper(n); @@ -264,10 +264,10 @@ int IdealLoopTree::find_invariant(Node* n, PhaseIdealLoop *phase) { //---------------------is_associative----------------------------- // Return TRUE if "n" is an associative binary node. If "base" is -// not NULL, "n" must be re-associative with it. +// not null, "n" must be re-associative with it. bool IdealLoopTree::is_associative(Node* n, Node* base) { int op = n->Opcode(); - if (base != NULL) { + if (base != nullptr) { assert(is_associative(base), "Base node should be associative"); int base_op = base->Opcode(); if (base_op == Op_AddI || base_op == Op_SubI) { @@ -319,7 +319,7 @@ Node* IdealLoopTree::reassociate_add_sub(Node* n1, int inv1_idx, int inv2_idx, P neg_inv2 = !neg_inv2; } - bool is_int = n1->bottom_type()->isa_int() != NULL; + bool is_int = n1->bottom_type()->isa_int() != nullptr; Node* inv1_c = phase->get_ctrl(inv1); Node* n_inv1; if (neg_inv1) { @@ -375,21 +375,21 @@ Node* IdealLoopTree::reassociate_add_sub(Node* n1, int inv1_idx, int inv2_idx, P // inv1 op (x op inv2) => (inv1 op inv2) op x // Node* IdealLoopTree::reassociate(Node* n1, PhaseIdealLoop *phase) { - if (!is_associative(n1) || n1->outcnt() == 0) return NULL; - if (is_invariant(n1)) return NULL; + if (!is_associative(n1) || n1->outcnt() == 0) return nullptr; + if (is_invariant(n1)) return nullptr; // Don't mess with add of constant (igvn moves them to expression tree root.) 
- if (n1->is_Add() && n1->in(2)->is_Con()) return NULL; + if (n1->is_Add() && n1->in(2)->is_Con()) return nullptr; int inv1_idx = find_invariant(n1, phase); - if (!inv1_idx) return NULL; + if (!inv1_idx) return nullptr; Node* n2 = n1->in(3 - inv1_idx); - if (!is_associative(n2, n1)) return NULL; + if (!is_associative(n2, n1)) return nullptr; int inv2_idx = find_invariant(n2, phase); - if (!inv2_idx) return NULL; + if (!inv2_idx) return nullptr; - if (!phase->may_require_nodes(10, 10)) return NULL; + if (!phase->may_require_nodes(10, 10)) return nullptr; - Node* result = NULL; + Node* result = nullptr; switch (n1->Opcode()) { case Op_AddI: case Op_AddL: @@ -417,7 +417,7 @@ Node* IdealLoopTree::reassociate(Node* n1, PhaseIdealLoop *phase) { ShouldNotReachHere(); } - assert(result != NULL, ""); + assert(result != nullptr, ""); phase->register_new_node(result, phase->get_ctrl(n1)); phase->_igvn.replace_node(n1, result); assert(phase->get_loop(phase->get_ctrl(n1)) == this, ""); @@ -432,7 +432,7 @@ void IdealLoopTree::reassociate_invariants(PhaseIdealLoop *phase) { Node *n = _body.at(i); for (int j = 0; j < 5; j++) { Node* nn = reassociate(n, phase); - if (nn == NULL) break; + if (nn == nullptr) break; n = nn; // again } } @@ -515,12 +515,12 @@ void PhaseIdealLoop::peeled_dom_test_elim(IdealLoopTree* loop, Node_List& old_ne Node* test = prev->in(0); while (test != loop->_head) { // Scan till run off top of loop int p_op = prev->Opcode(); - assert(test != NULL, "test cannot be NULL"); - Node* test_cond = NULL; + assert(test != nullptr, "test cannot be null"); + Node* test_cond = nullptr; if ((p_op == Op_IfFalse || p_op == Op_IfTrue) && test->is_If()) { test_cond = test->in(1); } - if (test_cond != NULL && // Test? + if (test_cond != nullptr && // Test? !test_cond->is_Con() && // And not already obvious? // And condition is not a member of this loop? 
!loop->is_member(get_loop(get_ctrl(test_cond)))) { @@ -950,14 +950,14 @@ bool IdealLoopTree::policy_unroll(PhaseIdealLoop *phase) { Node *init_n = cl->init_trip(); Node *limit_n = cl->limit(); - if (limit_n == NULL) return false; // We will dereference it below. + if (limit_n == nullptr) return false; // We will dereference it below. // Non-constant bounds. // Protect against over-unrolling when init or/and limit are not constant // (so that trip_count's init value is maxint) but iv range is known. - if (init_n == NULL || !init_n->is_Con() || !limit_n->is_Con()) { + if (init_n == nullptr || !init_n->is_Con() || !limit_n->is_Con()) { Node* phi = cl->phi(); - if (phi != NULL) { + if (phi != nullptr) { assert(phi->is_Phi() && phi->in(0) == _head, "Counted loop should have iv phi."); const TypeInt* iv_type = phase->_igvn.type(phi)->is_int(); int next_stride = stride_con * 2; // stride after this unroll @@ -1172,8 +1172,8 @@ bool IdealLoopTree::policy_range_check(PhaseIdealLoop* phase, bool provisional, // Try to pattern match with either cmp inputs, do not check // whether one of the inputs is loop independent as it may not // have had a chance to be hoisted yet. 
- if (!phase->is_scaled_iv_plus_offset(cmp->in(1), trip_counter, bt, NULL, NULL) && - !phase->is_scaled_iv_plus_offset(cmp->in(2), trip_counter, bt, NULL, NULL)) { + if (!phase->is_scaled_iv_plus_offset(cmp->in(1), trip_counter, bt, nullptr, nullptr) && + !phase->is_scaled_iv_plus_offset(cmp->in(2), trip_counter, bt, nullptr, nullptr)) { continue; } } else { @@ -1193,7 +1193,7 @@ bool IdealLoopTree::policy_range_check(PhaseIdealLoop* phase, bool provisional, } } - if (!phase->is_scaled_iv_plus_offset(rc_exp, trip_counter, bt, NULL, NULL)) { + if (!phase->is_scaled_iv_plus_offset(rc_exp, trip_counter, bt, nullptr, nullptr)) { continue; } } @@ -1244,13 +1244,13 @@ Node *PhaseIdealLoop::clone_up_backedge_goo(Node *back_ctrl, Node *preheader_ctr // Only visit once if (visited.test_set(n->_idx)) { Node *x = clones.find(n->_idx); - return (x != NULL) ? x : n; + return (x != nullptr) ? x : n; } - Node *x = NULL; // If required, a clone of 'n' + Node *x = nullptr; // If required, a clone of 'n' // Check for 'n' being pinned in the backedge. 
if (n->in(0) && n->in(0) == back_ctrl) { - assert(clones.find(n->_idx) == NULL, "dead loop"); + assert(clones.find(n->_idx) == nullptr, "dead loop"); x = n->clone(); // Clone a copy of 'n' to preheader clones.push(x, n->_idx); x->set_req(0, preheader_ctrl); // Fix x's control input to preheader @@ -1263,7 +1263,7 @@ Node *PhaseIdealLoop::clone_up_backedge_goo(Node *back_ctrl, Node *preheader_ctr Node *g = clone_up_backedge_goo(back_ctrl, preheader_ctrl, n->in(i), visited, clones); if (g != n->in(i)) { if (!x) { - assert(clones.find(n->_idx) == NULL, "dead loop"); + assert(clones.find(n->_idx) == nullptr, "dead loop"); x = n->clone(); clones.push(x, n->_idx); } @@ -1290,19 +1290,19 @@ Node* PhaseIdealLoop::cast_incr_before_loop(Node* incr, Node* ctrl, Node* loop) return castii; } } - return NULL; + return nullptr; } #ifdef ASSERT void PhaseIdealLoop::ensure_zero_trip_guard_proj(Node* node, bool is_main_loop) { assert(node->is_IfProj(), "must be the zero trip guard If node"); Node* zer_bol = node->in(0)->in(1); - assert(zer_bol != NULL && zer_bol->is_Bool(), "must be Bool"); + assert(zer_bol != nullptr && zer_bol->is_Bool(), "must be Bool"); Node* zer_cmp = zer_bol->in(1); - assert(zer_cmp != NULL && zer_cmp->Opcode() == Op_CmpI, "must be CmpI"); + assert(zer_cmp != nullptr && zer_cmp->Opcode() == Op_CmpI, "must be CmpI"); // For the main loop, the opaque node is the second input to zer_cmp, for the post loop it's the first input node Node* zer_opaq = zer_cmp->in(is_main_loop ? 
2 : 1); - assert(zer_opaq != NULL && zer_opaq->Opcode() == Op_OpaqueZeroTripGuard, "must be OpaqueZeroTripGuard"); + assert(zer_opaq != nullptr && zer_opaq->Opcode() == Op_OpaqueZeroTripGuard, "must be OpaqueZeroTripGuard"); } #endif @@ -1319,7 +1319,7 @@ void PhaseIdealLoop::copy_skeleton_predicates_to_main_loop_helper(Node* predicat uint dd_main_head, const uint idx_before_pre_post, const uint idx_after_post_before_pre, Node* zero_trip_guard_proj_main, Node* zero_trip_guard_proj_post, const Node_List &old_new) { - if (predicate != NULL) { + if (predicate != nullptr) { #ifdef ASSERT ensure_zero_trip_guard_proj(zero_trip_guard_proj_main, true); ensure_zero_trip_guard_proj(zero_trip_guard_proj_post, false); @@ -1337,7 +1337,7 @@ void PhaseIdealLoop::copy_skeleton_predicates_to_main_loop_helper(Node* predicat Node* opaque_stride = new OpaqueLoopStrideNode(C, stride); register_new_node(opaque_stride, outer_main_head->in(LoopNode::EntryControl)); - while (predicate != NULL && predicate->is_Proj() && predicate->in(0)->is_If()) { + while (predicate != nullptr && predicate->is_Proj() && predicate->in(0)->is_If()) { iff = predicate->in(0)->as_If(); uncommon_proj = iff->proj_out(1 - predicate->as_Proj()->_con); if (uncommon_proj->unique_ctrl_out() != rgn) @@ -1347,7 +1347,7 @@ void PhaseIdealLoop::copy_skeleton_predicates_to_main_loop_helper(Node* predicat // Clone the skeleton predicate twice and initialize one with the initial // value of the loop induction variable. Leave the other predicate // to be initialized when increasing the stride during loop unrolling. 
- prev_proj = clone_skeleton_predicate_and_initialize(iff, opaque_init, NULL, predicate, uncommon_proj, + prev_proj = clone_skeleton_predicate_and_initialize(iff, opaque_init, nullptr, predicate, uncommon_proj, current_proj, outer_loop, prev_proj); assert(skeleton_predicate_has_opaque(prev_proj->in(0)->as_If()), ""); @@ -1363,7 +1363,7 @@ void PhaseIdealLoop::copy_skeleton_predicates_to_main_loop_helper(Node* predicat // Change the control if 'loop_node' is part of the main loop. If there is an old->new mapping and the index of // 'pre_loop_node' is greater than idx_before_pre_post, then we know that 'loop_node' was cloned and is part of // the main loop (and 'pre_loop_node' is part of the pre loop). - if (!loop_node->is_CFG() && (pre_loop_node != NULL && pre_loop_node->_idx > idx_after_post_before_pre)) { + if (!loop_node->is_CFG() && (pre_loop_node != nullptr && pre_loop_node->_idx > idx_after_post_before_pre)) { // 'loop_node' is a data node and part of the main loop. Rewire the control to the projection of the zero-trip guard if node // of the main loop that is immediately preceding the cloned predicates. _igvn.replace_input_of(loop_node, 0, zero_trip_guard_proj_main); @@ -1371,7 +1371,7 @@ void PhaseIdealLoop::copy_skeleton_predicates_to_main_loop_helper(Node* predicat } else if (loop_node->_idx > idx_before_pre_post && loop_node->_idx < idx_after_post_before_pre) { // 'loop_node' is a data node and part of the post loop. Rewire the control to the projection of the zero-trip guard if node // of the post loop that is immediately preceding the post loop header node (there are no cloned predicates for the post loop). 
- assert(pre_loop_node == NULL, "a node belonging to the post loop should not have an old_new mapping at this stage"); + assert(pre_loop_node == nullptr, "a node belonging to the post loop should not have an old_new mapping at this stage"); _igvn.replace_input_of(loop_node, 0, zero_trip_guard_proj_post); --i; } @@ -1442,7 +1442,7 @@ bool PhaseIdealLoop::skeleton_predicate_has_opaque(IfNode* iff) { } else { for (uint j = 1; j < n->req(); j++) { Node* m = n->in(j); - if (m != NULL) { + if (m != nullptr) { wq.push(m); } } @@ -1466,7 +1466,7 @@ void PhaseIdealLoop::count_opaque_loop_nodes(Node* n, uint& init, uint& stride) if (skeleton_follow_inputs(n)) { for (uint j = 1; j < n->req(); j++) { Node* m = n->in(j); - if (m != NULL) { + if (m != nullptr) { wq.push(m); } } @@ -1482,14 +1482,14 @@ void PhaseIdealLoop::count_opaque_loop_nodes(Node* n, uint& init, uint& stride) // Clone the skeleton predicate bool for a main or unswitched loop: // Main loop: Set new_init and new_stride nodes as new inputs. -// Unswitched loop: new_init and new_stride are both NULL. Clone OpaqueLoopInit and OpaqueLoopStride instead. +// Unswitched loop: new_init and new_stride are both null. Clone OpaqueLoopInit and OpaqueLoopStride instead. Node* PhaseIdealLoop::clone_skeleton_predicate_bool(Node* iff, Node* new_init, Node* new_stride, Node* control) { Node_Stack to_clone(2); to_clone.push(iff->in(1), 1); uint current = C->unique(); - Node* result = NULL; - bool is_unswitched_loop = new_init == NULL && new_stride == NULL; - assert(new_init != NULL || is_unswitched_loop, "new_init must be set when new_stride is non-null"); + Node* result = nullptr; + bool is_unswitched_loop = new_init == nullptr && new_stride == nullptr; + assert(new_init != nullptr || is_unswitched_loop, "new_init must be set when new_stride is non-null"); // Look for the opaque node to replace with the new value // and clone everything in between. 
We keep the Opaque4 node // so the duplicated predicates are eliminated once loop @@ -1510,18 +1510,18 @@ Node* PhaseIdealLoop::clone_skeleton_predicate_bool(Node* iff, Node* new_init, N } int op = m->Opcode(); if (op == Op_OpaqueLoopInit) { - if (is_unswitched_loop && m->_idx < current && new_init == NULL) { + if (is_unswitched_loop && m->_idx < current && new_init == nullptr) { new_init = m->clone(); register_new_node(new_init, control); } n->set_req(i, new_init); } else { assert(op == Op_OpaqueLoopStride, "unexpected opaque node"); - if (is_unswitched_loop && m->_idx < current && new_stride == NULL) { + if (is_unswitched_loop && m->_idx < current && new_stride == nullptr) { new_stride = m->clone(); register_new_node(new_stride, control); } - if (new_stride != NULL) { + if (new_stride != nullptr) { n->set_req(i, new_stride); } } @@ -1551,9 +1551,9 @@ Node* PhaseIdealLoop::clone_skeleton_predicate_bool(Node* iff, Node* new_init, N next->set_req(j, cur); } } - } while (result == NULL); + } while (result == nullptr); assert(result->_idx >= current, "new node expected"); - assert(!is_unswitched_loop || new_init != NULL, "new_init must always be found and cloned"); + assert(!is_unswitched_loop || new_init != nullptr, "new_init must always be found and cloned"); return result; } @@ -1589,15 +1589,15 @@ void PhaseIdealLoop::copy_skeleton_predicates_to_main_loop(CountedLoopNode* pre_ Node* zero_trip_guard_proj_post, const Node_List &old_new) { if (UseLoopPredicate) { Node* entry = pre_head->in(LoopNode::EntryControl); - Node* predicate = NULL; + Node* predicate = nullptr; predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check); - if (predicate != NULL) { + if (predicate != nullptr) { entry = skip_loop_predicates(entry); } - Node* profile_predicate = NULL; + Node* profile_predicate = nullptr; if (UseProfiledLoopPredicate) { profile_predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_profile_predicate); - if 
(profile_predicate != NULL) { + if (profile_predicate != nullptr) { entry = skip_loop_predicates(entry); } } @@ -1658,7 +1658,7 @@ void PhaseIdealLoop::insert_pre_post_loops(IdealLoopTree *loop, Node_List &old_n // Add the post loop const uint idx_before_pre_post = Compile::current()->unique(); - CountedLoopNode *post_head = NULL; + CountedLoopNode *post_head = nullptr; Node* post_incr = incr; Node* main_exit = insert_post_loop(loop, old_new, main_head, main_end, post_incr, limit, post_head); const uint idx_after_post_before_pre = Compile::current()->unique(); @@ -1764,7 +1764,7 @@ void PhaseIdealLoop::insert_pre_post_loops(IdealLoopTree *loop, Node_List &old_n // CastII for the main loop: Node* castii = cast_incr_before_loop(pre_incr, min_taken, main_head); - assert(castii != NULL, "no castII inserted"); + assert(castii != nullptr, "no castII inserted"); assert(post_head->in(1)->is_IfProj(), "must be zero-trip guard If node projection of the post loop"); copy_skeleton_predicates_to_main_loop(pre_head, castii, stride, outer_loop, outer_main_head, dd_main_head, idx_before_pre_post, idx_after_post_before_pre, min_taken, post_head->in(1), old_new); @@ -1889,7 +1889,7 @@ void PhaseIdealLoop::insert_vector_post_loop(IdealLoopTree *loop, Node_List &old Node *limit = main_end->limit(); // In this case we throw away the result as we are not using it to connect anything else. - CountedLoopNode *post_head = NULL; + CountedLoopNode *post_head = nullptr; insert_post_loop(loop, old_new, main_head, main_end, incr, limit, post_head); copy_skeleton_predicates_to_post_loop(main_head->skip_strip_mined(), post_head, incr, main_head->stride()); @@ -1936,7 +1936,7 @@ void PhaseIdealLoop::insert_scalar_rced_post_loop(IdealLoopTree *loop, Node_List Node *limit = main_end->limit(); // In this case we throw away the result as we are not using it to connect anything else. 
- CountedLoopNode *post_head = NULL; + CountedLoopNode *post_head = nullptr; insert_post_loop(loop, old_new, main_head, main_end, incr, limit, post_head); copy_skeleton_predicates_to_post_loop(main_head->skip_strip_mined(), post_head, incr, main_head->stride()); @@ -2042,7 +2042,7 @@ Node *PhaseIdealLoop::insert_post_loop(IdealLoopTree* loop, Node_List& old_new, // CastII for the new post loop: incr = cast_incr_before_loop(zer_opaq->in(1), zer_taken, post_head); - assert(incr != NULL, "no castII inserted"); + assert(incr != nullptr, "no castII inserted"); return new_main_exit; } @@ -2074,7 +2074,7 @@ void PhaseIdealLoop::update_main_loop_skeleton_predicates(Node* ctrl, CountedLoo Node* max_value = _igvn.intcon(new_stride_con); set_ctrl(max_value, C->root()); - while (entry != NULL && entry->is_Proj() && entry->in(0)->is_If()) { + while (entry != nullptr && entry->is_Proj() && entry->in(0)->is_If()) { IfNode* iff = entry->in(0)->as_If(); ProjNode* proj = iff->proj_out(1 - entry->as_Proj()->_con); if (proj->unique_ctrl_out()->Opcode() != Op_Halt) { @@ -2112,7 +2112,7 @@ void PhaseIdealLoop::copy_skeleton_predicates_to_post_loop(LoopNode* main_loop_h Node* ctrl = main_loop_entry; Node* prev_proj = post_loop_entry; - while (ctrl != NULL && ctrl->is_Proj() && ctrl->in(0)->is_If()) { + while (ctrl != nullptr && ctrl->is_Proj() && ctrl->in(0)->is_If()) { IfNode* iff = ctrl->in(0)->as_If(); ProjNode* proj = iff->proj_out(1 - ctrl->as_Proj()->_con); if (proj->unique_ctrl_out()->Opcode() != Op_Halt) { @@ -2215,7 +2215,7 @@ void PhaseIdealLoop::do_unroll(IdealLoopTree *loop, Node_List &old_new, bool adj Node *init = loop_head->init_trip(); Node *stride = loop_head->stride(); - Node *opaq = NULL; + Node *opaq = nullptr; if (adjust_min_trip) { // If not maximally unrolling, need adjustment // Search for zero-trip guard. 
@@ -2223,7 +2223,7 @@ void PhaseIdealLoop::do_unroll(IdealLoopTree *loop, Node_List &old_new, bool adj // graph shape is encountered, the compiler bails out loop unrolling; // compilation of the method will still succeed. opaq = loop_head->is_canonical_loop_entry(); - if (opaq == NULL) { + if (opaq == nullptr) { return; } // Zero-trip test uses an 'opaque' node which is not shared. @@ -2232,7 +2232,7 @@ void PhaseIdealLoop::do_unroll(IdealLoopTree *loop, Node_List &old_new, bool adj C->set_major_progress(); - Node* new_limit = NULL; + Node* new_limit = nullptr; int stride_con = stride->get_int(); int stride_p = (stride_con > 0) ? stride_con : -stride_con; uint old_trip_count = loop_head->trip_count(); @@ -2274,7 +2274,7 @@ void PhaseIdealLoop::do_unroll(IdealLoopTree *loop, Node_List &old_new, bool adj // adjustment underflows or overflows, then the main loop is skipped. Node* cmp = loop_end->cmp_node(); assert(cmp->in(2) == limit, "sanity"); - assert(opaq != NULL && opaq->in(1) == limit, "sanity"); + assert(opaq != nullptr && opaq->in(1) == limit, "sanity"); // Verify that policy_unroll result is still valid. const TypeInt* limit_type = _igvn.type(limit)->is_int(); @@ -2308,9 +2308,9 @@ void PhaseIdealLoop::do_unroll(IdealLoopTree *loop, Node_List &old_new, bool adj assert(bt == BoolTest::lt || bt == BoolTest::gt, "canonical test is expected"); Node* underflow_clamp = _igvn.intcon((stride_con > 0) ? min_jint : max_jint); set_ctrl(underflow_clamp, C->root()); - Node* limit_before_underflow = NULL; - Node* prev_limit = NULL; - Node* bol = limit->is_CMove() ? limit->in(CMoveNode::Condition) : NULL; + Node* limit_before_underflow = nullptr; + Node* prev_limit = nullptr; + Node* bol = limit->is_CMove() ? 
limit->in(CMoveNode::Condition) : nullptr; if (loop_head->unrolled_count() > 1 && limit->is_CMove() && limit->Opcode() == Op_CMoveI && limit->in(CMoveNode::IfTrue) == underflow_clamp && @@ -2340,7 +2340,7 @@ void PhaseIdealLoop::do_unroll(IdealLoopTree *loop, Node_List &old_new, bool adj // | | | // CMoveINode ([min_jint..hi] / [lo..max_jing]) // - assert(limit_before_underflow != NULL && prev_limit != NULL, "must find them"); + assert(limit_before_underflow != nullptr && prev_limit != nullptr, "must find them"); Node* new_limit_with_underflow = new SubINode(prev_limit, stride); register_new_node(new_limit_with_underflow, ctrl); // We must compare with limit_before_underflow, prev_limit may already have underflowed. @@ -2359,7 +2359,7 @@ void PhaseIdealLoop::do_unroll(IdealLoopTree *loop, Node_List &old_new, bool adj register_new_node(new_limit, ctrl); } - assert(new_limit != NULL, ""); + assert(new_limit != nullptr, ""); // Replace in loop test. assert(loop_end->in(1)->in(1) == cmp, "sanity"); if (cmp->outcnt() == 1 && loop_end->in(1)->outcnt() == 1) { @@ -2513,9 +2513,9 @@ void PhaseIdealLoop::mark_reductions(IdealLoopTree *loop) { // For definitions which are loop inclusive and not tripcounts. Node* def_node = phi->in(LoopNode::LoopBackControl); - if (def_node != NULL) { + if (def_node != nullptr) { Node* n_ctrl = get_ctrl(def_node); - if (n_ctrl != NULL && loop->is_member(get_loop(n_ctrl))) { + if (n_ctrl != nullptr && loop->is_member(get_loop(n_ctrl))) { // Now test it to see if it fits the standard pattern for a reduction operator. 
int opc = def_node->Opcode(); if (opc != ReductionNode::opcode(opc, def_node->bottom_type()->basic_type()) @@ -2566,7 +2566,7 @@ void PhaseIdealLoop::mark_reductions(IdealLoopTree *loop) { Node* PhaseIdealLoop::adjust_limit(bool is_positive_stride, Node* scale, Node* offset, Node* rc_limit, Node* old_limit, Node* pre_ctrl, bool round) { Node* sub = new SubLNode(rc_limit, offset); register_new_node(sub, pre_ctrl); - Node* limit = new DivLNode(NULL, sub, scale); + Node* limit = new DivLNode(nullptr, sub, scale); register_new_node(limit, pre_ctrl); // When the absolute value of scale is greater than one, the division @@ -2622,8 +2622,8 @@ Node* PhaseIdealLoop::adjust_limit(bool is_positive_stride, Node* scale, Node* o // holds true in the main-loop. Stride, scale, offset and limit are all loop // invariant. Further, stride and scale are constants (offset and limit often are). void PhaseIdealLoop::add_constraint(jlong stride_con, jlong scale_con, Node* offset, Node* low_limit, Node* upper_limit, Node* pre_ctrl, Node** pre_limit, Node** main_limit) { - assert(_igvn.type(offset)->isa_long() != NULL && _igvn.type(low_limit)->isa_long() != NULL && - _igvn.type(upper_limit)->isa_long() != NULL, "arguments should be long values"); + assert(_igvn.type(offset)->isa_long() != nullptr && _igvn.type(low_limit)->isa_long() != nullptr && + _igvn.type(upper_limit)->isa_long() != nullptr, "arguments should be long values"); // For a positive stride, we need to reduce the main-loop limit and // increase the pre-loop limit. This is reversed for a negative stride. 
@@ -2728,10 +2728,10 @@ bool PhaseIdealLoop::is_scaled_iv(Node* exp, Node* iv, BasicType bt, jlong* p_sc exp = exp->uncast(); //strip casts assert(exp_bt == T_INT || exp_bt == T_LONG, "unexpected int type"); if (is_iv(exp, iv, exp_bt)) { - if (p_scale != NULL) { + if (p_scale != nullptr) { *p_scale = 1; } - if (p_short_scale != NULL) { + if (p_short_scale != nullptr) { *p_short_scale = false; } return true; @@ -2751,10 +2751,10 @@ bool PhaseIdealLoop::is_scaled_iv(Node* exp, Node* iv, BasicType bt, jlong* p_sc if (scale == 0) { return false; // might be top } - if (p_scale != NULL) { + if (p_scale != nullptr) { *p_scale = scale; } - if (p_short_scale != NULL) { + if (p_short_scale != nullptr) { // (ConvI2L (MulI iv K)) can be 64-bit linear if iv is kept small enough... *p_short_scale = (exp_bt != bt && scale != 1); } @@ -2772,10 +2772,10 @@ bool PhaseIdealLoop::is_scaled_iv(Node* exp, Node* iv, BasicType bt, jlong* p_sc } else if (exp_bt == T_LONG) { scale = java_shift_left((jlong)1, (julong)shift_amount); } - if (p_scale != NULL) { + if (p_scale != nullptr) { *p_scale = scale; } - if (p_short_scale != NULL) { + if (p_short_scale != nullptr) { // (ConvI2L (MulI iv K)) can be 64-bit linear if iv is kept small enough... *p_short_scale = (exp_bt != bt && scale != 1); } @@ -2797,10 +2797,10 @@ bool PhaseIdealLoop::is_scaled_iv(Node* exp, Node* iv, BasicType bt, jlong* p_sc // overflow but that's fine because result wraps. 
return false; } - if (p_scale != NULL) { + if (p_scale != nullptr) { *p_scale = scale_sum; } - if (p_short_scale != NULL) { + if (p_short_scale != nullptr) { *p_short_scale = short_scale_l && short_scale_r; } return true; @@ -2815,10 +2815,10 @@ bool PhaseIdealLoop::is_scaled_iv(Node* exp, Node* iv, BasicType bt, jlong* p_sc return false; } scale = java_multiply(scale, (jlong)-1); - if (p_scale != NULL) { + if (p_scale != nullptr) { *p_scale = scale; } - if (p_short_scale != NULL) { + if (p_short_scale != nullptr) { // (ConvI2L (MulI iv K)) can be 64-bit linear if iv is kept small enough... *p_short_scale = *p_short_scale || (exp_bt != bt && scale != 1); } @@ -2840,10 +2840,10 @@ bool PhaseIdealLoop::is_scaled_iv(Node* exp, Node* iv, BasicType bt, jlong* p_sc // result may also overflow but that's fine because result wraps. return false; } - if (p_scale != NULL) { + if (p_scale != nullptr) { *p_scale = scale_diff; } - if (p_short_scale != NULL) { + if (p_short_scale != nullptr) { *p_short_scale = short_scale_l && short_scale_r; } return true; @@ -2876,10 +2876,10 @@ bool PhaseIdealLoop::is_scaled_iv_plus_offset(Node* exp, Node* iv, BasicType bt, BasicType exp_bt = bt; exp = exp->uncast(); if (is_scaled_iv(exp, iv, exp_bt, &scale, p_short_scale)) { - if (p_scale != NULL) { + if (p_scale != nullptr) { *p_scale = scale; } - if (p_offset != NULL) { + if (p_offset != nullptr) { Node *zero = _igvn.zerocon(bt); set_ctrl(zero, C->root()); *p_offset = zero; @@ -2895,16 +2895,16 @@ bool PhaseIdealLoop::is_scaled_iv_plus_offset(Node* exp, Node* iv, BasicType bt, } int opc = exp->Opcode(); int which = 0; // this is which subexpression we find the iv in - Node* offset = NULL; + Node* offset = nullptr; if (opc == Op_Add(exp_bt)) { // Check for a scaled IV in (AddX (MulX iv S) E) or (AddX E (MulX iv S)). if (is_scaled_iv(exp->in(which = 1), iv, bt, &scale, p_short_scale) || is_scaled_iv(exp->in(which = 2), iv, bt, &scale, p_short_scale)) { offset = exp->in(which == 1 ? 
2 : 1); // the other argument - if (p_scale != NULL) { + if (p_scale != nullptr) { *p_scale = scale; } - if (p_offset != NULL) { + if (p_offset != nullptr) { *p_offset = offset; } return true; @@ -2927,10 +2927,10 @@ bool PhaseIdealLoop::is_scaled_iv_plus_offset(Node* exp, Node* iv, BasicType bt, } scale = java_multiply(scale, (jlong)-1); } - if (p_scale != NULL) { + if (p_scale != nullptr) { *p_scale = scale; } - if (p_offset != NULL) { + if (p_offset != nullptr) { if (which == 1) { // must negate the extracted offset Node *zero = _igvn.integercon(0, exp_bt); set_ctrl(zero, C->root()); @@ -2957,12 +2957,12 @@ bool PhaseIdealLoop::is_scaled_iv_plus_extra_offset(Node* exp1, Node* offset3, N // By the time we reach here, it is unlikely that exp1 is a simple iv*K. // If is a linear iv transform, it is probably an add or subtract. // Let's collect the internal offset2 from it. - Node* offset2 = NULL; + Node* offset2 = nullptr; if (offset3->is_Con() && depth < 2 && is_scaled_iv_plus_offset(exp1, iv, bt, p_scale, &offset2, p_short_scale, depth+1)) { - if (p_offset != NULL) { + if (p_offset != nullptr) { Node* ctrl_off2 = get_ctrl(offset2); Node* offset = AddNode::make(offset2, offset3, bt); register_new_node(offset, ctrl_off2); @@ -2979,10 +2979,10 @@ Node* PhaseIdealLoop::add_range_check_predicate(IdealLoopTree* loop, CountedLoop Node* predicate_proj, int scale_con, Node* offset, Node* limit, jint stride_con, Node* value) { bool overflow = false; - BoolNode* bol = rc_predicate(loop, predicate_proj, scale_con, offset, value, NULL, stride_con, limit, (stride_con > 0) != (scale_con > 0), overflow, false); + BoolNode* bol = rc_predicate(loop, predicate_proj, scale_con, offset, value, nullptr, stride_con, limit, (stride_con > 0) != (scale_con > 0), overflow, false); Node* opaque_bol = new Opaque4Node(C, bol, _igvn.intcon(1)); register_new_node(opaque_bol, predicate_proj); - IfNode* new_iff = NULL; + IfNode* new_iff = nullptr; if (overflow) { new_iff = new 
IfNode(predicate_proj, opaque_bol, PROB_MAX, COUNT_UNKNOWN); } else { @@ -3030,7 +3030,7 @@ void PhaseIdealLoop::do_range_check(IdealLoopTree *loop, Node_List &old_new) { // Check graph shape. Cannot optimize a loop if zero-trip // Opaque1 node is optimized away and then another round // of loop opts attempted. - if (cl->is_canonical_loop_entry() == NULL) { + if (cl->is_canonical_loop_entry() == nullptr) { return; } @@ -3065,7 +3065,7 @@ void PhaseIdealLoop::do_range_check(IdealLoopTree *loop, Node_List &old_new) { // Ensure the original loop limit is available from the // pre-loop Opaque1 node. Node *orig_limit = pre_opaq->original_loop_limit(); - if (orig_limit == NULL || _igvn.type(orig_limit) == Type::TOP) { + if (orig_limit == nullptr || _igvn.type(orig_limit) == Type::TOP) { return; } // Must know if its a count-up or count-down loop @@ -3131,7 +3131,7 @@ void PhaseIdealLoop::do_range_check(IdealLoopTree *loop, Node_List &old_new) { } // Check for scaled induction variable plus an offset - Node *offset = NULL; + Node *offset = nullptr; if (!is_scaled_iv_plus_offset(rc_exp, trip_counter, &scale_con, &offset)) { continue; @@ -3252,7 +3252,7 @@ void PhaseIdealLoop::do_range_check(IdealLoopTree *loop, Node_List &old_new) { if (cd->is_Load() && cd->depends_only_on_test()) { // Loads can now float around in the loop // Allow the load to float around in the loop, or before it // but NOT before the pre-loop. - _igvn.replace_input_of(cd, 0, ctrl); // ctrl, not NULL + _igvn.replace_input_of(cd, 0, ctrl); // ctrl, not null --i; --imax; } @@ -3341,7 +3341,7 @@ bool PhaseIdealLoop::multi_version_post_loops(IdealLoopTree *rce_loop, IdealLoop } // Find RCE'd post loop so that we can stage its guard. 
- if (legacy_cl->is_canonical_loop_entry() == NULL) { + if (legacy_cl->is_canonical_loop_entry() == nullptr) { return multi_version_succeeded; } Node* ctrl = legacy_cl->in(LoopNode::EntryControl); @@ -3349,19 +3349,19 @@ bool PhaseIdealLoop::multi_version_post_loops(IdealLoopTree *rce_loop, IdealLoop // Now we test that both the post loops are connected Node* post_loop_region = iffm->in(0); - if (post_loop_region == NULL) return multi_version_succeeded; + if (post_loop_region == nullptr) return multi_version_succeeded; if (!post_loop_region->is_Region()) return multi_version_succeeded; Node* covering_region = post_loop_region->in(RegionNode::Control+1); - if (covering_region == NULL) return multi_version_succeeded; + if (covering_region == nullptr) return multi_version_succeeded; if (!covering_region->is_Region()) return multi_version_succeeded; Node* p_f = covering_region->in(RegionNode::Control); - if (p_f == NULL) return multi_version_succeeded; + if (p_f == nullptr) return multi_version_succeeded; if (!p_f->is_IfFalse()) return multi_version_succeeded; if (!p_f->in(0)->is_CountedLoopEnd()) return multi_version_succeeded; CountedLoopEndNode* rce_loop_end = p_f->in(0)->as_CountedLoopEnd(); - if (rce_loop_end == NULL) return multi_version_succeeded; + if (rce_loop_end == nullptr) return multi_version_succeeded; CountedLoopNode* rce_cl = rce_loop_end->loopnode(); - if (rce_cl == NULL || !rce_cl->is_post_loop()) return multi_version_succeeded; + if (rce_cl == nullptr || !rce_cl->is_post_loop()) return multi_version_succeeded; CountedLoopNode *known_rce_cl = rce_loop->_head->as_CountedLoop(); if (rce_cl != known_rce_cl) return multi_version_succeeded; @@ -3385,7 +3385,7 @@ bool PhaseIdealLoop::multi_version_post_loops(IdealLoopTree *rce_loop, IdealLoop // we have a work list. 
Now we will try to transform the if guard to cause // the loop pair to be multi version executed with the determination left to runtime // or the optimizer if full information is known about the given arrays at compile time. - Node *last_min = NULL; + Node *last_min = nullptr; multi_version_succeeded = true; while (worklist.size()) { Node* rc_iffm = worklist.pop(); @@ -3556,7 +3556,7 @@ void IdealLoopTree::remove_main_post_loops(CountedLoopNode *cl, PhaseIdealLoop * } // Can we find the main loop? - if (_next == NULL) { + if (_next == nullptr) { return; } @@ -3616,11 +3616,11 @@ bool IdealLoopTree::do_remove_empty_loop(PhaseIdealLoop *phase) { #ifdef ASSERT // Ensure at most one used phi exists, which is the iv. - Node* iv = NULL; + Node* iv = nullptr; for (DUIterator_Fast imax, i = cl->fast_outs(imax); i < imax; i++) { Node* n = cl->fast_out(i); if ((n->Opcode() == Op_Phi) && (n->outcnt() > 0)) { - assert(iv == NULL, "Too many phis"); + assert(iv == nullptr, "Too many phis"); iv = n; } } @@ -3830,7 +3830,7 @@ void IdealLoopTree::collect_loop_core_nodes(PhaseIdealLoop* phase, Unique_Node_L Node* n = wq.at(i); for (uint j = 0; j < n->req(); ++j) { Node* in = n->in(j); - if (in != NULL) { + if (in != nullptr) { if (phase->get_loop(phase->ctrl_or_self(in)) == this) { wq.push(in); } @@ -4076,21 +4076,21 @@ bool PhaseIdealLoop::do_intrinsify_fill() { // value in a unit stride loop, bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& store_value, Node*& shift, Node*& con) { - const char* msg = NULL; - Node* msg_node = NULL; + const char* msg = nullptr; + Node* msg_node = nullptr; - store_value = NULL; - con = NULL; - shift = NULL; + store_value = nullptr; + con = nullptr; + shift = nullptr; // Process the loop looking for stores. If there are multiple // stores or extra control flow give at this point. 
CountedLoopNode* head = lpt->_head->as_CountedLoop(); - for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) { + for (uint i = 0; msg == nullptr && i < lpt->_body.size(); i++) { Node* n = lpt->_body.at(i); if (n->outcnt() == 0) continue; // Ignore dead if (n->is_Store()) { - if (store != NULL) { + if (store != nullptr) { msg = "multiple stores"; break; } @@ -4113,12 +4113,12 @@ bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& st } } - if (store == NULL) { + if (store == nullptr) { // No store in loop return false; } - if (msg == NULL && head->stride_con() != 1) { + if (msg == nullptr && head->stride_con() != 1) { // could handle negative strides too if (head->stride_con() < 0) { msg = "negative stride"; @@ -4127,12 +4127,12 @@ bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& st } } - if (msg == NULL && !store->in(MemNode::Address)->is_AddP()) { + if (msg == nullptr && !store->in(MemNode::Address)->is_AddP()) { msg = "can't handle store address"; msg_node = store->in(MemNode::Address); } - if (msg == NULL && + if (msg == nullptr && (!store->in(MemNode::Memory)->is_Phi() || store->in(MemNode::Memory)->in(LoopNode::LoopBackControl) != store)) { msg = "store memory isn't proper phi"; @@ -4142,17 +4142,17 @@ bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& st // Make sure there is an appropriate fill routine BasicType t = store->as_Mem()->memory_type(); const char* fill_name; - if (msg == NULL && - StubRoutines::select_fill_function(t, false, fill_name) == NULL) { + if (msg == nullptr && + StubRoutines::select_fill_function(t, false, fill_name) == nullptr) { msg = "unsupported store"; msg_node = store; } - if (msg != NULL) { + if (msg != nullptr) { #ifndef PRODUCT if (TraceOptimizeFill) { tty->print_cr("not fill intrinsic candidate: %s", msg); - if (msg_node != NULL) msg_node->dump(); + if (msg_node != nullptr) msg_node->dump(); } #endif return false; @@ -4161,15 +4161,15 @@ 
bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& st // Make sure the address expression can be handled. It should be // head->phi * elsize + con. head->phi might have a ConvI2L(CastII()). Node* elements[4]; - Node* cast = NULL; - Node* conv = NULL; + Node* cast = nullptr; + Node* conv = nullptr; bool found_index = false; int count = store->in(MemNode::Address)->as_AddP()->unpack_offsets(elements, ARRAY_SIZE(elements)); for (int e = 0; e < count; e++) { Node* n = elements[e]; - if (n->is_Con() && con == NULL) { + if (n->is_Con() && con == nullptr) { con = n; - } else if (n->Opcode() == Op_LShiftX && shift == NULL) { + } else if (n->Opcode() == Op_LShiftX && shift == nullptr) { Node* value = n->in(1); #ifdef _LP64 if (value->Opcode() == Op_ConvI2L) { @@ -4193,7 +4193,7 @@ bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& st shift = n; } } - } else if (n->Opcode() == Op_ConvI2L && conv == NULL) { + } else if (n->Opcode() == Op_ConvI2L && conv == nullptr) { conv = n; n = n->in(1); if (n->Opcode() == Op_CastII && @@ -4226,16 +4226,16 @@ bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& st } // byte sized items won't have a shift - if (msg == NULL && shift == NULL && t != T_BYTE && t != T_BOOLEAN) { + if (msg == nullptr && shift == nullptr && t != T_BYTE && t != T_BOOLEAN) { msg = "can't find shift"; msg_node = store; } - if (msg != NULL) { + if (msg != nullptr) { #ifndef PRODUCT if (TraceOptimizeFill) { tty->print_cr("not fill intrinsic: %s", msg); - if (msg_node != NULL) msg_node->dump(); + if (msg_node != nullptr) msg_node->dump(); } #endif return false; @@ -4264,7 +4264,7 @@ bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& st if (cast) ok.set(cast->_idx); if (conv) ok.set(conv->_idx); - for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) { + for (uint i = 0; msg == nullptr && i < lpt->_body.size(); i++) { Node* n = lpt->_body.at(i); if 
(n->outcnt() == 0) continue; // Ignore dead if (ok.test(n->_idx)) continue; @@ -4278,7 +4278,7 @@ bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& st } // Make sure no unexpected values are used outside the loop - for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) { + for (uint i = 0; msg == nullptr && i < lpt->_body.size(); i++) { Node* n = lpt->_body.at(i); // These values can be replaced with other nodes if they are used // outside the loop. @@ -4295,9 +4295,9 @@ bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& st #ifdef ASSERT if (TraceOptimizeFill) { - if (msg != NULL) { + if (msg != nullptr) { tty->print_cr("no fill intrinsic: %s", msg); - if (msg_node != NULL) msg_node->dump(); + if (msg_node != nullptr) msg_node->dump(); } else { tty->print_cr("fill intrinsic for:"); } @@ -4308,7 +4308,7 @@ bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& st } #endif - return msg == NULL; + return msg == nullptr; } @@ -4329,16 +4329,16 @@ bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) { // Check that the body only contains a store of a loop invariant // value that is indexed by the loop phi. - Node* store = NULL; - Node* store_value = NULL; - Node* shift = NULL; - Node* offset = NULL; + Node* store = nullptr; + Node* store_value = nullptr; + Node* shift = nullptr; + Node* offset = nullptr; if (!match_fill_loop(lpt, store, store_value, shift, offset)) { return false; } Node* exit = head->loopexit()->proj_out_or_null(0); - if (exit == NULL) { + if (exit == nullptr) { return false; } @@ -4359,7 +4359,7 @@ bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) { index = new ConvI2LNode(index); _igvn.register_new_node_with_optimizer(index); #endif - if (shift != NULL) { + if (shift != nullptr) { // byte arrays don't require a shift but others do. 
index = new LShiftXNode(index, shift->in(2)); _igvn.register_new_node_with_optimizer(index); @@ -4368,10 +4368,10 @@ bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) { _igvn.register_new_node_with_optimizer(from); // For normal array fills, C2 uses two AddP nodes for array element // addressing. But for array fills with Unsafe call, there's only one - // AddP node adding an absolute offset, so we do a NULL check here. - assert(offset != NULL || C->has_unsafe_access(), + // AddP node adding an absolute offset, so we do a null check here. + assert(offset != nullptr || C->has_unsafe_access(), "Only array fills with unsafe have no extra offset"); - if (offset != NULL) { + if (offset != nullptr) { from = new AddPNode(base, from, offset); _igvn.register_new_node_with_optimizer(from); } @@ -4394,7 +4394,7 @@ bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) { BasicType t = store->as_Mem()->memory_type(); bool aligned = false; - if (offset != NULL && head->init_trip()->is_Con()) { + if (offset != nullptr && head->init_trip()->is_Con()) { int element_size = type2aelembytes(t); aligned = (offset->find_intptr_t_type()->get_con() + head->init_trip()->get_int() * element_size) % HeapWordSize == 0; } @@ -4402,7 +4402,7 @@ bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) { // Build a call to the fill routine const char* fill_name; address fill = StubRoutines::select_fill_function(t, aligned, fill_name); - assert(fill != NULL, "what?"); + assert(fill != nullptr, "what?"); // Convert float/double to int/long for fill routines if (t == T_FLOAT) { @@ -4446,7 +4446,7 @@ bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) { // If this fill is tightly coupled to an allocation and overwrites // the whole body, allow it to take over the zeroing. 
AllocateNode* alloc = AllocateNode::Ideal_allocation(base, this); - if (alloc != NULL && alloc->is_AllocateArray()) { + if (alloc != nullptr && alloc->is_AllocateArray()) { Node* length = alloc->as_AllocateArray()->Ideal_length(); if (head->limit() == length && head->init_trip() == _igvn.intcon(0)) { diff --git a/src/hotspot/share/opto/loopUnswitch.cpp b/src/hotspot/share/opto/loopUnswitch.cpp index a141b8e15aa..cecceb445e8 100644 --- a/src/hotspot/share/opto/loopUnswitch.cpp +++ b/src/hotspot/share/opto/loopUnswitch.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2006, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -74,7 +74,7 @@ bool IdealLoopTree::policy_unswitching( PhaseIdealLoop *phase ) const { if (head->unswitch_count() + 1 > head->unswitch_max()) { return false; } - if (phase->find_unswitching_candidate(this) == NULL) { + if (phase->find_unswitching_candidate(this) == nullptr) { return false; } @@ -88,7 +88,7 @@ IfNode* PhaseIdealLoop::find_unswitching_candidate(const IdealLoopTree *loop) co // Find first invariant test that doesn't exit the loop LoopNode *head = loop->_head->as_Loop(); - IfNode* unswitch_iff = NULL; + IfNode* unswitch_iff = nullptr; Node* n = head->in(LoopNode::LoopBackControl); while (n != head) { Node* n_dom = idom(n); @@ -120,9 +120,9 @@ void PhaseIdealLoop::do_unswitching(IdealLoopTree *loop, Node_List &old_new) { LoopNode *head = loop->_head->as_Loop(); Node* entry = head->skip_strip_mined()->in(LoopNode::EntryControl); - if (find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check) != NULL - || (UseProfiledLoopPredicate && find_predicate_insertion_point(entry, Deoptimization::Reason_profile_predicate) != NULL) - || (UseLoopPredicate && find_predicate_insertion_point(entry, 
Deoptimization::Reason_predicate) != NULL)) { + if (find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check) != nullptr + || (UseProfiledLoopPredicate && find_predicate_insertion_point(entry, Deoptimization::Reason_profile_predicate) != nullptr) + || (UseLoopPredicate && find_predicate_insertion_point(entry, Deoptimization::Reason_predicate) != nullptr)) { assert(entry->is_IfProj(), "sanity - must be ifProj since there is at least one predicate"); if (entry->outcnt() > 1) { // Bailout if there are loop predicates from which there are additional control dependencies (i.e. from @@ -133,7 +133,7 @@ void PhaseIdealLoop::do_unswitching(IdealLoopTree *loop, Node_List &old_new) { } // Find first invariant test that doesn't exit the loop IfNode* unswitch_iff = find_unswitching_candidate((const IdealLoopTree *)loop); - assert(unswitch_iff != NULL, "should be at least one"); + assert(unswitch_iff != nullptr, "should be at least one"); #ifndef PRODUCT if (TraceLoopOpts) { @@ -155,7 +155,7 @@ void PhaseIdealLoop::do_unswitching(IdealLoopTree *loop, Node_List &old_new) { assert(proj_true->is_IfTrue(), "must be true projection"); entry = head->skip_strip_mined()->in(LoopNode::EntryControl); Node* predicate = find_predicate(entry); - if (predicate == NULL) { + if (predicate == nullptr) { // No empty predicate Node* uniqc = proj_true->unique_ctrl_out(); assert((uniqc == head && !head->is_strip_mined()) || (uniqc == head->in(LoopNode::EntryControl) @@ -166,13 +166,13 @@ void PhaseIdealLoop::do_unswitching(IdealLoopTree *loop, Node_List &old_new) { Node* proj_before_first_empty_predicate = skip_loop_predicates(entry); if (UseProfiledLoopPredicate) { predicate = find_predicate(proj_before_first_empty_predicate); - if (predicate != NULL) { + if (predicate != nullptr) { proj_before_first_empty_predicate = skip_loop_predicates(predicate); } } if (UseLoopPredicate) { predicate = find_predicate(proj_before_first_empty_predicate); - if (predicate != NULL) { + if 
(predicate != nullptr) { proj_before_first_empty_predicate = skip_loop_predicates(predicate); } } @@ -343,9 +343,9 @@ LoopNode* PhaseIdealLoop::create_reserve_version_of_loop(IdealLoopTree *loop, Co CountedLoopReserveKit::CountedLoopReserveKit(PhaseIdealLoop* phase, IdealLoopTree *loop, bool active = true) : _phase(phase), _lpt(loop), - _lp(NULL), - _iff(NULL), - _lp_reserved(NULL), + _lp(nullptr), + _iff(nullptr), + _lp_reserved(nullptr), _has_reserved(false), _use_new(false), _active(active) diff --git a/src/hotspot/share/opto/loopnode.cpp b/src/hotspot/share/opto/loopnode.cpp index 49f2a762665..e2c009331df 100644 --- a/src/hotspot/share/opto/loopnode.cpp +++ b/src/hotspot/share/opto/loopnode.cpp @@ -75,12 +75,12 @@ bool LoopNode::is_valid_counted_loop(BasicType bt) const { if (is_BaseCountedLoop() && as_BaseCountedLoop()->bt() == bt) { BaseCountedLoopNode* l = as_BaseCountedLoop(); BaseCountedLoopEndNode* le = l->loopexit_or_null(); - if (le != NULL && + if (le != nullptr && le->proj_out_or_null(1 /* true */) == l->in(LoopNode::LoopBackControl)) { Node* phi = l->phi(); Node* exit = le->proj_out_or_null(0 /* false */); - if (exit != NULL && exit->Opcode() == Op_IfFalse && - phi != NULL && phi->is_Phi() && + if (exit != nullptr && exit->Opcode() == Op_IfFalse && + phi != nullptr && phi->is_Phi() && phi->in(LoopNode::LoopBackControl) == l->incr() && le->loopnode() == l && le->stride_is_con()) { return true; @@ -174,7 +174,7 @@ Node *PhaseIdealLoop::get_early_ctrl_for_expensive(Node *n, Node* earliest) { // that doesn't branch to an UNC, we stop. The code that process // expensive nodes will notice the loop and skip over it to try to // move the node further up. 
- if (ctl->is_CountedLoop() && ctl->in(1) != NULL && ctl->in(1)->in(0) != NULL && ctl->in(1)->in(0)->is_If()) { + if (ctl->is_CountedLoop() && ctl->in(1) != nullptr && ctl->in(1)->in(0) != nullptr && ctl->in(1)->in(0)->is_If()) { if (!ctl->in(1)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) { break; } @@ -184,9 +184,9 @@ Node *PhaseIdealLoop::get_early_ctrl_for_expensive(Node *n, Node* earliest) { // the single control projection for its parent: same code path, // if it's a If with UNC or fallthrough of a call. Node* parent_ctl = ctl->in(0); - if (parent_ctl == NULL) { + if (parent_ctl == nullptr) { break; - } else if (parent_ctl->is_CountedLoopEnd() && parent_ctl->as_CountedLoopEnd()->loopnode() != NULL) { + } else if (parent_ctl->is_CountedLoopEnd() && parent_ctl->as_CountedLoopEnd()->loopnode() != nullptr) { next = parent_ctl->as_CountedLoopEnd()->loopnode()->init_control(); } else if (parent_ctl->is_If()) { if (!ctl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) { @@ -250,7 +250,7 @@ void PhaseIdealLoop::set_early_ctrl(Node* n, bool update_body) { // Record earliest legal location set_ctrl(n, early); IdealLoopTree *loop = get_loop(early); - if (update_body && loop->_child == NULL) { + if (update_body && loop->_child == nullptr) { loop->_body.push(n); } } @@ -290,7 +290,7 @@ IdealLoopTree* PhaseIdealLoop::insert_outer_loop(IdealLoopTree* loop, LoopNode* outer_ilt->_child = loop; outer_ilt->_nest = loop->_nest; loop->_parent = outer_ilt; - loop->_next = NULL; + loop->_next = nullptr; loop->_nest++; assert(loop->_nest <= SHRT_MAX, "sanity"); return outer_ilt; @@ -342,7 +342,7 @@ IdealLoopTree* PhaseIdealLoop::create_outer_strip_mined_loop(BoolNode *test, Nod } void PhaseIdealLoop::insert_loop_limit_check(ProjNode* limit_check_proj, Node* cmp_limit, Node* bol) { - Node* new_predicate_proj = create_new_if_for_predicate(limit_check_proj, NULL, + Node* new_predicate_proj = create_new_if_for_predicate(limit_check_proj, 
nullptr, Deoptimization::Reason_loop_limit_check, Op_If); Node* iff = new_predicate_proj->in(0); @@ -367,19 +367,19 @@ void PhaseIdealLoop::insert_loop_limit_check(ProjNode* limit_check_proj, Node* c } Node* PhaseIdealLoop::loop_exit_control(Node* x, IdealLoopTree* loop) { - // Counted loop head must be a good RegionNode with only 3 not NULL + // Counted loop head must be a good RegionNode with only 3 not null // control input edges: Self, Entry, LoopBack. - if (x->in(LoopNode::Self) == NULL || x->req() != 3 || loop->_irreducible) { - return NULL; + if (x->in(LoopNode::Self) == nullptr || x->req() != 3 || loop->_irreducible) { + return nullptr; } Node *init_control = x->in(LoopNode::EntryControl); Node *back_control = x->in(LoopNode::LoopBackControl); - if (init_control == NULL || back_control == NULL) { // Partially dead - return NULL; + if (init_control == nullptr || back_control == nullptr) { // Partially dead + return nullptr; } // Must also check for TOP when looking for a dead loop if (init_control->is_top() || back_control->is_top()) { - return NULL; + return nullptr; } // Allow funny placement of Safepoint @@ -395,13 +395,13 @@ Node* PhaseIdealLoop::loop_exit_control(Node* x, IdealLoopTree* loop) { // I have a weird back-control. Probably the loop-exit test is in // the middle of the loop and I am looking at some trailing control-flow // merge point. To fix this I would have to partially peel the loop. - return NULL; // Obscure back-control + return nullptr; // Obscure back-control } // Get boolean guarding loop-back test Node *iff = iftrue->in(0); if (get_loop(iff) != loop || !iff->in(1)->is_Bool()) { - return NULL; + return nullptr; } return iftrue; } @@ -420,7 +420,7 @@ Node* PhaseIdealLoop::loop_exit_test(Node* back_control, IdealLoopTree* loop, No // Get backedge compare Node* cmp = test->in(1); if (!cmp->is_Cmp()) { - return NULL; + return nullptr; } // Find the trip-counter increment & limit. Limit must be loop invariant. 
@@ -438,10 +438,10 @@ Node* PhaseIdealLoop::loop_exit_test(Node* back_control, IdealLoopTree* loop, No bt = BoolTest(bt).commute(); // And commute the exit test } if (is_member(loop, get_ctrl(limit))) { // Limit must be loop-invariant - return NULL; + return nullptr; } if (!is_member(loop, get_ctrl(incr))) { // Trip counter must be loop-variant - return NULL; + return nullptr; } return cmp; } @@ -449,12 +449,12 @@ Node* PhaseIdealLoop::loop_exit_test(Node* back_control, IdealLoopTree* loop, No Node* PhaseIdealLoop::loop_iv_incr(Node* incr, Node* x, IdealLoopTree* loop, Node*& phi_incr) { if (incr->is_Phi()) { if (incr->as_Phi()->region() != x || incr->req() != 3) { - return NULL; // Not simple trip counter expression + return nullptr; // Not simple trip counter expression } phi_incr = incr; incr = phi_incr->in(LoopNode::LoopBackControl); // Assume incr is on backedge of Phi if (!is_member(loop, get_ctrl(incr))) { // Trip counter must be loop-variant - return NULL; + return nullptr; } } return incr; @@ -467,7 +467,7 @@ Node* PhaseIdealLoop::loop_iv_stride(Node* incr, IdealLoopTree* loop, Node*& xph Node *stride = incr->in(2); if (!stride->is_Con()) { // Oops, swap these if (!xphi->is_Con()) { // Is the other guy a constant? 
- return NULL; // Nope, unknown stride, bail out + return nullptr; // Nope, unknown stride, bail out } Node *tmp = xphi; // 'incr' is commutative, so ok to swap xphi = stride; @@ -478,16 +478,16 @@ Node* PhaseIdealLoop::loop_iv_stride(Node* incr, IdealLoopTree* loop, Node*& xph PhiNode* PhaseIdealLoop::loop_iv_phi(Node* xphi, Node* phi_incr, Node* x, IdealLoopTree* loop) { if (!xphi->is_Phi()) { - return NULL; // Too much math on the trip counter + return nullptr; // Too much math on the trip counter } - if (phi_incr != NULL && phi_incr != xphi) { - return NULL; + if (phi_incr != nullptr && phi_incr != xphi) { + return nullptr; } PhiNode *phi = xphi->as_Phi(); // Phi must be of loop header; backedge must wrap to increment if (phi->region() != x) { - return NULL; + return nullptr; } return phi; } @@ -574,13 +574,13 @@ void PhaseIdealLoop::add_empty_predicate(Deoptimization::DeoptReason reason, Nod int trap_request = Deoptimization::make_trap_request(reason, Deoptimization::Action_maybe_recompile); address call_addr = SharedRuntime::uncommon_trap_blob()->entry_point(); - const TypePtr* no_memory_effects = NULL; + const TypePtr* no_memory_effects = nullptr; JVMState* jvms = sfpt->jvms(); CallNode* unc = new CallStaticJavaNode(OptoRuntime::uncommon_trap_Type(), call_addr, "uncommon_trap", no_memory_effects); - Node* mem = NULL; - Node* i_o = NULL; + Node* mem = nullptr; + Node* i_o = nullptr; if (sfpt->is_Call()) { mem = sfpt->proj_out(TypeFunc::Memory); i_o = sfpt->proj_out(TypeFunc::I_O); @@ -623,7 +623,7 @@ void PhaseIdealLoop::add_empty_predicate(Deoptimization::DeoptReason reason, Nod // SafePointNode so we can use its jvm state to create empty // predicates. 
static bool no_side_effect_since_safepoint(Compile* C, Node* x, Node* mem, MergeMemNode* mm, PhaseIdealLoop* phase) { - SafePointNode* safepoint = NULL; + SafePointNode* safepoint = nullptr; for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) { Node* u = x->fast_out(i); if (u->is_Phi() && u->bottom_type() == Type::MEMORY) { @@ -677,7 +677,7 @@ static bool no_side_effect_since_safepoint(Compile* C, Node* x, Node* mem, Merge SafePointNode* PhaseIdealLoop::find_safepoint(Node* back_control, Node* x, IdealLoopTree* loop) { IfNode* exit_test = back_control->in(0)->as_If(); - SafePointNode* safepoint = NULL; + SafePointNode* safepoint = nullptr; if (exit_test->in(0)->is_SafePoint() && exit_test->in(0)->outcnt() == 1) { safepoint = exit_test->in(0)->as_SafePoint(); } else { @@ -690,8 +690,8 @@ SafePointNode* PhaseIdealLoop::find_safepoint(Node* back_control, Node* x, Ideal safepoint = c->as_SafePoint(); } - if (safepoint == NULL) { - return NULL; + if (safepoint == nullptr) { + return nullptr; } Node* mem = safepoint->in(TypeFunc::Memory); @@ -699,7 +699,7 @@ SafePointNode* PhaseIdealLoop::find_safepoint(Node* back_control, Node* x, Ideal // We can only use that safepoint if there's no side effect between the backedge and the safepoint. 
// mm is used for book keeping - MergeMemNode* mm = NULL; + MergeMemNode* mm = nullptr; #ifdef ASSERT if (mem->is_MergeMem()) { mm = mem->clone()->as_MergeMem(); @@ -712,12 +712,12 @@ SafePointNode* PhaseIdealLoop::find_safepoint(Node* back_control, Node* x, Ideal } #endif if (!no_side_effect_since_safepoint(C, x, mem, mm, this)) { - safepoint = NULL; + safepoint = nullptr; } else { - assert(mm == NULL|| _igvn.transform(mm) == mem->as_MergeMem()->base_memory(), "all memory state should have been processed"); + assert(mm == nullptr|| _igvn.transform(mm) == mem->as_MergeMem()->base_memory(), "all memory state should have been processed"); } #ifdef ASSERT - if (mm != NULL) { + if (mm != nullptr) { _igvn.remove_dead_node(mm); } #endif @@ -779,7 +779,7 @@ SafePointNode* PhaseIdealLoop::find_safepoint(Node* back_control, Node* x, Ideal bool PhaseIdealLoop::create_loop_nest(IdealLoopTree* loop, Node_List &old_new) { Node* x = loop->_head; // Only for inner loops - if (loop->_child != NULL || !x->is_BaseCountedLoop() || x->as_Loop()->is_loop_nest_outer_loop()) { + if (loop->_child != nullptr || !x->is_BaseCountedLoop() || x->as_Loop()->is_loop_nest_outer_loop()) { return false; } @@ -869,7 +869,7 @@ bool PhaseIdealLoop::create_loop_nest(IdealLoopTree* loop, Node_List &old_new) { if (bt == T_INT && head->as_CountedLoop()->is_strip_mined()) { // Loop is strip mined: use the safepoint of the outer strip mined loop OuterStripMinedLoopNode* outer_loop = head->as_CountedLoop()->outer_loop(); - assert(outer_loop != NULL, "no outer loop"); + assert(outer_loop != nullptr, "no outer loop"); safepoint = outer_loop->outer_safepoint(); outer_loop->transform_to_counted_loop(&_igvn, this); exit_test = head->loopexit(); @@ -913,7 +913,7 @@ bool PhaseIdealLoop::create_loop_nest(IdealLoopTree* loop, Node_List &old_new) { outer_phi->set_req(0, outer_head); register_new_node(outer_phi, outer_head); - Node* inner_iters_max = NULL; + Node* inner_iters_max = nullptr; if (stride_con > 0) { 
inner_iters_max = MaxNode::max_diff_with_zero(limit, outer_phi, TypeInteger::bottom(bt), _igvn); } else { @@ -946,7 +946,7 @@ bool PhaseIdealLoop::create_loop_nest(IdealLoopTree* loop, Node_List &old_new) { set_ctrl(int_stride, C->root()); Node* inner_phi = new PhiNode(x->in(0), TypeInt::INT); Node* inner_incr = new AddINode(inner_phi, int_stride); - Node* inner_cmp = NULL; + Node* inner_cmp = nullptr; inner_cmp = new CmpINode(inner_incr, inner_iters_actual_int); Node* inner_bol = new BoolNode(inner_cmp, exit_test->in(1)->as_Bool()->_test._test); inner_phi->set_req(LoopNode::EntryControl, int_zero); @@ -1004,7 +1004,7 @@ bool PhaseIdealLoop::create_loop_nest(IdealLoopTree* loop, Node_List &old_new) { // == new IR nodes (just before final peel) => // // entry_control: {...} - // long adjusted_limit = limit + stride; //because phi_incr != NULL + // long adjusted_limit = limit + stride; //because phi_incr != nullptr // assert(!limit_check_required || (extralong)limit + stride == adjusted_limit); // else deopt // ulong inner_iters_limit = max_jint - ABS(stride) - 1; //near 0x7FFFFFF0 // outer_head: @@ -1047,14 +1047,14 @@ bool PhaseIdealLoop::create_loop_nest(IdealLoopTree* loop, Node_List &old_new) { // of the peeled iteration to insert empty predicates. If no well // positioned safepoint peel to guarantee a safepoint in the outer // loop. 
- if (safepoint != NULL || !loop->_has_call) { + if (safepoint != nullptr || !loop->_has_call) { old_new.clear(); do_peeling(loop, old_new); } else { C->set_major_progress(); } - if (safepoint != NULL) { + if (safepoint != nullptr) { SafePointNode* cloned_sfpt = old_new[safepoint->_idx]->as_SafePoint(); if (UseLoopPredicate) { @@ -1087,9 +1087,9 @@ int PhaseIdealLoop::extract_long_range_checks(const IdealLoopTree* loop, jlong s Node* c = loop->_body.at(i); if (c->is_IfProj() && c->in(0)->is_RangeCheck()) { CallStaticJavaNode* call = c->as_IfProj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none); - if (call != NULL) { - Node* range = NULL; - Node* offset = NULL; + if (call != nullptr) { + Node* range = nullptr; + Node* offset = nullptr; jlong scale = 0; RangeCheckNode* rc = c->in(0)->as_RangeCheck(); if (loop->is_range_check_if(rc, this, T_LONG, phi, range, offset, scale) && @@ -1254,7 +1254,7 @@ void PhaseIdealLoop::transform_long_range_checks(int stride_con, const Node_List ProjNode* unc_proj = proj->other_if_proj(); RangeCheckNode* rc = proj->in(0)->as_RangeCheck(); jlong scale = 0; - Node* offset = NULL; + Node* offset = nullptr; Node* rc_bol = rc->in(1); Node* rc_cmp = rc_bol->in(1); if (rc_cmp->Opcode() == Op_CmpU) { @@ -1436,35 +1436,35 @@ LoopNode* PhaseIdealLoop::create_inner_head(IdealLoopTree* loop, BaseCountedLoop #ifdef ASSERT void PhaseIdealLoop::check_counted_loop_shape(IdealLoopTree* loop, Node* x, BasicType bt) { Node* back_control = loop_exit_control(x, loop); - assert(back_control != NULL, "no back control"); + assert(back_control != nullptr, "no back control"); BoolTest::mask mask = BoolTest::illegal; float cl_prob = 0; - Node* incr = NULL; - Node* limit = NULL; + Node* incr = nullptr; + Node* limit = nullptr; Node* cmp = loop_exit_test(back_control, loop, incr, limit, mask, cl_prob); - assert(cmp != NULL && cmp->Opcode() == Op_Cmp(bt), "no exit test"); + assert(cmp != nullptr && cmp->Opcode() == Op_Cmp(bt), "no exit test"); - Node* 
phi_incr = NULL; + Node* phi_incr = nullptr; incr = loop_iv_incr(incr, x, loop, phi_incr); - assert(incr != NULL && incr->Opcode() == Op_Add(bt), "no incr"); + assert(incr != nullptr && incr->Opcode() == Op_Add(bt), "no incr"); - Node* xphi = NULL; + Node* xphi = nullptr; Node* stride = loop_iv_stride(incr, loop, xphi); - assert(stride != NULL, "no stride"); + assert(stride != nullptr, "no stride"); PhiNode* phi = loop_iv_phi(xphi, phi_incr, x, loop); - assert(phi != NULL && phi->in(LoopNode::LoopBackControl) == incr, "No phi"); + assert(phi != nullptr && phi->in(LoopNode::LoopBackControl) == incr, "No phi"); jlong stride_con = stride->get_integer_as_long(bt); assert(condition_stride_ok(mask, stride_con), "illegal condition"); assert(mask != BoolTest::ne, "unexpected condition"); - assert(phi_incr == NULL, "bad loop shape"); + assert(phi_incr == nullptr, "bad loop shape"); assert(cmp->in(1) == incr, "bad exit test shape"); // Safepoint on backedge not supported @@ -1490,12 +1490,12 @@ bool PhaseIdealLoop::convert_to_long_loop(Node* cmp, Node* phi, IdealLoopTree* l break; } case Op_CmpI: { - Node* clone = new CmpLNode(NULL, NULL); + Node* clone = new CmpLNode(nullptr, nullptr); old_new.map(n->_idx, clone); break; } case Op_AddI: { - Node* clone = new AddLNode(NULL, NULL); + Node* clone = new AddLNode(nullptr, nullptr); old_new.map(n->_idx, clone); break; } @@ -1510,7 +1510,7 @@ bool PhaseIdealLoop::convert_to_long_loop(Node* cmp, Node* phi, IdealLoopTree* l for (uint i = 1; i < n->req(); i++) { Node* in = n->in(i); - if (in == NULL) { + if (in == nullptr) { continue; } if (loop->is_member(get_loop(get_ctrl(in)))) { @@ -1523,7 +1523,7 @@ bool PhaseIdealLoop::convert_to_long_loop(Node* cmp, Node* phi, IdealLoopTree* l for (uint i = 0; i < iv_nodes.size(); i++) { Node* n = iv_nodes.at(i); Node* clone = old_new[n->_idx]; - if (clone != NULL) { + if (clone != nullptr) { _igvn.remove_dead_node(clone); } } @@ -1535,20 +1535,20 @@ bool 
PhaseIdealLoop::convert_to_long_loop(Node* cmp, Node* phi, IdealLoopTree* l Node* clone = old_new[n->_idx]; for (uint i = 1; i < n->req(); i++) { Node* in = n->in(i); - if (in == NULL) { + if (in == nullptr) { continue; } Node* in_clone = old_new[in->_idx]; - if (in_clone == NULL) { + if (in_clone == nullptr) { assert(_igvn.type(in)->isa_int(), ""); in_clone = new ConvI2LNode(in); _igvn.register_new_node_with_optimizer(in_clone); set_subtree_ctrl(in_clone, false); } - if (in_clone->in(0) == NULL) { + if (in_clone->in(0) == nullptr) { in_clone->set_req(0, C->top()); clone->set_req(i, in_clone); - in_clone->set_req(0, NULL); + in_clone->set_req(0, nullptr); } else { clone->set_req(i, in_clone); } @@ -1561,13 +1561,13 @@ bool PhaseIdealLoop::convert_to_long_loop(Node* cmp, Node* phi, IdealLoopTree* l Node* n = iv_nodes.at(i); Node* clone = old_new[n->_idx]; set_subtree_ctrl(clone, false); - Node* m = n->Opcode() == Op_CmpI ? clone : NULL; + Node* m = n->Opcode() == Op_CmpI ? clone : nullptr; for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { Node* u = n->fast_out(i); if (iv_nodes.member(u)) { continue; } - if (m == NULL) { + if (m == nullptr) { m = new ConvL2INode(clone); _igvn.register_new_node_with_optimizer(m); set_subtree_ctrl(m, false); @@ -1586,16 +1586,16 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*&loop, BasicType iv_ PhaseGVN *gvn = &_igvn; Node* back_control = loop_exit_control(x, loop); - if (back_control == NULL) { + if (back_control == nullptr) { return false; } BoolTest::mask bt = BoolTest::illegal; float cl_prob = 0; - Node* incr = NULL; - Node* limit = NULL; + Node* incr = nullptr; + Node* limit = nullptr; Node* cmp = loop_exit_test(back_control, loop, incr, limit, bt, cl_prob); - if (cmp == NULL || cmp->Opcode() != Op_Cmp(iv_bt)) { + if (cmp == nullptr || cmp->Opcode() != Op_Cmp(iv_bt)) { return false; // Avoid pointer & float & 64-bit compares } @@ -1604,25 +1604,25 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, 
IdealLoopTree*&loop, BasicType iv_ incr = incr->in(1); } - Node* phi_incr = NULL; + Node* phi_incr = nullptr; incr = loop_iv_incr(incr, x, loop, phi_incr); - if (incr == NULL) { + if (incr == nullptr) { return false; } - Node* trunc1 = NULL; - Node* trunc2 = NULL; - const TypeInteger* iv_trunc_t = NULL; + Node* trunc1 = nullptr; + Node* trunc2 = nullptr; + const TypeInteger* iv_trunc_t = nullptr; Node* orig_incr = incr; if (!(incr = CountedLoopNode::match_incr_with_optional_truncation(incr, &trunc1, &trunc2, &iv_trunc_t, iv_bt))) { return false; // Funny increment opcode } assert(incr->Opcode() == Op_Add(iv_bt), "wrong increment code"); - Node* xphi = NULL; + Node* xphi = nullptr; Node* stride = loop_iv_stride(incr, loop, xphi); - if (stride == NULL) { + if (stride == nullptr) { return false; } @@ -1636,9 +1636,9 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*&loop, BasicType iv_ PhiNode* phi = loop_iv_phi(xphi, phi_incr, x, loop); - if (phi == NULL || - (trunc1 == NULL && phi->in(LoopNode::LoopBackControl) != incr) || - (trunc1 != NULL && phi->in(LoopNode::LoopBackControl) != trunc1)) { + if (phi == nullptr || + (trunc1 == nullptr && phi->in(LoopNode::LoopBackControl) != incr) || + (trunc1 != nullptr && phi->in(LoopNode::LoopBackControl) != trunc1)) { return false; } @@ -1648,7 +1648,7 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*&loop, BasicType iv_ BoolNode* test = iff->in(1)->as_Bool(); const TypeInteger* limit_t = gvn->type(limit)->is_integer(iv_bt); - if (trunc1 != NULL) { + if (trunc1 != nullptr) { // When there is a truncation, we must be sure that after the truncation // the trip counter will end up higher than the limit, otherwise we are looking // at an endless loop. Can happen with range checks. @@ -1677,7 +1677,7 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*&loop, BasicType iv_ // If iv trunc type is smaller than int, check for possible wrap. 
if (!TypeInteger::bottom(iv_bt)->higher_equal(iv_trunc_t)) { - assert(trunc1 != NULL, "must have found some truncation"); + assert(trunc1 != nullptr, "must have found some truncation"); // Get a better type for the phi (filtered thru if's) const TypeInteger* phi_ft = filtered_type(phi); @@ -1707,7 +1707,7 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*&loop, BasicType iv_ // No possibility of wrap so truncation can be discarded // Promote iv type to Int } else { - assert(trunc1 == NULL && trunc2 == NULL, "no truncation for int"); + assert(trunc1 == nullptr && trunc2 == nullptr, "no truncation for int"); } if (!condition_stride_ok(bt, stride_con)) { @@ -1726,7 +1726,7 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*&loop, BasicType iv_ } } - if (phi_incr != NULL && bt != BoolTest::ne) { + if (phi_incr != nullptr && bt != BoolTest::ne) { // check if there is a possibility of IV overflowing after the first increment if (stride_con > 0) { if (init_t->hi_as_long() > max_signed_integer(iv_bt) - stride_con) { @@ -1769,7 +1769,7 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*&loop, BasicType iv_ // to be adjusted to keep trip count the same and the // adjusted limit should be checked for int overflow. 
Node* adjusted_limit = limit; - if (phi_incr != NULL) { + if (phi_incr != nullptr) { stride_m += stride_con; } @@ -1870,13 +1870,13 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*&loop, BasicType iv_ } } - Node* sfpt = NULL; - if (loop->_child == NULL) { + Node* sfpt = nullptr; + if (loop->_child == nullptr) { sfpt = find_safepoint(back_control, x, loop); } else { sfpt = iff->in(0); if (sfpt->Opcode() != Op_SafePoint) { - sfpt = NULL; + sfpt = nullptr; } } @@ -1884,7 +1884,7 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*&loop, BasicType iv_ Node* backedge_sfpt = x->in(LoopNode::LoopBackControl); if (((iv_bt == T_INT && LoopStripMiningIter != 0) || iv_bt == T_LONG) && - sfpt == NULL) { + sfpt == nullptr) { // Leaving the safepoint on the backedge and creating a // CountedLoop will confuse optimizations. We can't move the // safepoint around because its jvm state wouldn't match a new @@ -1893,7 +1893,7 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*&loop, BasicType iv_ } if (is_deleteable_safept(backedge_sfpt)) { lazy_replace(backedge_sfpt, iftrue); - if (loop->_safepts != NULL) { + if (loop->_safepts != nullptr) { loop->_safepts->yank(backedge_sfpt); } loop->_tail = iftrue; @@ -1905,13 +1905,13 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*&loop, BasicType iv_ if (iv_bt == T_INT && !x->as_Loop()->is_loop_nest_inner_loop() && StressLongCountedLoop > 0 && - trunc1 == NULL && + trunc1 == nullptr && convert_to_long_loop(cmp, phi, loop)) { return false; } #endif - if (phi_incr != NULL) { + if (phi_incr != nullptr) { // If compare points directly to the phi we need to adjust // the compare so that it points to the incr. 
Limit have // to be adjusted to keep trip count the same and we @@ -2012,10 +2012,10 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*&loop, BasicType iv_ Node* entry_control = init_control; bool strip_mine_loop = iv_bt == T_INT && - loop->_child == NULL && - sfpt != NULL && + loop->_child == nullptr && + sfpt != nullptr && !loop->_has_call; - IdealLoopTree* outer_ilt = NULL; + IdealLoopTree* outer_ilt = nullptr; if (strip_mine_loop) { outer_ilt = create_outer_strip_mined_loop(test, cmp, init_control, loop, cl_prob, le->_fcnt, entry_control, @@ -2039,7 +2039,7 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*&loop, BasicType iv_ if (iv_bt == T_INT && (LoopStripMiningIter == 0 || strip_mine_loop)) { // Check for immediately preceding SafePoint and remove - if (sfpt != NULL && (strip_mine_loop || is_deleteable_safept(sfpt))) { + if (sfpt != nullptr && (strip_mine_loop || is_deleteable_safept(sfpt))) { if (strip_mine_loop) { Node* outer_le = outer_ilt->_tail->in(0); Node* sfpt_clone = sfpt->clone(); @@ -2061,7 +2061,7 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*&loop, BasicType iv_ set_idom(outer_le, sfpt_clone, dom_depth(sfpt_clone)); } lazy_replace(sfpt, sfpt->in(TypeFunc::Control)); - if (loop->_safepts != NULL) { + if (loop->_safepts != nullptr) { loop->_safepts->yank(sfpt); } } @@ -2116,7 +2116,7 @@ Node* PhaseIdealLoop::exact_limit( IdealLoopTree *loop ) { // Loop limit is exact with stride == 1. And loop may already have exact limit. 
return cl->limit(); } - Node *limit = NULL; + Node *limit = nullptr; #ifdef ASSERT BoolTest::mask bt = cl->loopexit()->test_trip(); assert(bt == BoolTest::lt || bt == BoolTest::gt, "canonical test is expected"); @@ -2139,7 +2139,7 @@ Node* PhaseIdealLoop::exact_limit( IdealLoopTree *loop ) { limit = new LoopLimitNode(C, cl->init_trip(), cl->limit(), cl->stride()); register_new_node(limit, cl->in(LoopNode::EntryControl)); } - assert(limit != NULL, "sanity"); + assert(limit != nullptr, "sanity"); return limit; } @@ -2155,8 +2155,8 @@ Node *LoopNode::Ideal(PhaseGVN *phase, bool can_reshape) { #ifdef ASSERT void LoopNode::verify_strip_mined(int expect_skeleton) const { - const OuterStripMinedLoopNode* outer = NULL; - const CountedLoopNode* inner = NULL; + const OuterStripMinedLoopNode* outer = nullptr; + const CountedLoopNode* inner = nullptr; if (is_strip_mined()) { if (!is_valid_counted_loop(T_INT)) { return; // Skip malformed counted loop @@ -2170,8 +2170,8 @@ void LoopNode::verify_strip_mined(int expect_skeleton) const { assert(inner->is_valid_counted_loop(T_INT) && inner->is_strip_mined(), "OuterStripMinedLoop should have been removed"); assert(!is_strip_mined(), "outer loop shouldn't be marked strip mined"); } - if (inner != NULL || outer != NULL) { - assert(inner != NULL && outer != NULL, "missing loop in strip mined nest"); + if (inner != nullptr || outer != nullptr) { + assert(inner != nullptr && outer != nullptr, "missing loop in strip mined nest"); Node* outer_tail = outer->in(LoopNode::LoopBackControl); Node* outer_le = outer_tail->in(0); assert(outer_le->Opcode() == Op_OuterStripMinedLoopEnd, "tail of outer loop should be an If"); @@ -2306,19 +2306,19 @@ Node *LoopLimitNode::Ideal(PhaseGVN *phase, bool can_reshape) { if (phase->type(in(Init)) == Type::TOP || phase->type(in(Limit)) == Type::TOP || phase->type(in(Stride)) == Type::TOP) - return NULL; // Dead + return nullptr; // Dead int stride_con = phase->type(in(Stride))->is_int()->get_con(); if 
(stride_con == 1) - return NULL; // Identity + return nullptr; // Identity if (in(Init)->is_Con() && in(Limit)->is_Con()) - return NULL; // Value + return nullptr; // Value // Delay following optimizations until all loop optimizations // done to keep Ideal graph simple. if (!can_reshape || !phase->C->post_loop_opts_phase()) { - return NULL; + return nullptr; } const TypeInt* init_t = phase->type(in(Init) )->is_int(); @@ -2380,7 +2380,7 @@ Node *LoopLimitNode::Ideal(PhaseGVN *phase, bool can_reshape) { return new AddINode(span_int, in(Init)); // exact limit } - return NULL; // No progress + return nullptr; // No progress } //------------------------------Identity--------------------------------------- @@ -2396,15 +2396,15 @@ Node* LoopLimitNode::Identity(PhaseGVN* phase) { //----------------------match_incr_with_optional_truncation-------------------- // Match increment with optional truncation: // CHAR: (i+1)&0x7fff, BYTE: ((i+1)<<8)>>8, or SHORT: ((i+1)<<16)>>16 -// Return NULL for failure. Success returns the increment node. +// Return null for failure. Success returns the increment node. 
Node* CountedLoopNode::match_incr_with_optional_truncation(Node* expr, Node** trunc1, Node** trunc2, const TypeInteger** trunc_type, BasicType bt) { // Quick cutouts: - if (expr == NULL || expr->req() != 3) return NULL; + if (expr == nullptr || expr->req() != 3) return nullptr; - Node *t1 = NULL; - Node *t2 = NULL; + Node *t1 = nullptr; + Node *t2 = nullptr; Node* n1 = expr; int n1op = n1->Opcode(); const TypeInteger* trunc_t = TypeInteger::bottom(bt); @@ -2420,7 +2420,7 @@ Node* CountedLoopNode::match_incr_with_optional_truncation(Node* expr, Node** tr n1op = n1->Opcode(); trunc_t = TypeInt::CHAR; } else if (n1op == Op_RShiftI && - n1->in(1) != NULL && + n1->in(1) != nullptr && n1->in(1)->Opcode() == Op_LShiftI && n1->in(2) == n1->in(1)->in(2) && n1->in(2)->is_Con()) { @@ -2449,11 +2449,11 @@ Node* CountedLoopNode::match_incr_with_optional_truncation(Node* expr, Node** tr } // failed - return NULL; + return nullptr; } LoopNode* CountedLoopNode::skip_strip_mined(int expect_skeleton) { - if (is_strip_mined() && in(EntryControl) != NULL && in(EntryControl)->is_OuterStripMinedLoop()) { + if (is_strip_mined() && in(EntryControl) != nullptr && in(EntryControl)->is_OuterStripMinedLoop()) { verify_strip_mined(expect_skeleton); return in(EntryControl)->as_Loop(); } @@ -2463,76 +2463,76 @@ LoopNode* CountedLoopNode::skip_strip_mined(int expect_skeleton) { OuterStripMinedLoopNode* CountedLoopNode::outer_loop() const { assert(is_strip_mined(), "not a strip mined loop"); Node* c = in(EntryControl); - if (c == NULL || c->is_top() || !c->is_OuterStripMinedLoop()) { - return NULL; + if (c == nullptr || c->is_top() || !c->is_OuterStripMinedLoop()) { + return nullptr; } return c->as_OuterStripMinedLoop(); } IfTrueNode* OuterStripMinedLoopNode::outer_loop_tail() const { Node* c = in(LoopBackControl); - if (c == NULL || c->is_top()) { - return NULL; + if (c == nullptr || c->is_top()) { + return nullptr; } return c->as_IfTrue(); } IfTrueNode* CountedLoopNode::outer_loop_tail() const { 
LoopNode* l = outer_loop(); - if (l == NULL) { - return NULL; + if (l == nullptr) { + return nullptr; } return l->outer_loop_tail(); } OuterStripMinedLoopEndNode* OuterStripMinedLoopNode::outer_loop_end() const { IfTrueNode* proj = outer_loop_tail(); - if (proj == NULL) { - return NULL; + if (proj == nullptr) { + return nullptr; } Node* c = proj->in(0); - if (c == NULL || c->is_top() || c->outcnt() != 2) { - return NULL; + if (c == nullptr || c->is_top() || c->outcnt() != 2) { + return nullptr; } return c->as_OuterStripMinedLoopEnd(); } OuterStripMinedLoopEndNode* CountedLoopNode::outer_loop_end() const { LoopNode* l = outer_loop(); - if (l == NULL) { - return NULL; + if (l == nullptr) { + return nullptr; } return l->outer_loop_end(); } IfFalseNode* OuterStripMinedLoopNode::outer_loop_exit() const { IfNode* le = outer_loop_end(); - if (le == NULL) { - return NULL; + if (le == nullptr) { + return nullptr; } Node* c = le->proj_out_or_null(false); - if (c == NULL) { - return NULL; + if (c == nullptr) { + return nullptr; } return c->as_IfFalse(); } IfFalseNode* CountedLoopNode::outer_loop_exit() const { LoopNode* l = outer_loop(); - if (l == NULL) { - return NULL; + if (l == nullptr) { + return nullptr; } return l->outer_loop_exit(); } SafePointNode* OuterStripMinedLoopNode::outer_safepoint() const { IfNode* le = outer_loop_end(); - if (le == NULL) { - return NULL; + if (le == nullptr) { + return nullptr; } Node* c = le->in(0); - if (c == NULL || c->is_top()) { - return NULL; + if (c == nullptr || c->is_top()) { + return nullptr; } assert(c->Opcode() == Op_SafePoint, "broken outer loop"); return c->as_SafePoint(); @@ -2540,16 +2540,16 @@ SafePointNode* OuterStripMinedLoopNode::outer_safepoint() const { SafePointNode* CountedLoopNode::outer_safepoint() const { LoopNode* l = outer_loop(); - if (l == NULL) { - return NULL; + if (l == nullptr) { + return nullptr; } return l->outer_safepoint(); } Node* CountedLoopNode::skip_predicates_from_entry(Node* ctrl) { - while (ctrl 
!= NULL && ctrl->is_Proj() && ctrl->in(0) != NULL && ctrl->in(0)->is_If() && + while (ctrl != nullptr && ctrl->is_Proj() && ctrl->in(0) != nullptr && ctrl->in(0)->is_If() && !is_zero_trip_guard_if(ctrl->in(0)->as_If()) && - (ctrl->in(0)->as_If()->proj_out_or_null(1-ctrl->as_Proj()->_con) == NULL || + (ctrl->in(0)->as_If()->proj_out_or_null(1-ctrl->as_Proj()->_con) == nullptr || (ctrl->in(0)->as_If()->proj_out(1-ctrl->as_Proj()->_con)->outcnt() == 1 && ctrl->in(0)->as_If()->proj_out(1-ctrl->as_Proj()->_con)->unique_out()->Opcode() == Op_Halt))) { ctrl = ctrl->in(0)->in(0); @@ -2559,16 +2559,16 @@ Node* CountedLoopNode::skip_predicates_from_entry(Node* ctrl) { } bool CountedLoopNode::is_zero_trip_guard_if(const IfNode* iff) { - if (iff->in(1) == NULL || !iff->in(1)->is_Bool()) { + if (iff->in(1) == nullptr || !iff->in(1)->is_Bool()) { return false; } - if (iff->in(1)->in(1) == NULL || iff->in(1)->in(1)->Opcode() != Op_CmpI) { + if (iff->in(1)->in(1) == nullptr || iff->in(1)->in(1)->Opcode() != Op_CmpI) { return false; } - if (iff->in(1)->in(1)->in(1) != NULL && iff->in(1)->in(1)->in(1)->Opcode() == Op_OpaqueZeroTripGuard) { + if (iff->in(1)->in(1)->in(1) != nullptr && iff->in(1)->in(1)->in(1)->Opcode() == Op_OpaqueZeroTripGuard) { return true; } - if (iff->in(1)->in(1)->in(2) != NULL && iff->in(1)->in(1)->in(2)->Opcode() == Op_OpaqueZeroTripGuard) { + if (iff->in(1)->in(1)->in(2) != nullptr && iff->in(1)->in(1)->in(2)->Opcode() == Op_OpaqueZeroTripGuard) { return true; } return false; @@ -2588,7 +2588,7 @@ Node* CountedLoopNode::skip_predicates() { int CountedLoopNode::stride_con() const { CountedLoopEndNode* cle = loopexit_or_null(); - return cle != NULL ? cle->stride_con() : 0; + return cle != nullptr ? 
cle->stride_con() : 0; } BaseCountedLoopNode* BaseCountedLoopNode::make(Node* entry, Node* backedge, BasicType bt) { @@ -2621,21 +2621,21 @@ void OuterStripMinedLoopNode::fix_sunk_stores(CountedLoopEndNode* inner_cle, Loo } Node* last = u; for (;;) { - Node* next = NULL; + Node* next = nullptr; for (DUIterator_Fast jmax, j = last->fast_outs(jmax); j < jmax; j++) { Node* uu = last->fast_out(j); if (uu->is_Store() && uu->in(0) == cle_out) { - assert(next == NULL, "only one in the outer loop"); + assert(next == nullptr, "only one in the outer loop"); next = uu; assert(igvn->C->get_alias_index(next->adr_type()) == alias_idx, ""); } } - if (next == NULL) { + if (next == nullptr) { break; } last = next; } - Node* phi = NULL; + Node* phi = nullptr; for (DUIterator_Fast jmax, j = inner_cl->fast_outs(jmax); j < jmax; j++) { Node* uu = inner_cl->fast_out(j); if (uu->is_Phi()) { @@ -2645,7 +2645,7 @@ void OuterStripMinedLoopNode::fix_sunk_stores(CountedLoopEndNode* inner_cle, Loo } if (be == last || be == first->in(MemNode::Memory)) { assert(igvn->C->get_alias_index(uu->adr_type()) == alias_idx || igvn->C->get_alias_index(uu->adr_type()) == Compile::AliasIdxBot, "unexpected alias"); - assert(phi == NULL, "only one phi"); + assert(phi == nullptr, "only one phi"); phi = uu; } } @@ -2682,7 +2682,7 @@ void OuterStripMinedLoopNode::fix_sunk_stores(CountedLoopEndNode* inner_cle, Loo } } #endif - if (phi == NULL) { + if (phi == nullptr) { // If an entire chains was sunk, the // inner loop has no phi for that memory // slice, create one for the outer loop @@ -2728,11 +2728,11 @@ void OuterStripMinedLoopNode::adjust_strip_mined_loop(PhaseIterGVN* igvn) { return; } if (LoopStripMiningIter == 1) { - transform_to_counted_loop(igvn, NULL); + transform_to_counted_loop(igvn, nullptr); return; } Node* inner_iv_phi = inner_cl->phi(); - if (inner_iv_phi == NULL) { + if (inner_iv_phi == nullptr) { IfNode* outer_le = outer_loop_end(); Node* iff = igvn->transform(new IfNode(outer_le->in(0), 
outer_le->in(1), outer_le->_prob, outer_le->_fcnt)); igvn->replace_node(outer_le, iff); @@ -2800,18 +2800,18 @@ void OuterStripMinedLoopNode::adjust_strip_mined_loop(PhaseIterGVN* igvn) { for (uint next = 0; next < backedge_nodes.size(); next++) { Node *n = old_new[backedge_nodes.at(next)->_idx]; for (uint i = 1; i < n->req(); i++) { - if (n->in(i) != NULL && old_new[n->in(i)->_idx] != NULL) { + if (n->in(i) != nullptr && old_new[n->in(i)->_idx] != nullptr) { n->set_req(i, old_new[n->in(i)->_idx]); } } - if (n->in(0) != NULL && n->in(0) == cle_tail) { + if (n->in(0) != nullptr && n->in(0) == cle_tail) { n->set_req(0, le_tail); } igvn->register_new_node_with_optimizer(n); } } - Node* iv_phi = NULL; + Node* iv_phi = nullptr; // Make a clone of each phi in the inner loop // for the outer loop for (uint i = 0; i < inner_cl->outcnt(); i++) { @@ -2821,7 +2821,7 @@ void OuterStripMinedLoopNode::adjust_strip_mined_loop(PhaseIterGVN* igvn) { Node* phi = u->clone(); phi->set_req(0, this); Node* be = old_new[phi->in(LoopNode::LoopBackControl)->_idx]; - if (be != NULL) { + if (be != nullptr) { phi->set_req(LoopNode::LoopBackControl, be); } phi = igvn->transform(phi); @@ -2832,14 +2832,14 @@ void OuterStripMinedLoopNode::adjust_strip_mined_loop(PhaseIterGVN* igvn) { } } - if (iv_phi != NULL) { + if (iv_phi != nullptr) { // Now adjust the inner loop's exit condition Node* limit = inner_cl->limit(); // If limit < init for stride > 0 (or limit > init for stride < 0), // the loop body is run only once. Given limit - init (init - limit resp.) // would be negative, the unsigned comparison below would cause // the loop body to be run for LoopStripMiningIter. 
- Node* max = NULL; + Node* max = nullptr; if (stride > 0) { max = MaxNode::max_diff_with_zero(limit, iv_phi, TypeInt::INT, *igvn); } else { @@ -2853,7 +2853,7 @@ void OuterStripMinedLoopNode::adjust_strip_mined_loop(PhaseIterGVN* igvn) { // unsigned_min(max(limit - iv_phi, 0), scaled_iters) if stride > 0 // unsigned_min(max(iv_phi - limit, 0), scaled_iters) if stride < 0 - Node* new_limit = NULL; + Node* new_limit = nullptr; if (stride > 0) { new_limit = igvn->transform(new AddINode(min, iv_phi)); } else { @@ -2891,14 +2891,14 @@ void OuterStripMinedLoopNode::transform_to_counted_loop(PhaseIterGVN* igvn, Phas // make counted loop exit test always fail ConINode* zero = igvn->intcon(0); - if (iloop != NULL) { + if (iloop != nullptr) { iloop->set_ctrl(zero, igvn->C->root()); } igvn->replace_input_of(cle, 1, zero); // replace outer loop end with CountedLoopEndNode with formers' CLE's exit test Node* new_end = new CountedLoopEndNode(outer_le->in(0), inner_test, cle->_prob, cle->_fcnt); register_control(new_end, inner_cl, outer_le->in(0), igvn, iloop); - if (iloop == NULL) { + if (iloop == nullptr) { igvn->replace_node(outer_le, new_end); } else { iloop->lazy_replace(outer_le, new_end); @@ -2906,14 +2906,14 @@ void OuterStripMinedLoopNode::transform_to_counted_loop(PhaseIterGVN* igvn, Phas // the backedge of the inner loop must be rewired to the new loop end Node* backedge = cle->proj_out(true); igvn->replace_input_of(backedge, 0, new_end); - if (iloop != NULL) { + if (iloop != nullptr) { iloop->set_idom(backedge, new_end, iloop->dom_depth(new_end) + 1); } // make the outer loop go away igvn->replace_input_of(in(LoopBackControl), 0, igvn->C->top()); igvn->replace_input_of(this, LoopBackControl, igvn->C->top()); inner_cl->clear_strip_mined(); - if (iloop != NULL) { + if (iloop != nullptr) { Unique_Node_List wq; wq.push(safepoint); @@ -2924,7 +2924,7 @@ void OuterStripMinedLoopNode::transform_to_counted_loop(PhaseIterGVN* igvn, Phas Node* n = wq.at(i); for (uint j = 0; j 
< n->req(); ++j) { Node* in = n->in(j); - if (in == NULL || in->is_CFG()) { + if (in == nullptr || in->is_CFG()) { continue; } if (iloop->get_loop(iloop->get_ctrl(in)) != outer_loop_ilt) { @@ -2953,7 +2953,7 @@ void OuterStripMinedLoopNode::remove_outer_loop_and_safepoint(PhaseIterGVN* igvn } Node* OuterStripMinedLoopNode::register_new_node(Node* node, LoopNode* ctrl, PhaseIterGVN* igvn, PhaseIdealLoop* iloop) { - if (iloop == NULL) { + if (iloop == nullptr) { return igvn->transform(node); } iloop->register_new_node(node, ctrl); @@ -2962,7 +2962,7 @@ Node* OuterStripMinedLoopNode::register_new_node(Node* node, LoopNode* ctrl, Pha Node* OuterStripMinedLoopNode::register_control(Node* node, Node* loop, Node* idom, PhaseIterGVN* igvn, PhaseIdealLoop* iloop) { - if (iloop == NULL) { + if (iloop == nullptr) { return igvn->transform(node); } iloop->register_control(node, iloop->get_loop(loop), idom); @@ -2986,10 +2986,10 @@ bool OuterStripMinedLoopEndNode::is_expanded(PhaseGVN *phase) const { // The outer strip mined loop head only has Phi uses after expansion if (phase->is_IterGVN()) { Node* backedge = proj_out_or_null(true); - if (backedge != NULL) { + if (backedge != nullptr) { Node* head = backedge->unique_ctrl_out_or_null(); - if (head != NULL && head->is_OuterStripMinedLoop()) { - if (head->find_out_with(Op_Phi) != NULL) { + if (head != nullptr && head->is_OuterStripMinedLoop()) { + if (head->find_out_with(Op_Phi) != nullptr) { return true; } } @@ -3001,7 +3001,7 @@ bool OuterStripMinedLoopEndNode::is_expanded(PhaseGVN *phase) const { Node *OuterStripMinedLoopEndNode::Ideal(PhaseGVN *phase, bool can_reshape) { if (remove_dead_region(phase, can_reshape)) return this; - return NULL; + return nullptr; } //------------------------------filtered_type-------------------------------- @@ -3022,22 +3022,22 @@ Node *OuterStripMinedLoopEndNode::Ideal(PhaseGVN *phase, bool can_reshape) { // const TypeInt* PhaseIdealLoop::filtered_type( Node *n, Node* n_ctrl) { assert(n && 
n->bottom_type()->is_int(), "must be int"); - const TypeInt* filtered_t = NULL; + const TypeInt* filtered_t = nullptr; if (!n->is_Phi()) { - assert(n_ctrl != NULL || n_ctrl == C->top(), "valid control"); + assert(n_ctrl != nullptr || n_ctrl == C->top(), "valid control"); filtered_t = filtered_type_from_dominators(n, n_ctrl); } else { Node* phi = n->as_Phi(); Node* region = phi->in(0); - assert(n_ctrl == NULL || n_ctrl == region, "ctrl parameter must be region"); + assert(n_ctrl == nullptr || n_ctrl == region, "ctrl parameter must be region"); if (region && region != C->top()) { for (uint i = 1; i < phi->req(); i++) { Node* val = phi->in(i); Node* use_c = region->in(i); const TypeInt* val_t = filtered_type_from_dominators(val, use_c); - if (val_t != NULL) { - if (filtered_t == NULL) { + if (val_t != nullptr) { + if (filtered_t == nullptr) { filtered_t = val_t; } else { filtered_t = filtered_t->meet(val_t)->is_int(); @@ -3047,7 +3047,7 @@ const TypeInt* PhaseIdealLoop::filtered_type( Node *n, Node* n_ctrl) { } } const TypeInt* n_t = _igvn.type(n)->is_int(); - if (filtered_t != NULL) { + if (filtered_t != nullptr) { n_t = n_t->join(filtered_t)->is_int(); } return n_t; @@ -3061,7 +3061,7 @@ const TypeInt* PhaseIdealLoop::filtered_type_from_dominators( Node* val, Node *u return val->bottom_type()->is_int(); } uint if_limit = 10; // Max number of dominating if's visited - const TypeInt* rtn_t = NULL; + const TypeInt* rtn_t = nullptr; if (use_ctrl && use_ctrl != C->top()) { Node* val_ctrl = get_ctrl(val); @@ -3072,8 +3072,8 @@ const TypeInt* PhaseIdealLoop::filtered_type_from_dominators( Node* val, Node *u if ((pred->Opcode() == Op_IfTrue || pred->Opcode() == Op_IfFalse)) { if_cnt++; const TypeInt* if_t = IfNode::filtered_int_type(&_igvn, val, pred); - if (if_t != NULL) { - if (rtn_t == NULL) { + if (if_t != nullptr) { + if (rtn_t == nullptr) { rtn_t = if_t; } else { rtn_t = rtn_t->join(if_t)->is_int(); @@ -3081,7 +3081,7 @@ const TypeInt* 
PhaseIdealLoop::filtered_type_from_dominators( Node* val, Node *u } } pred = idom(pred); - if (pred == NULL || pred == C->top()) { + if (pred == nullptr || pred == C->top()) { break; } // Stop if going beyond definition block of val @@ -3098,7 +3098,7 @@ const TypeInt* PhaseIdealLoop::filtered_type_from_dominators( Node* val, Node *u // Dump special per-node info #ifndef PRODUCT void CountedLoopEndNode::dump_spec(outputStream *st) const { - if( in(TestValue) != NULL && in(TestValue)->is_Bool() ) { + if( in(TestValue) != nullptr && in(TestValue)->is_Bool() ) { BoolTest bt( test_trip()); // Added this for g++. st->print("["); @@ -3284,7 +3284,7 @@ static float estimate_path_freq( Node *n ) { ciMethodData* methodData = jvms->method()->method_data(); if (!methodData->is_mature()) return 0.0f; // No call-site data ciProfileData* data = methodData->bci_to_data(jvms->bci()); - if ((data == NULL) || !data->is_CounterData()) { + if ((data == nullptr) || !data->is_CounterData()) { // no call profile available, try call's control input n = n->in(0); continue; @@ -3339,7 +3339,7 @@ void IdealLoopTree::merge_many_backedges( PhaseIdealLoop *phase ) { // them all except optionally hot_idx. 
PhaseIterGVN &igvn = phase->_igvn; - Node *hot_tail = NULL; + Node *hot_tail = nullptr; // Make a Region for the merge point Node *r = new RegionNode(1); for( i = 2; i < _head->req(); i++ ) { @@ -3359,7 +3359,7 @@ void IdealLoopTree::merge_many_backedges( PhaseIdealLoop *phase ) { if( out->is_Phi() ) { PhiNode* n = out->as_Phi(); igvn.hash_delete(n); // Delete from hash before hacking edges - Node *hot_phi = NULL; + Node *hot_phi = nullptr; Node *phi = new PhiNode(r, n->type(), n->adr_type()); // Check all inputs for the ones to peel out uint j = 1; @@ -3406,7 +3406,7 @@ void IdealLoopTree::merge_many_backedges( PhaseIdealLoop *phase ) { while( *cp ) cp = &(*cp)->_next; // Find end of child list *cp = ilt->_next; // Hang next list at end of child list *pilt = ilt->_child; // Move child up to replace ilt - ilt->_head = NULL; // Flag as a loop UNIONED into parent + ilt->_head = nullptr; // Flag as a loop UNIONED into parent ilt = ilt->_child; // Repeat using new ilt continue; // do not advance over ilt->_child } @@ -3520,7 +3520,7 @@ void IdealLoopTree::allpaths_check_safepts(VectorSet &visited, Node_List &stack) // Terminate this path } else if (n->Opcode() == Op_SafePoint) { if (_phase->get_loop(n) != this) { - if (_required_safept == NULL) _required_safept = new Node_List(); + if (_required_safept == nullptr) _required_safept = new Node_List(); _required_safept->push(n); // save the one closest to the tail } // Terminate this path @@ -3598,10 +3598,10 @@ void IdealLoopTree::check_safepts(VectorSet &visited, Node_List &stack) { if (_child) _child->check_safepts(visited, stack); if (_next) _next ->check_safepts(visited, stack); - if (!_head->is_CountedLoop() && !_has_sfpt && _parent != NULL && !_irreducible) { - bool has_call = false; // call on dom-path - bool has_local_ncsfpt = false; // ncsfpt on dom-path at this loop depth - Node* nonlocal_ncsfpt = NULL; // ncsfpt on dom-path at a deeper depth + if (!_head->is_CountedLoop() && !_has_sfpt && _parent != nullptr && 
!_irreducible) { + bool has_call = false; // call on dom-path + bool has_local_ncsfpt = false; // ncsfpt on dom-path at this loop depth + Node* nonlocal_ncsfpt = nullptr; // ncsfpt on dom-path at a deeper depth // Scan the dom-path nodes from tail to head for (Node* n = tail(); n != _head; n = _phase->idom(n)) { if (n->is_Call() && n->as_Call()->guaranteed_safepoint()) { @@ -3613,7 +3613,7 @@ void IdealLoopTree::check_safepts(VectorSet &visited, Node_List &stack) { has_local_ncsfpt = true; break; } - if (nonlocal_ncsfpt == NULL) { + if (nonlocal_ncsfpt == nullptr) { nonlocal_ncsfpt = n; // save the one closest to the tail } } else { @@ -3652,9 +3652,9 @@ void IdealLoopTree::check_safepts(VectorSet &visited, Node_List &stack) { } // Record safept's that this loop needs preserved when an // inner loop attempts to delete it's safepoints. - if (_child != NULL && !has_call && !has_local_ncsfpt) { - if (nonlocal_ncsfpt != NULL) { - if (_required_safept == NULL) _required_safept = new Node_List(); + if (_child != nullptr && !has_call && !has_local_ncsfpt) { + if (nonlocal_ncsfpt != nullptr) { + if (_required_safept == nullptr) _required_safept = new Node_List(); _required_safept->push(nonlocal_ncsfpt); } else { // Failed to find a suitable safept on the dom-path. Now use @@ -3670,9 +3670,9 @@ void IdealLoopTree::check_safepts(VectorSet &visited, Node_List &stack) { bool PhaseIdealLoop::is_deleteable_safept(Node* sfpt) { assert(sfpt->Opcode() == Op_SafePoint, ""); IdealLoopTree* lp = get_loop(sfpt)->_parent; - while (lp != NULL) { + while (lp != nullptr) { Node_List* sfpts = lp->_required_safept; - if (sfpts != NULL) { + if (sfpts != nullptr) { for (uint i = 0; i < sfpts->size(); i++) { if (sfpt == sfpts->at(i)) return false; @@ -3692,7 +3692,7 @@ void PhaseIdealLoop::replace_parallel_iv(IdealLoopTree *loop) { return; // skip malformed counted loop } Node *incr = cl->incr(); - if (incr == NULL) { + if (incr == nullptr) { return; // Dead loop? 
} Node *init = cl->init_trip(); @@ -3784,7 +3784,7 @@ void PhaseIdealLoop::replace_parallel_iv(IdealLoopTree *loop) { } void IdealLoopTree::remove_safepoints(PhaseIdealLoop* phase, bool keep_one) { - Node* keep = NULL; + Node* keep = nullptr; if (keep_one) { // Look for a safepoint on the idom-path. for (Node* i = tail(); i != _head; i = phase->idom(i)) { @@ -3798,12 +3798,12 @@ void IdealLoopTree::remove_safepoints(PhaseIdealLoop* phase, bool keep_one) { // Don't remove any safepoints if it is requested to keep a single safepoint and // no safepoint was found on idom-path. It is not safe to remove any safepoint // in this case since there's no safepoint dominating all paths in the loop body. - bool prune = !keep_one || keep != NULL; + bool prune = !keep_one || keep != nullptr; // Delete other safepoints in this loop. Node_List* sfpts = _safepts; - if (prune && sfpts != NULL) { - assert(keep == NULL || keep->Opcode() == Op_SafePoint, "not safepoint"); + if (prune && sfpts != nullptr) { + assert(keep == nullptr || keep->Opcode() == Op_SafePoint, "not safepoint"); for (uint i = 0; i < sfpts->size(); i++) { Node* n = sfpts->at(i); assert(phase->get_loop(n) == this, ""); @@ -3843,7 +3843,7 @@ void IdealLoopTree::counted_loop( PhaseIdealLoop *phase ) { remove_safepoints(phase, true); } else { assert(!_head->is_Loop() || !_head->as_Loop()->is_loop_nest_inner_loop(), "transformation to counted loop should not fail"); - if (_parent != NULL && !_irreducible) { + if (_parent != nullptr && !_irreducible) { // Not a counted loop. Keep one safepoint. 
bool keep_one_sfpt = true; remove_safepoints(phase, keep_one_sfpt); @@ -3852,7 +3852,7 @@ void IdealLoopTree::counted_loop( PhaseIdealLoop *phase ) { // Recursively assert(loop->_child != this || (loop->_head->as_Loop()->is_OuterStripMinedLoop() && _head->as_CountedLoop()->is_strip_mined()), "what kind of loop was added?"); - assert(loop->_child != this || (loop->_child->_child == NULL && loop->_child->_next == NULL), "would miss some loops"); + assert(loop->_child != this || (loop->_child->_child == nullptr && loop->_child->_next == nullptr), "would miss some loops"); if (loop->_child && loop->_child != this) loop->_child->counted_loop(phase); if (loop->_next) loop->_next ->counted_loop(phase); } @@ -3918,14 +3918,14 @@ uint IdealLoopTree::est_loop_flow_merge_sz() const { for (uint k = 0; k < outcnt; k++) { Node* out = node->raw_out(k); - if (out == NULL) continue; + if (out == nullptr) continue; if (out->is_CFG()) { if (!is_member(_phase->get_loop(out))) { ctrl_edge_out_cnt++; } } else if (_phase->has_ctrl(out)) { Node* ctrl = _phase->get_ctrl(out); - assert(ctrl != NULL, "must be"); + assert(ctrl != nullptr, "must be"); assert(ctrl->is_CFG(), "must be"); if (!is_member(_phase->get_loop(ctrl))) { data_edge_out_cnt++; @@ -3951,20 +3951,20 @@ void IdealLoopTree::dump_head() { if (_irreducible) tty->print(" IRREDUCIBLE"); Node* entry = _head->is_Loop() ? 
_head->as_Loop()->skip_strip_mined(-1)->in(LoopNode::EntryControl) : _head->in(LoopNode::EntryControl); Node* predicate = PhaseIdealLoop::find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check); - if (predicate != NULL ) { + if (predicate != nullptr ) { tty->print(" limit_check"); entry = PhaseIdealLoop::skip_loop_predicates(entry); } if (UseProfiledLoopPredicate) { predicate = PhaseIdealLoop::find_predicate_insertion_point(entry, Deoptimization::Reason_profile_predicate); - if (predicate != NULL) { + if (predicate != nullptr) { tty->print(" profile_predicated"); entry = PhaseIdealLoop::skip_loop_predicates(entry); } } if (UseLoopPredicate) { predicate = PhaseIdealLoop::find_predicate_insertion_point(entry, Deoptimization::Reason_predicate); - if (predicate != NULL) { + if (predicate != nullptr) { tty->print(" predicated"); } } @@ -3973,12 +3973,12 @@ void IdealLoopTree::dump_head() { tty->print(" counted"); Node* init_n = cl->init_trip(); - if (init_n != NULL && init_n->is_Con()) + if (init_n != nullptr && init_n->is_Con()) tty->print(" [%d,", cl->init_trip()->get_int()); else tty->print(" [int,"); Node* limit_n = cl->limit(); - if (limit_n != NULL && limit_n->is_Con()) + if (limit_n != nullptr && limit_n->is_Con()) tty->print("%d),", cl->limit()->get_int()); else tty->print("int),"); @@ -3999,10 +3999,10 @@ void IdealLoopTree::dump_head() { if (_has_call) tty->print(" has_call"); if (_has_sfpt) tty->print(" has_sfpt"); if (_rce_candidate) tty->print(" rce"); - if (_safepts != NULL && _safepts->size() > 0) { + if (_safepts != nullptr && _safepts->size() > 0) { tty->print(" sfpts={"); _safepts->dump_simple(); tty->print(" }"); } - if (_required_safept != NULL && _required_safept->size() > 0) { + if (_required_safept != nullptr && _required_safept->size() > 0) { tty->print(" req={"); _required_safept->dump_simple(); tty->print(" }"); } if (Verbose) { @@ -4026,14 +4026,14 @@ void IdealLoopTree::dump() { static void 
log_loop_tree_helper(IdealLoopTree* root, IdealLoopTree* loop, CompileLog* log) { if (loop == root) { - if (loop->_child != NULL) { + if (loop->_child != nullptr) { log->begin_head("loop_tree"); log->end_head(); log_loop_tree_helper(root, loop->_child, log); log->tail("loop_tree"); - assert(loop->_next == NULL, "what?"); + assert(loop->_next == nullptr, "what?"); } - } else if (loop != NULL) { + } else if (loop != nullptr) { Node* head = loop->_head; log->begin_head("loop"); log->print(" idx='%d' ", head->_idx); @@ -4055,7 +4055,7 @@ static void log_loop_tree_helper(IdealLoopTree* root, IdealLoopTree* loop, Compi } void PhaseIdealLoop::log_loop_tree() { - if (C->log() != NULL) { + if (C->log() != nullptr) { log_loop_tree_helper(_ltree_root, _ltree_root, C->log()); } } @@ -4076,14 +4076,14 @@ void PhaseIdealLoop::collect_potentially_useful_predicates(IdealLoopTree* loop, Node* entry = lpn->in(LoopNode::EntryControl); Node* predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check); - if (predicate != NULL) { // right pattern that can be used by loop predication + if (predicate != nullptr) { // right pattern that can be used by loop predication assert(entry->in(0)->in(1)->in(1)->Opcode() == Op_Opaque1, "must be"); useful_predicates.push(entry->in(0)->in(1)->in(1)); // good one entry = skip_loop_predicates(entry); } if (UseProfiledLoopPredicate) { predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_profile_predicate); - if (predicate != NULL) { // right pattern that can be used by loop predication + if (predicate != nullptr) { // right pattern that can be used by loop predication useful_predicates.push(entry->in(0)->in(1)->in(1)); // good one get_skeleton_predicates(entry, useful_predicates, true); entry = skip_loop_predicates(entry); @@ -4092,7 +4092,7 @@ void PhaseIdealLoop::collect_potentially_useful_predicates(IdealLoopTree* loop, if (UseLoopPredicate) { predicate = find_predicate_insertion_point(entry, 
Deoptimization::Reason_predicate); - if (predicate != NULL) { // right pattern that can be used by loop predication + if (predicate != nullptr) { // right pattern that can be used by loop predication useful_predicates.push(entry->in(0)->in(1)->in(1)); // good one get_skeleton_predicates(entry, useful_predicates, true); } @@ -4227,7 +4227,7 @@ bool PhaseIdealLoop::only_has_infinite_loops() { ResourceMark rm; Unique_Node_List worklist; // start traversal at all loop heads of first-level loops - for (IdealLoopTree* l = _ltree_root->_child; l != NULL; l = l->_next) { + for (IdealLoopTree* l = _ltree_root->_child; l != nullptr; l = l->_next) { Node* head = l->_head; assert(head->is_Region(), ""); worklist.push(head); @@ -4269,7 +4269,7 @@ void PhaseIdealLoop::build_and_optimize() { VectorSet visited; // Pre-grow the mapping from Nodes to IdealLoopTrees. - _nodes.map(C->unique(), NULL); + _nodes.map(C->unique(), nullptr); memset(_nodes.adr(), 0, wordSize * C->unique()); // Pre-build the top-level outermost loop tree entry @@ -4280,9 +4280,9 @@ void PhaseIdealLoop::build_and_optimize() { // Initialize Dominators. // Checked in clone_loop_predicate() during beautify_loops(). _idom_size = 0; - _idom = NULL; - _dom_depth = NULL; - _dom_stk = NULL; + _idom = nullptr; + _dom_depth = nullptr; + _dom_stk = nullptr; // Empty pre-order array allocate_preorders(); @@ -4301,7 +4301,7 @@ void PhaseIdealLoop::build_and_optimize() { // has_loops() flag but adds NeverBranch nodes so the next loop opts // verification pass finds a non empty loop tree. When the back edge // is an exception edge, parsing doesn't set has_loops(). 
- assert(_ltree_root->_child == NULL || C->has_loops() || only_has_infinite_loops() || C->has_exception_backedge(), "parsing found no loops but there are some"); + assert(_ltree_root->_child == nullptr || C->has_loops() || only_has_infinite_loops() || C->has_exception_backedge(), "parsing found no loops but there are some"); // No loops after all if( !_ltree_root->_child && !_verify_only ) C->set_has_loops(false); @@ -4334,7 +4334,7 @@ void PhaseIdealLoop::build_and_optimize() { C->print_method(PHASE_BEFORE_BEAUTIFY_LOOPS, 3); if( _ltree_root->_child->beautify_loops( this ) ) { // Re-build loop tree! - _ltree_root->_child = NULL; + _ltree_root->_child = nullptr; _nodes.clear(); reallocate_preorders(); build_loop_tree(); @@ -4349,13 +4349,13 @@ void PhaseIdealLoop::build_and_optimize() { } } - // Build Dominators for elision of NULL checks & loop finding. + // Build Dominators for elision of null checks & loop finding. // Since nodes do not have a slot for immediate dominator, make // a persistent side array for that info indexed on node->_idx. _idom_size = C->unique(); _idom = NEW_RESOURCE_ARRAY( Node*, _idom_size ); _dom_depth = NEW_RESOURCE_ARRAY( uint, _idom_size ); - _dom_stk = NULL; // Allocated on demand in recompute_dom_depth + _dom_stk = nullptr; // Allocated on demand in recompute_dom_depth memset( _dom_depth, 0, _idom_size * sizeof(uint) ); Dominators(); @@ -4655,7 +4655,7 @@ void PhaseIdealLoop::verify() const { verify_compare(C->root(), &loop_verify, visited); assert(fail == 0, "verify loops failed"); // Verify loop structure is the same - _ltree_root->verify_tree(loop_verify._ltree_root, NULL); + _ltree_root->verify_tree(loop_verify._ltree_root, nullptr); // Reset major-progress. It was cleared by creating a verify version of // PhaseIdealLoop. 
C->restore_major_progress(old_progress); @@ -4814,12 +4814,12 @@ void IdealLoopTree::verify_tree(IdealLoopTree *loop, const IdealLoopTree *parent } - if (_child != NULL) _child->verify_tree(loop->_child, this); - if (_next != NULL) _next ->verify_tree(loop->_next, parent); + if (_child != nullptr) _child->verify_tree(loop->_child, this); + if (_next != nullptr) _next ->verify_tree(loop->_next, parent); // Innermost loops need to verify loop bodies, // but only if no 'major_progress' int fail = 0; - if (!Compile::current()->major_progress() && _child == NULL) { + if (!Compile::current()->major_progress() && _child == nullptr) { for( uint i = 0; i < _body.size(); i++ ) { Node *n = _body.at(i); if (n->outcnt() == 0) continue; // Ignore dead @@ -4887,17 +4887,17 @@ void PhaseIdealLoop::recompute_dom_depth() { uint i; // Initialize depth to "no depth yet" and realize all lazy updates for (i = 0; i < _idom_size; i++) { - // Only indices with a _dom_depth has a Node* or NULL (otherwise uninitialized). - if (_dom_depth[i] > 0 && _idom[i] != NULL) { + // Only indices with a _dom_depth has a Node* or null (otherwise uninitialized). + if (_dom_depth[i] > 0 && _idom[i] != nullptr) { _dom_depth[i] = no_depth_marker; // heal _idom if it has a fwd mapping in _nodes - if (_idom[i]->in(0) == NULL) { + if (_idom[i]->in(0) == nullptr) { idom(i); } } } - if (_dom_stk == NULL) { + if (_dom_stk == nullptr) { uint init_size = C->live_nodes() / 100; // Guess that 1/100 is a reasonable initial size. if (init_size < 10) init_size = 10; _dom_stk = new GrowableArray(init_size); @@ -4962,7 +4962,7 @@ IdealLoopTree *PhaseIdealLoop::sort( IdealLoopTree *loop, IdealLoopTree *innermo //------------------------------build_loop_tree-------------------------------- // I use a modified Vick/Tarjan algorithm. I need pre- and a post- visit -// bits. The _nodes[] array is mapped by Node index and holds a NULL for +// bits. 
The _nodes[] array is mapped by Node index and holds a null for // not-yet-pre-walked, pre-order # for pre-but-not-post-walked and holds the // tightest enclosing IdealLoopTree for post-walked. // @@ -5063,7 +5063,7 @@ int PhaseIdealLoop::build_loop_tree_impl( Node *n, int pre_order ) { // Pre-walked but not post-walked nodes need a pre_order number. // Tightest enclosing loop for this Node - IdealLoopTree *innermost = NULL; + IdealLoopTree *innermost = nullptr; // For all children, see if any edge is a backedge. If so, make a loop // for it. Then find the tightest enclosing loop for the self Node. @@ -5120,13 +5120,13 @@ int PhaseIdealLoop::build_loop_tree_impl( Node *n, int pre_order ) { _igvn.register_new_node_with_optimizer(if_t); set_loop(if_t, l); - Node* cfg = NULL; // Find the One True Control User of m + Node* cfg = nullptr; // Find the One True Control User of m for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) { Node* x = m->fast_out(j); if (x->is_CFG() && x != m && x != iff) { cfg = x; break; } } - assert(cfg != NULL, "must find the control user of m"); + assert(cfg != nullptr, "must find the control user of m"); uint k = 0; // Probably cfg->in(0) while( cfg->in(k) != m ) k++; // But check in case cfg is a Region _igvn.replace_input_of(cfg, k, if_t); // Now point to NeverBranch @@ -5164,7 +5164,7 @@ int PhaseIdealLoop::build_loop_tree_impl( Node *n, int pre_order ) { DEBUG_ONLY(head->verify_can_be_irreducible_entry();) l = l->_parent; // Check for bad CFG here to prevent crash, and bailout of compile - if (l == NULL) { + if (l == nullptr) { #ifndef PRODUCT if (TraceLoopOpts) { tty->print_cr("bailout: unhandled CFG: infinite irreducible loop"); @@ -5233,7 +5233,7 @@ int PhaseIdealLoop::build_loop_tree_impl( Node *n, int pre_order ) { innermost->_has_call = 1; // = true } else if (n->Opcode() == Op_SafePoint) { // Record all safepoints in this loop. 
- if (innermost->_safepts == NULL) innermost->_safepts = new Node_List(); + if (innermost->_safepts == nullptr) innermost->_safepts = new Node_List(); innermost->_safepts->push(n); } } @@ -5336,7 +5336,7 @@ void PhaseIdealLoop::build_loop_early( VectorSet &visited, Node_List &worklist, // into a single loop. This makes the members of the original // loop bodies pointing to dead loops; they need to move up // to the new UNION'd larger loop. I set the _head field of these - // dead loops to NULL and the _parent field points to the owning + // dead loops to null and the _parent field points to the owning // loop. Shades of UNION-FIND algorithm. IdealLoopTree *ilt; while( !(ilt = get_loop(n))->_head ) { @@ -5353,7 +5353,7 @@ void PhaseIdealLoop::build_loop_early( VectorSet &visited, Node_List &worklist, is_deleteable_safept(n)) { Node *in = n->in(TypeFunc::Control); lazy_replace(n,in); // Pull safepoint now - if (ilt->_safepts != NULL) { + if (ilt->_safepts != nullptr) { ilt->_safepts->yank(n); } // Carry on with the recursion "as if" we are walking @@ -5375,7 +5375,7 @@ void PhaseIdealLoop::build_loop_early( VectorSet &visited, Node_List &worklist, while (i < cnt) { Node *in = n->in(i); ++i; - if (in == NULL) continue; + if (in == nullptr) continue; if (in->pinned() && !in->is_CFG()) set_ctrl(in, in->in(0)); int is_visited = visited.test_set( in->_idx ); @@ -5422,7 +5422,7 @@ void PhaseIdealLoop::build_loop_early( VectorSet &visited, Node_List &worklist, //------------------------------dom_lca_internal-------------------------------- // Pair-wise LCA Node *PhaseIdealLoop::dom_lca_internal( Node *n1, Node *n2 ) const { - if( !n1 ) return n2; // Handle NULL original LCA + if( !n1 ) return n2; // Handle null original LCA assert( n1->is_CFG(), "" ); assert( n2->is_CFG(), "" ); // find LCA of all uses @@ -5467,7 +5467,7 @@ Node *PhaseIdealLoop::dom_lca_internal( Node *n1, Node *n2 ) const { // IDOMs are correct. 
Node *PhaseIdealLoop::compute_idom( Node *region ) const { assert( region->is_Region(), "" ); - Node *LCA = NULL; + Node *LCA = nullptr; for( uint i = 1; i < region->req(); i++ ) { if( region->in(i) != C->top() ) LCA = dom_lca( LCA, region->in(i) ); @@ -5499,10 +5499,10 @@ bool PhaseIdealLoop::verify_dominance(Node* n, Node* use, Node* LCA, Node* early Node* PhaseIdealLoop::compute_lca_of_uses(Node* n, Node* early, bool verify) { // Compute LCA over list of uses bool had_error = false; - Node *LCA = NULL; + Node *LCA = nullptr; for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && LCA != early; i++) { Node* c = n->fast_out(i); - if (_nodes[c->_idx] == NULL) + if (_nodes[c->_idx] == nullptr) continue; // Skip the occasional dead node if( c->is_Phi() ) { // For Phis, we must land above on the path for( uint j=1; jreq(); j++ ) {// For all inputs @@ -5536,29 +5536,29 @@ Node* PhaseIdealLoop::compute_lca_of_uses(Node* n, Node* early, bool verify) { // be guaranteed anymore. Node* CountedLoopNode::is_canonical_loop_entry() { if (!is_main_loop() && !is_post_loop()) { - return NULL; + return nullptr; } Node* ctrl = skip_predicates(); - if (ctrl == NULL || (!ctrl->is_IfTrue() && !ctrl->is_IfFalse())) { - return NULL; + if (ctrl == nullptr || (!ctrl->is_IfTrue() && !ctrl->is_IfFalse())) { + return nullptr; } Node* iffm = ctrl->in(0); - if (iffm == NULL || iffm->Opcode() != Op_If) { - return NULL; + if (iffm == nullptr || iffm->Opcode() != Op_If) { + return nullptr; } Node* bolzm = iffm->in(1); - if (bolzm == NULL || !bolzm->is_Bool()) { - return NULL; + if (bolzm == nullptr || !bolzm->is_Bool()) { + return nullptr; } Node* cmpzm = bolzm->in(1); - if (cmpzm == NULL || !cmpzm->is_Cmp()) { - return NULL; + if (cmpzm == nullptr || !cmpzm->is_Cmp()) { + return nullptr; } uint input = is_main_loop() ? 
2 : 1; - if (input >= cmpzm->req() || cmpzm->in(input) == NULL) { - return NULL; + if (input >= cmpzm->req() || cmpzm->in(input) == nullptr) { + return nullptr; } bool res = cmpzm->in(input)->Opcode() == Op_OpaqueZeroTripGuard; #ifdef ASSERT @@ -5572,13 +5572,13 @@ Node* CountedLoopNode::is_canonical_loop_entry() { } assert(found_opaque == res, "wrong pattern"); #endif - return res ? cmpzm->in(input) : NULL; + return res ? cmpzm->in(input) : nullptr; } //------------------------------get_late_ctrl---------------------------------- // Compute latest legal control. Node *PhaseIdealLoop::get_late_ctrl( Node *n, Node *early ) { - assert(early != NULL, "early control should not be NULL"); + assert(early != nullptr, "early control should not be null"); Node* LCA = compute_lca_of_uses(n, early); #ifdef ASSERT @@ -5625,8 +5625,8 @@ Node* PhaseIdealLoop::get_late_ctrl_with_anti_dep(LoadNode* n, Node* early, Node } } else { Node* sctrl = has_ctrl(s) ? get_ctrl(s) : s->in(0); - assert(sctrl != NULL || !s->is_reachable_from_root(), "must have control"); - if (sctrl != NULL && !sctrl->is_top() && is_dominator(early, sctrl)) { + assert(sctrl != nullptr || !s->is_reachable_from_root(), "must have control"); + if (sctrl != nullptr && !sctrl->is_top() && is_dominator(early, sctrl)) { const TypePtr* adr_type = s->adr_type(); if (s->is_ArrayCopy()) { // Copy to known instance needs destination type to test for aliasing @@ -5783,7 +5783,7 @@ void PhaseIdealLoop::build_loop_late( VectorSet &visited, Node_List &worklist, N // Check for dead uses. Aggressively prune such junk. It might be // dead in the global sense, but still have local uses so I cannot // easily call 'remove_dead_node'. - if( _nodes[use->_idx] != NULL || use->is_top() ) { // Not dead? + if( _nodes[use->_idx] != nullptr || use->is_top() ) { // Not dead? // Due to cycles, we might not hit the same fixed point in the verify // pass as we do in the regular pass. Instead, visit such phis as // simple uses of the loop head. 
@@ -5842,7 +5842,7 @@ void PhaseIdealLoop::verify_strip_mined_scheduling(Node *n, Node* least) { if (nn == n) { return; } - if (nn != NULL && has_ctrl(nn) && get_loop(get_ctrl(nn)) == loop) { + if (nn != nullptr && has_ctrl(nn) && get_loop(get_ctrl(nn)) == loop) { wq.push(nn); } } @@ -5934,18 +5934,18 @@ void PhaseIdealLoop::build_loop_late_post_work(Node *n, bool pinned) { // Compute latest point this Node can go Node *LCA = get_late_ctrl( n, early ); - // LCA is NULL due to uses being dead - if( LCA == NULL ) { + // LCA is null due to uses being dead + if( LCA == nullptr ) { #ifdef ASSERT for (DUIterator i1 = n->outs(); n->has_out(i1); i1++) { - assert( _nodes[n->out(i1)->_idx] == NULL, "all uses must also be dead"); + assert( _nodes[n->out(i1)->_idx] == nullptr, "all uses must also be dead"); } #endif _nodes.map(n->_idx, 0); // This node is useless _deadlist.push(n); return; } - assert(LCA != NULL && !LCA->is_top(), "no dead nodes"); + assert(LCA != nullptr && !LCA->is_top(), "no dead nodes"); Node *legal = LCA; // Walk 'legal' up the IDOM chain Node *least = legal; // Best legal position so far @@ -5975,7 +5975,7 @@ void PhaseIdealLoop::build_loop_late_post_work(Node *n, bool pinned) { break; } CallStaticJavaNode* call = new_ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none); - if (call == NULL) { + if (call == nullptr) { break; } int req = call->uncommon_trap_request(); @@ -5997,7 +5997,7 @@ void PhaseIdealLoop::build_loop_late_post_work(Node *n, bool pinned) { // which can inhibit range check elimination. 
if (least != early && !BarrierSet::barrier_set()->barrier_set_c2()->is_gc_specific_loop_opts_pass(_mode)) { Node* ctrl_out = least->unique_ctrl_out_or_null(); - if (ctrl_out != NULL && ctrl_out->is_Loop() && + if (ctrl_out != nullptr && ctrl_out->is_Loop() && least == ctrl_out->in(LoopNode::EntryControl) && (ctrl_out->is_CountedLoop() || ctrl_out->is_OuterStripMinedLoop())) { Node* least_dom = idom(least); @@ -6043,27 +6043,27 @@ void PhaseIdealLoop::dump_bad_graph(const char* msg, Node* n, Node* early, Node* tty->print_cr("%s", msg); tty->print("n: "); n->dump(); tty->print("early(n): "); early->dump(); - if (n->in(0) != NULL && !n->in(0)->is_top() && + if (n->in(0) != nullptr && !n->in(0)->is_top() && n->in(0) != early && !n->in(0)->is_Root()) { tty->print("n->in(0): "); n->in(0)->dump(); } for (uint i = 1; i < n->req(); i++) { Node* in1 = n->in(i); - if (in1 != NULL && in1 != n && !in1->is_top()) { + if (in1 != nullptr && in1 != n && !in1->is_top()) { tty->print("n->in(%d): ", i); in1->dump(); Node* in1_early = get_ctrl(in1); tty->print("early(n->in(%d)): ", i); in1_early->dump(); - if (in1->in(0) != NULL && !in1->in(0)->is_top() && + if (in1->in(0) != nullptr && !in1->in(0)->is_top() && in1->in(0) != in1_early && !in1->in(0)->is_Root()) { tty->print("n->in(%d)->in(0): ", i); in1->in(0)->dump(); } for (uint j = 1; j < in1->req(); j++) { Node* in2 = in1->in(j); - if (in2 != NULL && in2 != n && in2 != in1 && !in2->is_top()) { + if (in2 != nullptr && in2 != n && in2 != in1 && !in2->is_top()) { tty->print("n->in(%d)->in(%d): ", i, j); in2->dump(); Node* in2_early = get_ctrl(in2); tty->print("early(n->in(%d)->in(%d)): ", i, j); in2_early->dump(); - if (in2->in(0) != NULL && !in2->in(0)->is_top() && + if (in2->in(0) != nullptr && !in2->in(0)->is_top() && in2->in(0) != in2_early && !in2->in(0)->is_Root()) { tty->print("n->in(%d)->in(%d)->in(0): ", i, j); in2->in(0)->dump(); } @@ -6088,7 +6088,7 @@ void PhaseIdealLoop::dump_bad_graph(const char* msg, Node* n, Node* 
early, Node* } else { Node* u1_later = get_ctrl(u1); tty->print("later(n->out(%d)): ", i); u1_later->dump(); - if (u1->in(0) != NULL && !u1->in(0)->is_top() && + if (u1->in(0) != nullptr && !u1->in(0)->is_top() && u1->in(0) != u1_later && !u1->in(0)->is_Root()) { tty->print("n->out(%d)->in(0): ", i); u1->in(0)->dump(); } @@ -6100,7 +6100,7 @@ void PhaseIdealLoop::dump_bad_graph(const char* msg, Node* n, Node* early, Node* if (!u2->is_CFG()) { Node* u2_later = get_ctrl(u2); tty->print("later(n->out(%d)->out(%d)): ", i, j); u2_later->dump(); - if (u2->in(0) != NULL && !u2->in(0)->is_top() && + if (u2->in(0) != nullptr && !u2->in(0)->is_top() && u2->in(0) != u2_later && !u2->in(0)->is_Root()) { tty->print("n->out(%d)->in(0): ", i); u2->in(0)->dump(); } @@ -6252,7 +6252,7 @@ void PhaseIdealLoop::dump(IdealLoopTree* loop, uint idx, Node_List &rpo_list) co if (m && m->outcnt() > 0) { if (!(has_ctrl(m) && get_ctrl_no_update(m) == n)) { tty->print_cr("*** BROKEN CTRL ACCESSOR! _nodes[k] is %p, ctrl is %p", - _nodes[k], has_ctrl(m) ? get_ctrl_no_update(m) : NULL); + _nodes[k], has_ctrl(m) ? get_ctrl_no_update(m) : nullptr); } tty->sp(2 * loop->_nest + 1); m->dump(); @@ -6326,19 +6326,19 @@ void PhaseIdealLoop::rpo(Node* start, Node_Stack &stk, VectorSet &visited, Node_ // Advance to next loop tree using a preorder, left-to-right traversal. 
void LoopTreeIterator::next() { assert(!done(), "must not be done."); - if (_curnt->_child != NULL) { + if (_curnt->_child != nullptr) { _curnt = _curnt->_child; - } else if (_curnt->_next != NULL) { + } else if (_curnt->_next != nullptr) { _curnt = _curnt->_next; } else { - while (_curnt != _root && _curnt->_next == NULL) { + while (_curnt != _root && _curnt->_next == nullptr) { _curnt = _curnt->_parent; } if (_curnt == _root) { - _curnt = NULL; + _curnt = nullptr; assert(done(), "must be done."); } else { - assert(_curnt->_next != NULL, "must be more to do"); + assert(_curnt->_next != nullptr, "must be more to do"); _curnt = _curnt->_next; } } diff --git a/src/hotspot/share/opto/loopnode.hpp b/src/hotspot/share/opto/loopnode.hpp index e7afe0c724a..37ae6fb9d05 100644 --- a/src/hotspot/share/opto/loopnode.hpp +++ b/src/hotspot/share/opto/loopnode.hpp @@ -144,9 +144,9 @@ public: virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); virtual int Opcode() const; bool can_be_counted_loop(PhaseTransform* phase) const { - return req() == 3 && in(0) != NULL && - in(1) != NULL && phase->type(in(1)) != Type::TOP && - in(2) != NULL && phase->type(in(2)) != Type::TOP; + return req() == 3 && in(0) != nullptr && + in(1) != nullptr && phase->type(in(1)) != Type::TOP && + in(2) != nullptr && phase->type(in(2)) != Type::TOP; } bool is_valid_counted_loop(BasicType bt) const; #ifndef PRODUCT @@ -155,10 +155,10 @@ public: void verify_strip_mined(int expect_skeleton) const NOT_DEBUG_RETURN; virtual LoopNode* skip_strip_mined(int expect_skeleton = 1) { return this; } - virtual IfTrueNode* outer_loop_tail() const { ShouldNotReachHere(); return NULL; } - virtual OuterStripMinedLoopEndNode* outer_loop_end() const { ShouldNotReachHere(); return NULL; } - virtual IfFalseNode* outer_loop_exit() const { ShouldNotReachHere(); return NULL; } - virtual SafePointNode* outer_safepoint() const { ShouldNotReachHere(); return NULL; } + virtual IfTrueNode* outer_loop_tail() const { 
ShouldNotReachHere(); return nullptr; } + virtual OuterStripMinedLoopEndNode* outer_loop_end() const { ShouldNotReachHere(); return nullptr; } + virtual IfFalseNode* outer_loop_exit() const { ShouldNotReachHere(); return nullptr; } + virtual SafePointNode* outer_safepoint() const { ShouldNotReachHere(); return nullptr; } }; //------------------------------Counted Loops---------------------------------- @@ -383,12 +383,12 @@ public: init_class_id(Class_BaseCountedLoopEnd); } - Node *cmp_node() const { return (in(TestValue)->req() >=2) ? in(TestValue)->in(1) : NULL; } - Node* incr() const { Node* tmp = cmp_node(); return (tmp && tmp->req() == 3) ? tmp->in(1) : NULL; } - Node* limit() const { Node* tmp = cmp_node(); return (tmp && tmp->req() == 3) ? tmp->in(2) : NULL; } - Node* stride() const { Node* tmp = incr(); return (tmp && tmp->req() == 3) ? tmp->in(2) : NULL; } - Node* init_trip() const { Node* tmp = phi(); return (tmp && tmp->req() == 3) ? tmp->in(1) : NULL; } - bool stride_is_con() const { Node *tmp = stride(); return (tmp != NULL && tmp->is_Con()); } + Node *cmp_node() const { return (in(TestValue)->req() >=2) ? in(TestValue)->in(1) : nullptr; } + Node* incr() const { Node* tmp = cmp_node(); return (tmp && tmp->req() == 3) ? tmp->in(1) : nullptr; } + Node* limit() const { Node* tmp = cmp_node(); return (tmp && tmp->req() == 3) ? tmp->in(2) : nullptr; } + Node* stride() const { Node* tmp = incr(); return (tmp && tmp->req() == 3) ? tmp->in(2) : nullptr; } + Node* init_trip() const { Node* tmp = phi(); return (tmp && tmp->req() == 3) ? 
tmp->in(1) : nullptr; } + bool stride_is_con() const { Node *tmp = stride(); return (tmp != nullptr && tmp->is_Con()); } PhiNode* phi() const { Node* tmp = incr(); @@ -398,7 +398,7 @@ public: return phi->as_Phi(); } } - return NULL; + return nullptr; } BaseCountedLoopNode* loopnode() const { @@ -406,15 +406,15 @@ public: // have been optimized out by the IGVN so be cautious with the // pattern matching on the graph PhiNode* iv_phi = phi(); - if (iv_phi == NULL) { - return NULL; + if (iv_phi == nullptr) { + return nullptr; } Node* ln = iv_phi->in(0); if (!ln->is_BaseCountedLoop() || ln->as_BaseCountedLoop()->loopexit_or_null() != this) { - return NULL; + return nullptr; } if (ln->as_BaseCountedLoop()->bt() != bt()) { - return NULL; + return nullptr; } return ln->as_BaseCountedLoop(); } @@ -470,54 +470,54 @@ public: inline BaseCountedLoopEndNode* BaseCountedLoopNode::loopexit_or_null() const { Node* bctrl = back_control(); - if (bctrl == NULL) return NULL; + if (bctrl == nullptr) return nullptr; Node* lexit = bctrl->in(0); if (!lexit->is_BaseCountedLoopEnd()) { - return NULL; + return nullptr; } BaseCountedLoopEndNode* result = lexit->as_BaseCountedLoopEnd(); if (result->bt() != bt()) { - return NULL; + return nullptr; } return result; } inline BaseCountedLoopEndNode* BaseCountedLoopNode::loopexit() const { BaseCountedLoopEndNode* cle = loopexit_or_null(); - assert(cle != NULL, "loopexit is NULL"); + assert(cle != nullptr, "loopexit is null"); return cle; } inline Node* BaseCountedLoopNode::init_trip() const { BaseCountedLoopEndNode* cle = loopexit_or_null(); - return cle != NULL ? cle->init_trip() : NULL; + return cle != nullptr ? cle->init_trip() : nullptr; } inline Node* BaseCountedLoopNode::stride() const { BaseCountedLoopEndNode* cle = loopexit_or_null(); - return cle != NULL ? cle->stride() : NULL; + return cle != nullptr ? 
cle->stride() : nullptr; } inline bool BaseCountedLoopNode::stride_is_con() const { BaseCountedLoopEndNode* cle = loopexit_or_null(); - return cle != NULL && cle->stride_is_con(); + return cle != nullptr && cle->stride_is_con(); } inline Node* BaseCountedLoopNode::limit() const { BaseCountedLoopEndNode* cle = loopexit_or_null(); - return cle != NULL ? cle->limit() : NULL; + return cle != nullptr ? cle->limit() : nullptr; } inline Node* BaseCountedLoopNode::incr() const { BaseCountedLoopEndNode* cle = loopexit_or_null(); - return cle != NULL ? cle->incr() : NULL; + return cle != nullptr ? cle->incr() : nullptr; } inline Node* BaseCountedLoopNode::phi() const { BaseCountedLoopEndNode* cle = loopexit_or_null(); - return cle != NULL ? cle->phi() : NULL; + return cle != nullptr ? cle->phi() : nullptr; } inline jlong BaseCountedLoopNode::stride_con() const { BaseCountedLoopEndNode* cle = loopexit_or_null(); - return cle != NULL ? cle->stride_con() : 0; + return cle != nullptr ? cle->stride_con() : 0; } @@ -630,12 +630,12 @@ public: _local_loop_unroll_limit(0), _local_loop_unroll_factor(0), _nest(0), _irreducible(0), _has_call(0), _has_sfpt(0), _rce_candidate(0), _has_range_checks(0), _has_range_checks_computed(0), - _safepts(NULL), - _required_safept(NULL), + _safepts(nullptr), + _required_safept(nullptr), _allow_optimizations(true) { - precond(_head != NULL); - precond(_tail != NULL); + precond(_head != nullptr); + precond(_tail != nullptr); } // Is 'l' a member of 'this'? @@ -761,7 +761,7 @@ public: // are combined with an associative binary. Helper for reassociate_invariants. int find_invariant(Node* n, PhaseIdealLoop *phase); // Return TRUE if "n" is associative. 
- bool is_associative(Node* n, Node* base=NULL); + bool is_associative(Node* n, Node* base=nullptr); // Return true if n is invariant bool is_invariant(Node* n) const; @@ -769,11 +769,11 @@ public: // Put loop body on igvn work list void record_for_igvn(); - bool is_root() { return _parent == NULL; } + bool is_root() { return _parent == nullptr; } // A proper/reducible loop w/o any (occasional) dead back-edge. bool is_loop() { return !_irreducible && !tail()->is_top(); } bool is_counted() { return is_loop() && _head->is_CountedLoop(); } - bool is_innermost() { return is_loop() && _child == NULL; } + bool is_innermost() { return is_loop() && _child == nullptr; } void remove_main_post_loops(CountedLoopNode *cl, PhaseIdealLoop *phase); @@ -913,8 +913,8 @@ private: // 2) a use is the same as the current LCA passed as 'n1' Node *dom_lca_for_get_late_ctrl( Node *lca, Node *n, Node *tag ) { assert( n->is_CFG(), "" ); - // Fast-path NULL lca - if( lca != NULL && lca != n ) { + // Fast-path null lca + if( lca != nullptr && lca != n ) { assert( lca->is_CFG(), "" ); // find LCA of all uses n = dom_lca_for_get_late_ctrl_internal( lca, n, tag ); @@ -925,7 +925,7 @@ private: // Helper function for directing control inputs away from CFG split points. 
Node *find_non_split_ctrl( Node *ctrl ) const { - if (ctrl != NULL) { + if (ctrl != nullptr) { if (ctrl->is_MultiBranch()) { ctrl = ctrl->in(0); } @@ -969,8 +969,8 @@ public: PhaseIterGVN &igvn() const { return _igvn; } bool has_node( Node* n ) const { - guarantee(n != NULL, "No Node."); - return _nodes[n->_idx] != NULL; + guarantee(n != nullptr, "No Node."); + return _nodes[n->_idx] != nullptr; } // check if transform created new nodes that need _ctrl recorded Node *get_late_ctrl( Node *n, Node *early ); @@ -989,8 +989,8 @@ public: IdealLoopTree* old_loop = get_loop(get_ctrl(n)); IdealLoopTree* new_loop = get_loop(ctrl); if (old_loop != new_loop) { - if (old_loop->_child == NULL) old_loop->_body.yank(n); - if (new_loop->_child == NULL) new_loop->_body.push(n); + if (old_loop->_child == nullptr) old_loop->_body.yank(n); + if (new_loop->_child == nullptr) new_loop->_body.push(n); } set_ctrl(n, ctrl); } @@ -1133,10 +1133,10 @@ public: Node* idom_no_update(uint didx) const { assert(didx < _idom_size, "oob"); Node* n = _idom[didx]; - assert(n != NULL,"Bad immediate dominator info."); - while (n->in(0) == NULL) { // Skip dead CFG nodes + assert(n != nullptr,"Bad immediate dominator info."); + while (n->in(0) == nullptr) { // Skip dead CFG nodes n = (Node*)(((intptr_t)_nodes[n->_idx]) & ~1); - assert(n != NULL,"Bad immediate dominator info."); + assert(n != nullptr,"Bad immediate dominator info."); } return n; } @@ -1152,7 +1152,7 @@ public: } uint dom_depth(Node* d) const { - guarantee(d != NULL, "Null dominator info."); + guarantee(d != nullptr, "Null dominator info."); guarantee(d->_idx < _idom_size, ""); return _dom_depth[d->_idx]; } @@ -1202,7 +1202,7 @@ public: bool _has_irreducible_loops; // Per-Node transform - virtual Node* transform(Node* n) { return NULL; } + virtual Node* transform(Node* n) { return nullptr; } Node* loop_exit_control(Node* x, IdealLoopTree* loop); Node* loop_exit_test(Node* back_control, IdealLoopTree* loop, Node*& incr, Node*& limit, 
BoolTest::mask& bt, float& cl_prob); @@ -1249,7 +1249,7 @@ public: // normal "loop-exit" condition. All uses of loop-invariant old-loop values // now come from (one or more) Phis that merge their new-loop equivalents. // Parameter side_by_side_idom: - // When side_by_size_idom is NULL, the dominator tree is constructed for + // When side_by_size_idom is null, the dominator tree is constructed for // the clone loop to dominate the original. Used in construction of // pre-main-post loop sequence. // When nonnull, the clone and original are side-by-side, both are @@ -1264,7 +1264,7 @@ public: // strip mined loop. }; void clone_loop( IdealLoopTree *loop, Node_List &old_new, int dom_depth, - CloneLoopMode mode, Node* side_by_side_idom = NULL); + CloneLoopMode mode, Node* side_by_side_idom = nullptr); void clone_loop_handle_data_uses(Node* old, Node_List &old_new, IdealLoopTree* loop, IdealLoopTree* companion_loop, Node_List*& split_if_set, Node_List*& split_bool_set, @@ -1319,12 +1319,12 @@ public: bool is_iv(Node* exp, Node* iv, BasicType bt); // Return true if exp is a scaled induction var plus (or minus) constant - bool is_scaled_iv_plus_offset(Node* exp, Node* iv, BasicType bt, jlong* p_scale, Node** p_offset, bool* p_short_scale = NULL, int depth = 0); + bool is_scaled_iv_plus_offset(Node* exp, Node* iv, BasicType bt, jlong* p_scale, Node** p_offset, bool* p_short_scale = nullptr, int depth = 0); bool is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset) { jlong long_scale; if (is_scaled_iv_plus_offset(exp, iv, T_INT, &long_scale, p_offset)) { int int_scale = checked_cast(long_scale); - if (p_scale != NULL) { + if (p_scale != nullptr) { *p_scale = int_scale; } return true; @@ -1564,7 +1564,7 @@ public: Node *split_thru_phi( Node *n, Node *region, int policy ); // Found an If getting its condition-code input from a Phi in the // same block. Split thru the Region. 
- void do_split_if(Node *iff, RegionNode** new_false_region = NULL, RegionNode** new_true_region = NULL); + void do_split_if(Node *iff, RegionNode** new_false_region = nullptr, RegionNode** new_true_region = nullptr); // Conversion of fill/copy patterns into intrinsic versions bool do_intrinsify_fill(); @@ -1575,7 +1575,7 @@ public: private: // Return a type based on condition control flow const TypeInt* filtered_type( Node *n, Node* n_ctrl); - const TypeInt* filtered_type( Node *n ) { return filtered_type(n, NULL); } + const TypeInt* filtered_type( Node *n ) { return filtered_type(n, nullptr); } // Helpers for filtered type const TypeInt* filtered_type_from_dominators( Node* val, Node *val_ctrl); @@ -1778,7 +1778,7 @@ public: _check_at_final(chk == BUDGET_CHECK), _nodes_at_begin(0) { - precond(_phase != NULL); + precond(_phase != nullptr); _nodes_at_begin = _phase->require_nodes_begin(); } @@ -1877,7 +1877,7 @@ class CountedLoopReserveKit { inline Node* IdealLoopTree::tail() { // Handle lazy update of _tail field. - if (_tail->in(0) == NULL) { + if (_tail->in(0) == nullptr) { _tail = _phase->get_ctrl(_tail); } return _tail; @@ -1885,7 +1885,7 @@ inline Node* IdealLoopTree::tail() { inline Node* IdealLoopTree::head() { // Handle lazy update of _head field. - if (_head->in(0) == NULL) { + if (_head->in(0) == nullptr) { _head = _phase->get_ctrl(_head); } return _head; @@ -1907,7 +1907,7 @@ private: public: LoopTreeIterator(IdealLoopTree* root) : _root(root), _curnt(root) {} - bool done() { return _curnt == NULL; } // Finished iterating? + bool done() { return _curnt == nullptr; } // Finished iterating? 
void next(); // Advance to next loop tree diff --git a/src/hotspot/share/opto/loopopts.cpp b/src/hotspot/share/opto/loopopts.cpp index 8b1576b0a82..d2221a84fa2 100644 --- a/src/hotspot/share/opto/loopopts.cpp +++ b/src/hotspot/share/opto/loopopts.cpp @@ -50,7 +50,7 @@ Node* PhaseIdealLoop::split_thru_phi(Node* n, Node* region, int policy) { if (n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::LONG) { // ConvI2L may have type information on it which is unsafe to push up // so disable this for now - return NULL; + return nullptr; } // Splitting range check CastIIs through a loop induction Phi can @@ -58,11 +58,11 @@ Node* PhaseIdealLoop::split_thru_phi(Node* n, Node* region, int policy) { // induction Phi and prevent optimizations (vectorization) if (n->Opcode() == Op_CastII && region->is_CountedLoop() && n->in(1) == region->as_CountedLoop()->phi()) { - return NULL; + return nullptr; } if (cannot_split_division(n, region)) { - return NULL; + return nullptr; } int wins = 0; @@ -72,18 +72,18 @@ Node* PhaseIdealLoop::split_thru_phi(Node* n, Node* region, int policy) { const Type* type = n->bottom_type(); const TypeOopPtr* t_oop = _igvn.type(n)->isa_oopptr(); Node* phi; - if (t_oop != NULL && t_oop->is_known_instance_field()) { + if (t_oop != nullptr && t_oop->is_known_instance_field()) { int iid = t_oop->instance_id(); int index = C->get_alias_index(t_oop); int offset = t_oop->offset(); - phi = new PhiNode(region, type, NULL, iid, index, offset); + phi = new PhiNode(region, type, nullptr, iid, index, offset); } else { phi = PhiNode::make_blank(region, n); } uint old_unique = C->unique(); for (uint i = 1; i < region->req(); i++) { Node* x; - Node* the_clone = NULL; + Node* the_clone = nullptr; if (region->in(i) == C->top()) { x = C->top(); // Dead path? 
Use a dead data op } else { @@ -148,14 +148,14 @@ Node* PhaseIdealLoop::split_thru_phi(Node* n, Node* region, int policy) { } } } - if (x != the_clone && the_clone != NULL) + if (x != the_clone && the_clone != nullptr) _igvn.remove_dead_node(the_clone); phi->set_req( i, x ); } // Too few wins? if (wins <= policy) { _igvn.remove_dead_node(phi); - return NULL; + return nullptr; } // Record Phi @@ -176,8 +176,8 @@ Node* PhaseIdealLoop::split_thru_phi(Node* n, Node* region, int policy) { } // The occasional new node if (x->_idx >= old_unique) { // Found a new, unplaced node? - old_ctrl = NULL; - old_loop = NULL; // Not in any prior loop + old_ctrl = nullptr; + old_loop = nullptr; // Not in any prior loop } else { old_ctrl = get_ctrl(x); old_loop = get_loop(old_ctrl); // Get prior loop @@ -194,7 +194,7 @@ Node* PhaseIdealLoop::split_thru_phi(Node* n, Node* region, int policy) { // for each use outside of this loop. IdealLoopTree *use_loop = get_loop(region); if (!new_loop->is_member(use_loop) && - (old_loop == NULL || !new_loop->is_member(old_loop))) { + (old_loop == nullptr || !new_loop->is_member(old_loop))) { // Take early control, later control will be recalculated // during next iteration of loop optimizations. new_ctrl = get_early_ctrl(x); @@ -236,7 +236,7 @@ bool PhaseIdealLoop::cannot_split_division(const Node* n, const Node* region) co return false; } - assert(n->in(0) == NULL, "divisions with zero check should already have bailed out earlier in split-if"); + assert(n->in(0) == nullptr, "divisions with zero check should already have bailed out earlier in split-if"); Node* divisor = n->in(2); return is_divisor_counted_loop_phi(divisor, region) && loop_phi_backedge_type_contains_zero(divisor, zero); @@ -292,15 +292,15 @@ void PhaseIdealLoop::dominated_by(IfProjNode* prevdom, IfNode* iff, bool flip, b // Loop predicates may have depending checks which should not // be skipped. For example, range check predicate has two checks // for lower and upper bounds. 
- if (dp == NULL) + if (dp == nullptr) return; ProjNode* dp_proj = dp->as_Proj(); ProjNode* unc_proj = iff->proj_out(1 - dp_proj->_con)->as_Proj(); if (exclude_loop_predicate && - (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate) != NULL || - unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_profile_predicate) != NULL || - unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_range_check) != NULL)) { + (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate) != nullptr || + unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_profile_predicate) != nullptr || + unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_range_check) != nullptr)) { // If this is a range check (IfNode::is_range_check), do not // reorder because Compile::allow_range_check_smearing might have // changed the check. @@ -345,7 +345,7 @@ Node *PhaseIdealLoop::has_local_phi_input( Node *n ) { break; } if( i >= n->req() ) - return NULL; // No Phi inputs; nowhere to clone thru + return nullptr; // No Phi inputs; nowhere to clone thru // Check for inputs created between 'n' and the Phi input. 
These // must split as well; they have already been given the chance @@ -371,7 +371,7 @@ Node *PhaseIdealLoop::has_local_phi_input( Node *n ) { set_ctrl_and_loop(m, c); continue; } - return NULL; + return nullptr; } assert(n->is_Phi() || m->is_Phi() || is_dominator(get_ctrl(m), n_ctrl), "m has strange control"); } @@ -390,18 +390,18 @@ Node* PhaseIdealLoop::remix_address_expressions_add_left_shift(Node* n, IdealLoo Node* scale_ctrl = get_ctrl(scale); IdealLoopTree* scale_loop = get_loop(scale_ctrl); if (n_loop == scale_loop || !scale_loop->is_member(n_loop)) { - return NULL; + return nullptr; } const TypeInt* scale_t = scale->bottom_type()->isa_int(); - if (scale_t != NULL && scale_t->is_con() && scale_t->get_con() >= 16) { - return NULL; // Dont bother with byte/short masking + if (scale_t != nullptr && scale_t->is_con() && scale_t->get_con() >= 16) { + return nullptr; // Dont bother with byte/short masking } // Add must vary with loop (else shift would be loop-invariant) Node* add = n->in(1); Node* add_ctrl = get_ctrl(add); IdealLoopTree* add_loop = get_loop(add_ctrl); if (n_loop != add_loop) { - return NULL; // happens w/ evil ZKM loops + return nullptr; // happens w/ evil ZKM loops } // Convert I-V into I+ (0-V); same for V-I @@ -415,7 +415,7 @@ Node* PhaseIdealLoop::remix_address_expressions_add_left_shift(Node* n, IdealLoo add = AddNode::make(add->in(1), neg, bt); register_new_node(add, add_ctrl); } - if (add->Opcode() != Op_Add(bt)) return NULL; + if (add->Opcode() != Op_Add(bt)) return nullptr; assert(add->Opcode() == Op_AddI || add->Opcode() == Op_AddL, ""); // See if one add input is loop invariant Node* add_var = add->in(1); @@ -431,10 +431,10 @@ Node* PhaseIdealLoop::remix_address_expressions_add_left_shift(Node* n, IdealLoo add_invar_loop = add_var_loop; add_var = add->in(2); } else if (add_var_loop != n_loop) { // Else neither input is loop invariant - return NULL; + return nullptr; } if (n_loop == add_invar_loop || !add_invar_loop->is_member(n_loop)) 
{ - return NULL; // No invariant part of the add? + return nullptr; // No invariant part of the add? } // Yes! Reshape address expression! @@ -450,7 +450,7 @@ Node* PhaseIdealLoop::remix_address_expressions_add_left_shift(Node* n, IdealLoo _igvn.replace_node(n, var_add); return var_add; } - return NULL; + return nullptr; } //------------------------------remix_address_expressions---------------------- @@ -458,7 +458,7 @@ Node* PhaseIdealLoop::remix_address_expressions_add_left_shift(Node* n, IdealLoo // moved out. We'd like to do all associative operators, but it's especially // important (common) to do address expressions. Node* PhaseIdealLoop::remix_address_expressions(Node* n) { - if (!has_ctrl(n)) return NULL; + if (!has_ctrl(n)) return nullptr; Node* n_ctrl = get_ctrl(n); IdealLoopTree* n_loop = get_loop(n_ctrl); @@ -466,7 +466,7 @@ Node* PhaseIdealLoop::remix_address_expressions(Node* n) { // itself is loop-varying. // Only interested in binary ops (and AddP) - if (n->req() < 3 || n->req() > 4) return NULL; + if (n->req() < 3 || n->req() > 4) return nullptr; Node* n1_ctrl = get_ctrl(n->in( 1)); Node* n2_ctrl = get_ctrl(n->in( 2)); @@ -479,22 +479,22 @@ Node* PhaseIdealLoop::remix_address_expressions(Node* n) { if ((n_loop->is_member(n1_loop) && n_loop != n1_loop) || (n_loop->is_member(n2_loop) && n_loop != n2_loop) || (n_loop->is_member(n3_loop) && n_loop != n3_loop)) { - return NULL; // Leave well enough alone + return nullptr; // Leave well enough alone } // Is at least one of my inputs loop-invariant? 
if (n1_loop == n_loop && n2_loop == n_loop && n3_loop == n_loop) { - return NULL; // No loop-invariant inputs + return nullptr; // No loop-invariant inputs } Node* res = remix_address_expressions_add_left_shift(n, n_loop, n_ctrl, T_INT); - if (res != NULL) { + if (res != nullptr) { return res; } res = remix_address_expressions_add_left_shift(n, n_loop, n_ctrl, T_LONG); - if (res != NULL) { + if (res != nullptr) { return res; } @@ -558,13 +558,13 @@ Node* PhaseIdealLoop::remix_address_expressions(Node* n) { } } - return NULL; + return nullptr; } // Optimize ((in1[2*i] * in2[2*i]) + (in1[2*i+1] * in2[2*i+1])) Node *PhaseIdealLoop::convert_add_to_muladd(Node* n) { assert(n->Opcode() == Op_AddI, "sanity"); - Node * nn = NULL; + Node * nn = nullptr; Node * in1 = n->in(1); Node * in2 = n->in(2); if (in1->Opcode() == Op_MulI && in2->Opcode() == Op_MulI) { @@ -627,20 +627,20 @@ Node *PhaseIdealLoop::convert_add_to_muladd(Node* n) { Node *PhaseIdealLoop::conditional_move( Node *region ) { assert(region->is_Region(), "sanity check"); - if (region->req() != 3) return NULL; + if (region->req() != 3) return nullptr; // Check for CFG diamond Node *lp = region->in(1); Node *rp = region->in(2); - if (!lp || !rp) return NULL; + if (!lp || !rp) return nullptr; Node *lp_c = lp->in(0); - if (lp_c == NULL || lp_c != rp->in(0) || !lp_c->is_If()) return NULL; + if (lp_c == nullptr || lp_c != rp->in(0) || !lp_c->is_If()) return nullptr; IfNode *iff = lp_c->as_If(); // Check for ops pinned in an arm of the diamond. // Can't remove the control flow in this case - if (lp->outcnt() > 1) return NULL; - if (rp->outcnt() > 1) return NULL; + if (lp->outcnt() > 1) return nullptr; + if (rp->outcnt() > 1) return nullptr; IdealLoopTree* r_loop = get_loop(region); assert(r_loop == get_loop(iff), "sanity"); @@ -685,12 +685,12 @@ Node *PhaseIdealLoop::conditional_move( Node *region ) { // manufacturing expensive instructions, generally a bad plan. // Just Say No to Conditionally-Moved Derived Pointers. 
if (tp && tp->offset() != 0) - return NULL; + return nullptr; cost++; break; } default: - return NULL; // In particular, can't do memory or I/O + return nullptr; // In particular, can't do memory or I/O } // Add in cost any speculative ops for (uint j = 1; j < region->req(); j++) { @@ -723,21 +723,21 @@ Node *PhaseIdealLoop::conditional_move( Node *region ) { }//for Node* bol = iff->in(1); if (bol->Opcode() == Op_Opaque4) { - return NULL; // Ignore loop predicate checks (the Opaque4 ensures they will go away) + return nullptr; // Ignore loop predicate checks (the Opaque4 ensures they will go away) } assert(bol->Opcode() == Op_Bool, "Unexpected node"); int cmp_op = bol->in(1)->Opcode(); if (cmp_op == Op_SubTypeCheck) { // SubTypeCheck expansion expects an IfNode - return NULL; + return nullptr; } // It is expensive to generate flags from a float compare. // Avoid duplicated float compare. - if (phis > 1 && (cmp_op == Op_CmpF || cmp_op == Op_CmpD)) return NULL; + if (phis > 1 && (cmp_op == Op_CmpF || cmp_op == Op_CmpD)) return nullptr; float infrequent_prob = PROB_UNLIKELY_MAG(3); // Ignore cost and blocks frequency if CMOVE can be moved outside the loop. if (used_inside_loop) { - if (cost >= ConditionalMoveLimit) return NULL; // Too much goo + if (cost >= ConditionalMoveLimit) return nullptr; // Too much goo // BlockLayoutByFrequency optimization moves infrequent branch // from hot path. 
No point in CMOV'ing in such case (110 is used @@ -752,7 +752,7 @@ Node *PhaseIdealLoop::conditional_move( Node *region ) { //keep going } else if (iff->_prob < infrequent_prob || iff->_prob > (1.0f - infrequent_prob)) - return NULL; + return nullptr; // -------------- // Now replace all Phis with CMOV's @@ -760,7 +760,7 @@ Node *PhaseIdealLoop::conditional_move( Node *region ) { uint flip = (lp->Opcode() == Op_IfTrue); Node_List wq; while (1) { - PhiNode* phi = NULL; + PhiNode* phi = nullptr; for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) { Node *out = region->fast_out(i); if (out->is_Phi()) { @@ -768,7 +768,7 @@ Node *PhaseIdealLoop::conditional_move( Node *region ) { break; } } - if (phi == NULL || _igvn.type(phi) == Type::TOP) { + if (phi == nullptr || _igvn.type(phi) == Type::TOP) { break; } if (PrintOpto && VerifyLoopOptimizations) { tty->print_cr("CMOV"); } @@ -778,7 +778,7 @@ Node *PhaseIdealLoop::conditional_move( Node *region ) { Node *n = wq.pop(); for (uint j = 1; j < n->req(); j++) { Node* m = n->in(j); - if (m != NULL && !is_dominator(get_ctrl(m), cmov_ctrl)) { + if (m != nullptr && !is_dominator(get_ctrl(m), cmov_ctrl)) { #ifndef PRODUCT if (PrintOpto && VerifyLoopOptimizations) { tty->print(" speculate: "); @@ -833,7 +833,7 @@ Node* PhaseIdealLoop::try_move_store_before_loop(Node* n, Node *n_ctrl) { IdealLoopTree *n_loop = get_loop(n_ctrl); if (n->is_Store() && n_loop != _ltree_root && n_loop->is_loop() && n_loop->_head->is_Loop() && - n->in(0) != NULL) { + n->in(0) != nullptr) { Node* address = n->in(MemNode::Address); Node* value = n->in(MemNode::ValueIn); Node* mem = n->in(MemNode::Memory); @@ -863,7 +863,7 @@ Node* PhaseIdealLoop::try_move_store_before_loop(Node* n, Node *n_ctrl) { mem->outcnt() == 1 && mem->in(LoopNode::LoopBackControl) == n) { - assert(n_loop->_tail != NULL, "need a tail"); + assert(n_loop->_tail != nullptr, "need a tail"); assert(is_dominator(n_ctrl, n_loop->_tail), "store control must not be in a 
branch in the loop"); // Verify that there's no early exit of the loop before the store. @@ -909,12 +909,12 @@ Node* PhaseIdealLoop::try_move_store_before_loop(Node* n, Node *n_ctrl) { } } } - return NULL; + return nullptr; } // Try moving a store out of a loop, right after the loop void PhaseIdealLoop::try_move_store_after_loop(Node* n) { - if (n->is_Store() && n->in(0) != NULL) { + if (n->is_Store() && n->in(0) != nullptr) { Node *n_ctrl = get_ctrl(n); IdealLoopTree *n_loop = get_loop(n_ctrl); // Store must be in a loop @@ -926,7 +926,7 @@ void PhaseIdealLoop::try_move_store_after_loop(Node* n) { if (!n_loop->is_member(address_loop)) { // Store must be last on this memory slice in the loop and // nothing in the loop must observe it - Node* phi = NULL; + Node* phi = nullptr; for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { Node* u = n->fast_out(i); if (has_ctrl(u)) { // control use? @@ -937,7 +937,7 @@ void PhaseIdealLoop::try_move_store_after_loop(Node* n) { if (u->is_Phi() && u->in(0) == n_loop->_head) { assert(_igvn.type(u) == Type::MEMORY, "bad phi"); // multiple phis on the same slice are possible - if (phi != NULL) { + if (phi != nullptr) { return; } phi = u; @@ -946,7 +946,7 @@ void PhaseIdealLoop::try_move_store_after_loop(Node* n) { } return; } - if (phi != NULL) { + if (phi != nullptr) { // Nothing in the loop before the store (next iteration) // must observe the stored value bool mem_ok = true; @@ -1065,7 +1065,7 @@ Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) { } Node* res = try_move_store_before_loop(n, n_ctrl); - if (res != NULL) { + if (res != nullptr) { return n; } @@ -1084,7 +1084,7 @@ Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) { // Node control inputs don't necessarily agree with loop control info (due to // transformations happened in between), thus additional dominance check is needed // to keep loop info valid. 
- if (dom_cast != NULL && is_dominator(get_ctrl(dom_cast), get_ctrl(n))) { + if (dom_cast != nullptr && is_dominator(get_ctrl(dom_cast), get_ctrl(n))) { _igvn.replace_node(n, dom_cast); return dom_cast; } @@ -1295,14 +1295,14 @@ bool PhaseIdealLoop::can_split_if(Node* n_ctrl) { } // Detect if the node is the inner strip-mined loop -// Return: NULL if it's not the case, or the exit of outer strip-mined loop +// Return: null if it's not the case, or the exit of outer strip-mined loop static Node* is_inner_of_stripmined_loop(const Node* out) { - Node* out_le = NULL; + Node* out_le = nullptr; if (out->is_CountedLoopEnd()) { const CountedLoopNode* loop = out->as_CountedLoopEnd()->loopnode(); - if (loop != NULL && loop->is_strip_mined()) { + if (loop != nullptr && loop->is_strip_mined()) { out_le = loop->in(LoopNode::EntryControl)->as_OuterStripMinedLoop()->outer_loop_exit(); } } @@ -1377,7 +1377,7 @@ void PhaseIdealLoop::split_if_with_blocks_post(Node *n) { // Now split the bool up thru the phi Node *bolphi = split_thru_phi(bol, n_ctrl, -1); - guarantee(bolphi != NULL, "null boolean phi node"); + guarantee(bolphi != nullptr, "null boolean phi node"); _igvn.replace_node(bol, bolphi); assert(iff->in(1) == bolphi, ""); @@ -1429,7 +1429,7 @@ void PhaseIdealLoop::split_if_with_blocks_post(Node *n) { // 2) move code with side-effect in strip-mined loop // Move to the exit of outer strip-mined loop in that case. Node* out_le = is_inner_of_stripmined_loop(dom); - if (out_le != NULL) { + if (out_le != nullptr) { prevdom = out_le; } // Replace the dominated test with an obvious true or false. 
@@ -1570,13 +1570,13 @@ bool PhaseIdealLoop::safe_for_if_replacement(const Node* dom) const { } CountedLoopEndNode* le = dom->as_CountedLoopEnd(); CountedLoopNode* cl = le->loopnode(); - if (cl == NULL) { + if (cl == nullptr) { return true; } if (!cl->is_main_loop()) { return true; } - if (cl->is_canonical_loop_entry() == NULL) { + if (cl->is_canonical_loop_entry() == nullptr) { return true; } // Further unrolling is possible so loop exit condition might change @@ -1599,7 +1599,7 @@ void PhaseIdealLoop::try_sink_out_of_loop(Node* n) { Node *n_ctrl = get_ctrl(n); IdealLoopTree *n_loop = get_loop(n_ctrl); - if (n->in(0) != NULL) { + if (n->in(0) != nullptr) { IdealLoopTree* loop_ctrl = get_loop(n->in(0)); if (n_loop != loop_ctrl && n_loop->is_member(loop_ctrl)) { // n has a control input inside a loop but get_ctrl() is member of an outer loop. This could happen, for example, @@ -1615,12 +1615,12 @@ void PhaseIdealLoop::try_sink_out_of_loop(Node* n) { if (n_loop->is_member(get_loop(early_ctrl)) && // check that this one can't be hoisted now ctrl_of_all_uses_out_of_loop(n, early_ctrl, n_loop)) { // All uses in outer loops! assert(!n->is_Store() && !n->is_LoadStore(), "no node with a side effect"); - Node* outer_loop_clone = NULL; + Node* outer_loop_clone = nullptr; for (DUIterator_Last jmin, j = n->last_outs(jmin); j >= jmin;) { Node* u = n->last_out(j); // Clone private computation per use _igvn.rehash_node_delayed(u); Node* x = n->clone(); // Clone computation - Node* x_ctrl = NULL; + Node* x_ctrl = nullptr; if (u->is_Phi()) { // Replace all uses of normal nodes. 
Replace Phi uses // individually, so the separate Nodes can sink down @@ -1663,14 +1663,14 @@ void PhaseIdealLoop::try_sink_out_of_loop(Node* n) { Node* x_head = x_loop->_head; if (x_head->is_Loop() && x_head->is_OuterStripMinedLoop()) { // Do not add duplicate LoadNodes to the outer strip mined loop - if (outer_loop_clone != NULL) { + if (outer_loop_clone != nullptr) { _igvn.replace_node(x, outer_loop_clone); continue; } outer_loop_clone = x; } x->set_req(0, x_ctrl); - } else if (n->in(0) != NULL){ + } else if (n->in(0) != nullptr){ x->set_req(0, x_ctrl); } assert(dom_depth(n_ctrl) <= dom_depth(x_ctrl), "n is later than its clone"); @@ -1684,18 +1684,18 @@ void PhaseIdealLoop::try_sink_out_of_loop(Node* n) { assert(!x->is_AddP() || !x->in(AddPNode::Address)->is_AddP() || x->in(AddPNode::Address)->in(AddPNode::Base) == x->in(AddPNode::Base) || !x->in(AddPNode::Address)->in(AddPNode::Base)->eqv_uncast(x->in(AddPNode::Base)), "unexpected AddP shape"); - if (x->in(0) == NULL && !x->is_DecodeNarrowPtr() && + if (x->in(0) == nullptr && !x->is_DecodeNarrowPtr() && !(x->is_AddP() && x->in(AddPNode::Address)->is_AddP() && x->in(AddPNode::Address)->in(AddPNode::Base) == x->in(AddPNode::Base))) { assert(!x->is_Load(), "load should be pinned"); // Use a cast node to pin clone out of loop - Node* cast = NULL; + Node* cast = nullptr; for (uint k = 0; k < x->req(); k++) { Node* in = x->in(k); - if (in != NULL && n_loop->is_member(get_loop(get_ctrl(in)))) { + if (in != nullptr && n_loop->is_member(get_loop(get_ctrl(in)))) { const Type* in_t = _igvn.type(in); cast = ConstraintCastNode::make_cast_for_type(x_ctrl, in, in_t, ConstraintCastNode::UnconditionalDependency); } - if (cast != NULL) { + if (cast != nullptr) { register_new_node(cast, x_ctrl); x->replace_edge(in, cast); // Chain of AddP: @@ -1705,14 +1705,14 @@ void PhaseIdealLoop::try_sink_out_of_loop(Node* n) { Node* u = x->fast_out(i); if (u->is_AddP() && u->in(AddPNode::Base) == n->in(AddPNode::Base)) { 
_igvn.replace_input_of(u, AddPNode::Base, cast); - assert(u->find_out_with(Op_AddP) == NULL, "more than 2 chained AddP nodes?"); + assert(u->find_out_with(Op_AddP) == nullptr, "more than 2 chained AddP nodes?"); } } } break; } } - assert(cast != NULL, "must have added a cast to pin the node"); + assert(cast != nullptr, "must have added a cast to pin the node"); } } _igvn.remove_dead_node(n); @@ -1725,13 +1725,13 @@ void PhaseIdealLoop::try_sink_out_of_loop(Node* n) { // Compute the early control of a node by following its inputs until we reach // nodes that are pinned. Then compute the LCA of the control of all pinned nodes. Node* PhaseIdealLoop::compute_early_ctrl(Node* n, Node* n_ctrl) { - Node* early_ctrl = NULL; + Node* early_ctrl = nullptr; ResourceMark rm; Unique_Node_List wq; wq.push(n); for (uint i = 0; i < wq.size(); i++) { Node* m = wq.at(i); - Node* c = NULL; + Node* c = nullptr; if (m->is_CFG()) { c = m; } else if (m->pinned()) { @@ -1739,14 +1739,14 @@ Node* PhaseIdealLoop::compute_early_ctrl(Node* n, Node* n_ctrl) { } else { for (uint j = 0; j < m->req(); j++) { Node* in = m->in(j); - if (in != NULL) { + if (in != nullptr) { wq.push(in); } } } - if (c != NULL) { + if (c != nullptr) { assert(is_dominator(c, n_ctrl), "control input must dominate current control"); - if (early_ctrl == NULL || is_dominator(early_ctrl, c)) { + if (early_ctrl == nullptr || is_dominator(early_ctrl, c)) { early_ctrl = c; } } @@ -1868,8 +1868,8 @@ Node* PhaseIdealLoop::clone_iff(PhiNode* phi) { } Node* n = phi->in(1); - Node* sample_opaque = NULL; - Node *sample_bool = NULL; + Node* sample_opaque = nullptr; + Node *sample_bool = nullptr; if (n->Opcode() == Op_Opaque4) { sample_opaque = n; sample_bool = n->in(1); @@ -1883,8 +1883,8 @@ Node* PhaseIdealLoop::clone_iff(PhiNode* phi) { PhiNode *phi1 = new PhiNode(phi->in(0), Type::TOP); PhiNode *phi2 = new PhiNode(phi->in(0), Type::TOP); for (i = 1; i < phi->req(); i++) { - Node *n1 = sample_opaque == NULL ? 
phi->in(i)->in(1)->in(1) : phi->in(i)->in(1)->in(1)->in(1); - Node *n2 = sample_opaque == NULL ? phi->in(i)->in(1)->in(2) : phi->in(i)->in(1)->in(1)->in(2); + Node *n1 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(1) : phi->in(i)->in(1)->in(1)->in(1); + Node *n2 = sample_opaque == nullptr ? phi->in(i)->in(1)->in(2) : phi->in(i)->in(1)->in(1)->in(2); phi1->set_req(i, n1); phi2->set_req(i, n2); phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type())); @@ -1924,7 +1924,7 @@ Node* PhaseIdealLoop::clone_iff(PhiNode* phi) { _igvn.register_new_node_with_optimizer(b); set_ctrl(b, phi->in(0)); - if (sample_opaque != NULL) { + if (sample_opaque != nullptr) { Node* opaque = sample_opaque->clone(); opaque->set_req(1, b); _igvn.register_new_node_with_optimizer(opaque); @@ -2035,7 +2035,7 @@ void PhaseIdealLoop::clone_loop_handle_data_uses(Node* old, Node_List &old_new, // Check for data-use outside of loop - at least one of OLD or USE // must not be a CFG node. #ifdef ASSERT - if (loop->_head->as_Loop()->is_strip_mined() && outer_loop->is_member(use_loop) && !loop->is_member(use_loop) && old_new[use->_idx] == NULL) { + if (loop->_head->as_Loop()->is_strip_mined() && outer_loop->is_member(use_loop) && !loop->is_member(use_loop) && old_new[use->_idx] == nullptr) { Node* sfpt = loop->_head->as_CountedLoop()->outer_safepoint(); assert(mode != IgnoreStripMined, "incorrect cloning mode"); assert((mode == ControlAroundStripMined && use == sfpt) || !use->is_reachable_from_root(), "missed a node"); @@ -2140,7 +2140,7 @@ void PhaseIdealLoop::clone_loop_handle_data_uses(Node* old, Node_List &old_new, // If inserting a new Phi, check for prior hits if( idx != 0 ) { Node *hit = _igvn.hash_find_insert(phi); - if( hit == NULL ) { + if( hit == nullptr ) { _igvn.register_new_node_with_optimizer(phi); // Register new phi } else { // or // Remove the new phi from the graph and use the hit @@ -2177,8 +2177,8 @@ static void collect_nodes_in_outer_loop_not_reachable_from_sfpt(Node* n, 
const I bool check_old_new) { for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { Node* u = n->fast_out(j); - assert(check_old_new || old_new[u->_idx] == NULL, "shouldn't have been cloned"); - if (!u->is_CFG() && (!check_old_new || old_new[u->_idx] == NULL)) { + assert(check_old_new || old_new[u->_idx] == nullptr, "shouldn't have been cloned"); + if (!u->is_CFG() && (!check_old_new || old_new[u->_idx] == nullptr)) { Node* c = phase->get_ctrl(u); IdealLoopTree* u_loop = phase->get_loop(c); assert(!loop->is_member(u_loop) || !loop->_body.contains(u), "can be in outer loop or out of both loops only"); @@ -2189,7 +2189,7 @@ static void collect_nodes_in_outer_loop_not_reachable_from_sfpt(Node* n, const I // nodes pinned with control in the outer loop but not referenced from the safepoint must be moved out of // the outer loop too Node* u_c = u->in(0); - if (u_c != NULL) { + if (u_c != nullptr) { IdealLoopTree* u_c_loop = phase->get_loop(u_c); if (outer_loop->is_member(u_c_loop) && !loop->is_member(u_c_loop)) { wq.push(u); @@ -2215,7 +2215,7 @@ void PhaseIdealLoop::clone_outer_loop(LoopNode* head, CloneLoopMode mode, IdealL CountedLoopEndNode* new_cle = new_cl->as_CountedLoop()->loopexit_or_null(); Node* cle_out = cle->proj_out(false); - Node* new_sfpt = NULL; + Node* new_sfpt = nullptr; Node* new_cle_out = cle_out->clone(); old_new.map(cle_out->_idx, new_cle_out); if (mode == CloneIncludesStripMined) { @@ -2270,21 +2270,21 @@ void PhaseIdealLoop::clone_outer_loop(LoopNode* head, CloneLoopMode mode, IdealL Node* n = stack.node(); uint i = stack.index(); while (i < n->req() && - (n->in(i) == NULL || + (n->in(i) == nullptr || !has_ctrl(n->in(i)) || get_loop(get_ctrl(n->in(i))) != outer_loop || - (old_new[n->in(i)->_idx] != NULL && old_new[n->in(i)->_idx]->_idx >= new_counter))) { + (old_new[n->in(i)->_idx] != nullptr && old_new[n->in(i)->_idx]->_idx >= new_counter))) { i++; } if (i < n->req()) { stack.set_index(i+1); stack.push(n->in(i), 0); } else { - 
assert(old_new[n->_idx] == NULL || n == sfpt || old_new[n->_idx]->_idx < new_counter, "no clone yet"); + assert(old_new[n->_idx] == nullptr || n == sfpt || old_new[n->_idx]->_idx < new_counter, "no clone yet"); Node* m = n == sfpt ? new_sfpt : n->clone(); - if (m != NULL) { + if (m != nullptr) { for (uint i = 0; i < n->req(); i++) { - if (m->in(i) != NULL && old_new[m->in(i)->_idx] != NULL) { + if (m->in(i) != nullptr && old_new[m->in(i)->_idx] != nullptr) { m->set_req(i, old_new[m->in(i)->_idx]); } } @@ -2330,7 +2330,7 @@ void PhaseIdealLoop::clone_outer_loop(LoopNode* head, CloneLoopMode mode, IdealL for (uint i = 0; i < wq.size(); i++) { Node* n = wq.at(i); set_ctrl(n, new_ctrl); - if (n->in(0) != NULL) { + if (n->in(0) != nullptr) { _igvn.replace_input_of(n, 0, new_ctrl); } collect_nodes_in_outer_loop_not_reachable_from_sfpt(n, loop, outer_loop, old_new, wq, this, false); @@ -2362,7 +2362,7 @@ void PhaseIdealLoop::clone_outer_loop(LoopNode* head, CloneLoopMode mode, IdealL // or loop unrolling or iteration splitting (Range-Check-Elimination), etc. // // Parameter side_by_size_idom: -// When side_by_size_idom is NULL, the dominator tree is constructed for +// When side_by_size_idom is null, the dominator tree is constructed for // the clone loop to dominate the original. Used in construction of // pre-main-post loop sequence. // When nonnull, the clone and original are side-by-side, both are @@ -2376,7 +2376,7 @@ void PhaseIdealLoop::clone_loop( IdealLoopTree *loop, Node_List &old_new, int dd if (C->do_vector_loop() && PrintOpto) { const char* mname = C->method()->name()->as_quoted_ascii(); - if (mname != NULL) { + if (mname != nullptr) { tty->print("PhaseIdealLoop::clone_loop: for vectorize method %s\n", mname); } } @@ -2417,9 +2417,9 @@ void PhaseIdealLoop::clone_loop( IdealLoopTree *loop, Node_List &old_new, int dd // Step 4: If loop-invariant use is not control, it must be dominated by a // loop exit IfFalse/IfTrue. Find "proper" loop exit. 
Make a Region // there if needed. Make a Phi there merging old and new used values. - Node_List *split_if_set = NULL; - Node_List *split_bool_set = NULL; - Node_List *split_cex_set = NULL; + Node_List *split_if_set = nullptr; + Node_List *split_bool_set = nullptr; + Node_List *split_cex_set = nullptr; fix_data_uses(loop->_body, loop, mode, outer_loop, new_counter, old_new, worklist, split_if_set, split_bool_set, split_cex_set); for (uint i = 0; i < extra_data_nodes.size(); i++) { @@ -2464,7 +2464,7 @@ void PhaseIdealLoop::finish_clone_loop(Node_List* split_if_set, Node_List* split assert(b->in(0)->is_Region(), ""); assert(b->in(1)->is_Phi(), ""); assert(b->in(0)->in(0) == b->in(1)->in(0), ""); - split_up(b, b->in(0), NULL); + split_up(b, b->in(0), nullptr); } } } @@ -2502,7 +2502,7 @@ void PhaseIdealLoop::fix_ctrl_uses(const Node_List& body, const IdealLoopTree* l assert(use->is_Proj(), "" ); Node* nnn = old_new[old->_idx]; - Node* newuse = NULL; + Node* newuse = nullptr; if (head->is_strip_mined() && mode != IgnoreStripMined) { CountedLoopNode* cl = head->as_CountedLoop(); CountedLoopEndNode* cle = cl->loopexit(); @@ -2518,12 +2518,12 @@ void PhaseIdealLoop::fix_ctrl_uses(const Node_List& body, const IdealLoopTree* l } } } - if (newuse == NULL) { + if (newuse == nullptr) { newuse = use->clone(); } // Clone the loop exit control projection - if (C->do_vector_loop() && cm != NULL) { + if (C->do_vector_loop() && cm != nullptr) { cm->verify_insert_and_clone(use, newuse, cm->clone_idx()); } newuse->set_req(0,nnn); @@ -2576,7 +2576,7 @@ void PhaseIdealLoop::fix_ctrl_uses(const Node_List& body, const IdealLoopTree* l r->set_req(2, use); _igvn.register_new_node_with_optimizer(r); set_loop(r, use_loop); - set_idom(r, (side_by_side_idom == NULL) ? newuse->in(0) : side_by_side_idom, dd_r); + set_idom(r, (side_by_side_idom == nullptr) ? 
newuse->in(0) : side_by_side_idom, dd_r); } // End of if a loop-exit test } } @@ -2594,7 +2594,7 @@ void PhaseIdealLoop::fix_body_edges(const Node_List &body, IdealLoopTree* loop, set_loop(nnn, parent); if (old->outcnt() > 0) { Node* dom = idom(old); - if (old_new[dom->_idx] != NULL) { + if (old_new[dom->_idx] != nullptr) { dom = old_new[dom->_idx]; set_idom(nnn, dom, dd ); } @@ -2603,10 +2603,10 @@ void PhaseIdealLoop::fix_body_edges(const Node_List &body, IdealLoopTree* loop, // Correct edges to the new node for (uint j = 0; j < nnn->req(); j++) { Node *n = nnn->in(j); - if (n != NULL) { + if (n != nullptr) { IdealLoopTree *old_in_loop = get_loop(has_ctrl(n) ? get_ctrl(n) : n); if (loop->is_member(old_in_loop)) { - if (old_new[n->_idx] != NULL) { + if (old_new[n->_idx] != nullptr) { nnn->set_req(j, old_new[n->_idx]); } else { assert(!body.contains(n), ""); @@ -2628,7 +2628,7 @@ void PhaseIdealLoop::clone_loop_body(const Node_List& body, Node_List &old_new, // Reduction flag is not copied by default. Copy it here when cloning the entire loop body. nnn->add_flag(Node::Flag_is_reduction); } - if (C->do_vector_loop() && cm != NULL) { + if (C->do_vector_loop() && cm != nullptr) { cm->verify_insert_and_clone(old, nnn, cm->clone_idx()); } _igvn.register_new_node_with_optimizer(nnn); @@ -2642,10 +2642,10 @@ void PhaseIdealLoop::clone_loop_body(const Node_List& body, Node_List &old_new, // with an optional truncation (left-shift followed by a right-shift) // of the add. Returns zero if not an iv. 
int PhaseIdealLoop::stride_of_possible_iv(Node* iff) { - Node* trunc1 = NULL; - Node* trunc2 = NULL; - const TypeInteger* ttype = NULL; - if (!iff->is_If() || iff->in(1) == NULL || !iff->in(1)->is_Bool()) { + Node* trunc1 = nullptr; + Node* trunc2 = nullptr; + const TypeInteger* ttype = nullptr; + if (!iff->is_If() || iff->in(1) == nullptr || !iff->in(1)->is_Bool()) { return 0; } BoolNode* bl = iff->in(1)->as_Bool(); @@ -2657,7 +2657,7 @@ int PhaseIdealLoop::stride_of_possible_iv(Node* iff) { if (is_member(get_loop(iff), get_ctrl(cmp->in(2)))) { return 0; } - Node* add2 = NULL; + Node* add2 = nullptr; Node* cmp1 = cmp->in(1); if (cmp1->is_Phi()) { // (If (Bool (CmpX phi:(Phi ...(Optional-trunc(AddI phi add2))) ))) @@ -2686,7 +2686,7 @@ int PhaseIdealLoop::stride_of_possible_iv(Node* iff) { } } } - if (add2 != NULL) { + if (add2 != nullptr) { const TypeInt* add2t = _igvn.type(add2)->is_int(); if (add2t->is_con()) { return add2t->get_con(); @@ -2699,13 +2699,13 @@ int PhaseIdealLoop::stride_of_possible_iv(Node* iff) { //---------------------- stay_in_loop ------------------------------------- // Return the (unique) control output node that's in the loop (if it exists.) 
Node* PhaseIdealLoop::stay_in_loop( Node* n, IdealLoopTree *loop) { - Node* unique = NULL; - if (!n) return NULL; + Node* unique = nullptr; + if (!n) return nullptr; for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { Node* use = n->fast_out(i); if (!has_ctrl(use) && loop->is_member(get_loop(use))) { - if (unique != NULL) { - return NULL; + if (unique != nullptr) { + return nullptr; } unique = use; } @@ -2737,7 +2737,7 @@ ProjNode* PhaseIdealLoop::proj_clone(ProjNode* p, IfNode* iff) { //------------------------------ short_circuit_if ------------------------------------- // Force the iff control output to be the live_proj Node* PhaseIdealLoop::short_circuit_if(IfNode* iff, ProjNode* live_proj) { - guarantee(live_proj != NULL, "null projection"); + guarantee(live_proj != nullptr, "null projection"); int proj_con = live_proj->_con; assert(proj_con == 0 || proj_con == 1, "false or true projection"); Node *con = _igvn.intcon(proj_con); @@ -2779,7 +2779,7 @@ ProjNode* PhaseIdealLoop::insert_if_before_proj(Node* left, bool Signed, BoolTes _igvn.rehash_node_delayed(iff); _igvn.rehash_node_delayed(proj); - proj->set_req(0, NULL); // temporary disconnect + proj->set_req(0, nullptr); // temporary disconnect ProjNode* proj2 = proj_clone(proj, iff); register_node(proj2, loop, iff, ddepth); @@ -2799,7 +2799,7 @@ ProjNode* PhaseIdealLoop::insert_if_before_proj(Node* left, bool Signed, BoolTes set_idom(proj, new_if, ddepth); ProjNode* new_exit = proj_clone(other_proj, new_if)->as_Proj(); - guarantee(new_exit != NULL, "null exit node"); + guarantee(new_exit != nullptr, "null exit node"); register_node(new_exit, get_loop(other_proj), new_if, ddepth); return new_exit; @@ -2840,7 +2840,7 @@ RegionNode* PhaseIdealLoop::insert_region_before_proj(ProjNode* proj) { _igvn.rehash_node_delayed(iff); _igvn.rehash_node_delayed(proj); - proj->set_req(0, NULL); // temporary disconnect + proj->set_req(0, nullptr); // temporary disconnect ProjNode* proj2 = proj_clone(proj, iff); 
register_node(proj2, loop, iff, ddepth); @@ -2848,7 +2848,7 @@ RegionNode* PhaseIdealLoop::insert_region_before_proj(ProjNode* proj) { reg->set_req(1, proj2); register_node(reg, loop, iff, ddepth); - IfNode* dum_if = new IfNode(reg, short_circuit_if(NULL, proj), iff->_prob, iff->_fcnt); + IfNode* dum_if = new IfNode(reg, short_circuit_if(nullptr, proj), iff->_prob, iff->_fcnt); register_node(dum_if, loop, reg, ddepth); proj->set_req(0, dum_if); // reattach @@ -2901,25 +2901,25 @@ IfNode* PhaseIdealLoop::insert_cmpi_loop_exit(IfNode* if_cmpu, IdealLoopTree *lo const bool Unsigned = false; BoolNode* bol = if_cmpu->in(1)->as_Bool(); - if (bol->_test._test != BoolTest::lt) return NULL; + if (bol->_test._test != BoolTest::lt) return nullptr; CmpNode* cmpu = bol->in(1)->as_Cmp(); - if (cmpu->Opcode() != Op_CmpU) return NULL; + if (cmpu->Opcode() != Op_CmpU) return nullptr; int stride = stride_of_possible_iv(if_cmpu); - if (stride == 0) return NULL; + if (stride == 0) return nullptr; Node* lp_proj = stay_in_loop(if_cmpu, loop); - guarantee(lp_proj != NULL, "null loop node"); + guarantee(lp_proj != nullptr, "null loop node"); ProjNode* lp_continue = lp_proj->as_Proj(); ProjNode* lp_exit = if_cmpu->proj_out(!lp_continue->is_IfTrue())->as_Proj(); if (!lp_exit->is_IfFalse()) { // The loop exit condition is (i (i >= 0 && i < limit). // We therefore can't add a single exit condition. - return NULL; + return nullptr; } // The loop exit condition is !(i (i < 0 || i >= limit). // Split out the exit condition (i < 0) for stride < 0 or (i >= limit) for stride > 0. 
- Node* limit = NULL; + Node* limit = nullptr; if (stride > 0) { limit = cmpu->in(2); } else { @@ -2928,7 +2928,7 @@ IfNode* PhaseIdealLoop::insert_cmpi_loop_exit(IfNode* if_cmpu, IdealLoopTree *lo } // Create a new region on the exit path RegionNode* reg = insert_region_before_proj(lp_exit); - guarantee(reg != NULL, "null region node"); + guarantee(reg != nullptr, "null region node"); // Clone the if-cmpu-true-false using a signed compare BoolTest::mask rel_i = stride > 0 ? bol->_test._test : BoolTest::ge; @@ -3151,7 +3151,7 @@ void PhaseIdealLoop::insert_phi_for_loop( Node* use, uint idx, Node* lp_entry_va phi->set_req(LoopNode::EntryControl, lp_entry_val); // Use existing phi if it already exists Node *hit = _igvn.hash_find_insert(phi); - if( hit == NULL ) { + if( hit == nullptr ) { _igvn.register_new_node_with_optimizer(phi); set_ctrl(phi, lp); } else { @@ -3487,8 +3487,8 @@ bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) { // Walk up dominators to loop head looking for first loop exit // which is executed on every path thru loop. - IfNode *peel_if = NULL; - IfNode *peel_if_cmpu = NULL; + IfNode *peel_if = nullptr; + IfNode *peel_if_cmpu = nullptr; Node *iff = loop->tail(); while (iff != head) { @@ -3512,20 +3512,20 @@ bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) { } // Prefer signed compare over unsigned compare. 
- IfNode* new_peel_if = NULL; - if (peel_if == NULL) { - if (!PartialPeelAtUnsignedTests || peel_if_cmpu == NULL) { + IfNode* new_peel_if = nullptr; + if (peel_if == nullptr) { + if (!PartialPeelAtUnsignedTests || peel_if_cmpu == nullptr) { return false; // No peel point found } new_peel_if = insert_cmpi_loop_exit(peel_if_cmpu, loop); - if (new_peel_if == NULL) { + if (new_peel_if == nullptr) { return false; // No peel point found } peel_if = new_peel_if; } Node* last_peel = stay_in_loop(peel_if, loop); Node* first_not_peeled = stay_in_loop(last_peel, loop); - if (first_not_peeled == NULL || first_not_peeled == head) { + if (first_not_peeled == nullptr || first_not_peeled == head) { return false; } @@ -3632,7 +3632,7 @@ bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) { if (!has_use_internal_to_set(n, peel, loop)) { // if not pinned and not a load (which maybe anti-dependent on a store) // and not a CMove (Matcher expects only bool->cmove). - if (n->in(0) == NULL && !n->is_Load() && !n->is_CMove()) { + if (n->in(0) == nullptr && !n->is_Load() && !n->is_CMove()) { int new_clones = clone_for_use_outside_loop(loop, n, worklist); if (new_clones == -1) { too_many_clones = true; @@ -3670,10 +3670,10 @@ bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) { #ifndef PRODUCT if (TracePartialPeeling && exceed_phi_limit) { tty->print_cr("\nToo many new phis: %d old %d new cmpi: %c", - new_phi_cnt, old_phi_cnt, new_peel_if != NULL?'T':'F'); + new_phi_cnt, old_phi_cnt, new_peel_if != nullptr?'T':'F'); } #endif - if (new_peel_if != NULL) { + if (new_peel_if != nullptr) { remove_cmpi_loop_exit(new_peel_if, loop); } // Inhibit more partial peeling on this loop @@ -3746,7 +3746,7 @@ bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) { if ( loop->is_member(get_loop( use_c )) ) { // use is in loop - if (old_new[use->_idx] != NULL) { // null for dead code + if (old_new[use->_idx] != nullptr) { // null for 
dead code Node* use_clone = old_new[use->_idx]; _igvn.replace_input_of(use, j, C->top()); insert_phi_for_loop( use_clone, j, old_new[def->_idx], def, new_head_clone ); @@ -3780,7 +3780,7 @@ bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) { for (uint i = 0; i < loop->_body.size(); i++) { Node *n = loop->_body.at(i); - if (!n->is_CFG() && n->in(0) != NULL && + if (!n->is_CFG() && n->in(0) != nullptr && not_peel.test(n->_idx) && peel.test(n->in(0)->_idx)) { Node* n_clone = old_new[n->_idx]; _igvn.replace_input_of(n_clone, 0, new_head_clone); @@ -3911,8 +3911,8 @@ bool PhaseIdealLoop::duplicate_loop_backedge(IdealLoopTree *loop, Node_List &old LoopNode *head = loop->_head->as_Loop(); - Node* region = NULL; - IfNode* exit_test = NULL; + Node* region = nullptr; + IfNode* exit_test = nullptr; uint inner; float f; if (StressDuplicateBackedge) { @@ -3928,7 +3928,7 @@ bool PhaseIdealLoop::duplicate_loop_backedge(IdealLoopTree *loop, Node_List &old c = idom(c); } - if (region == NULL) { + if (region == nullptr) { return false; } @@ -3936,16 +3936,16 @@ bool PhaseIdealLoop::duplicate_loop_backedge(IdealLoopTree *loop, Node_List &old } else { // Is the shape of the loop that of a counted loop... 
Node* back_control = loop_exit_control(head, loop); - if (back_control == NULL) { + if (back_control == nullptr) { return false; } BoolTest::mask bt = BoolTest::illegal; float cl_prob = 0; - Node* incr = NULL; - Node* limit = NULL; + Node* incr = nullptr; + Node* limit = nullptr; Node* cmp = loop_exit_test(back_control, loop, incr, limit, bt, cl_prob); - if (cmp == NULL || cmp->Opcode() != Op_CmpI) { + if (cmp == nullptr || cmp->Opcode() != Op_CmpI) { return false; } @@ -3964,25 +3964,25 @@ bool PhaseIdealLoop::duplicate_loop_backedge(IdealLoopTree *loop, Node_List &old inner = 0; for (uint i = 1; i < incr->req(); ++i) { Node* in = incr->in(i); - Node* trunc1 = NULL; - Node* trunc2 = NULL; - const TypeInteger* iv_trunc_t = NULL; + Node* trunc1 = nullptr; + Node* trunc2 = nullptr; + const TypeInteger* iv_trunc_t = nullptr; Node* orig_in = in; if (!(in = CountedLoopNode::match_incr_with_optional_truncation(in, &trunc1, &trunc2, &iv_trunc_t, T_INT))) { continue; } assert(in->Opcode() == Op_AddI, "wrong increment code"); - Node* xphi = NULL; + Node* xphi = nullptr; Node* stride = loop_iv_stride(in, loop, xphi); - if (stride == NULL) { + if (stride == nullptr) { continue; } - PhiNode* phi = loop_iv_phi(xphi, NULL, head, loop); - if (phi == NULL || - (trunc1 == NULL && phi->in(LoopNode::LoopBackControl) != incr) || - (trunc1 != NULL && phi->in(LoopNode::LoopBackControl) != trunc1)) { + PhiNode* phi = loop_iv_phi(xphi, nullptr, head, loop); + if (phi == nullptr || + (trunc1 == nullptr && phi->in(LoopNode::LoopBackControl) != incr) || + (trunc1 != nullptr && phi->in(LoopNode::LoopBackControl) != trunc1)) { return false; } @@ -4052,7 +4052,7 @@ bool PhaseIdealLoop::duplicate_loop_backedge(IdealLoopTree *loop, Node_List &old } // clone shared_stmt - clone_loop_body(wq, old_new, NULL); + clone_loop_body(wq, old_new, nullptr); Node* region_clone = old_new[region->_idx]; region_clone->set_req(inner, C->top()); @@ -4068,7 +4068,7 @@ bool 
PhaseIdealLoop::duplicate_loop_backedge(IdealLoopTree *loop, Node_List &old // Make one of the shared_stmt copies only reachable from stmt1, the // other only from stmt2..stmtn. - Node* dom = NULL; + Node* dom = nullptr; for (uint i = 1; i < region->req(); ++i) { if (i != inner) { _igvn.replace_input_of(region, i, C->top()); @@ -4077,7 +4077,7 @@ bool PhaseIdealLoop::duplicate_loop_backedge(IdealLoopTree *loop, Node_List &old if (in->is_top()) { continue; } - if (dom == NULL) { + if (dom == nullptr) { dom = in; } else { dom = dom_lca(dom, in); @@ -4093,7 +4093,7 @@ bool PhaseIdealLoop::duplicate_loop_backedge(IdealLoopTree *loop, Node_List &old Node* outer_phi = u->clone(); outer_phi->set_req(0, outer_head); Node* backedge = old_new[u->in(LoopNode::LoopBackControl)->_idx]; - if (backedge == NULL) { + if (backedge == nullptr) { backedge = u->in(LoopNode::LoopBackControl); } outer_phi->set_req(LoopNode::LoopBackControl, backedge); @@ -4105,16 +4105,16 @@ bool PhaseIdealLoop::duplicate_loop_backedge(IdealLoopTree *loop, Node_List &old // create control and data nodes for out of loop uses (including region2) Node_List worklist; uint new_counter = C->unique(); - fix_ctrl_uses(wq, loop, old_new, ControlAroundStripMined, outer_head, NULL, worklist); + fix_ctrl_uses(wq, loop, old_new, ControlAroundStripMined, outer_head, nullptr, worklist); - Node_List *split_if_set = NULL; - Node_List *split_bool_set = NULL; - Node_List *split_cex_set = NULL; + Node_List *split_if_set = nullptr; + Node_List *split_bool_set = nullptr; + Node_List *split_cex_set = nullptr; fix_data_uses(wq, loop, ControlAroundStripMined, head->is_strip_mined() ? 
loop->_parent : loop, new_counter, old_new, worklist, split_if_set, split_bool_set, split_cex_set); finish_clone_loop(split_if_set, split_bool_set, split_cex_set); - if (exit_test != NULL) { + if (exit_test != nullptr) { float cnt = exit_test->_fcnt; if (cnt != COUNT_UNKNOWN) { exit_test->_fcnt = cnt * f; diff --git a/src/hotspot/share/opto/machnode.cpp b/src/hotspot/share/opto/machnode.cpp index dd36081cbf9..572823e69e6 100644 --- a/src/hotspot/share/opto/machnode.cpp +++ b/src/hotspot/share/opto/machnode.cpp @@ -48,7 +48,7 @@ relocInfo::relocType MachOper::constant_reloc() const { return relocInfo::none; jdouble MachOper::constantD() const { ShouldNotReachHere(); return 0.0; } jfloat MachOper::constantF() const { ShouldNotReachHere(); return 0.0; } jlong MachOper::constantL() const { ShouldNotReachHere(); return CONST64(0) ; } -TypeOopPtr *MachOper::oop() const { return NULL; } +TypeOopPtr *MachOper::oop() const { return nullptr; } int MachOper::ccode() const { return 0x00; } // A zero, default, indicates this value is not needed. 
// May need to lookup the base register, as done in int_ and ext_format @@ -80,7 +80,7 @@ const Type *MachOper::type() const { //------------------------------in_RegMask------------------------------------- const RegMask *MachOper::in_RegMask(int index) const { ShouldNotReachHere(); - return NULL; + return nullptr; } //------------------------------dump_spec-------------------------------------- @@ -186,7 +186,7 @@ bool MachNode::cmp( const Node &node ) const { // Return an equivalent instruction using memory for cisc_operand position MachNode *MachNode::cisc_version(int offset) { ShouldNotCallThis(); - return NULL; + return nullptr; } void MachNode::use_cisc_RegMask() { @@ -214,7 +214,7 @@ const RegMask &MachNode::in_RegMask( uint idx ) const { } const RegMask *rm = cisc_RegMask(); - if( rm == NULL || (int)opcnt != cisc_operand() ) { + if( rm == nullptr || (int)opcnt != cisc_operand() ) { rm = _opnds[opcnt]->in_RegMask(idx-skipped); } return *rm; @@ -228,9 +228,9 @@ const MachOper* MachNode::memory_inputs(Node* &base, Node* &index) const { base = NodeSentinel; index = NodeSentinel; } else { - base = NULL; - index = NULL; - if (oper != NULL) { + base = nullptr; + index = nullptr; + if (oper != nullptr) { // It has a unique memory operand. Find its index. int oper_idx = num_opnds(); while (--oper_idx >= 0) { @@ -259,36 +259,36 @@ const Node* MachNode::get_base_and_disp(intptr_t &offset, const TypePtr* &adr_ty Node* index; const MachOper* oper = memory_inputs(base, index); - if (oper == NULL) { - // Base has been set to NULL + if (oper == nullptr) { + // Base has been set to null offset = 0; } else if (oper == (MachOper*)-1) { // Base has been set to NodeSentinel // There is not a unique memory use here. We will fall to AliasIdxBot. 
offset = Type::OffsetBot; } else { - // Base may be NULL, even if offset turns out to be != 0 + // Base may be null, even if offset turns out to be != 0 intptr_t disp = oper->constant_disp(); int scale = oper->scale(); // Now we have collected every part of the ADLC MEMORY_INTER. // See if it adds up to a base + offset. - if (index != NULL) { + if (index != nullptr) { const Type* t_index = index->bottom_type(); if (t_index->isa_narrowoop() || t_index->isa_narrowklass()) { // EncodeN, LoadN, LoadConN, LoadNKlass, // EncodeNKlass, LoadConNklass. // Memory references through narrow oops have a // funny base so grab the type from the index: // [R12 + narrow_oop_reg<<3 + offset] - assert(base == NULL, "Memory references through narrow oops have no base"); + assert(base == nullptr, "Memory references through narrow oops have no base"); offset = disp; adr_type = t_index->make_ptr()->add_offset(offset); - return NULL; + return nullptr; } else if (!index->is_Con()) { disp = Type::OffsetBot; } else if (disp != Type::OffsetBot) { const TypeX* ti = t_index->isa_intptr_t(); - if (ti == NULL) { + if (ti == nullptr) { disp = Type::OffsetBot; // a random constant?? } else { disp += ti->get_con() << scale; @@ -302,8 +302,8 @@ const Node* MachNode::get_base_and_disp(intptr_t &offset, const TypePtr* &adr_ty // Lookup the TypePtr used by indOffset32X, a compile-time constant oop, // Add the offset determined by the "base", or use Type::OffsetBot. 
if( adr_type == TYPE_PTR_SENTINAL ) { - const TypePtr *t_disp = oper->disp_as_type(); // only !NULL for indOffset32X - if (t_disp != NULL) { + const TypePtr *t_disp = oper->disp_as_type(); // only not null for indOffset32X + if (t_disp != nullptr) { offset = Type::OffsetBot; const Type* t_base = base->bottom_type(); if (t_base->isa_intptr_t()) { @@ -313,10 +313,10 @@ const Node* MachNode::get_base_and_disp(intptr_t &offset, const TypePtr* &adr_ty } } adr_type = t_disp->add_offset(offset); - } else if( base == NULL && offset != 0 && offset != Type::OffsetBot ) { + } else if( base == nullptr && offset != 0 && offset != Type::OffsetBot ) { // Use ideal type if it is oop ptr. const TypePtr *tp = oper->type()->isa_ptr(); - if( tp != NULL) { + if( tp != nullptr) { adr_type = tp; } } @@ -341,12 +341,12 @@ const class TypePtr *MachNode::adr_type() const { // %%%%% Someday we'd like to allow constant oop offsets which // would let Intel load from static globals in 1 instruction. // Currently Intel requires 2 instructions and a register temp. - if (base == NULL) { - // NULL base, zero offset means no memory at all (a null pointer!) + if (base == nullptr) { + // null base, zero offset means no memory at all (a null pointer!) 
if (offset == 0) { - return NULL; + return nullptr; } - // NULL base, any offset means any pointer whatever + // null base, any offset means any pointer whatever if (offset == Type::OffsetBot) { return TypePtr::BOTTOM; } @@ -379,7 +379,7 @@ const class TypePtr *MachNode::adr_type() const { const TypePtr *tp = t->isa_ptr(); // be conservative if we do not recognize the type - if (tp == NULL) { + if (tp == nullptr) { assert(false, "this path may produce not optimal code"); return TypePtr::BOTTOM; } @@ -508,7 +508,7 @@ bool MachNode::rematerialize() const { void MachNode::dump_spec(outputStream *st) const { uint cnt = num_opnds(); for( uint i=0; idump_spec(st); } else { st->print(" _"); @@ -532,10 +532,10 @@ void MachNode::dump_format(PhaseRegAlloc *ra, outputStream *st) const { //============================================================================= #ifndef PRODUCT void MachTypeNode::dump_spec(outputStream *st) const { - if (_bottom_type != NULL) { + if (_bottom_type != nullptr) { _bottom_type->dump_on(st); } else { - st->print(" NULL"); + st->print(" null"); } if (barrier_data() != 0) { st->print(" barrier("); @@ -610,16 +610,16 @@ const TypePtr *MachProjNode::adr_type() const { if (bottom_type() == Type::MEMORY) { // in(0) might be a narrow MemBar; otherwise we will report TypePtr::BOTTOM Node* ctrl = in(0); - if (ctrl == NULL) return NULL; // node is dead + if (ctrl == nullptr) return nullptr; // node is dead const TypePtr* adr_type = ctrl->adr_type(); #ifdef ASSERT if (!VMError::is_error_reported() && !Node::in_dump()) - assert(adr_type != NULL, "source must have adr_type"); + assert(adr_type != nullptr, "source must have adr_type"); #endif return adr_type; } assert(bottom_type()->base() != Type::Memory, "no other memories?"); - return NULL; + return nullptr; } #ifndef PRODUCT @@ -683,9 +683,9 @@ const Type* MachCallNode::Value(PhaseGVN* phase) const { return tf()->range(); } #ifndef PRODUCT void MachCallNode::dump_spec(outputStream *st) const { 
st->print("# "); - if (tf() != NULL) tf()->dump_on(st); + if (tf() != nullptr) tf()->dump_on(st); if (_cnt != COUNT_UNKNOWN) st->print(" C=%f",_cnt); - if (jvms() != NULL) jvms()->dump_spec(st); + if (jvms() != nullptr) jvms()->dump_spec(st); } #endif @@ -779,7 +779,7 @@ bool MachCallStaticJavaNode::cmp( const Node &n ) const { //----------------------------uncommon_trap_request---------------------------- // If this is an uncommon trap, return the request code, else zero. int MachCallStaticJavaNode::uncommon_trap_request() const { - if (_name != NULL && !strcmp(_name, "uncommon_trap")) { + if (_name != nullptr && !strcmp(_name, "uncommon_trap")) { return CallStaticJavaNode::extract_uncommon_trap_request(this); } return 0; @@ -799,7 +799,7 @@ void MachCallStaticJavaNode::dump_trap_args(outputStream *st) const { void MachCallStaticJavaNode::dump_spec(outputStream *st) const { st->print("Static "); - if (_name != NULL) { + if (_name != nullptr) { st->print("wrapper for: %s", _name ); dump_trap_args(st); st->print(" "); diff --git a/src/hotspot/share/opto/machnode.hpp b/src/hotspot/share/opto/machnode.hpp index 921cf158ee9..a455e420d5d 100644 --- a/src/hotspot/share/opto/machnode.hpp +++ b/src/hotspot/share/opto/machnode.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -166,7 +166,7 @@ public: // Access the TypeKlassPtr of operands with a base==RegI and disp==RegP // Only returns non-null value for x86_32.ad's indOffset32X - virtual const TypePtr *disp_as_type() const { return NULL; } + virtual const TypePtr *disp_as_type() const { return nullptr; } // Return the label virtual Label *label() const; @@ -199,7 +199,7 @@ public: // Check whether o is a valid oper. 
static bool notAnOper(const MachOper *o) { - if (o == NULL) return true; + if (o == nullptr) return true; if (((intptr_t)o & 1) != 0) return true; if (*(address*)o == badAddress) return true; // kill by Node::destruct return false; @@ -212,7 +212,7 @@ public: // ADLC inherit from this class. class MachNode : public Node { public: - MachNode() : Node((uint)0), _barrier(0), _num_opnds(0), _opnds(NULL) { + MachNode() : Node((uint)0), _barrier(0), _num_opnds(0), _opnds(nullptr) { init_class_id(Class_Mach); } // Required boilerplate @@ -264,7 +264,7 @@ public: virtual const RegMask &in_RegMask(uint) const; // cisc-spillable instructions redefine for use by in_RegMask - virtual const RegMask *cisc_RegMask() const { return NULL; } + virtual const RegMask *cisc_RegMask() const { return nullptr; } // If this instruction is a 2-address instruction, then return the // index of the input which must match the output. Not necessary @@ -334,7 +334,7 @@ public: } // If this is a memory op, return the base pointer and fixed offset. - // If there are no such, return NULL. If there are multiple addresses + // If there are no such, return null. If there are multiple addresses // or the address is indeterminate (rare cases) then return (Node*)-1, // which serves as node bottom. // If the offset is not statically determined, set it to Type::OffsetBot. @@ -346,14 +346,14 @@ public: // Helper for get_base_and_disp: find the base and index input nodes. // Returns the MachOper as determined by memory_operand(), for use, if // needed by the caller. If (MachOper *)-1 is returned, base and index - // are set to NodeSentinel. If (MachOper *) NULL is returned, base and - // index are set to NULL. + // are set to NodeSentinel. If null is returned, base and + // index are set to null. const MachOper* memory_inputs(Node* &base, Node* &index) const; // Helper for memory_inputs: Which operand carries the necessary info? - // By default, returns NULL, which means there is no such operand. 
+ // By default, returns null, which means there is no such operand. // If it returns (MachOper*)-1, this means there are multiple memories. - virtual const MachOper* memory_operand() const { return NULL; } + virtual const MachOper* memory_operand() const { return nullptr; } // Call "get_base_and_disp" to decide which category of memory is used here. virtual const class TypePtr *adr_type() const; @@ -400,7 +400,7 @@ public: // Define the following defaults for non-matched machine nodes virtual uint oper_input_base() const { return 0; } virtual uint rule() const { return 9999999; } - virtual const class Type *bottom_type() const { return _opnds == NULL ? Type::CONTROL : MachNode::bottom_type(); } + virtual const class Type *bottom_type() const { return _opnds == nullptr ? Type::CONTROL : MachNode::bottom_type(); } }; //------------------------------MachTypeNode---------------------------- @@ -600,7 +600,7 @@ public: MachIdealNode(), _in(&in), _out(&out), _type(n->bottom_type()), _spill_type(spill_type) { init_class_id(Class_MachSpillCopy); init_flags(Flag_is_Copy); - add_req(NULL); + add_req(nullptr); add_req(n); } virtual uint size_of() const { return sizeof(*this); } @@ -668,7 +668,7 @@ class MachMergeNode : public MachIdealNode { public: MachMergeNode(Node *n1) { init_class_id(Class_MachMerge); - add_req(NULL); + add_req(nullptr); add_req(n1); } virtual const RegMask &out_RegMask() const { return in(1)->out_RegMask(); } @@ -694,7 +694,7 @@ public: virtual void save_label(Label** label, uint* block_num) = 0; // Support for short branches - virtual MachNode *short_branch_version() { return NULL; } + virtual MachNode *short_branch_version() { return nullptr; } virtual bool pinned() const { return true; }; }; @@ -839,7 +839,7 @@ public: OopMap* oop_map() const { return _oop_map; } void set_oop_map(OopMap* om) { _oop_map = om; } - MachSafePointNode() : MachReturnNode(), _oop_map(NULL), _jvms(NULL), _jvmadj(0), _has_ea_local_in_scope(false) { + MachSafePointNode() : 
MachReturnNode(), _oop_map(nullptr), _jvms(nullptr), _jvmadj(0), _has_ea_local_in_scope(false) { init_class_id(Class_MachSafePoint); } @@ -951,11 +951,11 @@ public: if (_override_symbolic_info) { // Attach corresponding Method* to the call site, so VM can use it during resolution // instead of querying symbolic info from bytecode. - assert(_method != NULL, "method should be set"); + assert(_method != nullptr, "method should be set"); assert(_method->constant_encoding()->is_method(), "should point to a Method"); return cbuf.oop_recorder()->find_index(_method->constant_encoding()); } - return 0; // Use symbolic info from bytecode (resolved_method == NULL). + return 0; // Use symbolic info from bytecode (resolved_method is null). } #ifndef PRODUCT @@ -1005,7 +1005,7 @@ class MachCallRuntimeNode : public MachCallNode { virtual bool cmp( const Node &n ) const; virtual uint size_of() const; // Size is bigger public: - const char *_name; // Printable name, if _method is NULL + const char *_name; // Printable name, if _method is null bool _leaf_no_fp; // Is this CallLeafNoFP? MachCallRuntimeNode() : MachCallNode() { init_class_id(Class_MachCallRuntime); @@ -1064,7 +1064,7 @@ public: init_class_id(Class_MachTemp); _num_opnds = 1; _opnds = _opnd_array; - add_req(NULL); + add_req(nullptr); _opnds[0] = oper; } virtual uint size_of() const { return sizeof(MachTempNode); } @@ -1096,7 +1096,7 @@ public: virtual MachOper *clone() const; - virtual Label *label() const { assert(_label != NULL, "need Label"); return _label; } + virtual Label *label() const { assert(_label != nullptr, "need Label"); return _label; } virtual uint opcode() const; diff --git a/src/hotspot/share/opto/macro.cpp b/src/hotspot/share/opto/macro.cpp index 62b41d720b1..a581c963910 100644 --- a/src/hotspot/share/opto/macro.cpp +++ b/src/hotspot/share/opto/macro.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2022, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -75,7 +75,7 @@ int PhaseMacroExpand::replace_input(Node *use, Node *oldref, Node *newref) { else use->set_prec(j, newref); nreplacements++; - } else if (j >= req && uin == NULL) { + } else if (j >= req && uin == nullptr) { break; } } @@ -83,7 +83,7 @@ int PhaseMacroExpand::replace_input(Node *use, Node *oldref, Node *newref) { } void PhaseMacroExpand::migrate_outs(Node *old, Node *target) { - assert(old != NULL, "sanity"); + assert(old != nullptr, "sanity"); for (DUIterator_Fast imax, i = old->fast_outs(imax); i < imax; i++) { Node* use = old->fast_out(i); _igvn.rehash_node_delayed(use); @@ -143,9 +143,9 @@ CallNode* PhaseMacroExpand::make_slow_call(CallNode *oldcall, const TypeFunc* sl // Slow path call has no side-effects, uses few values copy_predefined_input_for_runtime_call(slow_path, oldcall, call ); - if (parm0 != NULL) call->init_req(TypeFunc::Parms+0, parm0); - if (parm1 != NULL) call->init_req(TypeFunc::Parms+1, parm1); - if (parm2 != NULL) call->init_req(TypeFunc::Parms+2, parm2); + if (parm0 != nullptr) call->init_req(TypeFunc::Parms+0, parm0); + if (parm1 != nullptr) call->init_req(TypeFunc::Parms+1, parm1); + if (parm2 != nullptr) call->init_req(TypeFunc::Parms+2, parm2); call->copy_call_debug_info(&_igvn, oldcall); call->set_cnt(PROB_UNLIKELY_MAG(4)); // Same effect as RC_UNCOMMON. 
_igvn.replace_node(oldcall, call); @@ -190,9 +190,9 @@ static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_me } mem = in->in(TypeFunc::Memory); } else if (in->is_MemBar()) { - ArrayCopyNode* ac = NULL; + ArrayCopyNode* ac = nullptr; if (ArrayCopyNode::may_modify(tinst, in->as_MemBar(), phase, ac)) { - if (ac != NULL) { + if (ac != nullptr) { assert(ac->is_clonebasic(), "Only basic clone is a non escaping clone"); return ac; } @@ -230,7 +230,7 @@ static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_me InitializeNode* init = alloc->as_Allocate()->initialization(); // We are looking for stored value, return Initialize node // or memory edge from Allocate node. - if (init != NULL) { + if (init != nullptr) { return init; } else { return alloc->in(TypeFunc::Memory); // It will produce zero value (see callers). @@ -239,7 +239,7 @@ static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_me // Otherwise skip it (the call updated 'mem' value). 
} else if (mem->Opcode() == Op_SCMemProj) { mem = mem->in(0); - Node* adr = NULL; + Node* adr = nullptr; if (mem->is_LoadStore()) { adr = mem->in(MemNode::Address); } else { @@ -252,7 +252,7 @@ static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_me if (adr_idx == alias_idx) { DEBUG_ONLY(mem->dump();) assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field"); - return NULL; + return nullptr; } mem = mem->in(MemNode::Memory); } else if (mem->Opcode() == Op_StrInflatedCopy) { @@ -262,7 +262,7 @@ static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_me if (adr_idx == alias_idx) { DEBUG_ONLY(mem->dump();) assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field"); - return NULL; + return nullptr; } mem = mem->in(MemNode::Memory); } else { @@ -281,7 +281,7 @@ Node* PhaseMacroExpand::make_arraycopy_load(ArrayCopyNode* ac, intptr_t offset, bt = T_OBJECT; type = ftype->make_oopptr(); } - Node* res = NULL; + Node* res = nullptr; if (ac->is_clonebasic()) { assert(ac->in(ArrayCopyNode::Src) != ac->in(ArrayCopyNode::Dest), "clone source equals destination"); Node* base = ac->in(ArrayCopyNode::Src); @@ -299,8 +299,8 @@ Node* PhaseMacroExpand::make_arraycopy_load(ArrayCopyNode* ac, intptr_t offset, const TypeInt* src_pos_t = _igvn.type(src_pos)->is_int(); const TypeInt* dest_pos_t = _igvn.type(dest_pos)->is_int(); - Node* adr = NULL; - const TypePtr* adr_type = NULL; + Node* adr = nullptr; + const TypePtr* adr_type = nullptr; if (src_pos_t->is_con() && dest_pos_t->is_con()) { intptr_t off = ((src_pos_t->get_con() - dest_pos_t->get_con()) << shift) + offset; Node* base = ac->in(ArrayCopyNode::Src); @@ -324,7 +324,7 @@ Node* PhaseMacroExpand::make_arraycopy_load(ArrayCopyNode* ac, intptr_t offset, if (ac->in(ArrayCopyNode::Src) == ac->in(ArrayCopyNode::Dest)) { // Non constant offset in the array: we can't statically // determine the value - return NULL; + return 
nullptr; } } MergeMemNode* mergemen = _igvn.transform(MergeMemNode::make(mem))->as_MergeMem(); @@ -332,21 +332,21 @@ Node* PhaseMacroExpand::make_arraycopy_load(ArrayCopyNode* ac, intptr_t offset, res = ArrayCopyNode::load(bs, &_igvn, ctl, mergemen, adr, adr_type, type, bt); } } - if (res != NULL) { + if (res != nullptr) { if (ftype->isa_narrowoop()) { // PhaseMacroExpand::scalar_replacement adds DecodeN nodes res = _igvn.transform(new EncodePNode(res, ftype)); } return res; } - return NULL; + return nullptr; } // // Given a Memory Phi, compute a value Phi containing the values from stores // on the input paths. // Note: this function is recursive, its depth is limited by the "level" argument -// Returns the computed Phi, or NULL if it cannot compute it. +// Returns the computed Phi, or null if it cannot compute it. Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type *phi_type, const TypeOopPtr *adr_t, AllocateNode *alloc, Node_Stack *value_phis, int level) { assert(mem->is_Phi(), "sanity"); int alias_idx = C->get_alias_index(adr_t); @@ -364,26 +364,26 @@ Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type * } // Check if an appropriate new value phi already exists. 
Node* new_phi = value_phis->find(mem->_idx); - if (new_phi != NULL) + if (new_phi != nullptr) return new_phi; if (level <= 0) { - return NULL; // Give up: phi tree too deep + return nullptr; // Give up: phi tree too deep } Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory); Node *alloc_mem = alloc->in(TypeFunc::Memory); uint length = mem->req(); - GrowableArray values(length, length, NULL); + GrowableArray values(length, length, nullptr); // create a new Phi for the value - PhiNode *phi = new PhiNode(mem->in(0), phi_type, NULL, mem->_idx, instance_id, alias_idx, offset); + PhiNode *phi = new PhiNode(mem->in(0), phi_type, nullptr, mem->_idx, instance_id, alias_idx, offset); transform_later(phi); value_phis->push(phi, mem->_idx); for (uint j = 1; j < length; j++) { Node *in = mem->in(j); - if (in == NULL || in->is_top()) { + if (in == nullptr || in->is_top()) { values.at_put(j, in); } else { Node *val = scan_mem_chain(in, alias_idx, offset, start_mem, alloc, &_igvn); @@ -395,8 +395,8 @@ Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type * if (val->is_Initialize()) { val = val->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn); } - if (val == NULL) { - return NULL; // can't find a value on this path + if (val == nullptr) { + return nullptr; // can't find a value on this path } if (val == mem) { values.at_put(j, mem); @@ -412,8 +412,8 @@ Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type * values.at_put(j, _igvn.zerocon(ft)); } else if (val->is_Phi()) { val = value_from_mem_phi(val, ft, phi_type, adr_t, alloc, value_phis, level-1); - if (val == NULL) { - return NULL; + if (val == nullptr) { + return nullptr; } values.at_put(j, val); } else if (val->Opcode() == Op_SCMemProj) { @@ -421,17 +421,17 @@ Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type * val->in(0)->Opcode() == Op_EncodeISOArray || val->in(0)->Opcode() == Op_StrCompressedCopy, "sanity"); 
assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field"); - return NULL; + return nullptr; } else if (val->is_ArrayCopy()) { Node* res = make_arraycopy_load(val->as_ArrayCopy(), offset, val->in(0), val->in(TypeFunc::Memory), ft, phi_type, alloc); - if (res == NULL) { - return NULL; + if (res == nullptr) { + return nullptr; } values.at_put(j, res); } else { DEBUG_ONLY( val->dump(); ) assert(false, "unknown node on this path"); - return NULL; // unknown node on this path + return nullptr; // unknown node on this path } } } @@ -463,14 +463,14 @@ Node *PhaseMacroExpand::value_from_mem(Node *sfpt_mem, Node *sfpt_ctl, BasicType Node *mem = sfpt_mem; while (!done) { if (visited.test_set(mem->_idx)) { - return NULL; // found a loop, give up + return nullptr; // found a loop, give up } mem = scan_mem_chain(mem, alias_idx, offset, start_mem, alloc, &_igvn); if (mem == start_mem || mem == alloc_mem) { done = true; // hit a sentinel, return appropriate 0 value } else if (mem->is_Initialize()) { mem = mem->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn); - if (mem == NULL) { + if (mem == nullptr) { done = true; // Something go wrong. 
} else if (mem->is_Store()) { const TypePtr* atype = mem->as_Store()->adr_type(); @@ -479,27 +479,27 @@ Node *PhaseMacroExpand::value_from_mem(Node *sfpt_mem, Node *sfpt_ctl, BasicType } } else if (mem->is_Store()) { const TypeOopPtr* atype = mem->as_Store()->adr_type()->isa_oopptr(); - assert(atype != NULL, "address type must be oopptr"); + assert(atype != nullptr, "address type must be oopptr"); assert(C->get_alias_index(atype) == alias_idx && atype->is_known_instance_field() && atype->offset() == offset && atype->instance_id() == instance_id, "store is correct memory slice"); done = true; } else if (mem->is_Phi()) { // try to find a phi's unique input - Node *unique_input = NULL; + Node *unique_input = nullptr; Node *top = C->top(); for (uint i = 1; i < mem->req(); i++) { Node *n = scan_mem_chain(mem->in(i), alias_idx, offset, start_mem, alloc, &_igvn); - if (n == NULL || n == top || n == mem) { + if (n == nullptr || n == top || n == mem) { continue; - } else if (unique_input == NULL) { + } else if (unique_input == nullptr) { unique_input = n; } else if (unique_input != n) { unique_input = top; break; } } - if (unique_input != NULL && unique_input != top) { + if (unique_input != nullptr && unique_input != top) { mem = unique_input; } else { done = true; @@ -511,7 +511,7 @@ Node *PhaseMacroExpand::value_from_mem(Node *sfpt_mem, Node *sfpt_ctl, BasicType assert(false, "unexpected node"); } } - if (mem != NULL) { + if (mem != nullptr) { if (mem == start_mem || mem == alloc_mem) { // hit a sentinel, return appropriate 0 value return _igvn.zerocon(ft); @@ -524,7 +524,7 @@ Node *PhaseMacroExpand::value_from_mem(Node *sfpt_mem, Node *sfpt_ctl, BasicType // attempt to produce a Phi reflecting the values on the input paths of the Phi Node_Stack value_phis(8); Node* phi = value_from_mem_phi(mem, ft, ftype, adr_t, alloc, &value_phis, ValueSearchLimit); - if (phi != NULL) { + if (phi != nullptr) { return phi; } else { // Kill all new Phis @@ -546,27 +546,27 @@ Node 
*PhaseMacroExpand::value_from_mem(Node *sfpt_mem, Node *sfpt_ctl, BasicType } } // Something go wrong. - return NULL; + return nullptr; } // Check the possibility of scalar replacement. bool PhaseMacroExpand::can_eliminate_allocation(AllocateNode *alloc, GrowableArray & safepoints) { // Scan the uses of the allocation to check for anything that would // prevent us from eliminating it. - NOT_PRODUCT( const char* fail_eliminate = NULL; ) - DEBUG_ONLY( Node* disq_node = NULL; ) + NOT_PRODUCT( const char* fail_eliminate = nullptr; ) + DEBUG_ONLY( Node* disq_node = nullptr; ) bool can_eliminate = true; Node* res = alloc->result_cast(); - const TypeOopPtr* res_type = NULL; - if (res == NULL) { + const TypeOopPtr* res_type = nullptr; + if (res == nullptr) { // All users were eliminated. } else if (!res->is_CheckCastPP()) { NOT_PRODUCT(fail_eliminate = "Allocation does not have unique CheckCastPP";) can_eliminate = false; } else { res_type = _igvn.type(res)->isa_oopptr(); - if (res_type == NULL) { + if (res_type == nullptr) { NOT_PRODUCT(fail_eliminate = "Neither instance or array allocation";) can_eliminate = false; } else if (res_type->isa_aryptr()) { @@ -578,7 +578,7 @@ bool PhaseMacroExpand::can_eliminate_allocation(AllocateNode *alloc, GrowableArr } } - if (can_eliminate && res != NULL) { + if (can_eliminate && res != nullptr) { BarrierSetC2 *bs = BarrierSet::barrier_set()->barrier_set_c2(); for (DUIterator_Fast jmax, j = res->fast_outs(jmax); j < jmax && can_eliminate; j++) { @@ -622,9 +622,9 @@ bool PhaseMacroExpand::can_eliminate_allocation(AllocateNode *alloc, GrowableArr can_eliminate = false; } Node* sfptMem = sfpt->memory(); - if (sfptMem == NULL || sfptMem->is_top()) { + if (sfptMem == nullptr || sfptMem->is_top()) { DEBUG_ONLY(disq_node = use;) - NOT_PRODUCT(fail_eliminate = "NULL or TOP memory";) + NOT_PRODUCT(fail_eliminate = "null or TOP memory";) can_eliminate = false; } else { safepoints.append_if_missing(sfpt); @@ -654,18 +654,18 @@ bool 
PhaseMacroExpand::can_eliminate_allocation(AllocateNode *alloc, GrowableArr if (PrintEliminateAllocations) { if (can_eliminate) { tty->print("Scalar "); - if (res == NULL) + if (res == nullptr) alloc->dump(); else res->dump(); } else if (alloc->_is_scalar_replaceable) { tty->print("NotScalar (%s)", fail_eliminate); - if (res == NULL) + if (res == nullptr) alloc->dump(); else res->dump(); #ifdef ASSERT - if (disq_node != NULL) { + if (disq_node != nullptr) { tty->print(" >>>> "); disq_node->dump(); } @@ -680,21 +680,21 @@ bool PhaseMacroExpand::can_eliminate_allocation(AllocateNode *alloc, GrowableArr bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray & safepoints) { GrowableArray safepoints_done; - ciInstanceKlass* iklass = NULL; + ciInstanceKlass* iklass = nullptr; int nfields = 0; int array_base = 0; int element_size = 0; BasicType basic_elem_type = T_ILLEGAL; - const Type* field_type = NULL; + const Type* field_type = nullptr; Node* res = alloc->result_cast(); - assert(res == NULL || res->is_CheckCastPP(), "unexpected AllocateNode result"); - const TypeOopPtr* res_type = NULL; - if (res != NULL) { // Could be NULL when there are no users + assert(res == nullptr || res->is_CheckCastPP(), "unexpected AllocateNode result"); + const TypeOopPtr* res_type = nullptr; + if (res != nullptr) { // Could be null when there are no users res_type = _igvn.type(res)->isa_oopptr(); } - if (res != NULL) { + if (res != nullptr) { if (res_type->isa_instptr()) { // find the fields of the class which will be needed for safepoint debug information iklass = res_type->is_instptr()->instance_klass(); @@ -716,7 +716,7 @@ bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray memory(); Node* ctl = sfpt->control(); - assert(sfpt->jvms() != NULL, "missed JVMS"); + assert(sfpt->jvms() != nullptr, "missed JVMS"); // Fields of scalar objs are referenced only at the end // of regular debuginfo at the last (youngest) JVMS. 
// Record relative start index. @@ -732,8 +732,8 @@ bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray nonstatic_field_at(j); offset = field->offset(); ciType* elem_type = field->type(); @@ -743,12 +743,12 @@ bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray is_loaded()) { field_type = TypeInstPtr::BOTTOM; - } else if (field != NULL && field->is_static_constant()) { + } else if (field != nullptr && field->is_static_constant()) { ciObject* con = field->constant_value().as_object(); // Do not "join" in the previous type; it doesn't add value, // and may yield a vacuous result if the field is of interface type. field_type = TypeOopPtr::make_from_constant(con)->isa_oopptr(); - assert(field_type != NULL, "field singleton type must be consistent"); + assert(field_type != nullptr, "field singleton type must be consistent"); } else { field_type = TypeOopPtr::make_from_klass(elem_type->as_klass()); } @@ -766,7 +766,7 @@ bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray add_offset(offset)->isa_oopptr(); Node *field_val = value_from_mem(mem, ctl, basic_elem_type, field_type, field_addr_type, alloc); - if (field_val == NULL) { + if (field_val == nullptr) { // We weren't able to find a value for this field, // give up on eliminating this allocation. 
@@ -804,7 +804,7 @@ bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray print("=== At SafePoint node %d can't find value of Field: ", sfpt->_idx); field->print(); @@ -815,7 +815,7 @@ bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray _idx, j); } tty->print(", which prevents elimination of: "); - if (res == NULL) + if (res == nullptr) alloc->dump(); else res->dump(); @@ -850,10 +850,10 @@ bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray proj_out_or_null(TypeFunc::Control); Node* mem_proj = n->proj_out_or_null(TypeFunc::Memory); - if (ctl_proj != NULL) { + if (ctl_proj != nullptr) { igvn.replace_node(ctl_proj, n->in(0)); } - if (mem_proj != NULL) { + if (mem_proj != nullptr) { igvn.replace_node(mem_proj, n->in(TypeFunc::Memory)); } } @@ -861,7 +861,7 @@ static void disconnect_projections(MultiNode* n, PhaseIterGVN& igvn) { // Process users of eliminated allocation. void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc) { Node* res = alloc->result_cast(); - if (res != NULL) { + if (res != nullptr) { for (DUIterator_Last jmin, j = res->last_outs(jmin); j >= jmin; ) { Node *use = res->last_out(j); uint oc1 = res->outcnt(); @@ -940,7 +940,7 @@ void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc) { // // Process other users of allocation's projections // - if (_callprojs.resproj != NULL && _callprojs.resproj->outcnt() != 0) { + if (_callprojs.resproj != nullptr && _callprojs.resproj->outcnt() != 0) { // First disconnect stores captured by Initialize node. // If Initialize node is eliminated first in the following code, // it will kill such stores and DUIterator_Last will assert. 
@@ -960,16 +960,16 @@ void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc) { InitializeNode *init = use->as_Initialize(); assert(init->outcnt() <= 2, "only a control and memory projection expected"); Node *ctrl_proj = init->proj_out_or_null(TypeFunc::Control); - if (ctrl_proj != NULL) { + if (ctrl_proj != nullptr) { _igvn.replace_node(ctrl_proj, init->in(TypeFunc::Control)); #ifdef ASSERT - // If the InitializeNode has no memory out, it will die, and tmp will become NULL + // If the InitializeNode has no memory out, it will die, and tmp will become null Node* tmp = init->in(TypeFunc::Control); - assert(tmp == NULL || tmp == _callprojs.fallthrough_catchproj, "allocation control projection"); + assert(tmp == nullptr || tmp == _callprojs.fallthrough_catchproj, "allocation control projection"); #endif } Node *mem_proj = init->proj_out_or_null(TypeFunc::Memory); - if (mem_proj != NULL) { + if (mem_proj != nullptr) { Node *mem = init->in(TypeFunc::Memory); #ifdef ASSERT if (mem->is_MergeMem()) { @@ -986,22 +986,22 @@ void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc) { j -= (oc1 - _callprojs.resproj->outcnt()); } } - if (_callprojs.fallthrough_catchproj != NULL) { + if (_callprojs.fallthrough_catchproj != nullptr) { _igvn.replace_node(_callprojs.fallthrough_catchproj, alloc->in(TypeFunc::Control)); } - if (_callprojs.fallthrough_memproj != NULL) { + if (_callprojs.fallthrough_memproj != nullptr) { _igvn.replace_node(_callprojs.fallthrough_memproj, alloc->in(TypeFunc::Memory)); } - if (_callprojs.catchall_memproj != NULL) { + if (_callprojs.catchall_memproj != nullptr) { _igvn.replace_node(_callprojs.catchall_memproj, C->top()); } - if (_callprojs.fallthrough_ioproj != NULL) { + if (_callprojs.fallthrough_ioproj != nullptr) { _igvn.replace_node(_callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O)); } - if (_callprojs.catchall_ioproj != NULL) { + if (_callprojs.catchall_ioproj != nullptr) { _igvn.replace_node(_callprojs.catchall_ioproj, 
C->top()); } - if (_callprojs.catchall_catchproj != NULL) { + if (_callprojs.catchall_catchproj != nullptr) { _igvn.replace_node(_callprojs.catchall_catchproj, C->top()); } } @@ -1023,7 +1023,7 @@ bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) { bool boxing_alloc = C->eliminate_boxing() && tklass->isa_instklassptr() && tklass->is_instklassptr()->instance_klass()->is_box_klass(); - if (!alloc->_is_scalar_replaceable && (!boxing_alloc || (res != NULL))) { + if (!alloc->_is_scalar_replaceable && (!boxing_alloc || (res != nullptr))) { return false; } @@ -1035,7 +1035,7 @@ bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) { } if (!alloc->_is_scalar_replaceable) { - assert(res == NULL, "sanity"); + assert(res == nullptr, "sanity"); // We can only eliminate allocation if all debug info references // are already replaced with SafePointScalarObject because // we can't search for a fields value without instance_id. @@ -1049,11 +1049,11 @@ bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) { } CompileLog* log = C->log(); - if (log != NULL) { + if (log != nullptr) { log->head("eliminate_allocation type='%d'", log->identify(tklass->exact_klass())); JVMState* p = alloc->jvms(); - while (p != NULL) { + while (p != nullptr) { log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method())); p = p->caller(); } @@ -1076,25 +1076,25 @@ bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) { bool PhaseMacroExpand::eliminate_boxing_node(CallStaticJavaNode *boxing) { // EA should remove all uses of non-escaping boxing node. 
- if (!C->eliminate_boxing() || boxing->proj_out_or_null(TypeFunc::Parms) != NULL) { + if (!C->eliminate_boxing() || boxing->proj_out_or_null(TypeFunc::Parms) != nullptr) { return false; } - assert(boxing->result_cast() == NULL, "unexpected boxing node result"); + assert(boxing->result_cast() == nullptr, "unexpected boxing node result"); boxing->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/); const TypeTuple* r = boxing->tf()->range(); assert(r->cnt() > TypeFunc::Parms, "sanity"); const TypeInstPtr* t = r->field_at(TypeFunc::Parms)->isa_instptr(); - assert(t != NULL, "sanity"); + assert(t != nullptr, "sanity"); CompileLog* log = C->log(); - if (log != NULL) { + if (log != nullptr) { log->head("eliminate_boxing type='%d'", log->identify(t->instance_klass())); JVMState* p = boxing->jvms(); - while (p != NULL) { + while (p != nullptr) { log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method())); p = p->caller(); } @@ -1126,7 +1126,7 @@ Node* PhaseMacroExpand::make_load(Node* ctl, Node* mem, Node* base, int offset, Node* PhaseMacroExpand::make_store(Node* ctl, Node* mem, Node* base, int offset, Node* value, BasicType bt) { Node* adr = basic_plus_adr(base, offset); - mem = StoreNode::make(_igvn, ctl, mem, adr, NULL, value, bt, MemNode::unordered); + mem = StoreNode::make(_igvn, ctl, mem, adr, nullptr, value, bt, MemNode::unordered); transform_later(mem); return mem; } @@ -1199,15 +1199,15 @@ void PhaseMacroExpand::expand_allocate_common( Node* size_in_bytes = alloc->in(AllocateNode::AllocSize); Node* klass_node = alloc->in(AllocateNode::KlassNode); Node* initial_slow_test = alloc->in(AllocateNode::InitialTest); - assert(ctrl != NULL, "must have control"); + assert(ctrl != nullptr, "must have control"); // We need a Region and corresponding Phi's to merge the slow-path and fast-path results. 
// they will not be used if "always_slow" is set enum { slow_result_path = 1, fast_result_path = 2 }; - Node *result_region = NULL; - Node *result_phi_rawmem = NULL; - Node *result_phi_rawoop = NULL; - Node *result_phi_i_o = NULL; + Node *result_region = nullptr; + Node *result_phi_rawmem = nullptr; + Node *result_phi_rawoop = nullptr; + Node *result_phi_i_o = nullptr; // The initial slow comparison is a size check, the comparison // we want to do is a BoolTest::gt @@ -1219,7 +1219,7 @@ void PhaseMacroExpand::expand_allocate_common( // 1 - always too big or negative assert(tv <= 1, "0 or 1 if a constant"); expand_fast_path = (tv == 0); - initial_slow_test = NULL; + initial_slow_test = nullptr; } else { initial_slow_test = BoolNode::make_predicate(initial_slow_test, &_igvn); } @@ -1227,16 +1227,16 @@ void PhaseMacroExpand::expand_allocate_common( if (!UseTLAB) { // Force slow-path allocation expand_fast_path = false; - initial_slow_test = NULL; + initial_slow_test = nullptr; } - bool allocation_has_use = (alloc->result_cast() != NULL); + bool allocation_has_use = (alloc->result_cast() != nullptr); if (!allocation_has_use) { InitializeNode* init = alloc->initialization(); - if (init != NULL) { + if (init != nullptr) { init->remove(&_igvn); } - if (expand_fast_path && (initial_slow_test == NULL)) { + if (expand_fast_path && (initial_slow_test == nullptr)) { // Remove allocation node and return. // Size is a non-negative constant -> no initial check needed -> directly to fast path. // Also, no usages -> empty fast path -> no fall out to slow path -> nothing left. 
@@ -1244,7 +1244,7 @@ void PhaseMacroExpand::expand_allocate_common( if (PrintEliminateAllocations) { tty->print("NotUsed "); Node* res = alloc->proj_out_or_null(TypeFunc::Parms); - if (res != NULL) { + if (res != nullptr) { res->dump(); } else { alloc->dump(); @@ -1257,11 +1257,11 @@ void PhaseMacroExpand::expand_allocate_common( } enum { too_big_or_final_path = 1, need_gc_path = 2 }; - Node *slow_region = NULL; + Node *slow_region = nullptr; Node *toobig_false = ctrl; // generate the initial test if necessary - if (initial_slow_test != NULL ) { + if (initial_slow_test != nullptr ) { assert (expand_fast_path, "Only need test if there is a fast path"); slow_region = new RegionNode(3); @@ -1314,16 +1314,16 @@ void PhaseMacroExpand::expand_allocate_common( Node* fast_oop_ctrl; Node* fast_oop_rawmem; if (allocation_has_use) { - Node* needgc_ctrl = NULL; + Node* needgc_ctrl = nullptr; result_phi_rawoop = new PhiNode(result_region, TypeRawPtr::BOTTOM); - intx prefetch_lines = length != NULL ? AllocatePrefetchLines : AllocateInstancePrefetchLines; + intx prefetch_lines = length != nullptr ? 
AllocatePrefetchLines : AllocateInstancePrefetchLines; BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); Node* fast_oop = bs->obj_allocate(this, mem, toobig_false, size_in_bytes, i_o, needgc_ctrl, fast_oop_ctrl, fast_oop_rawmem, prefetch_lines); - if (initial_slow_test != NULL) { + if (initial_slow_test != nullptr) { // This completes all paths into the slow merge point slow_region->init_req(need_gc_path, needgc_ctrl); transform_later(slow_region); @@ -1342,7 +1342,7 @@ void PhaseMacroExpand::expand_allocate_common( result_phi_rawoop->init_req(fast_result_path, fast_oop); } else { - assert (initial_slow_test != NULL, "sanity"); + assert (initial_slow_test != nullptr, "sanity"); fast_oop_ctrl = toobig_false; fast_oop_rawmem = mem; transform_later(slow_region); @@ -1368,7 +1368,7 @@ void PhaseMacroExpand::expand_allocate_common( call->init_req(TypeFunc::FramePtr, alloc->in(TypeFunc::FramePtr)); call->init_req(TypeFunc::Parms+0, klass_node); - if (length != NULL) { + if (length != nullptr) { call->init_req(TypeFunc::Parms+1, length); } @@ -1378,7 +1378,7 @@ void PhaseMacroExpand::expand_allocate_common( // For array allocations, copy the valid length check to the call node so Compile::final_graph_reshaping() can verify // that the call has the expected number of CatchProj nodes (in case the allocation always fails and the fallthrough // path dies). - if (valid_length_test != NULL) { + if (valid_length_test != nullptr) { call->add_req(valid_length_test); } if (expand_fast_path) { @@ -1407,13 +1407,13 @@ void PhaseMacroExpand::expand_allocate_common( // the control and i_o paths. 
Replace the control memory projection with // result_phi_rawmem (unless we are only generating a slow call when // both memory projections are combined) - if (expand_fast_path && _callprojs.fallthrough_memproj != NULL) { + if (expand_fast_path && _callprojs.fallthrough_memproj != nullptr) { migrate_outs(_callprojs.fallthrough_memproj, result_phi_rawmem); } // Now change uses of catchall_memproj to use fallthrough_memproj and delete // catchall_memproj so we end up with a call that has only 1 memory projection. - if (_callprojs.catchall_memproj != NULL ) { - if (_callprojs.fallthrough_memproj == NULL) { + if (_callprojs.catchall_memproj != nullptr ) { + if (_callprojs.fallthrough_memproj == nullptr) { _callprojs.fallthrough_memproj = new ProjNode(call, TypeFunc::Memory); transform_later(_callprojs.fallthrough_memproj); } @@ -1426,13 +1426,13 @@ void PhaseMacroExpand::expand_allocate_common( // otherwise incoming i_o become dead when only a slow call is generated // (it is different from memory projections where both projections are // combined in such case). - if (_callprojs.fallthrough_ioproj != NULL) { + if (_callprojs.fallthrough_ioproj != nullptr) { migrate_outs(_callprojs.fallthrough_ioproj, result_phi_i_o); } // Now change uses of catchall_ioproj to use fallthrough_ioproj and delete // catchall_ioproj so we end up with a call that has only 1 i_o projection. 
- if (_callprojs.catchall_ioproj != NULL ) { - if (_callprojs.fallthrough_ioproj == NULL) { + if (_callprojs.catchall_ioproj != nullptr ) { + if (_callprojs.fallthrough_ioproj == nullptr) { _callprojs.fallthrough_ioproj = new ProjNode(call, TypeFunc::I_O); transform_later(_callprojs.fallthrough_ioproj); } @@ -1456,7 +1456,7 @@ void PhaseMacroExpand::expand_allocate_common( return; } - if (_callprojs.fallthrough_catchproj != NULL) { + if (_callprojs.fallthrough_catchproj != nullptr) { ctrl = _callprojs.fallthrough_catchproj->clone(); transform_later(ctrl); _igvn.replace_node(_callprojs.fallthrough_catchproj, result_region); @@ -1464,7 +1464,7 @@ void PhaseMacroExpand::expand_allocate_common( ctrl = top(); } Node *slow_result; - if (_callprojs.resproj == NULL) { + if (_callprojs.resproj == nullptr) { // no uses of the allocation result slow_result = top(); } else { @@ -1493,7 +1493,7 @@ void PhaseMacroExpand::yank_alloc_node(AllocateNode* alloc) { Node* i_o = alloc->in(TypeFunc::I_O); alloc->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/); - if (_callprojs.resproj != NULL) { + if (_callprojs.resproj != nullptr) { for (DUIterator_Fast imax, i = _callprojs.resproj->fast_outs(imax); i < imax; i++) { Node* use = _callprojs.resproj->fast_out(i); use->isa_MemBar()->remove(&_igvn); @@ -1503,32 +1503,32 @@ void PhaseMacroExpand::yank_alloc_node(AllocateNode* alloc) { assert(_callprojs.resproj->outcnt() == 0, "all uses must be deleted"); _igvn.remove_dead_node(_callprojs.resproj); } - if (_callprojs.fallthrough_catchproj != NULL) { + if (_callprojs.fallthrough_catchproj != nullptr) { migrate_outs(_callprojs.fallthrough_catchproj, ctrl); _igvn.remove_dead_node(_callprojs.fallthrough_catchproj); } - if (_callprojs.catchall_catchproj != NULL) { + if (_callprojs.catchall_catchproj != nullptr) { _igvn.rehash_node_delayed(_callprojs.catchall_catchproj); _callprojs.catchall_catchproj->set_req(0, top()); } - if (_callprojs.fallthrough_proj != NULL) 
{ + if (_callprojs.fallthrough_proj != nullptr) { Node* catchnode = _callprojs.fallthrough_proj->unique_ctrl_out(); _igvn.remove_dead_node(catchnode); _igvn.remove_dead_node(_callprojs.fallthrough_proj); } - if (_callprojs.fallthrough_memproj != NULL) { + if (_callprojs.fallthrough_memproj != nullptr) { migrate_outs(_callprojs.fallthrough_memproj, mem); _igvn.remove_dead_node(_callprojs.fallthrough_memproj); } - if (_callprojs.fallthrough_ioproj != NULL) { + if (_callprojs.fallthrough_ioproj != nullptr) { migrate_outs(_callprojs.fallthrough_ioproj, i_o); _igvn.remove_dead_node(_callprojs.fallthrough_ioproj); } - if (_callprojs.catchall_memproj != NULL) { + if (_callprojs.catchall_memproj != nullptr) { _igvn.rehash_node_delayed(_callprojs.catchall_memproj); _callprojs.catchall_memproj->set_req(0, top()); } - if (_callprojs.catchall_ioproj != NULL) { + if (_callprojs.catchall_ioproj != nullptr) { _igvn.rehash_node_delayed(_callprojs.catchall_ioproj); _callprojs.catchall_ioproj->set_req(0, top()); } @@ -1562,8 +1562,8 @@ void PhaseMacroExpand::expand_initialize_membar(AllocateNode* alloc, InitializeN // not escape. if (!alloc->does_not_escape_thread() && !alloc->is_allocation_MemBar_redundant() && - (init == NULL || !init->is_complete_with_arraycopy())) { - if (init == NULL || init->req() < InitializeNode::RawStores) { + (init == nullptr || !init->is_complete_with_arraycopy())) { + if (init == nullptr || init->req() < InitializeNode::RawStores) { // No InitializeNode or no stores captured by zeroing // elimination. Simply add the MemBarStoreStore after object // initialization. 
@@ -1606,10 +1606,10 @@ void PhaseMacroExpand::expand_initialize_membar(AllocateNode* alloc, InitializeN // All nodes that depended on the InitializeNode for control // and memory must now depend on the MemBarNode that itself // depends on the InitializeNode - if (init_ctrl != NULL) { + if (init_ctrl != nullptr) { _igvn.replace_node(init_ctrl, ctrl); } - if (init_mem != NULL) { + if (init_mem != nullptr) { _igvn.replace_node(init_mem, mem); } } @@ -1665,7 +1665,7 @@ PhaseMacroExpand::initialize_object(AllocateNode* alloc, int header_size = alloc->minimum_header_size(); // conservatively small // Array length - if (length != NULL) { // Arrays need length field + if (length != nullptr) { // Arrays need length field rawmem = make_store(control, rawmem, object, arrayOopDesc::length_offset_in_bytes(), length, T_INT); // conservatively small header size: header_size = arrayOopDesc::base_offset_in_bytes(T_BYTE); @@ -1679,7 +1679,7 @@ PhaseMacroExpand::initialize_object(AllocateNode* alloc, } // Clear the object body, if necessary. - if (init == NULL) { + if (init == nullptr) { // The init has somehow disappeared; be cautious and clear everything. 
// // This can happen if a node is allocated but an uncommon trap occurs @@ -1865,9 +1865,9 @@ Node* PhaseMacroExpand::prefetch_allocation(Node* i_o, Node*& needgc_false, void PhaseMacroExpand::expand_allocate(AllocateNode *alloc) { - expand_allocate_common(alloc, NULL, + expand_allocate_common(alloc, nullptr, OptoRuntime::new_instance_Type(), - OptoRuntime::new_instance_Java(), NULL); + OptoRuntime::new_instance_Java(), nullptr); } void PhaseMacroExpand::expand_allocate_array(AllocateArrayNode *alloc) { @@ -1877,8 +1877,8 @@ void PhaseMacroExpand::expand_allocate_array(AllocateArrayNode *alloc) { Node* klass_node = alloc->in(AllocateNode::KlassNode); const TypeAryKlassPtr* ary_klass_t = _igvn.type(klass_node)->isa_aryklassptr(); address slow_call_address; // Address of slow call - if (init != NULL && init->is_complete_with_arraycopy() && - ary_klass_t && ary_klass_t->elem()->isa_klassptr() == NULL) { + if (init != nullptr && init->is_complete_with_arraycopy() && + ary_klass_t && ary_klass_t->elem()->isa_klassptr() == nullptr) { // Don't zero type array during slow allocation in VM since // it will be initialized later by arraycopy in compiled code. slow_call_address = OptoRuntime::new_array_nozero_Java(); @@ -1911,7 +1911,7 @@ void PhaseMacroExpand::mark_eliminated_box(Node* oldbox, Node* obj) { // eliminated even if different objects are referenced in one locked region // (for example, OSR compilation of nested loop inside locked scope). if (EliminateNestedLocks || - oldbox->as_BoxLock()->is_simple_lock_region(NULL, obj, NULL)) { + oldbox->as_BoxLock()->is_simple_lock_region(nullptr, obj, nullptr)) { // Box is used only in one lock region. Mark this box as eliminated. 
_igvn.hash_delete(oldbox); oldbox->as_BoxLock()->set_eliminated(); // This changes box's hash value @@ -2003,7 +2003,7 @@ void PhaseMacroExpand::mark_eliminated_locking_nodes(AbstractLockNode *alock) { } else if (!alock->is_non_esc_obj()) { // Not eliminated or coarsened // Only Lock node has JVMState needed here. // Not that preceding claim is documented anywhere else. - if (alock->jvms() != NULL) { + if (alock->jvms() != nullptr) { if (alock->as_Lock()->is_nested_lock_region()) { // Mark eliminated related nested locks and unlocks. Node* obj = alock->obj_node(); @@ -2031,7 +2031,7 @@ void PhaseMacroExpand::mark_eliminated_locking_nodes(AbstractLockNode *alock) { } else { #ifdef ASSERT alock->log_lock_optimization(C, "eliminate_lock_NOT_nested_lock_region"); - if (C->log() != NULL) + if (C->log() != nullptr) alock->as_Lock()->is_nested_lock_region(C); // rerun for debugging output #endif } @@ -2089,14 +2089,14 @@ bool PhaseMacroExpand::eliminate_locking_node(AbstractLockNode *alock) { Node* mem = alock->in(TypeFunc::Memory); Node* ctrl = alock->in(TypeFunc::Control); - guarantee(ctrl != NULL, "missing control projection, cannot replace_node() with NULL"); + guarantee(ctrl != nullptr, "missing control projection, cannot replace_node() with null"); alock->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/); // There are 2 projections from the lock. The lock node will // be deleted when its last use is subsumed below. assert(alock->outcnt() == 2 && - _callprojs.fallthrough_proj != NULL && - _callprojs.fallthrough_memproj != NULL, + _callprojs.fallthrough_proj != nullptr && + _callprojs.fallthrough_memproj != nullptr, "Unexpected projections from Lock/Unlock"); Node* fallthroughproj = _callprojs.fallthrough_proj; @@ -2108,7 +2108,7 @@ bool PhaseMacroExpand::eliminate_locking_node(AbstractLockNode *alock) { if (alock->is_Lock()) { // Search for MemBarAcquireLock node and delete it also. 
MemBarNode* membar = fallthroughproj->unique_ctrl_out()->as_MemBar(); - assert(membar != NULL && membar->Opcode() == Op_MemBarAcquireLock, ""); + assert(membar != nullptr && membar->Opcode() == Op_MemBarAcquireLock, ""); Node* ctrlproj = membar->proj_out(TypeFunc::Control); Node* memproj = membar->proj_out(TypeFunc::Memory); _igvn.replace_node(ctrlproj, fallthroughproj); @@ -2168,8 +2168,8 @@ void PhaseMacroExpand::expand_lock_node(LockNode *lock) { // Make slow path call CallNode *call = make_slow_call((CallNode *) lock, OptoRuntime::complete_monitor_enter_Type(), - OptoRuntime::complete_monitor_locking_Java(), NULL, slow_path, - obj, box, NULL); + OptoRuntime::complete_monitor_locking_Java(), nullptr, slow_path, + obj, box, nullptr); call->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/); @@ -2177,8 +2177,8 @@ void PhaseMacroExpand::expand_lock_node(LockNode *lock) { // de-opted. So the compiler thinks the slow-call can never throw an // exception. If it DOES throw an exception we would need the debug // info removed first (since if it throws there is no monitor). 
- assert(_callprojs.fallthrough_ioproj == NULL && _callprojs.catchall_ioproj == NULL && - _callprojs.catchall_memproj == NULL && _callprojs.catchall_catchproj == NULL, "Unexpected projection from Lock"); + assert(_callprojs.fallthrough_ioproj == nullptr && _callprojs.catchall_ioproj == nullptr && + _callprojs.catchall_memproj == nullptr && _callprojs.catchall_catchproj == nullptr, "Unexpected projection from Lock"); // Capture slow path // disconnect fall-through projection from call and create a new one @@ -2232,8 +2232,8 @@ void PhaseMacroExpand::expand_unlock_node(UnlockNode *unlock) { "complete_monitor_unlocking_C", slow_path, obj, box, thread); call->extract_projections(&_callprojs, false /*separate_io_proj*/, false /*do_asserts*/); - assert(_callprojs.fallthrough_ioproj == NULL && _callprojs.catchall_ioproj == NULL && - _callprojs.catchall_memproj == NULL && _callprojs.catchall_catchproj == NULL, "Unexpected projection from Lock"); + assert(_callprojs.fallthrough_ioproj == nullptr && _callprojs.catchall_ioproj == nullptr && + _callprojs.catchall_memproj == nullptr && _callprojs.catchall_catchproj == nullptr, "Unexpected projection from Lock"); // No exceptions for unlocking // Capture slow path @@ -2257,7 +2257,7 @@ void PhaseMacroExpand::expand_unlock_node(UnlockNode *unlock) { } void PhaseMacroExpand::expand_subtypecheck_node(SubTypeCheckNode *check) { - assert(check->in(SubTypeCheckNode::Control) == NULL, "should be pinned"); + assert(check->in(SubTypeCheckNode::Control) == nullptr, "should be pinned"); Node* bol = check->unique_out(); Node* obj_or_subklass = check->in(SubTypeCheckNode::ObjOrSubKlass); Node* superklass = check->in(SubTypeCheckNode::SuperKlass); @@ -2276,15 +2276,15 @@ void PhaseMacroExpand::expand_subtypecheck_node(SubTypeCheckNode *check) { Node* iffalse = iff->as_If()->proj_out(0); Node* ctrl = iff->in(0); - Node* subklass = NULL; + Node* subklass = nullptr; if (_igvn.type(obj_or_subklass)->isa_klassptr()) { subklass = obj_or_subklass; } 
else { Node* k_adr = basic_plus_adr(obj_or_subklass, oopDesc::klass_offset_in_bytes()); - subklass = _igvn.transform(LoadKlassNode::make(_igvn, NULL, C->immutable_memory(), k_adr, TypeInstPtr::KLASS)); + subklass = _igvn.transform(LoadKlassNode::make(_igvn, nullptr, C->immutable_memory(), k_adr, TypeInstPtr::KLASS)); } - Node* not_subtype_ctrl = Phase::gen_subtype_check(subklass, superklass, &ctrl, NULL, _igvn); + Node* not_subtype_ctrl = Phase::gen_subtype_check(subklass, superklass, &ctrl, nullptr, _igvn); _igvn.replace_input_of(iff, 0, C->top()); _igvn.replace_node(iftrue, not_subtype_ctrl); @@ -2429,7 +2429,7 @@ bool PhaseMacroExpand::expand_macro_nodes() { (bol->_test._test == BoolTest::ne), ""); IfNode* ifn = bol->unique_out()->as_If(); assert((ifn->outcnt() == 2) && - ifn->proj_out(1)->is_uncommon_trap_proj(Deoptimization::Reason_rtm_state_change) != NULL, ""); + ifn->proj_out(1)->is_uncommon_trap_proj(Deoptimization::Reason_rtm_state_change) != nullptr, ""); #endif Node* repl = n->in(1); if (!_has_locks) { @@ -2486,7 +2486,7 @@ bool PhaseMacroExpand::expand_macro_nodes() { int macro_count = C->macro_count(); Node * n = C->macro_node(macro_count-1); assert(n->is_macro(), "only macro nodes expected here"); - if (_igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())) { + if (_igvn.type(n) == Type::TOP || (n->in(0) != nullptr && n->in(0)->is_top())) { // node is unreachable, so don't try to expand it C->remove_macro_node(n); continue; @@ -2539,7 +2539,7 @@ bool PhaseMacroExpand::expand_macro_nodes() { int macro_count = C->macro_count(); Node * n = C->macro_node(macro_count-1); assert(n->is_macro(), "only macro nodes expected here"); - if (_igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())) { + if (_igvn.type(n) == Type::TOP || (n->in(0) != nullptr && n->in(0)->is_top())) { // node is unreachable, so don't try to expand it C->remove_macro_node(n); continue; @@ -2594,7 +2594,7 @@ int PhaseMacroExpand::count_MemBar(Compile *C) 
{ } Unique_Node_List ideal_nodes; int total = 0; - ideal_nodes.map(C->live_nodes(), NULL); + ideal_nodes.map(C->live_nodes(), nullptr); ideal_nodes.push(C->root()); for (uint next = 0; next < ideal_nodes.size(); ++next) { Node* n = ideal_nodes.at(next); diff --git a/src/hotspot/share/opto/macro.hpp b/src/hotspot/share/opto/macro.hpp index 39de431fea7..239c2af7143 100644 --- a/src/hotspot/share/opto/macro.hpp +++ b/src/hotspot/share/opto/macro.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -67,10 +67,10 @@ public: const TypeFunc* call_type, address call_addr, const char* call_name, const TypePtr* adr_type, - Node* parm0 = NULL, Node* parm1 = NULL, - Node* parm2 = NULL, Node* parm3 = NULL, - Node* parm4 = NULL, Node* parm5 = NULL, - Node* parm6 = NULL, Node* parm7 = NULL); + Node* parm0 = nullptr, Node* parm1 = nullptr, + Node* parm2 = nullptr, Node* parm3 = nullptr, + Node* parm4 = nullptr, Node* parm5 = nullptr, + Node* parm6 = nullptr, Node* parm7 = nullptr); address basictype2arraycopy(BasicType t, Node* src_offset, @@ -111,7 +111,7 @@ private: void expand_unlock_node(UnlockNode *unlock); // More helper methods modeled after GraphKit for array copy - void insert_mem_bar(Node** ctrl, Node** mem, int opcode, Node* precedent = NULL); + void insert_mem_bar(Node** ctrl, Node** mem, int opcode, Node* precedent = nullptr); Node* array_element_address(Node* ary, Node* idx, BasicType elembt); Node* ConvI2L(Node* offset); @@ -139,7 +139,7 @@ private: Node* copy_length, bool disjoint_bases = false, bool length_never_negative = false, - RegionNode* slow_region = NULL); + RegionNode* slow_region = nullptr); void generate_clear_array(Node* ctrl, MergeMemNode* merge_mem, const TypePtr* adr_type, Node* 
dest, diff --git a/src/hotspot/share/opto/macroArrayCopy.cpp b/src/hotspot/share/opto/macroArrayCopy.cpp index 83a6a2d6fa1..eb4fca426da 100644 --- a/src/hotspot/share/opto/macroArrayCopy.cpp +++ b/src/hotspot/share/opto/macroArrayCopy.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -83,17 +83,17 @@ Node* PhaseMacroExpand::make_leaf_call(Node* ctrl, Node* mem, call->init_req(TypeFunc::ReturnAdr, top()); call->init_req(TypeFunc::FramePtr, top()); - // Hook each parm in order. Stop looking at the first NULL. - if (parm0 != NULL) { call->init_req(TypeFunc::Parms+0, parm0); - if (parm1 != NULL) { call->init_req(TypeFunc::Parms+1, parm1); - if (parm2 != NULL) { call->init_req(TypeFunc::Parms+2, parm2); - if (parm3 != NULL) { call->init_req(TypeFunc::Parms+3, parm3); - if (parm4 != NULL) { call->init_req(TypeFunc::Parms+4, parm4); - if (parm5 != NULL) { call->init_req(TypeFunc::Parms+5, parm5); - if (parm6 != NULL) { call->init_req(TypeFunc::Parms+6, parm6); - if (parm7 != NULL) { call->init_req(TypeFunc::Parms+7, parm7); + // Hook each parm in order. Stop looking at the first null. 
+ if (parm0 != nullptr) { call->init_req(TypeFunc::Parms+0, parm0); + if (parm1 != nullptr) { call->init_req(TypeFunc::Parms+1, parm1); + if (parm2 != nullptr) { call->init_req(TypeFunc::Parms+2, parm2); + if (parm3 != nullptr) { call->init_req(TypeFunc::Parms+3, parm3); + if (parm4 != nullptr) { call->init_req(TypeFunc::Parms+4, parm4); + if (parm5 != nullptr) { call->init_req(TypeFunc::Parms+5, parm5); + if (parm6 != nullptr) { call->init_req(TypeFunc::Parms+6, parm6); + if (parm7 != nullptr) { call->init_req(TypeFunc::Parms+7, parm7); /* close each nested if ===> */ } } } } } } } } - assert(call->in(call->req()-1) != NULL, "must initialize all parms"); + assert(call->in(call->req()-1) != nullptr, "must initialize all parms"); return call; } @@ -106,19 +106,19 @@ Node* PhaseMacroExpand::make_leaf_call(Node* ctrl, Node* mem, // In all cases, GraphKit::control() is updated to the fast path. // The returned value represents the control for the slow path. // The return value is never 'top'; it is either a valid control -// or NULL if it is obvious that the slow path can never be taken. -// Also, if region and the slow control are not NULL, the slow edge +// or null if it is obvious that the slow path can never be taken. +// Also, if region and the slow control are not null, the slow edge // is appended to the region. Node* PhaseMacroExpand::generate_guard(Node** ctrl, Node* test, RegionNode* region, float true_prob) { if ((*ctrl)->is_top()) { // Already short circuited. - return NULL; + return nullptr; } // Build an if node and its projections. // If test is true we take the slow path, which we assume is uncommon. if (_igvn.type(test) == TypeInt::ZERO) { // The slow branch is never taken. No need to build this guard. 
- return NULL; + return nullptr; } IfNode* iff = new IfNode(*ctrl, test, true_prob, COUNT_UNKNOWN); @@ -127,7 +127,7 @@ Node* PhaseMacroExpand::generate_guard(Node** ctrl, Node* test, RegionNode* regi Node* if_slow = new IfTrueNode(iff); transform_later(if_slow); - if (region != NULL) { + if (region != nullptr) { region->add_req(if_slow); } @@ -199,11 +199,11 @@ void PhaseMacroExpand::generate_partial_inlining_block(Node** ctrl, MergeMemNode RegionNode** exit_block, Node** result_memory, Node* length, Node* src_start, Node* dst_start, BasicType type) { const TypePtr *src_adr_type = _igvn.type(src_start)->isa_ptr(); - Node* inline_block = NULL; - Node* stub_block = NULL; + Node* inline_block = nullptr; + Node* stub_block = nullptr; int const_len = -1; - const TypeInt* lty = NULL; + const TypeInt* lty = nullptr; uint shift = exact_log2(type2aelembytes(type)); if (length->Opcode() == Op_ConvI2L) { lty = _igvn.type(length->in(1))->isa_int(); @@ -234,7 +234,7 @@ void PhaseMacroExpand::generate_partial_inlining_block(Node** ctrl, MergeMemNode transform_later(cmp_le); Node* bol_le = new BoolNode(cmp_le, BoolTest::le); transform_later(bol_le); - inline_block = generate_guard(ctrl, bol_le, NULL, PROB_FAIR); + inline_block = generate_guard(ctrl, bol_le, nullptr, PROB_FAIR); stub_block = *ctrl; Node* mask_gen = VectorMaskGenNode::make(casted_length, type); @@ -269,16 +269,16 @@ void PhaseMacroExpand::generate_partial_inlining_block(Node** ctrl, MergeMemNode Node* PhaseMacroExpand::generate_nonpositive_guard(Node** ctrl, Node* index, bool never_negative) { - if ((*ctrl)->is_top()) return NULL; + if ((*ctrl)->is_top()) return nullptr; if (_igvn.type(index)->higher_equal(TypeInt::POS1)) // [1,maxint] - return NULL; // index is already adequately typed + return nullptr; // index is already adequately typed Node* cmp_le = new CmpINode(index, intcon(0)); transform_later(cmp_le); BoolTest::mask le_or_eq = (never_negative ? 
BoolTest::eq : BoolTest::le); Node* bol_le = new BoolNode(cmp_le, le_or_eq); transform_later(bol_le); - Node* is_notp = generate_guard(ctrl, bol_le, NULL, PROB_MIN); + Node* is_notp = generate_guard(ctrl, bol_le, nullptr, PROB_MIN); return is_notp; } @@ -318,8 +318,8 @@ address PhaseMacroExpand::basictype2arraycopy(BasicType t, // or they are identical (which we can treat as disjoint.) We can also // treat a copy with a destination index less that the source index // as disjoint since a low->high copy will work correctly in this case. - if (src_offset_inttype != NULL && src_offset_inttype->is_con() && - dest_offset_inttype != NULL && dest_offset_inttype->is_con()) { + if (src_offset_inttype != nullptr && src_offset_inttype->is_con() && + dest_offset_inttype != nullptr && dest_offset_inttype->is_con()) { // both indices are constants int s_offs = src_offset_inttype->get_con(); int d_offs = dest_offset_inttype->get_con(); @@ -327,7 +327,7 @@ address PhaseMacroExpand::basictype2arraycopy(BasicType t, aligned = ((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) && ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0); if (s_offs >= d_offs) disjoint = true; - } else if (src_offset == dest_offset && src_offset != NULL) { + } else if (src_offset == dest_offset && src_offset != nullptr) { // This can occur if the offsets are identical non-constants. 
disjoint = true; } @@ -380,7 +380,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode* bool disjoint_bases, bool length_never_negative, RegionNode* slow_region) { - if (slow_region == NULL) { + if (slow_region == nullptr) { slow_region = new RegionNode(1); transform_later(slow_region); } @@ -398,7 +398,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode* && !(UseTLAB && ZeroTLAB) // pointless if already zeroed && basic_elem_type != T_CONFLICT // avoid corner case && !src->eqv_uncast(dest) - && alloc != NULL + && alloc != nullptr && _igvn.find_int_con(alloc->in(AllocateNode::ALength), 1) > 0) { assert(ac->is_alloc_tightly_coupled(), "sanity"); // acopy to uninitialized tightly coupled allocations @@ -423,7 +423,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode* } } else { // No zeroing elimination needed here. - alloc = NULL; + alloc = nullptr; acopy_to_uninitialized = false; //original_dest = dest; //dest_needs_zeroing = false; @@ -455,9 +455,9 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode* // Checked control path: Node* checked_control = top(); - Node* checked_mem = NULL; - Node* checked_i_o = NULL; - Node* checked_value = NULL; + Node* checked_mem = nullptr; + Node* checked_i_o = nullptr; + Node* checked_value = nullptr; if (basic_elem_type == T_CONFLICT) { assert(!dest_needs_zeroing, ""); @@ -465,7 +465,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode* adr_type, src, src_offset, dest, dest_offset, copy_length, acopy_to_uninitialized); - if (cv == NULL) cv = intcon(-1); // failure (no stub available) + if (cv == nullptr) cv = intcon(-1); // failure (no stub available) checked_control = *ctrl; checked_i_o = *io; checked_mem = mem->memory_at(alias_idx); @@ -474,7 +474,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode* } Node* not_pos = 
generate_nonpositive_guard(ctrl, copy_length, length_never_negative); - if (not_pos != NULL) { + if (not_pos != nullptr) { Node* local_ctrl = not_pos, *local_io = *io; MergeMemNode* local_mem = MergeMemNode::make(mem); transform_later(local_mem); @@ -495,7 +495,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode* // Clear the whole thing since there are no source elements to copy. generate_clear_array(local_ctrl, local_mem, adr_type, dest, basic_elem_type, - intcon(0), NULL, + intcon(0), nullptr, alloc->in(AllocateNode::AllocSize)); // Use a secondary InitializeNode as raw memory barrier. // Currently it is needed only on this path since other @@ -533,7 +533,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode* generate_clear_array(*ctrl, mem, adr_type, dest, basic_elem_type, intcon(0), dest_offset, - NULL); + nullptr); } // Next, perform a dynamic check on the tail length. @@ -541,16 +541,16 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode* // There are two wins: Avoid generating the ClearArray // with its attendant messy index arithmetic, and upgrade // the copy to a more hardware-friendly word size of 64 bits. - Node* tail_ctl = NULL; + Node* tail_ctl = nullptr; if (!(*ctrl)->is_top() && !dest_tail->eqv_uncast(dest_length)) { Node* cmp_lt = transform_later( new CmpINode(dest_tail, dest_length) ); Node* bol_lt = transform_later( new BoolNode(cmp_lt, BoolTest::lt) ); - tail_ctl = generate_slow_guard(ctrl, bol_lt, NULL); - assert(tail_ctl != NULL || !(*ctrl)->is_top(), "must be an outcome"); + tail_ctl = generate_slow_guard(ctrl, bol_lt, nullptr); + assert(tail_ctl != nullptr || !(*ctrl)->is_top(), "must be an outcome"); } // At this point, let's assume there is no tail. - if (!(*ctrl)->is_top() && alloc != NULL && basic_elem_type != T_OBJECT) { + if (!(*ctrl)->is_top() && alloc != nullptr && basic_elem_type != T_OBJECT) { // There is no tail. 
Try an upgrade to a 64-bit copy. bool didit = false; { @@ -575,13 +575,13 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode* } // Clear the tail, if any. - if (tail_ctl != NULL) { - Node* notail_ctl = (*ctrl)->is_top() ? NULL : *ctrl; + if (tail_ctl != nullptr) { + Node* notail_ctl = (*ctrl)->is_top() ? nullptr : *ctrl; *ctrl = tail_ctl; - if (notail_ctl == NULL) { + if (notail_ctl == nullptr) { generate_clear_array(*ctrl, mem, adr_type, dest, basic_elem_type, - dest_tail, NULL, + dest_tail, nullptr, dest_size); } else { // Make a local merge. @@ -591,7 +591,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode* done_mem->init_req(1, mem->memory_at(alias_idx)); generate_clear_array(*ctrl, mem, adr_type, dest, basic_elem_type, - dest_tail, NULL, + dest_tail, nullptr, dest_size); done_ctl->init_req(2, *ctrl); done_mem->init_req(2, mem->memory_at(alias_idx)); @@ -620,7 +620,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode* Node* src_klass = ac->in(ArrayCopyNode::SrcKlass); Node* dest_klass = ac->in(ArrayCopyNode::DestKlass); - assert(src_klass != NULL && dest_klass != NULL, "should have klasses"); + assert(src_klass != nullptr && dest_klass != nullptr, "should have klasses"); // Generate the subtype check. // This might fold up statically, or then again it might not. @@ -643,14 +643,14 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode* // (At this point we can assume disjoint_bases, since types differ.) 
int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset()); Node* p1 = basic_plus_adr(dest_klass, ek_offset); - Node* n1 = LoadKlassNode::make(_igvn, NULL, C->immutable_memory(), p1, TypeRawPtr::BOTTOM); + Node* n1 = LoadKlassNode::make(_igvn, nullptr, C->immutable_memory(), p1, TypeRawPtr::BOTTOM); Node* dest_elem_klass = transform_later(n1); Node* cv = generate_checkcast_arraycopy(&local_ctrl, &local_mem, adr_type, dest_elem_klass, src, src_offset, dest, dest_offset, ConvI2X(copy_length), acopy_to_uninitialized); - if (cv == NULL) cv = intcon(-1); // failure (no stub available) + if (cv == nullptr) cv = intcon(-1); // failure (no stub available) checked_control = local_ctrl; checked_i_o = *io; checked_mem = local_mem->memory_at(alias_idx); @@ -660,7 +660,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode* // At this point we know we do not need type checks on oop stores. BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); - if (!bs->array_copy_requires_gc_barriers(alloc != NULL, copy_type, false, false, BarrierSetC2::Expansion)) { + if (!bs->array_copy_requires_gc_barriers(alloc != nullptr, copy_type, false, false, BarrierSetC2::Expansion)) { // If we do not need gc barriers, copy using the jint or jlong stub. copy_type = LP64_ONLY(UseCompressedOops ? 
T_INT : T_LONG) NOT_LP64(T_INT); assert(type2aelembytes(basic_elem_type) == type2aelembytes(copy_type), @@ -686,7 +686,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode* } // Here are all the slow paths up to this point, in one bundle: - assert(slow_region != NULL, "allocated on entry"); + assert(slow_region != nullptr, "allocated on entry"); slow_control = slow_region; DEBUG_ONLY(slow_region = (RegionNode*)badAddress); @@ -729,7 +729,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode* slow_i_o = slow_i_o2; slow_mem = slow_mem2; - if (alloc != NULL) { + if (alloc != nullptr) { // We'll restart from the very beginning, after zeroing the whole thing. // This can cause double writes, but that's OK since dest is brand new. // So we ignore the low 31 bits of the value returned from the stub. @@ -769,7 +769,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode* if (dest_needs_zeroing) { generate_clear_array(local_ctrl, local_mem, adr_type, dest, basic_elem_type, - intcon(0), NULL, + intcon(0), nullptr, alloc->in(AllocateNode::AllocSize)); } @@ -789,7 +789,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode* // Remove unused edges. for (uint i = 1; i < result_region->req(); i++) { - if (result_region->in(i) == NULL) { + if (result_region->in(i) == nullptr) { result_region->init_req(i, top()); } } @@ -801,7 +801,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode* // mem no longer guaranteed to stay a MergeMemNode Node* out_mem = mem; - DEBUG_ONLY(mem = NULL); + DEBUG_ONLY(mem = nullptr); // The memory edges above are precise in order to model effects around // array copies accurately to allow value numbering of field loads around @@ -815,7 +815,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode* // the membar also. 
// // Do not let reads from the cloned object float above the arraycopy. - if (alloc != NULL && !alloc->initialization()->does_not_escape()) { + if (alloc != nullptr && !alloc->initialization()->does_not_escape()) { // Do not let stores that initialize this object be reordered with // a subsequent store that would make this object accessible by // other threads. @@ -831,7 +831,7 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode* } _igvn.replace_node(_callprojs.fallthrough_memproj, out_mem); - if (_callprojs.fallthrough_ioproj != NULL) { + if (_callprojs.fallthrough_ioproj != nullptr) { _igvn.replace_node(_callprojs.fallthrough_ioproj, *io); } _igvn.replace_node(_callprojs.fallthrough_catchproj, *ctrl); @@ -839,9 +839,9 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode* #ifdef ASSERT const TypeOopPtr* dest_t = _igvn.type(dest)->is_oopptr(); if (dest_t->is_known_instance() && !is_partial_array_copy) { - ArrayCopyNode* ac = NULL; + ArrayCopyNode* ac = nullptr; assert(ArrayCopyNode::may_modify(dest_t, (*ctrl)->in(0)->as_MemBar(), &_igvn, ac), "dependency on arraycopy lost"); - assert(ac == NULL, "no arraycopy anymore"); + assert(ac == nullptr, "no arraycopy anymore"); } #endif @@ -865,12 +865,12 @@ Node* PhaseMacroExpand::generate_arraycopy(ArrayCopyNode *ac, AllocateArrayNode* // dest oop of the destination array // basic_elem_type element type of the destination // slice_idx array index of first element to store -// slice_len number of elements to store (or NULL) +// slice_len number of elements to store (or null) // dest_size total size in bytes of the array object // -// Exactly one of slice_len or dest_size must be non-NULL. -// If dest_size is non-NULL, zeroing extends to the end of the object. -// If slice_len is non-NULL, the slice_idx value must be a constant. +// Exactly one of slice_len or dest_size must be non-null. +// If dest_size is non-null, zeroing extends to the end of the object. 
+// If slice_len is non-null, the slice_idx value must be a constant. void PhaseMacroExpand::generate_clear_array(Node* ctrl, MergeMemNode* merge_mem, const TypePtr* adr_type, Node* dest, @@ -879,9 +879,9 @@ void PhaseMacroExpand::generate_clear_array(Node* ctrl, MergeMemNode* merge_mem, Node* slice_len, Node* dest_size) { // one or the other but not both of slice_len and dest_size: - assert((slice_len != NULL? 1: 0) + (dest_size != NULL? 1: 0) == 1, ""); - if (slice_len == NULL) slice_len = top(); - if (dest_size == NULL) dest_size = top(); + assert((slice_len != nullptr? 1: 0) + (dest_size != nullptr? 1: 0) == 1, ""); + if (slice_len == nullptr) slice_len = top(); + if (dest_size == nullptr) dest_size = top(); uint alias_idx = C->get_alias_index(adr_type); @@ -1041,10 +1041,10 @@ bool PhaseMacroExpand::generate_block_arraycopy(Node** ctrl, MergeMemNode** mem, countx = transform_later(new SubXNode(countx, MakeConX(dest_off))); countx = transform_later(new URShiftXNode(countx, intcon(LogBytesPerLong))); - bool disjoint_bases = true; // since alloc != NULL + bool disjoint_bases = true; // since alloc isn't null generate_unchecked_arraycopy(ctrl, mem, adr_type, T_LONG, disjoint_bases, - sptr, NULL, dptr, NULL, countx, dest_uninitialized); + sptr, nullptr, dptr, nullptr, countx, dest_uninitialized); return true; } @@ -1098,12 +1098,12 @@ MergeMemNode* PhaseMacroExpand::generate_slow_arraycopy(ArrayCopyNode *ac, transform_later(out_mem); // When src is negative and arraycopy is before an infinite loop,_callprojs.fallthrough_ioproj - // could be NULL. Skip clone and update NULL fallthrough_ioproj. - if (_callprojs.fallthrough_ioproj != NULL) { + // could be null. Skip clone and update null fallthrough_ioproj. 
+ if (_callprojs.fallthrough_ioproj != nullptr) { *io = _callprojs.fallthrough_ioproj->clone(); transform_later(*io); } else { - *io = NULL; + *io = nullptr; } return out_mem; @@ -1116,11 +1116,11 @@ Node* PhaseMacroExpand::generate_checkcast_arraycopy(Node** ctrl, MergeMemNode** Node* src, Node* src_offset, Node* dest, Node* dest_offset, Node* copy_length, bool dest_uninitialized) { - if ((*ctrl)->is_top()) return NULL; + if ((*ctrl)->is_top()) return nullptr; address copyfunc_addr = StubRoutines::checkcast_arraycopy(dest_uninitialized); - if (copyfunc_addr == NULL) { // Stub was not generated, go slow path. - return NULL; + if (copyfunc_addr == nullptr) { // Stub was not generated, go slow path. + return nullptr; } // Pick out the parameters required to perform a store-check @@ -1129,7 +1129,7 @@ Node* PhaseMacroExpand::generate_checkcast_arraycopy(Node** ctrl, MergeMemNode** // super_check_offset, for the desired klass. int sco_offset = in_bytes(Klass::super_check_offset_offset()); Node* p3 = basic_plus_adr(dest_elem_klass, sco_offset); - Node* n3 = new LoadINode(NULL, *mem /*memory(p3)*/, p3, _igvn.type(p3)->is_ptr(), TypeInt::INT, MemNode::unordered); + Node* n3 = new LoadINode(nullptr, *mem /*memory(p3)*/, p3, _igvn.type(p3)->is_ptr(), TypeInt::INT, MemNode::unordered); Node* check_offset = ConvI2X(transform_later(n3)); Node* check_value = dest_elem_klass; @@ -1154,12 +1154,12 @@ Node* PhaseMacroExpand::generate_generic_arraycopy(Node** ctrl, MergeMemNode** m Node* src, Node* src_offset, Node* dest, Node* dest_offset, Node* copy_length, bool dest_uninitialized) { - if ((*ctrl)->is_top()) return NULL; + if ((*ctrl)->is_top()) return nullptr; assert(!dest_uninitialized, "Invariant"); address copyfunc_addr = StubRoutines::generic_arraycopy(); - if (copyfunc_addr == NULL) { // Stub was not generated, go slow path. - return NULL; + if (copyfunc_addr == nullptr) { // Stub was not generated, go slow path. 
+ return nullptr; } const TypeFunc* call_type = OptoRuntime::generic_arraycopy_Type(); @@ -1186,7 +1186,7 @@ bool PhaseMacroExpand::generate_unchecked_arraycopy(Node** ctrl, MergeMemNode** Node* src_start = src; Node* dest_start = dest; - if (src_offset != NULL || dest_offset != NULL) { + if (src_offset != nullptr || dest_offset != nullptr) { src_start = array_element_address(src, src_offset, basic_elem_type); dest_start = array_element_address(dest, dest_offset, basic_elem_type); } @@ -1197,8 +1197,8 @@ bool PhaseMacroExpand::generate_unchecked_arraycopy(Node** ctrl, MergeMemNode** basictype2arraycopy(basic_elem_type, src_offset, dest_offset, disjoint_bases, copyfunc_name, dest_uninitialized); - Node* result_memory = NULL; - RegionNode* exit_block = NULL; + Node* result_memory = nullptr; + RegionNode* exit_block = nullptr; if (ArrayOperationPartialInlineSize > 0 && is_subword_type(basic_elem_type) && Matcher::vector_width_in_bytes(basic_elem_type) >= 16) { generate_partial_inlining_block(ctrl, mem, adr_type, &exit_block, &result_memory, @@ -1242,7 +1242,7 @@ void PhaseMacroExpand::expand_arraycopy_node(ArrayCopyNode *ac) { Node* dest = ac->in(ArrayCopyNode::Dest); Node* dest_offset = ac->in(ArrayCopyNode::DestPos); Node* length = ac->in(ArrayCopyNode::Length); - MergeMemNode* merge_mem = NULL; + MergeMemNode* merge_mem = nullptr; if (ac->is_clonebasic()) { BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); @@ -1253,10 +1253,10 @@ void PhaseMacroExpand::expand_arraycopy_node(ArrayCopyNode *ac) { merge_mem = MergeMemNode::make(mem); transform_later(merge_mem); - AllocateArrayNode* alloc = NULL; + AllocateArrayNode* alloc = nullptr; if (ac->is_alloc_tightly_coupled()) { alloc = AllocateArrayNode::Ideal_array_allocation(dest, &_igvn); - assert(alloc != NULL, "expect alloc"); + assert(alloc != nullptr, "expect alloc"); } const TypePtr* adr_type = _igvn.type(dest)->is_oopptr()->add_offset(Type::OffsetBot); @@ -1271,10 +1271,10 @@ void 
PhaseMacroExpand::expand_arraycopy_node(ArrayCopyNode *ac) { return; } - AllocateArrayNode* alloc = NULL; + AllocateArrayNode* alloc = nullptr; if (ac->is_alloc_tightly_coupled()) { alloc = AllocateArrayNode::Ideal_array_allocation(dest, &_igvn); - assert(alloc != NULL, "expect alloc"); + assert(alloc != nullptr, "expect alloc"); } assert(ac->is_arraycopy() || ac->is_arraycopy_validated(), "should be an arraycopy"); @@ -1292,10 +1292,10 @@ void PhaseMacroExpand::expand_arraycopy_node(ArrayCopyNode *ac) { BasicType src_elem = T_CONFLICT; BasicType dest_elem = T_CONFLICT; - if (top_src != NULL && top_src->elem() != Type::BOTTOM) { + if (top_src != nullptr && top_src->elem() != Type::BOTTOM) { src_elem = top_src->elem()->array_element_basic_type(); } - if (top_dest != NULL && top_dest->elem() != Type::BOTTOM) { + if (top_dest != nullptr && top_dest->elem() != Type::BOTTOM) { dest_elem = top_dest->elem()->array_element_basic_type(); } if (is_reference_type(src_elem, true)) src_elem = T_OBJECT; @@ -1319,7 +1319,7 @@ void PhaseMacroExpand::expand_arraycopy_node(ArrayCopyNode *ac) { } // Call StubRoutines::generic_arraycopy stub. - Node* mem = generate_arraycopy(ac, NULL, &ctrl, merge_mem, &io, + Node* mem = generate_arraycopy(ac, nullptr, &ctrl, merge_mem, &io, TypeRawPtr::BOTTOM, T_CONFLICT, src, src_offset, dest, dest_offset, length, // If a negative length guard was generated for the ArrayCopyNode, @@ -1341,7 +1341,7 @@ void PhaseMacroExpand::expand_arraycopy_node(ArrayCopyNode *ac) { } _igvn.replace_node(_callprojs.fallthrough_memproj, merge_mem); - if (_callprojs.fallthrough_ioproj != NULL) { + if (_callprojs.fallthrough_ioproj != nullptr) { _igvn.replace_node(_callprojs.fallthrough_ioproj, io); } _igvn.replace_node(_callprojs.fallthrough_catchproj, ctrl); @@ -1390,7 +1390,7 @@ void PhaseMacroExpand::expand_arraycopy_node(ArrayCopyNode *ac) { // (7) src_offset + length must not exceed length of src. 
Node* alen = ac->in(ArrayCopyNode::SrcLen); - assert(alen != NULL, "need src len"); + assert(alen != nullptr, "need src len"); generate_limit_guard(&ctrl, src_offset, length, alen, @@ -1398,7 +1398,7 @@ void PhaseMacroExpand::expand_arraycopy_node(ArrayCopyNode *ac) { // (8) dest_offset + length must not exceed length of dest. alen = ac->in(ArrayCopyNode::DestLen); - assert(alen != NULL, "need dest len"); + assert(alen != nullptr, "need dest len"); generate_limit_guard(&ctrl, dest_offset, length, alen, @@ -1408,7 +1408,7 @@ void PhaseMacroExpand::expand_arraycopy_node(ArrayCopyNode *ac) { // The generate_arraycopy subroutine checks this. } // This is where the memory effects are placed: - const TypePtr* adr_type = NULL; + const TypePtr* adr_type = nullptr; if (ac->_dest_type != TypeOopPtr::BOTTOM) { adr_type = ac->_dest_type->add_offset(Type::OffsetBot)->is_ptr(); } else { diff --git a/src/hotspot/share/opto/matcher.cpp b/src/hotspot/share/opto/matcher.cpp index 8993040e021..039144f700b 100644 --- a/src/hotspot/share/opto/matcher.cpp +++ b/src/hotspot/share/opto/matcher.cpp @@ -83,52 +83,52 @@ Matcher::Matcher() _register_save_type(register_save_type) { C->set_matcher(this); - idealreg2spillmask [Op_RegI] = NULL; - idealreg2spillmask [Op_RegN] = NULL; - idealreg2spillmask [Op_RegL] = NULL; - idealreg2spillmask [Op_RegF] = NULL; - idealreg2spillmask [Op_RegD] = NULL; - idealreg2spillmask [Op_RegP] = NULL; - idealreg2spillmask [Op_VecA] = NULL; - idealreg2spillmask [Op_VecS] = NULL; - idealreg2spillmask [Op_VecD] = NULL; - idealreg2spillmask [Op_VecX] = NULL; - idealreg2spillmask [Op_VecY] = NULL; - idealreg2spillmask [Op_VecZ] = NULL; - idealreg2spillmask [Op_RegFlags] = NULL; - idealreg2spillmask [Op_RegVectMask] = NULL; + idealreg2spillmask [Op_RegI] = nullptr; + idealreg2spillmask [Op_RegN] = nullptr; + idealreg2spillmask [Op_RegL] = nullptr; + idealreg2spillmask [Op_RegF] = nullptr; + idealreg2spillmask [Op_RegD] = nullptr; + idealreg2spillmask [Op_RegP] = 
nullptr; + idealreg2spillmask [Op_VecA] = nullptr; + idealreg2spillmask [Op_VecS] = nullptr; + idealreg2spillmask [Op_VecD] = nullptr; + idealreg2spillmask [Op_VecX] = nullptr; + idealreg2spillmask [Op_VecY] = nullptr; + idealreg2spillmask [Op_VecZ] = nullptr; + idealreg2spillmask [Op_RegFlags] = nullptr; + idealreg2spillmask [Op_RegVectMask] = nullptr; - idealreg2debugmask [Op_RegI] = NULL; - idealreg2debugmask [Op_RegN] = NULL; - idealreg2debugmask [Op_RegL] = NULL; - idealreg2debugmask [Op_RegF] = NULL; - idealreg2debugmask [Op_RegD] = NULL; - idealreg2debugmask [Op_RegP] = NULL; - idealreg2debugmask [Op_VecA] = NULL; - idealreg2debugmask [Op_VecS] = NULL; - idealreg2debugmask [Op_VecD] = NULL; - idealreg2debugmask [Op_VecX] = NULL; - idealreg2debugmask [Op_VecY] = NULL; - idealreg2debugmask [Op_VecZ] = NULL; - idealreg2debugmask [Op_RegFlags] = NULL; - idealreg2debugmask [Op_RegVectMask] = NULL; + idealreg2debugmask [Op_RegI] = nullptr; + idealreg2debugmask [Op_RegN] = nullptr; + idealreg2debugmask [Op_RegL] = nullptr; + idealreg2debugmask [Op_RegF] = nullptr; + idealreg2debugmask [Op_RegD] = nullptr; + idealreg2debugmask [Op_RegP] = nullptr; + idealreg2debugmask [Op_VecA] = nullptr; + idealreg2debugmask [Op_VecS] = nullptr; + idealreg2debugmask [Op_VecD] = nullptr; + idealreg2debugmask [Op_VecX] = nullptr; + idealreg2debugmask [Op_VecY] = nullptr; + idealreg2debugmask [Op_VecZ] = nullptr; + idealreg2debugmask [Op_RegFlags] = nullptr; + idealreg2debugmask [Op_RegVectMask] = nullptr; - idealreg2mhdebugmask[Op_RegI] = NULL; - idealreg2mhdebugmask[Op_RegN] = NULL; - idealreg2mhdebugmask[Op_RegL] = NULL; - idealreg2mhdebugmask[Op_RegF] = NULL; - idealreg2mhdebugmask[Op_RegD] = NULL; - idealreg2mhdebugmask[Op_RegP] = NULL; - idealreg2mhdebugmask[Op_VecA] = NULL; - idealreg2mhdebugmask[Op_VecS] = NULL; - idealreg2mhdebugmask[Op_VecD] = NULL; - idealreg2mhdebugmask[Op_VecX] = NULL; - idealreg2mhdebugmask[Op_VecY] = NULL; - idealreg2mhdebugmask[Op_VecZ] = NULL; - 
idealreg2mhdebugmask[Op_RegFlags] = NULL; - idealreg2mhdebugmask[Op_RegVectMask] = NULL; + idealreg2mhdebugmask[Op_RegI] = nullptr; + idealreg2mhdebugmask[Op_RegN] = nullptr; + idealreg2mhdebugmask[Op_RegL] = nullptr; + idealreg2mhdebugmask[Op_RegF] = nullptr; + idealreg2mhdebugmask[Op_RegD] = nullptr; + idealreg2mhdebugmask[Op_RegP] = nullptr; + idealreg2mhdebugmask[Op_VecA] = nullptr; + idealreg2mhdebugmask[Op_VecS] = nullptr; + idealreg2mhdebugmask[Op_VecD] = nullptr; + idealreg2mhdebugmask[Op_VecX] = nullptr; + idealreg2mhdebugmask[Op_VecY] = nullptr; + idealreg2mhdebugmask[Op_VecZ] = nullptr; + idealreg2mhdebugmask[Op_RegFlags] = nullptr; + idealreg2mhdebugmask[Op_RegVectMask] = nullptr; - debug_only(_mem_node = NULL;) // Ideal memory node consumed by mach node + debug_only(_mem_node = nullptr;) // Ideal memory node consumed by mach node } //------------------------------warp_incoming_stk_arg------------------------ @@ -172,7 +172,7 @@ void Matcher::verify_new_nodes_only(Node* xroot) { assert(C->node_arena()->contains(n), "dead node"); for (uint j = 0; j < n->req(); j++) { Node* in = n->in(j); - if (in != NULL) { + if (in != nullptr) { assert(C->node_arena()->contains(in), "dead node"); if (!visited.test(in->_idx)) { worklist.push(in); @@ -325,7 +325,7 @@ void Matcher::match( ) { C->print_method(PHASE_BEFORE_MATCHING, 1); - // Create new ideal node ConP #NULL even if it does exist in old space + // Create new ideal node ConP #null even if it does exist in old space // to avoid false sharing if the corresponding mach node is not used. // The corresponding mach node is only used in rare cases for derived // pointers. 
@@ -336,10 +336,10 @@ void Matcher::match( ) { // Save debug and profile information for nodes in old space: _old_node_note_array = C->node_note_array(); - if (_old_node_note_array != NULL) { + if (_old_node_note_array != nullptr) { C->set_node_note_array(new(C->comp_arena()) GrowableArray (C->comp_arena(), _old_node_note_array->length(), - 0, NULL)); + 0, nullptr)); } // Pre-size the new_node table to avoid the need for range checks. @@ -356,7 +356,7 @@ void Matcher::match( ) { C->set_cached_top_node(xform( C->top(), live_nodes )); if (!C->failing()) { Node* xroot = xform( C->root(), 1 ); - if (xroot == NULL) { + if (xroot == nullptr) { Matcher::soft_match_failure(); // recursive matching process failed C->record_method_not_compilable("instruction match failed"); } else { @@ -373,22 +373,22 @@ void Matcher::match( ) { } } - // Generate new mach node for ConP #NULL - assert(new_ideal_null != NULL, "sanity"); + // Generate new mach node for ConP #null + assert(new_ideal_null != nullptr, "sanity"); _mach_null = match_tree(new_ideal_null); // Don't set control, it will confuse GCM since there are no uses. // The control will be set when this node is used first time // in find_base_for_derived(). - assert(_mach_null != NULL, ""); + assert(_mach_null != nullptr, ""); - C->set_root(xroot->is_Root() ? xroot->as_Root() : NULL); + C->set_root(xroot->is_Root() ? xroot->as_Root() : nullptr); #ifdef ASSERT verify_new_nodes_only(xroot); #endif } } - if (C->top() == NULL || C->root() == NULL) { + if (C->top() == nullptr || C->root() == nullptr) { C->record_method_not_compilable("graph lost"); // %%% cannot happen? 
} if (C->failing()) { @@ -1025,7 +1025,7 @@ static void match_alias_type(Compile* C, Node* n, Node* m) { for (uint i = 1; i < n->req(); i++) { Node* n1 = n->in(i); const TypePtr* n1at = n1->adr_type(); - if (n1at != NULL) { + if (n1at != nullptr) { nat = n1at; nidx = C->get_alias_index(n1at); } @@ -1076,7 +1076,7 @@ static void match_alias_type(Compile* C, Node* n, Node* m) { case Op_OnSpinWait: case Op_EncodeISOArray: nidx = Compile::AliasIdxTop; - nat = NULL; + nat = nullptr; break; } } @@ -1099,10 +1099,10 @@ Node *Matcher::transform( Node *n ) { ShouldNotCallThis(); return n; } Node *Matcher::xform( Node *n, int max_stack ) { // Use one stack to keep both: child's node/state and parent's node/index MStack mstack(max_stack * 2 * 2); // usually: C->live_nodes() * 2 * 2 - mstack.push(n, Visit, NULL, -1); // set NULL as parent to indicate root + mstack.push(n, Visit, nullptr, -1); // set null as parent to indicate root while (mstack.is_nonempty()) { C->check_node_count(NodeLimitFudgeFactor, "too many nodes matching instructions"); - if (C->failing()) return NULL; + if (C->failing()) return nullptr; n = mstack.node(); // Leave node on stack Node_State nstate = mstack.state(); if (nstate == Visit) { @@ -1119,17 +1119,17 @@ Node *Matcher::xform( Node *n, int max_stack ) { // Calls match special. They match alone with no children. // Their children, the incoming arguments, match normally. m = n->is_SafePoint() ? match_sfpt(n->as_SafePoint()):match_tree(n); - if (C->failing()) return NULL; - if (m == NULL) { Matcher::soft_match_failure(); return NULL; } + if (C->failing()) return nullptr; + if (m == nullptr) { Matcher::soft_match_failure(); return nullptr; } if (n->is_MemBar()) { m->as_MachMemBar()->set_adr_type(n->adr_type()); } } else { // Nothing the matcher cares about - if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Multi()) { // Projections? + if (n->is_Proj() && n->in(0) != nullptr && n->in(0)->is_Multi()) { // Projections? 
// Convert to machine-dependent projection m = n->in(0)->as_Multi()->match( n->as_Proj(), this ); NOT_PRODUCT(record_new2old(m, n);) - if (m->in(0) != NULL) // m might be top + if (m->in(0) != nullptr) // m might be top collect_null_checks(m, n); } else { // Else just a regular 'ol guy m = n->clone(); // So just clone into new-space @@ -1141,7 +1141,7 @@ Node *Matcher::xform( Node *n, int max_stack ) { } set_new_node(n, m); // Map old to new - if (_old_node_note_array != NULL) { + if (_old_node_note_array != nullptr) { Node_Notes* nn = C->locate_node_notes(_old_node_note_array, n->_idx); C->set_node_notes_at(m->_idx, nn); @@ -1159,7 +1159,7 @@ Node *Matcher::xform( Node *n, int max_stack ) { // Put precedence edges on stack first (match them last). for (i = oldn->req(); (uint)i < oldn->len(); i++) { Node *m = oldn->in(i); - if (m == NULL) break; + if (m == nullptr) break; // set -1 to call add_prec() instead of set_req() during Step1 mstack.push(m, Visit, n, -1); } @@ -1167,7 +1167,7 @@ Node *Matcher::xform( Node *n, int max_stack ) { // Handle precedence edges for interior nodes for (i = n->len()-1; (uint)i >= n->req(); i--) { Node *m = n->in(i); - if (m == NULL || C->node_arena()->contains(m)) continue; + if (m == nullptr || C->node_arena()->contains(m)) continue; n->rm_prec(i); // set -1 to call add_prec() instead of set_req() during Step1 mstack.push(m, Visit, n, -1); @@ -1202,7 +1202,7 @@ Node *Matcher::xform( Node *n, int max_stack ) { // And now walk his children, and convert his inputs to new-space. 
for( ; i >= 0; --i ) { // For all normal inputs do Node *m = n->in(i); // Get input - if(m != NULL) + if(m != nullptr) mstack.push(m, Visit, n, i); } @@ -1210,7 +1210,7 @@ Node *Matcher::xform( Node *n, int max_stack ) { else if (nstate == Post_Visit) { // Set xformed input Node *p = mstack.parent(); - if (p != NULL) { // root doesn't have parent + if (p != nullptr) { // root doesn't have parent int i = (int)mstack.index(); if (i >= 0) p->set_req(i, n); // required input @@ -1256,13 +1256,13 @@ OptoReg::Name Matcher::warp_outgoing_stk_arg( VMReg reg, OptoReg::Name begin_out // They match alone with no children. Their children, the incoming // arguments, match normally. MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) { - MachSafePointNode *msfpt = NULL; - MachCallNode *mcall = NULL; + MachSafePointNode *msfpt = nullptr; + MachCallNode *mcall = nullptr; uint cnt; // Split out case for SafePoint vs Call CallNode *call; const TypeTuple *domain; - ciMethod* method = NULL; + ciMethod* method = nullptr; bool is_method_handle_invoke = false; // for special kill effects if( sfpt->is_Call() ) { call = sfpt->as_Call(); @@ -1271,8 +1271,8 @@ MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) { // Match just the call, nothing else MachNode *m = match_tree(call); - if (C->failing()) return NULL; - if( m == NULL ) { Matcher::soft_match_failure(); return NULL; } + if (C->failing()) return nullptr; + if( m == nullptr ) { Matcher::soft_match_failure(); return nullptr; } // Copy data from the Ideal SafePoint to the machine version mcall = m->as_MachCall(); @@ -1312,10 +1312,10 @@ MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) { } // This is a non-call safepoint else { - call = NULL; - domain = NULL; + call = nullptr; + domain = nullptr; MachNode *mn = match_tree(sfpt); - if (C->failing()) return NULL; + if (C->failing()) return nullptr; msfpt = mn->as_MachSafePoint(); cnt = TypeFunc::Parms; } @@ -1340,7 +1340,7 @@ MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) { 
OptoReg::Name out_arg_limit_per_call = begin_out_arg_area; // Calls to C may hammer extra stack slots above and beyond any arguments. // These are usually backing store for register arguments for varargs. - if( call != NULL && call->is_CallRuntime() ) + if( call != nullptr && call->is_CallRuntime() ) out_arg_limit_per_call = OptoReg::add(out_arg_limit_per_call,C->varargs_C_out_slots_killed()); @@ -1451,7 +1451,7 @@ MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) { } // Debug inputs begin just after the last incoming parameter - assert((mcall == NULL) || (mcall->jvms() == NULL) || + assert((mcall == nullptr) || (mcall->jvms() == nullptr) || (mcall->jvms()->debug_start() + mcall->_jvmadj == mcall->tf()->domain()->cnt()), ""); // Add additional edges. @@ -1490,18 +1490,18 @@ MachNode *Matcher::match_tree( const Node *n ) { Node *mem = n->is_Store() ? n->in(MemNode::Memory) : (Node*)1 ; #ifdef ASSERT Node* save_mem_node = _mem_node; - _mem_node = n->is_Store() ? (Node*)n : NULL; + _mem_node = n->is_Store() ? (Node*)n : nullptr; #endif // State object for root node of match tree // Allocate it on _states_arena - stack allocation can cause stack overflow. 
State *s = new (&_states_arena) State; - s->_kids[0] = NULL; - s->_kids[1] = NULL; + s->_kids[0] = nullptr; + s->_kids[1] = nullptr; s->_leaf = (Node*)n; // Label the input tree, allocating labels from top-level arena Node* root_mem = mem; Label_Root(n, s, n->in(0), root_mem); - if (C->failing()) return NULL; + if (C->failing()) return nullptr; // The minimum cost match for the whole tree is found at the root State uint mincost = max_juint; @@ -1521,7 +1521,7 @@ MachNode *Matcher::match_tree( const Node *n ) { s->dump(); #endif Matcher::soft_match_failure(); - return NULL; + return nullptr; } // Reduce input tree based upon the state labels to machine Nodes MachNode *m = ReduceInst(s, s->rule(mincost), mem); @@ -1568,7 +1568,7 @@ static bool match_into_reg( const Node *n, Node *m, Node *control, int i, bool s Node* m_control = m->in(0); // Control of load's memory can post-dominates load's control. // So use it since load can't float above its memory. - Node* mem_control = (m->is_Load()) ? m->in(MemNode::Memory)->in(0) : NULL; + Node* mem_control = (m->is_Load()) ? m->in(MemNode::Memory)->in(0) : nullptr; if (control && m_control && control != m_control && control != mem_control) { // Actually, we can live with the most conservative control we @@ -1622,7 +1622,7 @@ Node* Matcher::Label_Root(const Node* n, State* svec, Node* control, Node*& mem) LabelRootDepth++; if (LabelRootDepth > MaxLabelRootDepth) { C->record_method_not_compilable("Out of stack space, increase MaxLabelRootDepth"); - return NULL; + return nullptr; } uint care = 0; // Edges matcher cares about uint cnt = n->req(); @@ -1632,13 +1632,13 @@ Node* Matcher::Label_Root(const Node* n, State* svec, Node* control, Node*& mem) // Can only subsume a child into your match-tree if that child's memory state // is not modified along the path to another input. // It is unsafe even if the other inputs are separate roots. 
- Node *input_mem = NULL; + Node *input_mem = nullptr; for( i = 1; i < cnt; i++ ) { if( !n->match_edge(i) ) continue; Node *m = n->in(i); // Get ith input assert( m, "expect non-null children" ); if( m->is_Load() ) { - if( input_mem == NULL ) { + if( input_mem == nullptr ) { input_mem = m->in(MemNode::Memory); if (mem == (Node*)1) { // Save this memory to bail out if there's another memory access @@ -1660,8 +1660,8 @@ Node* Matcher::Label_Root(const Node* n, State* svec, Node* control, Node*& mem) assert( care <= 2, "binary only for now" ); // Recursively label the State tree. - s->_kids[0] = NULL; - s->_kids[1] = NULL; + s->_kids[0] = nullptr; + s->_kids[1] = nullptr; s->_leaf = m; // Check for leaves of the State Tree; things that cannot be a part of @@ -1686,11 +1686,11 @@ Node* Matcher::Label_Root(const Node* n, State* svec, Node* control, Node*& mem) } else { // If match tree has no control and we do, adopt it for entire tree - if( control == NULL && m->in(0) != NULL && m->req() > 1 ) + if( control == nullptr && m->in(0) != nullptr && m->req() > 1 ) control = m->in(0); // Pick up control // Else match as a normal part of the match tree. control = Label_Root(m, s, control, mem); - if (C->failing()) return NULL; + if (C->failing()) return nullptr; } } @@ -1718,36 +1718,36 @@ Node* Matcher::Label_Root(const Node* n, State* svec, Node* control, Node*& mem) // program. The register allocator is free to split uses later to // split live ranges. MachNode* Matcher::find_shared_node(Node* leaf, uint rule) { - if (!leaf->is_Con() && !leaf->is_DecodeNarrowPtr()) return NULL; + if (!leaf->is_Con() && !leaf->is_DecodeNarrowPtr()) return nullptr; // See if this Con has already been reduced using this rule. 
- if (_shared_nodes.Size() <= leaf->_idx) return NULL; + if (_shared_nodes.Size() <= leaf->_idx) return nullptr; MachNode* last = (MachNode*)_shared_nodes.at(leaf->_idx); - if (last != NULL && rule == last->rule()) { + if (last != nullptr && rule == last->rule()) { // Don't expect control change for DecodeN if (leaf->is_DecodeNarrowPtr()) return last; // Get the new space root. Node* xroot = new_node(C->root()); - if (xroot == NULL) { + if (xroot == nullptr) { // This shouldn't happen give the order of matching. - return NULL; + return nullptr; } // Shared constants need to have their control be root so they // can be scheduled properly. Node* control = last->in(0); if (control != xroot) { - if (control == NULL || control == C->root()) { + if (control == nullptr || control == C->root()) { last->set_req(0, xroot); } else { assert(false, "unexpected control"); - return NULL; + return nullptr; } } return last; } - return NULL; + return nullptr; } @@ -1773,15 +1773,15 @@ MachNode *Matcher::ReduceInst( State *s, int rule, Node *&mem ) { assert( rule >= NUM_OPERANDS, "called with operand rule" ); MachNode* shared_node = find_shared_node(s->_leaf, rule); - if (shared_node != NULL) { + if (shared_node != nullptr) { return shared_node; } // Build the object to represent this state & prepare for recursive calls MachNode *mach = s->MachNodeGenerator(rule); - guarantee(mach != NULL, "Missing MachNode"); + guarantee(mach != nullptr, "Missing MachNode"); mach->_opnds[0] = s->MachOperGenerator(_reduceOp[rule]); - assert( mach->_opnds[0] != NULL, "Missing result operand" ); + assert( mach->_opnds[0] != nullptr, "Missing result operand" ); Node *leaf = s->_leaf; NOT_PRODUCT(record_new2old(mach, leaf);) // Check for instruction or instruction chain rule @@ -1804,14 +1804,14 @@ MachNode *Matcher::ReduceInst( State *s, int rule, Node *&mem ) { #ifdef ASSERT // Verify adr type after matching memory operation const MachOper* oper = mach->memory_operand(); - if (oper != NULL && oper != 
(MachOper*)-1) { + if (oper != nullptr && oper != (MachOper*)-1) { // It has a unique memory operand. Find corresponding ideal mem node. - Node* m = NULL; + Node* m = nullptr; if (leaf->is_Mem()) { m = leaf; } else { m = _mem_node; - assert(m != NULL && m->is_Mem(), "expecting memory node"); + assert(m != nullptr && m->is_Mem(), "expecting memory node"); } const Type* mach_at = mach->adr_type(); // DecodeN node consumed by an address may have different type @@ -1850,7 +1850,7 @@ MachNode *Matcher::ReduceInst( State *s, int rule, Node *&mem ) { ex->in(1)->set_req(0, C->root()); // Remove old node from the graph for( uint i=0; ireq(); i++ ) { - mach->set_req(i,NULL); + mach->set_req(i,nullptr); } NOT_PRODUCT(record_new2old(ex, s->_leaf);) } @@ -1883,7 +1883,7 @@ MachNode *Matcher::ReduceInst( State *s, int rule, Node *&mem ) { void Matcher::handle_precedence_edges(Node* n, MachNode *mach) { for (uint i = n->req(); i < n->len(); i++) { - if (n->in(i) != NULL) { + if (n->in(i) != nullptr) { mach->add_prec(n->in(i)); } } @@ -1930,15 +1930,15 @@ uint Matcher::ReduceInst_Interior( State *s, int rule, Node *&mem, MachNode *mac debug_only( if( mem == (Node*)1 ) _mem_node = s->_leaf;) mem = mem2; } - if( s->_leaf->in(0) != NULL && s->_leaf->req() > 1) { - if( mach->in(0) == NULL ) + if( s->_leaf->in(0) != nullptr && s->_leaf->req() > 1) { + if( mach->in(0) == nullptr ) mach->set_req(0, s->_leaf->in(0)); } // Now recursively walk the state tree & add operand list. 
for( uint i=0; i<2; i++ ) { // binary tree State *newstate = s->_kids[i]; - if( newstate == NULL ) break; // Might only have 1 child + if( newstate == nullptr ) break; // Might only have 1 child // 'op' is what I am expecting to receive int op; if( i == 0 ) { @@ -1993,10 +1993,10 @@ uint Matcher::ReduceInst_Interior( State *s, int rule, Node *&mem, MachNode *mac void Matcher::ReduceOper( State *s, int rule, Node *&mem, MachNode *mach ) { assert( rule < _LAST_MACH_OPER, "called with operand rule" ); State *kid = s->_kids[0]; - assert( kid == NULL || s->_leaf->in(0) == NULL, "internal operands have no control" ); + assert( kid == nullptr || s->_leaf->in(0) == nullptr, "internal operands have no control" ); // Leaf? And not subsumed? - if( kid == NULL && !_swallowed[rule] ) { + if( kid == nullptr && !_swallowed[rule] ) { mach->add_req( s->_leaf ); // Add leaf pointer return; // Bail out } @@ -2017,7 +2017,7 @@ void Matcher::ReduceOper( State *s, int rule, Node *&mem, MachNode *mach ) { } } - for (uint i = 0; kid != NULL && i < 2; kid = s->_kids[1], i++) { // binary tree + for (uint i = 0; kid != nullptr && i < 2; kid = s->_kids[1], i++) { // binary tree int newrule; if( i == 0) { newrule = kid->rule(_leftOp[rule]); @@ -2057,7 +2057,7 @@ OptoReg::Name Matcher::find_receiver() { } bool Matcher::is_vshift_con_pattern(Node* n, Node* m) { - if (n != NULL && m != NULL) { + if (n != nullptr && m != nullptr) { return VectorNode::is_vector_shift(n) && VectorNode::is_vector_shift_count(m) && m->in(1)->is_Con(); } @@ -2137,8 +2137,8 @@ void Matcher::find_shared(Node* n) { } for (int i = n->req() - 1; i >= 0; --i) { // For my children Node* m = n->in(i); // Get ith input - if (m == NULL) { - continue; // Ignore NULLs + if (m == nullptr) { + continue; // Ignore nulls } if (clone_node(n, m, mstack)) { continue; @@ -2561,7 +2561,7 @@ void Matcher::collect_null_checks( Node *proj, Node *orig_proj ) { // Look for DecodeN node which should be pinned to orig_proj. 
// On platforms (Sparc) which can not handle 2 adds // in addressing mode we have to keep a DecodeN node and - // use it to do implicit NULL check in address. + // use it to do implicit null check in address. // // DecodeN node was pinned to non-null path (orig_proj) during // CastPP transformation in final_graph_reshaping_impl(). @@ -2571,9 +2571,9 @@ void Matcher::collect_null_checks( Node *proj, Node *orig_proj ) { Node* d = orig_proj->raw_out(i); if (d->is_DecodeN() && d->in(1) == val) { val = d; - val->set_req(0, NULL); // Unpin now. + val->set_req(0, nullptr); // Unpin now. // Mark this as special case to distinguish from - // a regular case: CmpP(DecodeN, NULL). + // a regular case: CmpP(DecodeN, null). val = (Node*)(((intptr_t)val) | 1); break; } @@ -2587,7 +2587,7 @@ void Matcher::collect_null_checks( Node *proj, Node *orig_proj ) { } //---------------------------validate_null_checks------------------------------ -// Its possible that the value being NULL checked is not the root of a match +// Its possible that the value being null checked is not the root of a match // tree. If so, I cannot use the value in an implicit null check. void Matcher::validate_null_checks( ) { uint cnt = _null_check_tests.size(); @@ -2599,12 +2599,12 @@ void Matcher::validate_null_checks( ) { if (has_new_node(val)) { Node* new_val = new_node(val); if (is_decoden) { - assert(val->is_DecodeNarrowPtr() && val->in(0) == NULL, "sanity"); + assert(val->is_DecodeNarrowPtr() && val->in(0) == nullptr, "sanity"); // Note: new_val may have a control edge if // the original ideal node DecodeN was matched before // it was unpinned in Matcher::collect_null_checks(). // Unpin the mach node and mark it. 
- new_val->set_req(0, NULL); + new_val->set_req(0, nullptr); new_val = (Node*)(((intptr_t)new_val) | 1); } // Is a match-tree root, so replace with the matched value @@ -2630,15 +2630,15 @@ bool Matcher::gen_narrow_oop_implicit_null_checks() { } return CompressedOops::use_implicit_null_checks() && (narrow_oop_use_complex_address() || - CompressedOops::base() != NULL); + CompressedOops::base() != nullptr); } // Compute RegMask for an ideal register. const RegMask* Matcher::regmask_for_ideal_register(uint ideal_reg, Node* ret) { const Type* t = Type::mreg2type[ideal_reg]; - if (t == NULL) { + if (t == nullptr) { assert(ideal_reg >= Op_VecA && ideal_reg <= Op_VecZ, "not a vector: %d", ideal_reg); - return NULL; // not supported + return nullptr; // not supported } Node* fp = ret->in(TypeFunc::FramePtr); Node* mem = ret->in(TypeFunc::Memory); @@ -2647,25 +2647,25 @@ const RegMask* Matcher::regmask_for_ideal_register(uint ideal_reg, Node* ret) { Node* spill; switch (ideal_reg) { - case Op_RegN: spill = new LoadNNode(NULL, mem, fp, atp, t->is_narrowoop(), mo); break; - case Op_RegI: spill = new LoadINode(NULL, mem, fp, atp, t->is_int(), mo); break; - case Op_RegP: spill = new LoadPNode(NULL, mem, fp, atp, t->is_ptr(), mo); break; - case Op_RegF: spill = new LoadFNode(NULL, mem, fp, atp, t, mo); break; - case Op_RegD: spill = new LoadDNode(NULL, mem, fp, atp, t, mo); break; - case Op_RegL: spill = new LoadLNode(NULL, mem, fp, atp, t->is_long(), mo); break; + case Op_RegN: spill = new LoadNNode(nullptr, mem, fp, atp, t->is_narrowoop(), mo); break; + case Op_RegI: spill = new LoadINode(nullptr, mem, fp, atp, t->is_int(), mo); break; + case Op_RegP: spill = new LoadPNode(nullptr, mem, fp, atp, t->is_ptr(), mo); break; + case Op_RegF: spill = new LoadFNode(nullptr, mem, fp, atp, t, mo); break; + case Op_RegD: spill = new LoadDNode(nullptr, mem, fp, atp, t, mo); break; + case Op_RegL: spill = new LoadLNode(nullptr, mem, fp, atp, t->is_long(), mo); break; case Op_VecA: // 
fall-through case Op_VecS: // fall-through case Op_VecD: // fall-through case Op_VecX: // fall-through case Op_VecY: // fall-through - case Op_VecZ: spill = new LoadVectorNode(NULL, mem, fp, atp, t->is_vect()); break; + case Op_VecZ: spill = new LoadVectorNode(nullptr, mem, fp, atp, t->is_vect()); break; case Op_RegVectMask: return Matcher::predicate_reg_mask(); default: ShouldNotReachHere(); } MachNode* mspill = match_tree(spill); - assert(mspill != NULL, "matching failed: %d", ideal_reg); + assert(mspill != nullptr, "matching failed: %d", ideal_reg); // Handle generic vector operand case if (Matcher::supports_generic_vector_operands && t->isa_vect()) { specialize_mach_node(mspill); @@ -2701,7 +2701,7 @@ void Matcher::specialize_temp_node(MachTempNode* tmp, MachNode* use, uint idx) { // Compute concrete vector operand for a generic DEF/USE vector operand (of mach node m at index idx). MachOper* Matcher::specialize_vector_operand(MachNode* m, uint opnd_idx) { assert(Matcher::is_generic_vector(m->_opnds[opnd_idx]), "repeated updates"); - Node* def = NULL; + Node* def = nullptr; if (opnd_idx == 0) { // DEF def = m; // use mach node itself to compute vector operand type } else { @@ -2743,7 +2743,7 @@ void Matcher::specialize_generic_vector_operands() { while (live_nodes.size() > 0) { MachNode* m = live_nodes.pop()->isa_Mach(); - if (m != NULL) { + if (m != nullptr) { if (Matcher::is_reg2reg_move(m)) { // Register allocator properly handles vec <=> leg moves using register masks. 
int opnd_idx = m->operand_index(1); @@ -2799,7 +2799,7 @@ bool Matcher::verify_after_postselect_cleanup() { C->identify_useful_nodes(useful); for (uint i = 0; i < useful.size(); i++) { MachNode* m = useful.at(i)->isa_Mach(); - if (m != NULL) { + if (m != nullptr) { assert(!Matcher::is_reg2reg_move(m), "no MoveVec nodes allowed"); for (uint j = 0; j < m->num_opnds(); j++) { assert(!Matcher::is_generic_vector(m->_opnds[j]), "no generic vector operands allowed"); @@ -2822,7 +2822,7 @@ bool Matcher::post_store_load_barrier(const Node* vmb) { const MemBarNode* membar = vmb->as_MemBar(); // Get the Ideal Proj node, ctrl, that can be used to iterate forward - Node* ctrl = NULL; + Node* ctrl = nullptr; for (DUIterator_Fast imax, i = membar->fast_outs(imax); i < imax; i++) { Node* p = membar->fast_out(i); assert(p->is_Proj(), "only projections here"); @@ -2832,7 +2832,7 @@ bool Matcher::post_store_load_barrier(const Node* vmb) { break; } } - assert((ctrl != NULL), "missing control projection"); + assert((ctrl != nullptr), "missing control projection"); for (DUIterator_Fast jmax, j = ctrl->fast_outs(jmax); j < jmax; j++) { Node *x = ctrl->fast_out(j); @@ -2905,7 +2905,7 @@ bool Matcher::branches_to_uncommon_trap(const Node *n) { assert(n->is_If(), "You should only call this on if nodes."); IfNode *ifn = n->as_If(); - Node *ifFalse = NULL; + Node *ifFalse = nullptr; for (DUIterator_Fast imax, i = ifn->fast_outs(imax); i < imax; i++) { if (ifn->fast_out(i)->is_IfFalse()) { ifFalse = ifn->fast_out(i); @@ -2917,9 +2917,9 @@ bool Matcher::branches_to_uncommon_trap(const Node *n) { Node *reg = ifFalse; int cnt = 4; // We must protect against cycles. Limit to 4 iterations. // Alternatively use visited set? Seems too expensive. 
- while (reg != NULL && cnt > 0) { - CallNode *call = NULL; - RegionNode *nxt_reg = NULL; + while (reg != nullptr && cnt > 0) { + CallNode *call = nullptr; + RegionNode *nxt_reg = nullptr; for (DUIterator_Fast imax, i = reg->fast_outs(imax); i < imax; i++) { Node *o = reg->fast_out(i); if (o->is_Call()) { diff --git a/src/hotspot/share/opto/matcher.hpp b/src/hotspot/share/opto/matcher.hpp index 2de901c6135..1b86b3f4f1f 100644 --- a/src/hotspot/share/opto/matcher.hpp +++ b/src/hotspot/share/opto/matcher.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -149,10 +149,10 @@ private: // Accessors for the inherited field PhaseTransform::_nodes: void grow_new_node_array(uint idx_limit) { - _nodes.map(idx_limit-1, NULL); + _nodes.map(idx_limit-1, nullptr); } bool has_new_node(const Node* n) const { - return _nodes.at(n->_idx) != NULL; + return _nodes.at(n->_idx) != nullptr; } Node* new_node(const Node* n) const { assert(has_new_node(n), "set before get"); @@ -170,7 +170,7 @@ private: Node* _mem_node; // Ideal memory node consumed by mach node #endif - // Mach node for ConP #NULL + // Mach node for ConP #null MachNode* _mach_null; void handle_precedence_edges(Node* n, MachNode *mach); diff --git a/src/hotspot/share/opto/mathexactnode.cpp b/src/hotspot/share/opto/mathexactnode.cpp index dddac5e581e..4c065e5cf52 100644 --- a/src/hotspot/share/opto/mathexactnode.cpp +++ b/src/hotspot/share/opto/mathexactnode.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -192,8 +192,8 @@ struct IdealHelper { const Type* type1 = phase->type(arg1); const Type* type2 = phase->type(arg2); - if (type1 == NULL || type2 == NULL) { - return NULL; + if (type1 == nullptr || type2 == nullptr) { + return nullptr; } if (type1 != Type::TOP && type1->singleton() && @@ -204,9 +204,9 @@ struct IdealHelper { Node* con_result = ConINode::make(0); return con_result; } - return NULL; + return nullptr; } - return NULL; + return nullptr; } static const Type* Value(const OverflowOp* node, PhaseTransform* phase) { @@ -218,7 +218,7 @@ struct IdealHelper { const TypeClass* i1 = TypeClass::as_self(t1); const TypeClass* i2 = TypeClass::as_self(t2); - if (i1 == NULL || i2 == NULL) { + if (i1 == nullptr || i2 == nullptr) { return TypeInt::CC; } diff --git a/src/hotspot/share/opto/memnode.cpp b/src/hotspot/share/opto/memnode.cpp index 61d31225093..f9a66090e25 100644 --- a/src/hotspot/share/opto/memnode.cpp +++ b/src/hotspot/share/opto/memnode.cpp @@ -65,14 +65,14 @@ uint MemNode::size_of() const { return sizeof(*this); } const TypePtr *MemNode::adr_type() const { Node* adr = in(Address); - if (adr == NULL) return NULL; // node is dead - const TypePtr* cross_check = NULL; + if (adr == nullptr) return nullptr; // node is dead + const TypePtr* cross_check = nullptr; DEBUG_ONLY(cross_check = _adr_type); return calculate_adr_type(adr->bottom_type(), cross_check); } bool MemNode::check_if_adr_maybe_raw(Node* adr) { - if (adr != NULL) { + if (adr != nullptr) { if (adr->bottom_type()->base() == Type::RawPtr || adr->bottom_type()->base() == Type::AnyPtr) { return true; } @@ -82,11 +82,11 @@ bool MemNode::check_if_adr_maybe_raw(Node* adr) { #ifndef PRODUCT void MemNode::dump_spec(outputStream *st) const { - if (in(Address) == NULL) return; // node is dead + if (in(Address) == nullptr) return; // node is dead #ifndef ASSERT // fake the missing field - const TypePtr* _adr_type = NULL; - if 
(in(Address) != NULL) + const TypePtr* _adr_type = nullptr; + if (in(Address) != nullptr) _adr_type = in(Address)->bottom_type()->isa_ptr(); #endif dump_adr_type(this, _adr_type, st); @@ -108,14 +108,14 @@ void MemNode::dump_spec(outputStream *st) const { void MemNode::dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st) { st->print(" @"); - if (adr_type == NULL) { - st->print("NULL"); + if (adr_type == nullptr) { + st->print("null"); } else { adr_type->dump_on(st); Compile* C = Compile::current(); - Compile::AliasType* atp = NULL; + Compile::AliasType* atp = nullptr; if (C->have_alias_type(adr_type)) atp = C->alias_type(adr_type); - if (atp == NULL) + if (atp == nullptr) st->print(", idx=?\?;"); else if (atp->index() == Compile::AliasIdxBot) st->print(", idx=Bot;"); @@ -139,16 +139,16 @@ extern void print_alias_types(); #endif Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase) { - assert((t_oop != NULL), "sanity"); + assert((t_oop != nullptr), "sanity"); bool is_instance = t_oop->is_known_instance_field(); bool is_boxed_value_load = t_oop->is_ptr_to_boxed_value() && - (load != NULL) && load->is_Load() && - (phase->is_IterGVN() != NULL); + (load != nullptr) && load->is_Load() && + (phase->is_IterGVN() != nullptr); if (!(is_instance || is_boxed_value_load)) return mchain; // don't try to optimize non-instance types uint instance_id = t_oop->instance_id(); Node *start_mem = phase->C->start()->proj_out_or_null(TypeFunc::Memory); - Node *prev = NULL; + Node *prev = nullptr; Node *result = mchain; while (prev != result) { prev = result; @@ -169,7 +169,7 @@ Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oo AllocateNode* alloc = proj_in->as_Initialize()->allocation(); // Stop if this is the initialization for the object instance which // contains this memory slice, otherwise skip over it. 
- if ((alloc == NULL) || (alloc->_idx == instance_id)) { + if ((alloc == nullptr) || (alloc->_idx == instance_id)) { break; } if (is_instance) { @@ -182,7 +182,7 @@ Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oo } } } else if (proj_in->is_MemBar()) { - ArrayCopyNode* ac = NULL; + ArrayCopyNode* ac = nullptr; if (ArrayCopyNode::may_modify(t_oop, proj_in->as_MemBar(), phase, ac)) { break; } @@ -200,7 +200,7 @@ Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oo } // Otherwise skip it (the call updated 'result' value). } else if (result->is_MergeMem()) { - result = step_through_mergemem(phase, result->as_MergeMem(), t_oop, NULL, tty); + result = step_through_mergemem(phase, result->as_MergeMem(), t_oop, nullptr, tty); } } return result; @@ -208,12 +208,12 @@ Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oo Node *MemNode::optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase) { const TypeOopPtr* t_oop = t_adr->isa_oopptr(); - if (t_oop == NULL) + if (t_oop == nullptr) return mchain; // don't try to optimize non-oop types Node* result = optimize_simple_memory_chain(mchain, t_oop, load, phase); bool is_instance = t_oop->is_known_instance_field(); PhaseIterGVN *igvn = phase->is_IterGVN(); - if (is_instance && igvn != NULL && result->is_Phi()) { + if (is_instance && igvn != nullptr && result->is_Phi()) { PhiNode *mphi = result->as_Phi(); assert(mphi->bottom_type() == Type::MEMORY, "memory phi required"); const TypePtr *t = mphi->adr_type(); @@ -253,10 +253,10 @@ static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem, const T { // Check that current type is consistent with the alias index used during graph construction assert(alias_idx >= Compile::AliasIdxRaw, "must not be a bad alias_idx"); - bool consistent = adr_check == NULL || adr_check->empty() || + bool consistent = adr_check == nullptr || adr_check->empty() || 
phase->C->must_alias(adr_check, alias_idx ); // Sometimes dead array references collapse to a[-1], a[-2], or a[-3] - if( !consistent && adr_check != NULL && !adr_check->empty() && + if( !consistent && adr_check != nullptr && !adr_check->empty() && tp->isa_aryptr() && tp->offset() == Type::OffsetBot && adr_check->isa_aryptr() && adr_check->offset() != Type::OffsetBot && ( adr_check->offset() == arrayOopDesc::length_offset_in_bytes() || @@ -267,8 +267,8 @@ static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem, const T } if( !consistent ) { st->print("alias_idx==%d, adr_check==", alias_idx); - if( adr_check == NULL ) { - st->print("NULL"); + if( adr_check == nullptr ) { + st->print("null"); } else { adr_check->dump(); } @@ -313,9 +313,9 @@ Node *MemNode::Ideal_common(PhaseGVN *phase, bool can_reshape) { PhaseIterGVN *igvn = phase->is_IterGVN(); // Wait if control on the worklist. - if (ctl && can_reshape && igvn != NULL) { - Node* bol = NULL; - Node* cmp = NULL; + if (ctl && can_reshape && igvn != nullptr) { + Node* bol = nullptr; + Node* cmp = nullptr; if (ctl->in(0)->is_If()) { assert(ctl->is_IfTrue() || ctl->is_IfFalse(), "sanity"); bol = ctl->in(0)->in(1); @@ -323,34 +323,34 @@ Node *MemNode::Ideal_common(PhaseGVN *phase, bool can_reshape) { cmp = ctl->in(0)->in(1)->in(1); } if (igvn->_worklist.member(ctl) || - (bol != NULL && igvn->_worklist.member(bol)) || - (cmp != NULL && igvn->_worklist.member(cmp)) ) { + (bol != nullptr && igvn->_worklist.member(bol)) || + (cmp != nullptr && igvn->_worklist.member(cmp)) ) { // This control path may be dead. // Delay this memory node transformation until the control is processed. 
igvn->_worklist.push(this); - return NodeSentinel; // caller will return NULL + return NodeSentinel; // caller will return null } } // Ignore if memory is dead, or self-loop Node *mem = in(MemNode::Memory); - if (phase->type( mem ) == Type::TOP) return NodeSentinel; // caller will return NULL + if (phase->type( mem ) == Type::TOP) return NodeSentinel; // caller will return null assert(mem != this, "dead loop in MemNode::Ideal"); - if (can_reshape && igvn != NULL && igvn->_worklist.member(mem)) { + if (can_reshape && igvn != nullptr && igvn->_worklist.member(mem)) { // This memory slice may be dead. // Delay this mem node transformation until the memory is processed. igvn->_worklist.push(this); - return NodeSentinel; // caller will return NULL + return NodeSentinel; // caller will return null } Node *address = in(MemNode::Address); const Type *t_adr = phase->type(address); - if (t_adr == Type::TOP) return NodeSentinel; // caller will return NULL + if (t_adr == Type::TOP) return NodeSentinel; // caller will return null if (can_reshape && is_unsafe_access() && (t_adr == TypePtr::NULL_PTR)) { // Unsafe off-heap access with zero address. Remove access and other control users // to not confuse optimizations and add a HaltNode to fail if this is ever executed. - assert(ctl != NULL, "unsafe accesses should be control dependent"); + assert(ctl != nullptr, "unsafe accesses should be control dependent"); for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) { Node* u = ctl->fast_out(i); if (u != ctl) { @@ -365,13 +365,13 @@ Node *MemNode::Ideal_common(PhaseGVN *phase, bool can_reshape) { return this; } - if (can_reshape && igvn != NULL && + if (can_reshape && igvn != nullptr && (igvn->_worklist.member(address) || (igvn->_worklist.size() > 0 && t_adr != adr_type())) ) { // The address's base and type may change when the address is processed. // Delay this mem node transformation until the address is processed. 
igvn->_worklist.push(this); - return NodeSentinel; // caller will return NULL + return NodeSentinel; // caller will return null } // Do NOT remove or optimize the next lines: ensure a new alias index @@ -381,15 +381,15 @@ Node *MemNode::Ideal_common(PhaseGVN *phase, bool can_reshape) { int alias_idx = phase->C->get_alias_index(t_adr->is_ptr()); } - Node* base = NULL; + Node* base = nullptr; if (address->is_AddP()) { base = address->in(AddPNode::Base); } - if (base != NULL && phase->type(base)->higher_equal(TypePtr::NULL_PTR) && + if (base != nullptr && phase->type(base)->higher_equal(TypePtr::NULL_PTR) && !t_adr->isa_rawptr()) { // Note: raw address has TOP base and top->higher_equal(TypePtr::NULL_PTR) is true. // Skip this node optimization if its address has TOP base. - return NodeSentinel; // caller will return NULL + return NodeSentinel; // caller will return null } // Avoid independent memory operations @@ -417,7 +417,7 @@ Node *MemNode::Ideal_common(PhaseGVN *phase, bool can_reshape) { } // let the subclass continue analyzing... - return NULL; + return nullptr; } // Helper function for proving some simple control dominations. @@ -428,12 +428,12 @@ Node *MemNode::Ideal_common(PhaseGVN *phase, bool can_reshape) { // control input of a memory operation predates (dominates) // an allocation it wants to look past. bool MemNode::all_controls_dominate(Node* dom, Node* sub) { - if (dom == NULL || dom->is_top() || sub == NULL || sub->is_top()) + if (dom == nullptr || dom->is_top() || sub == nullptr || sub->is_top()) return false; // Conservative answer for dead code // Check 'dom'. Skip Proj and CatchProj nodes. dom = dom->find_exact_control(dom); - if (dom == NULL || dom->is_top()) + if (dom == nullptr || dom->is_top()) return false; // Conservative answer for dead code if (dom == sub) { @@ -450,14 +450,14 @@ bool MemNode::all_controls_dominate(Node* dom, Node* sub) { // Currently 'sub' is either Allocate, Initialize or Start nodes. 
// Or Region for the check in LoadNode::Ideal(); - // 'sub' should have sub->in(0) != NULL. + // 'sub' should have sub->in(0) != nullptr. assert(sub->is_Allocate() || sub->is_Initialize() || sub->is_Start() || sub->is_Region() || sub->is_Call(), "expecting only these nodes"); // Get control edge of 'sub'. Node* orig_sub = sub; sub = sub->find_exact_control(sub->in(0)); - if (sub == NULL || sub->is_top()) + if (sub == nullptr || sub->is_top()) return false; // Conservative answer for dead code assert(sub->is_CFG(), "expecting control"); @@ -485,7 +485,7 @@ bool MemNode::all_controls_dominate(Node* dom, Node* sub) { if (!n->is_CFG() && n->pinned()) { // Check only own control edge for pinned non-control nodes. n = n->find_exact_control(n->in(0)); - if (n == NULL || n->is_top()) + if (n == nullptr || n->is_top()) return false; // Conservative answer for dead code assert(n->is_CFG(), "expecting control"); dom_list.push(n); @@ -499,7 +499,7 @@ bool MemNode::all_controls_dominate(Node* dom, Node* sub) { } else { // First, own control edge. Node* m = n->find_exact_control(n->in(0)); - if (m != NULL) { + if (m != nullptr) { if (m->is_top()) return false; // Conservative answer for dead code dom_list.push(m); @@ -508,7 +508,7 @@ bool MemNode::all_controls_dominate(Node* dom, Node* sub) { uint cnt = n->req(); for (uint i = 1; i < cnt; i++) { m = n->find_exact_control(n->in(i)); - if (m == NULL || m->is_top()) + if (m == nullptr || m->is_top()) continue; dom_list.push(m); } @@ -530,14 +530,14 @@ bool MemNode::detect_ptr_independence(Node* p1, AllocateNode* a1, // They may both manifestly be allocations, and they should differ. // Or, if they are not both allocations, they can be distinct constants. // Otherwise, one is an allocation and the other a pre-existing value. 
- if (a1 == NULL && a2 == NULL) { // neither an allocation + if (a1 == nullptr && a2 == nullptr) { // neither an allocation return (p1 != p2) && p1->is_Con() && p2->is_Con(); - } else if (a1 != NULL && a2 != NULL) { // both allocations + } else if (a1 != nullptr && a2 != nullptr) { // both allocations return (a1 != a2); - } else if (a1 != NULL) { // one allocation a1 + } else if (a1 != nullptr) { // one allocation a1 // (Note: p2->is_Con implies p2->in(0)->is_Root, which dominates.) return all_controls_dominate(p2, a1); - } else { //(a2 != NULL) // one allocation a2 + } else { //(a2 != null) // one allocation a2 return all_controls_dominate(p1, a2); } return false; @@ -551,10 +551,10 @@ bool MemNode::detect_ptr_independence(Node* p1, AllocateNode* a1, // (c) can_see_stored_value=false and ac cannot have set the value for this load. // In case (c) change the parameter mem to the memory input of ac to skip it // when searching stored value. -// Otherwise return NULL. +// Otherwise return null. Node* LoadNode::find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const { ArrayCopyNode* ac = find_array_copy_clone(phase, ld_alloc, mem); - if (ac != NULL) { + if (ac != nullptr) { Node* ld_addp = in(MemNode::Address); Node* src = ac->in(ArrayCopyNode::Src); const TypeAryPtr* ary_t = phase->type(src)->isa_aryptr(); @@ -562,7 +562,7 @@ Node* LoadNode::find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, N // This is a load from a cloned array. The corresponding arraycopy ac must // have set the value for the load and we can return ac but only if the load // is known to be within bounds. This is checked below. 
- if (ary_t != NULL && ld_addp->is_AddP()) { + if (ary_t != nullptr && ld_addp->is_AddP()) { Node* ld_offs = ld_addp->in(AddPNode::Offset); BasicType ary_elem = ary_t->elem()->array_element_basic_type(); jlong header = arrayOopDesc::base_offset_in_bytes(ary_elem); @@ -578,8 +578,8 @@ Node* LoadNode::find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, N // The load is known to be out-of-bounds. } // The load could be out-of-bounds. It must not be hoisted but must remain - // dependent on the runtime range check. This is achieved by returning NULL. - } else if (mem->is_Proj() && mem->in(0) != NULL && mem->in(0)->is_ArrayCopy()) { + // dependent on the runtime range check. This is achieved by returning null. + } else if (mem->is_Proj() && mem->in(0) != nullptr && mem->in(0)->is_ArrayCopy()) { ArrayCopyNode* ac = mem->in(0)->as_ArrayCopy(); if (ac->is_arraycopy_validated() || @@ -605,18 +605,18 @@ Node* LoadNode::find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, N } } } - return NULL; + return nullptr; } ArrayCopyNode* MemNode::find_array_copy_clone(PhaseTransform* phase, Node* ld_alloc, Node* mem) const { - if (mem->is_Proj() && mem->in(0) != NULL && (mem->in(0)->Opcode() == Op_MemBarStoreStore || + if (mem->is_Proj() && mem->in(0) != nullptr && (mem->in(0)->Opcode() == Op_MemBarStoreStore || mem->in(0)->Opcode() == Op_MemBarCPUOrder)) { - if (ld_alloc != NULL) { + if (ld_alloc != nullptr) { // Check if there is an array copy for a clone Node* mb = mem->in(0); - ArrayCopyNode* ac = NULL; - if (mb->in(0) != NULL && mb->in(0)->is_Proj() && - mb->in(0)->in(0) != NULL && mb->in(0)->in(0)->is_ArrayCopy()) { + ArrayCopyNode* ac = nullptr; + if (mb->in(0) != nullptr && mb->in(0)->is_Proj() && + mb->in(0)->in(0) != nullptr && mb->in(0)->in(0)->is_ArrayCopy()) { ac = mb->in(0)->in(0)->as_ArrayCopy(); } else { // Step over GC barrier when ReduceInitialCardMarks is disabled @@ -628,15 +628,15 @@ ArrayCopyNode* 
MemNode::find_array_copy_clone(PhaseTransform* phase, Node* ld_al } } - if (ac != NULL && ac->is_clonebasic()) { + if (ac != nullptr && ac->is_clonebasic()) { AllocateNode* alloc = AllocateNode::Ideal_allocation(ac->in(ArrayCopyNode::Dest), phase); - if (alloc != NULL && alloc == ld_alloc) { + if (alloc != nullptr && alloc == ld_alloc) { return ac; } } } } - return NULL; + return nullptr; } // The logic for reordering loads and stores uses four steps: @@ -660,7 +660,7 @@ Node* MemNode::find_previous_store(PhaseTransform* phase) { AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase); if (offset == Type::OffsetBot) - return NULL; // cannot unalias unless there are precise offsets + return nullptr; // cannot unalias unless there are precise offsets const bool adr_maybe_raw = check_if_adr_maybe_raw(adr); const TypeOopPtr *addr_t = adr->bottom_type()->isa_oopptr(); @@ -678,7 +678,7 @@ Node* MemNode::find_previous_store(PhaseTransform* phase) { Node* st_adr = mem->in(MemNode::Address); intptr_t st_offset = 0; Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset); - if (st_base == NULL) + if (st_base == nullptr) break; // inscrutable pointer // For raw accesses it's not enough to prove that constant offsets don't intersect. 
@@ -721,13 +721,13 @@ Node* MemNode::find_previous_store(PhaseTransform* phase) { } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) { InitializeNode* st_init = mem->in(0)->as_Initialize(); AllocateNode* st_alloc = st_init->allocation(); - if (st_alloc == NULL) + if (st_alloc == nullptr) break; // something degenerated bool known_identical = false; bool known_independent = false; if (alloc == st_alloc) known_identical = true; - else if (alloc != NULL) + else if (alloc != nullptr) known_independent = true; else if (all_controls_dominate(this, st_alloc)) known_independent = true; @@ -752,14 +752,14 @@ Node* MemNode::find_previous_store(PhaseTransform* phase) { return mem; // let caller handle steps (c), (d) } - } else if (find_previous_arraycopy(phase, alloc, mem, false) != NULL) { + } else if (find_previous_arraycopy(phase, alloc, mem, false) != nullptr) { if (prev != mem) { // Found an arraycopy but it doesn't affect that load continue; } // Found an arraycopy that may affect that load return mem; - } else if (addr_t != NULL && addr_t->is_known_instance_field()) { + } else if (addr_t != nullptr && addr_t->is_known_instance_field()) { // Can't use optimize_simple_memory_chain() since it needs PhaseGVN. if (mem->is_Proj() && mem->in(0)->is_Call()) { // ArrayCopyNodes processed here as well. @@ -769,7 +769,7 @@ Node* MemNode::find_previous_store(PhaseTransform* phase) { continue; // (a) advance through independent call memory } } else if (mem->is_Proj() && mem->in(0)->is_MemBar()) { - ArrayCopyNode* ac = NULL; + ArrayCopyNode* ac = nullptr; if (ArrayCopyNode::may_modify(addr_t, mem->in(0)->as_MemBar(), phase, ac)) { break; } @@ -796,26 +796,26 @@ Node* MemNode::find_previous_store(PhaseTransform* phase) { break; } - return NULL; // bail out + return nullptr; // bail out } //----------------------calculate_adr_type------------------------------------- // Helper function. Notices when the given type of address hits top or bottom. 
// Also, asserts a cross-check of the type against the expected address type. const TypePtr* MemNode::calculate_adr_type(const Type* t, const TypePtr* cross_check) { - if (t == Type::TOP) return NULL; // does not touch memory any more? + if (t == Type::TOP) return nullptr; // does not touch memory any more? #ifdef ASSERT - if (!VerifyAliases || VMError::is_error_reported() || Node::in_dump()) cross_check = NULL; + if (!VerifyAliases || VMError::is_error_reported() || Node::in_dump()) cross_check = nullptr; #endif const TypePtr* tp = t->isa_ptr(); - if (tp == NULL) { - assert(cross_check == NULL || cross_check == TypePtr::BOTTOM, "expected memory type must be wide"); + if (tp == nullptr) { + assert(cross_check == nullptr || cross_check == TypePtr::BOTTOM, "expected memory type must be wide"); return TypePtr::BOTTOM; // touches lots of memory } else { #ifdef ASSERT // %%%% [phh] We don't check the alias index if cross_check is // TypeRawPtr::BOTTOM. Needs to be investigated. - if (cross_check != NULL && + if (cross_check != nullptr && cross_check != TypePtr::BOTTOM && cross_check != TypeRawPtr::BOTTOM) { // Recheck the alias index, to see if it has changed (due to a bug). 
@@ -910,11 +910,11 @@ Node* LoadNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypeP adr_type->offset() == arrayOopDesc::length_offset_in_bytes()), "use LoadRangeNode instead"); // Check control edge of raw loads - assert( ctl != NULL || C->get_alias_index(adr_type) != Compile::AliasIdxRaw || + assert( ctl != nullptr || C->get_alias_index(adr_type) != Compile::AliasIdxRaw || // oop will be recorded in oop map if load crosses safepoint rt->isa_oopptr() || is_immutable_value(adr), "raw memory operations should have control edge"); - LoadNode* load = NULL; + LoadNode* load = nullptr; switch (bt) { case T_BOOLEAN: load = new LoadUBNode(ctl, mem, adr, adr_type, rt->is_int(), mo, control_dependency); break; case T_BYTE: load = new LoadBNode (ctl, mem, adr, adr_type, rt->is_int(), mo, control_dependency); break; @@ -940,7 +940,7 @@ Node* LoadNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypeP ShouldNotReachHere(); break; } - assert(load != NULL, "LoadNode should have been created"); + assert(load != nullptr, "LoadNode should have been created"); if (unaligned) { load->set_unaligned_access(); } @@ -966,10 +966,10 @@ uint LoadNode::hash() const { } static bool skip_through_membars(Compile::AliasType* atp, const TypeInstPtr* tp, bool eliminate_boxing) { - if ((atp != NULL) && (atp->index() >= Compile::AliasIdxRaw)) { - bool non_volatile = (atp->field() != NULL) && !atp->field()->is_volatile(); + if ((atp != nullptr) && (atp->index() >= Compile::AliasIdxRaw)) { + bool non_volatile = (atp->field() != nullptr) && !atp->field()->is_volatile(); bool is_stable_ary = FoldStableValues && - (tp != NULL) && (tp->isa_aryptr() != NULL) && + (tp != nullptr) && (tp->isa_aryptr() != nullptr) && tp->isa_aryptr()->is_stable(); return (eliminate_boxing && non_volatile) || is_stable_ary; @@ -986,7 +986,7 @@ Node* LoadNode::can_see_arraycopy_value(Node* st, PhaseGVN* phase) const { intptr_t ld_off = 0; AllocateNode* ld_alloc = 
AllocateNode::Ideal_allocation(ld_adr, phase, ld_off); Node* ac = find_previous_arraycopy(phase, ld_alloc, st, true); - if (ac != NULL) { + if (ac != nullptr) { assert(ac->is_ArrayCopy(), "what kind of node can this be?"); Node* mem = ac->in(TypeFunc::Memory); @@ -994,13 +994,13 @@ Node* LoadNode::can_see_arraycopy_value(Node* st, PhaseGVN* phase) const { Node* src = ac->in(ArrayCopyNode::Src); if (!ac->as_ArrayCopy()->is_clonebasic() && !phase->type(src)->isa_aryptr()) { - return NULL; + return nullptr; } LoadNode* ld = clone()->as_Load(); Node* addp = in(MemNode::Address)->clone(); if (ac->as_ArrayCopy()->is_clonebasic()) { - assert(ld_alloc != NULL, "need an alloc"); + assert(ld_alloc != nullptr, "need an alloc"); assert(addp->is_AddP(), "address must be addp"); BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); assert(bs->step_over_gc_barrier(addp->in(AddPNode::Base)) == bs->step_over_gc_barrier(ac->in(ArrayCopyNode::Dest)), "strange pattern"); @@ -1043,7 +1043,7 @@ Node* LoadNode::can_see_arraycopy_value(Node* st, PhaseGVN* phase) const { ld->_control_dependency = UnknownControl; return ld; } - return NULL; + return nullptr; } @@ -1060,11 +1060,11 @@ Node* MemNode::can_see_stored_value(Node* st, PhaseTransform* phase) const { Node* ld_base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ld_off); Node* ld_alloc = AllocateNode::Ideal_allocation(ld_base, phase); const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr(); - Compile::AliasType* atp = (tp != NULL) ? phase->C->alias_type(tp) : NULL; + Compile::AliasType* atp = (tp != nullptr) ? phase->C->alias_type(tp) : nullptr; // This is more general than load from boxing objects. if (skip_through_membars(atp, tp, phase->C->eliminate_boxing())) { uint alias_idx = atp->index(); - Node* result = NULL; + Node* result = nullptr; Node* current = st; // Skip through chains of MemBarNodes checking the MergeMems for // new states for the slice of this load. 
Stop once any other @@ -1101,7 +1101,7 @@ Node* MemNode::can_see_stored_value(Node* st, PhaseTransform* phase) const { } break; } - if (result != NULL) { + if (result != nullptr) { st = result; } } @@ -1116,11 +1116,11 @@ Node* MemNode::can_see_stored_value(Node* st, PhaseTransform* phase) const { // Try harder before giving up. Unify base pointers with casts (e.g., raw/non-raw pointers). intptr_t st_off = 0; Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_off); - if (ld_base == NULL) return NULL; - if (st_base == NULL) return NULL; - if (!ld_base->eqv_uncast(st_base, /*keep_deps=*/true)) return NULL; - if (ld_off != st_off) return NULL; - if (ld_off == Type::OffsetBot) return NULL; + if (ld_base == nullptr) return nullptr; + if (st_base == nullptr) return nullptr; + if (!ld_base->eqv_uncast(st_base, /*keep_deps=*/true)) return nullptr; + if (ld_off != st_off) return nullptr; + if (ld_off == Type::OffsetBot) return nullptr; // Same base, same offset. // Possible improvement for arrays: check index value instead of absolute offset. @@ -1136,14 +1136,14 @@ Node* MemNode::can_see_stored_value(Node* st, PhaseTransform* phase) const { } // Now prove that we have a LoadQ matched to a StoreQ, for some Q. if (store_Opcode() != st->Opcode()) { - return NULL; + return nullptr; } // LoadVector/StoreVector needs additional check to ensure the types match. if (st->is_StoreVector()) { const TypeVect* in_vt = st->as_StoreVector()->vect_type(); const TypeVect* out_vt = as_LoadVector()->vect_type(); if (in_vt != out_vt) { - return NULL; + return nullptr; } } return st->in(MemNode::ValueIn); @@ -1160,7 +1160,7 @@ Node* MemNode::can_see_stored_value(Node* st, PhaseTransform* phase) const { // can create new nodes. Think of it as lazily manifesting // virtually pre-existing constants.) 
if (memory_type() != T_VOID) { - if (ReduceBulkZeroing || find_array_copy_clone(phase, ld_alloc, in(MemNode::Memory)) == NULL) { + if (ReduceBulkZeroing || find_array_copy_clone(phase, ld_alloc, in(MemNode::Memory)) == nullptr) { // If ReduceBulkZeroing is disabled, we need to check if the allocation does not belong to an // ArrayCopyNode clone. If it does, then we cannot assume zero since the initialization is done // by the ArrayCopyNode. @@ -1176,10 +1176,10 @@ Node* MemNode::can_see_stored_value(Node* st, PhaseTransform* phase) const { if (st->is_Proj() && st->in(0)->is_Initialize()) { InitializeNode* init = st->in(0)->as_Initialize(); AllocateNode* alloc = init->allocation(); - if ((alloc != NULL) && (alloc == ld_alloc)) { + if ((alloc != nullptr) && (alloc == ld_alloc)) { // examine a captured store value st = init->find_captured_store(ld_off, memory_size(), phase); - if (st != NULL) { + if (st != nullptr) { continue; // take one more trip around } } @@ -1187,12 +1187,12 @@ Node* MemNode::can_see_stored_value(Node* st, PhaseTransform* phase) const { // Load boxed value from result of valueOf() call is input parameter. 
if (this->is_Load() && ld_adr->is_AddP() && - (tp != NULL) && tp->is_ptr_to_boxed_value()) { + (tp != nullptr) && tp->is_ptr_to_boxed_value()) { intptr_t ignore = 0; Node* base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ignore); BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); base = bs->step_over_gc_barrier(base); - if (base != NULL && base->is_Proj() && + if (base != nullptr && base->is_Proj() && base->as_Proj()->_con == TypeFunc::Parms && base->in(0)->is_CallStaticJava() && base->in(0)->as_CallStaticJava()->is_boxing_method()) { @@ -1203,7 +1203,7 @@ Node* MemNode::can_see_stored_value(Node* st, PhaseTransform* phase) const { break; } - return NULL; + return nullptr; } //----------------------is_instance_field_load_with_local_phi------------------ @@ -1212,7 +1212,7 @@ bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) { in(Address)->is_AddP() ) { const TypeOopPtr* t_oop = in(Address)->bottom_type()->isa_oopptr(); // Only instances and boxed values. - if( t_oop != NULL && + if( t_oop != nullptr && (t_oop->is_ptr_to_boxed_value() || t_oop->is_known_instance_field()) && t_oop->offset() != Type::OffsetBot && @@ -1266,7 +1266,7 @@ Node* LoadNode::Identity(PhaseGVN* phase) { // Use _idx of address base (could be Phi node) for boxed values. intptr_t ignore = 0; Node* base = AddPNode::Ideal_base_and_offset(in(Address), phase, ignore); - if (base == NULL) { + if (base == nullptr) { return this; } this_iid = base->_idx; @@ -1287,7 +1287,7 @@ Node* LoadNode::Identity(PhaseGVN* phase) { // Construct an equivalent unsigned load. 
Node* LoadNode::convert_to_unsigned_load(PhaseGVN& gvn) { BasicType bt = T_ILLEGAL; - const Type* rt = NULL; + const Type* rt = nullptr; switch (Opcode()) { case Op_LoadUB: return this; case Op_LoadUS: return this; @@ -1295,7 +1295,7 @@ Node* LoadNode::convert_to_unsigned_load(PhaseGVN& gvn) { case Op_LoadS: bt = T_CHAR; rt = TypeInt::CHAR; break; default: assert(false, "no unsigned variant: %s", Name()); - return NULL; + return nullptr; } return LoadNode::make(gvn, in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address), raw_adr_type(), rt, bt, _mo, _control_dependency, @@ -1305,7 +1305,7 @@ Node* LoadNode::convert_to_unsigned_load(PhaseGVN& gvn) { // Construct an equivalent signed load. Node* LoadNode::convert_to_signed_load(PhaseGVN& gvn) { BasicType bt = T_ILLEGAL; - const Type* rt = NULL; + const Type* rt = nullptr; switch (Opcode()) { case Op_LoadUB: bt = T_BYTE; rt = TypeInt::BYTE; break; case Op_LoadUS: bt = T_SHORT; rt = TypeInt::SHORT; break; @@ -1315,7 +1315,7 @@ Node* LoadNode::convert_to_signed_load(PhaseGVN& gvn) { case Op_LoadL: return this; default: assert(false, "no signed variant: %s", Name()); - return NULL; + return nullptr; } return LoadNode::make(gvn, in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address), raw_adr_type(), rt, bt, _mo, _control_dependency, @@ -1339,7 +1339,7 @@ Node* LoadNode::convert_to_reinterpret_load(PhaseGVN& gvn, const Type* rt) { assert(has_reinterpret_variant(rt), "no reinterpret variant: %s %s", Name(), type2name(bt)); bool is_mismatched = is_mismatched_access(); const TypeRawPtr* raw_type = gvn.type(in(MemNode::Memory))->isa_rawptr(); - if (raw_type == NULL) { + if (raw_type == nullptr) { is_mismatched = true; // conservatively match all non-raw accesses as mismatched } const int op = Opcode(); @@ -1373,7 +1373,7 @@ Node* StoreNode::convert_to_reinterpret_store(PhaseGVN& gvn, Node* val, const Ty bool is_mismatched = is_mismatched_access(); const TypeRawPtr* raw_type = 
gvn.type(in(MemNode::Memory))->isa_rawptr(); - if (raw_type == NULL) { + if (raw_type == nullptr) { is_mismatched = true; // conservatively match all non-raw accesses as mismatched } if (is_mismatched) { @@ -1391,11 +1391,11 @@ Node* LoadNode::eliminate_autobox(PhaseIterGVN* igvn) { assert(igvn->C->eliminate_boxing(), "sanity"); intptr_t ignore = 0; Node* base = AddPNode::Ideal_base_and_offset(in(Address), igvn, ignore); - if ((base == NULL) || base->is_Phi()) { + if ((base == nullptr) || base->is_Phi()) { // Push the loads from the phi that comes from valueOf up // through it to allow elimination of the loads and the recovery // of the original value. It is done in split_through_phi(). - return NULL; + return nullptr; } else if (base->is_Load() || (base->is_DecodeN() && base->in(1)->is_Load())) { // Eliminate the load of boxed value for integer types from the cache @@ -1407,17 +1407,17 @@ Node* LoadNode::eliminate_autobox(PhaseIterGVN* igvn) { base = base->in(1); } if (!base->in(Address)->is_AddP()) { - return NULL; // Complex address + return nullptr; // Complex address } AddPNode* address = base->in(Address)->as_AddP(); Node* cache_base = address->in(AddPNode::Base); - if ((cache_base != NULL) && cache_base->is_DecodeN()) { + if ((cache_base != nullptr) && cache_base->is_DecodeN()) { // Get ConP node which is static 'cache' field. 
cache_base = cache_base->in(1); } - if ((cache_base != NULL) && cache_base->is_Con()) { + if ((cache_base != nullptr) && cache_base->is_Con()) { const TypeAryPtr* base_type = cache_base->bottom_type()->isa_aryptr(); - if ((base_type != NULL) && base_type->is_autobox_cache()) { + if ((base_type != nullptr) && base_type->is_autobox_cache()) { Node* elements[4]; int shift = exact_log2(type2aelembytes(T_OBJECT)); int count = address->unpack_offsets(elements, ARRAY_SIZE(elements)); @@ -1442,11 +1442,11 @@ Node* LoadNode::eliminate_autobox(PhaseIterGVN* igvn) { bt == T_INT || bt == T_LONG, "wrong type = %s", type2name(bt)); jlong cache_low = (bt == T_LONG) ? c.as_long() : c.as_int(); if (cache_low != (int)cache_low) { - return NULL; // should not happen since cache is array indexed by value + return nullptr; // should not happen since cache is array indexed by value } jlong offset = arrayOopDesc::base_offset_in_bytes(T_OBJECT) - (cache_low << shift); if (offset != (int)offset) { - return NULL; // should not happen since cache is array indexed by value + return nullptr; // should not happen since cache is array indexed by value } // Add up all the offsets making of the address of the load Node* result = elements[0]; @@ -1498,21 +1498,21 @@ Node* LoadNode::eliminate_autobox(PhaseIterGVN* igvn) { } } } - return NULL; + return nullptr; } static bool stable_phi(PhiNode* phi, PhaseGVN *phase) { Node* region = phi->in(0); - if (region == NULL) { + if (region == nullptr) { return false; // Wait stable graph } uint cnt = phi->req(); for (uint i = 1; i < cnt; i++) { Node* rc = region->in(i); - if (rc == NULL || phase->type(rc) == Type::TOP) + if (rc == nullptr || phase->type(rc) == Type::TOP) return false; // Wait stable graph Node* in = phi->in(i); - if (in == NULL || phase->type(in) == Type::TOP) + if (in == nullptr || phase->type(in) == Type::TOP) return false; // Wait stable graph } return true; @@ -1523,32 +1523,32 @@ Node* LoadNode::split_through_phi(PhaseGVN* phase) { if 
(req() > 3) { assert(is_LoadVector() && Opcode() != Op_LoadVector, "load has too many inputs"); // LoadVector subclasses such as LoadVectorMasked have extra inputs that the logic below doesn't take into account - return NULL; + return nullptr; } Node* mem = in(Memory); Node* address = in(Address); const TypeOopPtr *t_oop = phase->type(address)->isa_oopptr(); - assert((t_oop != NULL) && + assert((t_oop != nullptr) && (t_oop->is_known_instance_field() || t_oop->is_ptr_to_boxed_value()), "invalid conditions"); Compile* C = phase->C; intptr_t ignore = 0; Node* base = AddPNode::Ideal_base_and_offset(address, phase, ignore); - bool base_is_phi = (base != NULL) && base->is_Phi(); + bool base_is_phi = (base != nullptr) && base->is_Phi(); bool load_boxed_values = t_oop->is_ptr_to_boxed_value() && C->aggressive_unboxing() && - (base != NULL) && (base == address->in(AddPNode::Base)) && + (base != nullptr) && (base == address->in(AddPNode::Base)) && phase->type(base)->higher_equal(TypePtr::NOTNULL); if (!((mem->is_Phi() || base_is_phi) && (load_boxed_values || t_oop->is_known_instance_field()))) { - return NULL; // memory is not Phi + return nullptr; // memory is not Phi } if (mem->is_Phi()) { if (!stable_phi(mem->as_Phi(), phase)) { - return NULL; // Wait stable graph + return nullptr; // Wait stable graph } uint cnt = mem->req(); // Check for loop invariant memory. @@ -1573,14 +1573,14 @@ Node* LoadNode::split_through_phi(PhaseGVN* phase) { } if (base_is_phi) { if (!stable_phi(base->as_Phi(), phase)) { - return NULL; // Wait stable graph + return nullptr; // Wait stable graph } uint cnt = base->req(); // Check for loop invariant memory. if (cnt == 3) { for (uint i = 1; i < cnt; i++) { if (base->in(i) == base) { - return NULL; // Wait stable graph + return nullptr; // Wait stable graph } } } @@ -1592,7 +1592,7 @@ Node* LoadNode::split_through_phi(PhaseGVN* phase) { // Do nothing here if Identity will find a value // (to avoid infinite chain of value phis generation). 
if (this != Identity(phase)) { - return NULL; + return nullptr; } // Select Region to split through. @@ -1602,13 +1602,13 @@ Node* LoadNode::split_through_phi(PhaseGVN* phase) { region = mem->in(0); // Skip if the region dominates some control edge of the address. if (!MemNode::all_controls_dominate(address, region)) - return NULL; + return nullptr; } else if (!mem->is_Phi()) { assert(base_is_phi, "sanity"); region = base->in(0); // Skip if the region dominates some control edge of the memory. if (!MemNode::all_controls_dominate(mem, region)) - return NULL; + return nullptr; } else if (base->in(0) != mem->in(0)) { assert(base_is_phi && mem->is_Phi(), "sanity"); if (MemNode::all_controls_dominate(mem, base->in(0))) { @@ -1616,7 +1616,7 @@ Node* LoadNode::split_through_phi(PhaseGVN* phase) { } else if (MemNode::all_controls_dominate(address, mem->in(0))) { region = mem->in(0); } else { - return NULL; // complex graph + return nullptr; // complex graph } } else { assert(base->in(0) == mem->in(0), "sanity"); @@ -1632,17 +1632,17 @@ Node* LoadNode::split_through_phi(PhaseGVN* phase) { this_iid = base->_idx; } PhaseIterGVN* igvn = phase->is_IterGVN(); - Node* phi = new PhiNode(region, this_type, NULL, mem->_idx, this_iid, this_index, this_offset); + Node* phi = new PhiNode(region, this_type, nullptr, mem->_idx, this_iid, this_index, this_offset); for (uint i = 1; i < region->req(); i++) { Node* x; - Node* the_clone = NULL; + Node* the_clone = nullptr; Node* in = region->in(i); if (region->is_CountedLoop() && region->as_Loop()->is_strip_mined() && i == LoopNode::EntryControl && - in != NULL && in->is_OuterStripMinedLoop()) { + in != nullptr && in->is_OuterStripMinedLoop()) { // No node should go in the outer strip mined loop in = in->in(LoopNode::EntryControl); } - if (in == NULL || in == C->top()) { + if (in == nullptr || in == C->top()) { x = C->top(); // Dead path? 
Use a dead data op } else { x = this->clone(); // Else clone up the data op @@ -1651,7 +1651,7 @@ Node* LoadNode::split_through_phi(PhaseGVN* phase) { if (this->in(0) == region) { x->set_req(0, in); } else { - x->set_req(0, NULL); + x->set_req(0, nullptr); } if (mem->is_Phi() && (mem->in(0) == region)) { x->set_req(Memory, mem->in(i)); // Use pre-Phi input for the clone. @@ -1703,7 +1703,7 @@ Node* LoadNode::split_through_phi(PhaseGVN* phase) { } } } - if (x != the_clone && the_clone != NULL) { + if (x != the_clone && the_clone != nullptr) { igvn->remove_dead_node(the_clone); } phi->set_req(i, x); @@ -1718,14 +1718,14 @@ AllocateNode* LoadNode::is_new_object_mark_load(PhaseGVN *phase) const { Node* address = in(MemNode::Address); AllocateNode* alloc = AllocateNode::Ideal_allocation(address, phase); Node* mem = in(MemNode::Memory); - if (alloc != NULL && mem->is_Proj() && - mem->in(0) != NULL && + if (alloc != nullptr && mem->is_Proj() && + mem->in(0) != nullptr && mem->in(0) == alloc->initialization() && - alloc->initialization()->proj_out_or_null(0) != NULL) { + alloc->initialization()->proj_out_or_null(0) != nullptr) { return alloc; } } - return NULL; + return nullptr; } @@ -1736,10 +1736,10 @@ AllocateNode* LoadNode::is_new_object_mark_load(PhaseGVN *phase) const { // try to hook me up to the exact initializing store. Node *LoadNode::Ideal(PhaseGVN *phase, bool can_reshape) { if (has_pinned_control_dependency()) { - return NULL; + return nullptr; } Node* p = MemNode::Ideal_common(phase, can_reshape); - if (p) return (p == NodeSentinel) ? NULL : p; + if (p) return (p == NodeSentinel) ? nullptr : p; Node* ctrl = in(MemNode::Control); Node* address = in(MemNode::Address); @@ -1750,7 +1750,7 @@ Node *LoadNode::Ideal(PhaseGVN *phase, bool can_reshape) { // Skip up past a SafePoint control. Cannot do this for Stores because // pointer stores & cardmarks must stay on the same side of a SafePoint. 
- if( ctrl != NULL && ctrl->Opcode() == Op_SafePoint && + if( ctrl != nullptr && ctrl->Opcode() == Op_SafePoint && phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw && !addr_mark && (depends_only_on_test() || has_unknown_control_dependency())) { @@ -1761,15 +1761,15 @@ Node *LoadNode::Ideal(PhaseGVN *phase, bool can_reshape) { intptr_t ignore = 0; Node* base = AddPNode::Ideal_base_and_offset(address, phase, ignore); - if (base != NULL + if (base != nullptr && phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw) { // Check for useless control edge in some common special cases - if (in(MemNode::Control) != NULL + if (in(MemNode::Control) != nullptr && can_remove_control() && phase->type(base)->higher_equal(TypePtr::NOTNULL) && all_controls_dominate(base, phase->C->start())) { // A method-invariant, non-null address (constant or 'this' argument). - set_req(MemNode::Control, NULL); + set_req(MemNode::Control, nullptr); progress = true; } } @@ -1777,32 +1777,32 @@ Node *LoadNode::Ideal(PhaseGVN *phase, bool can_reshape) { Node* mem = in(MemNode::Memory); const TypePtr *addr_t = phase->type(address)->isa_ptr(); - if (can_reshape && (addr_t != NULL)) { + if (can_reshape && (addr_t != nullptr)) { // try to optimize our memory input Node* opt_mem = MemNode::optimize_memory_chain(mem, addr_t, this, phase); if (opt_mem != mem) { set_req_X(MemNode::Memory, opt_mem, phase); - if (phase->type( opt_mem ) == Type::TOP) return NULL; + if (phase->type( opt_mem ) == Type::TOP) return nullptr; return this; } const TypeOopPtr *t_oop = addr_t->isa_oopptr(); - if ((t_oop != NULL) && + if ((t_oop != nullptr) && (t_oop->is_known_instance_field() || t_oop->is_ptr_to_boxed_value())) { PhaseIterGVN *igvn = phase->is_IterGVN(); - assert(igvn != NULL, "must be PhaseIterGVN when can_reshape is true"); + assert(igvn != nullptr, "must be PhaseIterGVN when can_reshape is true"); if (igvn->_worklist.member(opt_mem)) { // Delay this 
transformation until memory Phi is processed. igvn->_worklist.push(this); - return NULL; + return nullptr; } // Split instance field load through Phi. Node* result = split_through_phi(phase); - if (result != NULL) return result; + if (result != nullptr) return result; if (t_oop->is_ptr_to_boxed_value()) { Node* result = eliminate_autobox(igvn); - if (result != NULL) return result; + if (result != nullptr) return result; } } } @@ -1810,16 +1810,16 @@ Node *LoadNode::Ideal(PhaseGVN *phase, bool can_reshape) { // Is there a dominating load that loads the same value? Leave // anything that is not a load of a field/array element (like // barriers etc.) alone - if (in(0) != NULL && !adr_type()->isa_rawptr() && can_reshape) { + if (in(0) != nullptr && !adr_type()->isa_rawptr() && can_reshape) { for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) { Node *use = mem->fast_out(i); if (use != this && use->Opcode() == Opcode() && - use->in(0) != NULL && + use->in(0) != nullptr && use->in(0) != in(0) && use->in(Address) == in(Address)) { Node* ctl = in(0); - for (int i = 0; i < 10 && ctl != NULL; i++) { + for (int i = 0; i < 10 && ctl != nullptr; i++) { ctl = IfNode::up_one_dom(ctl); if (ctl == use->in(0)) { set_req(0, use->in(0)); @@ -1843,14 +1843,14 @@ Node *LoadNode::Ideal(PhaseGVN *phase, bool can_reshape) { // the alias index stuff. So instead, peek through Stores and IFF we can // fold up, do so. Node* prev_mem = find_previous_store(phase); - if (prev_mem != NULL) { + if (prev_mem != nullptr) { Node* value = can_see_arraycopy_value(prev_mem, phase); - if (value != NULL) { + if (value != nullptr) { return value; } } // Steps (a), (b): Walk past independent stores to find an exact match. - if (prev_mem != NULL && prev_mem != in(MemNode::Memory)) { + if (prev_mem != nullptr && prev_mem != in(MemNode::Memory)) { // (c) See if we can fold up on the spot, but don't fold up here. 
// Fold-up might require truncation (for LoadB/LoadS/LoadUS) or // just return a prior value, which is done by Identity calls. @@ -1861,7 +1861,7 @@ Node *LoadNode::Ideal(PhaseGVN *phase, bool can_reshape) { } } - return progress ? this : NULL; + return progress ? this : nullptr; } // Helper to recognize certain Klass fields which are invariant across @@ -1888,7 +1888,7 @@ LoadNode::load_array_final_field(const TypeKlassPtr *tkls, } // No match. - return NULL; + return nullptr; } //------------------------------Value----------------------------------------- @@ -1899,7 +1899,7 @@ const Type* LoadNode::Value(PhaseGVN* phase) const { if (t1 == Type::TOP) return Type::TOP; Node* adr = in(MemNode::Address); const TypePtr* tp = phase->type(adr)->isa_ptr(); - if (tp == NULL || tp->empty()) return Type::TOP; + if (tp == nullptr || tp->empty()) return Type::TOP; int off = tp->offset(); assert(off != Type::OffsetTop, "case covered by TypePtr::empty"); Compile* C = phase->C; @@ -1922,12 +1922,12 @@ const Type* LoadNode::Value(PhaseGVN* phase) const { if (FoldStableValues && !is_mismatched_access() && ary->is_stable()) { // Make sure the reference is not into the header and the offset is constant ciObject* aobj = ary->const_oop(); - if (aobj != NULL && off_beyond_header && adr->is_AddP() && off != Type::OffsetBot) { + if (aobj != nullptr && off_beyond_header && adr->is_AddP() && off != Type::OffsetBot) { int stable_dimension = (ary->stable_dimension() > 0 ? 
ary->stable_dimension() - 1 : 0); const Type* con_type = Type::make_constant_from_array_element(aobj->as_array(), off, stable_dimension, memory_type(), is_unsigned()); - if (con_type != NULL) { + if (con_type != nullptr) { return con_type; } } @@ -1948,8 +1948,8 @@ const Type* LoadNode::Value(PhaseGVN* phase) const { // In fact, that could have been the original type of p1, and p1 could have // had an original form like p1:(AddP x x (LShiftL quux 3)), where the // expression (LShiftL quux 3) independently optimized to the constant 8. - if ((t->isa_int() == NULL) && (t->isa_long() == NULL) - && (_type->isa_vect() == NULL) + if ((t->isa_int() == nullptr) && (t->isa_long() == nullptr) + && (_type->isa_vect() == nullptr) && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) { // t might actually be lower than _type, if _type is a unique // concrete subclass of abstract class t. @@ -1964,13 +1964,13 @@ const Type* LoadNode::Value(PhaseGVN* phase) const { if (phase->C->eliminate_boxing() && adr->is_AddP()) { // The pointers in the autobox arrays are always non-null Node* base = adr->in(AddPNode::Base); - if ((base != NULL) && base->is_DecodeN()) { + if ((base != nullptr) && base->is_DecodeN()) { // Get LoadN node which loads IntegerCache.cache field base = base->in(1); } - if ((base != NULL) && base->is_Con()) { + if ((base != nullptr) && base->is_Con()) { const TypeAryPtr* base_type = base->bottom_type()->isa_aryptr(); - if ((base_type != NULL) && base_type->is_autobox_cache()) { + if ((base_type != nullptr) && base_type->is_autobox_cache()) { // It could be narrow oop assert(jt->make_ptr()->ptr() == TypePtr::NotNull,"sanity"); } @@ -1993,9 +1993,9 @@ const Type* LoadNode::Value(PhaseGVN* phase) const { // Optimize loads from constant fields. 
const TypeInstPtr* tinst = tp->is_instptr(); ciObject* const_oop = tinst->const_oop(); - if (!is_mismatched_access() && off != Type::OffsetBot && const_oop != NULL && const_oop->is_instance()) { + if (!is_mismatched_access() && off != Type::OffsetBot && const_oop != nullptr && const_oop->is_instance()) { const Type* con_type = Type::make_constant_from_field(const_oop->as_instance(), off, is_unsigned(), memory_type()); - if (con_type != NULL) { + if (con_type != nullptr) { return con_type; } } @@ -2018,7 +2018,7 @@ const Type* LoadNode::Value(PhaseGVN* phase) const { */ Node* adr2 = adr->in(MemNode::Address); const TypeKlassPtr* tkls = phase->type(adr2)->isa_klassptr(); - if (tkls != NULL && !StressReflectiveCode) { + if (tkls != nullptr && !StressReflectiveCode) { if (tkls->is_loaded() && tkls->klass_is_exact() && tkls->offset() == in_bytes(Klass::java_mirror_offset())) { ciKlass* klass = tkls->exact_klass(); assert(adr->Opcode() == Op_LoadP, "must load an oop from _java_mirror"); @@ -2029,7 +2029,7 @@ const Type* LoadNode::Value(PhaseGVN* phase) const { } const TypeKlassPtr *tkls = tp->isa_klassptr(); - if (tkls != NULL) { + if (tkls != nullptr) { if (tkls->is_loaded() && tkls->klass_is_exact()) { ciKlass* klass = tkls->exact_klass(); // We are loading a field from a Klass metaobject whose identity @@ -2052,14 +2052,14 @@ const Type* LoadNode::Value(PhaseGVN* phase) const { return ss ? TypeKlassPtr::make(ss, Type::trust_interfaces) : TypePtr::NULL_PTR; } const Type* aift = load_array_final_field(tkls, klass); - if (aift != NULL) return aift; + if (aift != nullptr) return aift; } // We can still check if we are loading from the primary_supers array at a // shallow enough depth. Even though the klass is not exact, entries less // than or equal to its super depth are correct. 
if (tkls->is_loaded()) { - ciKlass* klass = NULL; + ciKlass* klass = nullptr; if (tkls->isa_instklassptr()) { klass = tkls->is_instklassptr()->instance_klass(); } else { @@ -2070,7 +2070,7 @@ const Type* LoadNode::Value(PhaseGVN* phase) const { klass = ciObjArrayKlass::make(klass, dims); } } - if (klass != NULL) { + if (klass != nullptr) { // Compute index into primary_supers array juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*); // Check for overflowing; use unsigned compare to handle the negative case. @@ -2107,17 +2107,17 @@ const Type* LoadNode::Value(PhaseGVN* phase) const { // if the load is provably beyond the header of the object. // (Also allow a variable load from a fresh array to produce zero.) const TypeOopPtr *tinst = tp->isa_oopptr(); - bool is_instance = (tinst != NULL) && tinst->is_known_instance_field(); - bool is_boxed_value = (tinst != NULL) && tinst->is_ptr_to_boxed_value(); + bool is_instance = (tinst != nullptr) && tinst->is_known_instance_field(); + bool is_boxed_value = (tinst != nullptr) && tinst->is_ptr_to_boxed_value(); if (ReduceFieldZeroing || is_instance || is_boxed_value) { Node* value = can_see_stored_value(mem,phase); - if (value != NULL && value->is_Con()) { + if (value != nullptr && value->is_Con()) { assert(value->bottom_type()->higher_equal(_type),"sanity"); return value->bottom_type(); } } - bool is_vect = (_type->isa_vect() != NULL); + bool is_vect = (_type->isa_vect() != nullptr); if (is_instance && !is_vect) { // If we have an instance type and our memory input is the // programs's initial memory state, there is no matching store, @@ -2131,7 +2131,7 @@ const Type* LoadNode::Value(PhaseGVN* phase) const { } Node* alloc = is_new_object_mark_load(phase); - if (alloc != NULL) { + if (alloc != nullptr) { return TypeX::make(markWord::prototype().value()); } @@ -2154,7 +2154,7 @@ uint LoadNode::match_edge(uint idx) const { Node* LoadBNode::Ideal(PhaseGVN* phase, bool can_reshape) { Node* 
mem = in(MemNode::Memory); Node* value = can_see_stored_value(mem,phase); - if (value != NULL) { + if (value != nullptr) { Node* narrow = Compile::narrow_value(T_BYTE, value, _type, phase, false); if (narrow != value) { return narrow; @@ -2167,7 +2167,7 @@ Node* LoadBNode::Ideal(PhaseGVN* phase, bool can_reshape) { const Type* LoadBNode::Value(PhaseGVN* phase) const { Node* mem = in(MemNode::Memory); Node* value = can_see_stored_value(mem,phase); - if (value != NULL && value->is_Con() && + if (value != nullptr && value->is_Con() && !value->bottom_type()->higher_equal(_type)) { // If the input to the store does not fit with the load's result type, // it must be truncated. We can't delay until Ideal call since @@ -2188,7 +2188,7 @@ const Type* LoadBNode::Value(PhaseGVN* phase) const { Node* LoadUBNode::Ideal(PhaseGVN* phase, bool can_reshape) { Node* mem = in(MemNode::Memory); Node* value = can_see_stored_value(mem, phase); - if (value != NULL) { + if (value != nullptr) { Node* narrow = Compile::narrow_value(T_BOOLEAN, value, _type, phase, false); if (narrow != value) { return narrow; @@ -2201,7 +2201,7 @@ Node* LoadUBNode::Ideal(PhaseGVN* phase, bool can_reshape) { const Type* LoadUBNode::Value(PhaseGVN* phase) const { Node* mem = in(MemNode::Memory); Node* value = can_see_stored_value(mem,phase); - if (value != NULL && value->is_Con() && + if (value != nullptr && value->is_Con() && !value->bottom_type()->higher_equal(_type)) { // If the input to the store does not fit with the load's result type, // it must be truncated. 
We can't delay until Ideal call since @@ -2222,7 +2222,7 @@ const Type* LoadUBNode::Value(PhaseGVN* phase) const { Node* LoadUSNode::Ideal(PhaseGVN* phase, bool can_reshape) { Node* mem = in(MemNode::Memory); Node* value = can_see_stored_value(mem,phase); - if (value != NULL) { + if (value != nullptr) { Node* narrow = Compile::narrow_value(T_CHAR, value, _type, phase, false); if (narrow != value) { return narrow; @@ -2235,7 +2235,7 @@ Node* LoadUSNode::Ideal(PhaseGVN* phase, bool can_reshape) { const Type* LoadUSNode::Value(PhaseGVN* phase) const { Node* mem = in(MemNode::Memory); Node* value = can_see_stored_value(mem,phase); - if (value != NULL && value->is_Con() && + if (value != nullptr && value->is_Con() && !value->bottom_type()->higher_equal(_type)) { // If the input to the store does not fit with the load's result type, // it must be truncated. We can't delay until Ideal call since @@ -2256,7 +2256,7 @@ const Type* LoadUSNode::Value(PhaseGVN* phase) const { Node* LoadSNode::Ideal(PhaseGVN* phase, bool can_reshape) { Node* mem = in(MemNode::Memory); Node* value = can_see_stored_value(mem,phase); - if (value != NULL) { + if (value != nullptr) { Node* narrow = Compile::narrow_value(T_SHORT, value, _type, phase, false); if (narrow != value) { return narrow; @@ -2269,7 +2269,7 @@ Node* LoadSNode::Ideal(PhaseGVN* phase, bool can_reshape) { const Type* LoadSNode::Value(PhaseGVN* phase) const { Node* mem = in(MemNode::Memory); Node* value = can_see_stored_value(mem,phase); - if (value != NULL && value->is_Con() && + if (value != nullptr && value->is_Con() && !value->bottom_type()->higher_equal(_type)) { // If the input to the store does not fit with the load's result type, // it must be truncated. 
We can't delay until Ideal call since @@ -2286,7 +2286,7 @@ const Type* LoadSNode::Value(PhaseGVN* phase) const { Node* LoadKlassNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk) { // sanity check the alias category against the created node type const TypePtr *adr_type = adr->bottom_type()->isa_ptr(); - assert(adr_type != NULL, "expecting TypeKlassPtr"); + assert(adr_type != nullptr, "expecting TypeKlassPtr"); #ifdef _LP64 if (adr_type->is_ptr_to_narrowklass()) { assert(UseCompressedClassPointers, "no compressed klasses"); @@ -2322,7 +2322,7 @@ const Type* LoadNode::klass_value_common(PhaseGVN* phase) const { // Return a more precise klass, if possible const TypeInstPtr *tinst = tp->isa_instptr(); - if (tinst != NULL) { + if (tinst != nullptr) { ciInstanceKlass* ik = tinst->instance_klass(); int offset = tinst->offset(); if (ik == phase->C->env()->Class_klass() @@ -2332,7 +2332,7 @@ const Type* LoadNode::klass_value_common(PhaseGVN* phase) const { // the field which points to the VM's Klass metaobject. ciType* t = tinst->java_mirror_type(); // java_mirror_type returns non-null for compile-time Class constants. - if (t != NULL) { + if (t != nullptr) { // constant oop => constant klass if (offset == java_lang_Class::array_klass_offset()) { if (t->is_void()) { @@ -2343,7 +2343,7 @@ const Type* LoadNode::klass_value_common(PhaseGVN* phase) const { return TypeKlassPtr::make(ciArrayKlass::make(t), Type::trust_interfaces); } if (!t->is_klass()) { - // a primitive Class (e.g., int.class) has NULL for a klass field + // a primitive Class (e.g., int.class) has null for a klass field return TypePtr::NULL_PTR; } // (Folds up the 1st indirection in aClassConstant.getModifiers().) 
@@ -2360,14 +2360,14 @@ const Type* LoadNode::klass_value_common(PhaseGVN* phase) const { // Check for loading klass from an array const TypeAryPtr *tary = tp->isa_aryptr(); - if (tary != NULL && tary->elem() != Type::BOTTOM && + if (tary != nullptr && tary->elem() != Type::BOTTOM && tary->offset() == oopDesc::klass_offset_in_bytes()) { return tary->as_klass_type(true); } // Check for loading klass from an array klass const TypeKlassPtr *tkls = tp->isa_klassptr(); - if (tkls != NULL && !StressReflectiveCode) { + if (tkls != nullptr && !StressReflectiveCode) { if (!tkls->is_loaded()) return _type; // Bail out if not loaded if (tkls->isa_aryklassptr() && tkls->is_aryklassptr()->elem()->isa_klassptr() && @@ -2380,7 +2380,7 @@ const Type* LoadNode::klass_value_common(PhaseGVN* phase) const { // according to the element type's subclassing. return tkls->is_aryklassptr()->elem(); } - if (tkls->isa_instklassptr() != NULL && tkls->klass_is_exact() && + if (tkls->isa_instklassptr() != nullptr && tkls->klass_is_exact() && tkls->offset() == in_bytes(Klass::super_offset())) { ciKlass* sup = tkls->is_instklassptr()->instance_klass()->super(); // The field is Klass::_super. Return its (constant) value. @@ -2409,9 +2409,9 @@ Node* LoadNode::klass_identity_common(PhaseGVN* phase) { Node* adr = in(MemNode::Address); intptr_t offset = 0; Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset); - if (base == NULL) return this; + if (base == nullptr) return this; const TypeOopPtr* toop = phase->type(adr)->isa_oopptr(); - if (toop == NULL) return this; + if (toop == nullptr) return this; // Step over potential GC barrier for OopHandle resolve BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); @@ -2423,7 +2423,7 @@ Node* LoadNode::klass_identity_common(PhaseGVN* phase) { // This works even if the klass is not constant (clone or newArray). 
if (offset == oopDesc::klass_offset_in_bytes()) { Node* allocated_klass = AllocateNode::Ideal_klass(base, phase); - if (allocated_klass != NULL) { + if (allocated_klass != nullptr) { return allocated_klass; } } @@ -2444,7 +2444,7 @@ Node* LoadNode::klass_identity_common(PhaseGVN* phase) { if (base2->is_Load()) { /* direct load of a load which is the OopHandle */ Node* adr2 = base2->in(MemNode::Address); const TypeKlassPtr* tkls = phase->type(adr2)->isa_klassptr(); - if (tkls != NULL && !tkls->empty() + if (tkls != nullptr && !tkls->empty() && (tkls->isa_instklassptr() || tkls->isa_aryklassptr()) && adr2->is_AddP() ) { @@ -2503,22 +2503,22 @@ const Type* LoadRangeNode::Value(PhaseGVN* phase) const { // Feed through the length in AllocateArray(...length...)._length. Node *LoadRangeNode::Ideal(PhaseGVN *phase, bool can_reshape) { Node* p = MemNode::Ideal_common(phase, can_reshape); - if (p) return (p == NodeSentinel) ? NULL : p; + if (p) return (p == NodeSentinel) ? nullptr : p; // Take apart the address into an oop and offset. // Return 'this' if we cannot. Node* adr = in(MemNode::Address); intptr_t offset = 0; Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset); - if (base == NULL) return NULL; + if (base == nullptr) return nullptr; const TypeAryPtr* tary = phase->type(adr)->isa_aryptr(); - if (tary == NULL) return NULL; + if (tary == nullptr) return nullptr; // We can fetch the length directly through an AllocateArrayNode. // This works even if the length is not constant (clone or newArray). 
if (offset == arrayOopDesc::length_offset_in_bytes()) { AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(base, phase); - if (alloc != NULL) { + if (alloc != nullptr) { Node* allocated_length = alloc->Ideal_length(); Node* len = alloc->make_ideal_length(tary, phase); if (allocated_length != len) { @@ -2528,7 +2528,7 @@ Node *LoadRangeNode::Ideal(PhaseGVN *phase, bool can_reshape) { } } - return NULL; + return nullptr; } //------------------------------Identity--------------------------------------- @@ -2542,15 +2542,15 @@ Node* LoadRangeNode::Identity(PhaseGVN* phase) { Node* adr = in(MemNode::Address); intptr_t offset = 0; Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset); - if (base == NULL) return this; + if (base == nullptr) return this; const TypeAryPtr* tary = phase->type(adr)->isa_aryptr(); - if (tary == NULL) return this; + if (tary == nullptr) return this; // We can fetch the length directly through an AllocateArrayNode. // This works even if the length is not constant (clone or newArray). if (offset == arrayOopDesc::length_offset_in_bytes()) { AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(base, phase); - if (alloc != NULL) { + if (alloc != nullptr) { Node* allocated_length = alloc->Ideal_length(); // Do not allow make_ideal_length to allocate a CastII node. 
Node* len = alloc->make_ideal_length(tary, phase, false); @@ -2572,7 +2572,7 @@ StoreNode* StoreNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const assert((mo == unordered || mo == release), "unexpected"); Compile* C = gvn.C; assert(C->get_alias_index(adr_type) != Compile::AliasIdxRaw || - ctl != NULL, "raw memory operations should have control edge"); + ctl != nullptr, "raw memory operations should have control edge"); switch (bt) { case T_BOOLEAN: val = gvn.transform(new AndINode(val, gvn.intcon(0x1))); // Fall through to T_BYTE case @@ -2602,7 +2602,7 @@ StoreNode* StoreNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const } default: ShouldNotReachHere(); - return (StoreNode*)NULL; + return (StoreNode*)nullptr; } } @@ -2626,7 +2626,7 @@ uint StoreNode::hash() const { // try to capture it into the initialization, or hoist it above. Node *StoreNode::Ideal(PhaseGVN *phase, bool can_reshape) { Node* p = MemNode::Ideal_common(phase, can_reshape); - if (p) return (p == NodeSentinel) ? NULL : p; + if (p) return (p == NodeSentinel) ? nullptr : p; Node* mem = in(MemNode::Memory); Node* address = in(MemNode::Address); @@ -2684,7 +2684,7 @@ Node *StoreNode::Ideal(PhaseGVN *phase, bool can_reshape) { Node* moved = init->capture_store(this, offset, phase, can_reshape); // If the InitializeNode captured me, it made a raw copy of me, // and I need to disappear. 
- if (moved != NULL) { + if (moved != nullptr) { // %%% hack to ensure that Ideal returns a new node: mem = MergeMemNode::make(mem); return mem; // fold me away @@ -2705,7 +2705,7 @@ Node *StoreNode::Ideal(PhaseGVN *phase, bool can_reshape) { } } - return NULL; // No further progress + return nullptr; // No further progress } //------------------------------Value----------------------------------------- @@ -2762,9 +2762,9 @@ Node* StoreNode::Identity(PhaseGVN* phase) { // the store may also apply to zero-bits in an earlier object Node* prev_mem = find_previous_store(phase); // Steps (a), (b): Walk past independent stores to find an exact match. - if (prev_mem != NULL) { + if (prev_mem != nullptr) { Node* prev_val = can_see_stored_value(prev_mem, phase); - if (prev_val != NULL && prev_val == val) { + if (prev_val != nullptr && prev_val == val) { // prev_val and val might differ by a cast; it would be good // to keep the more informative of the two. result = mem; @@ -2774,12 +2774,12 @@ Node* StoreNode::Identity(PhaseGVN* phase) { } PhaseIterGVN* igvn = phase->is_IterGVN(); - if (result != this && igvn != NULL) { + if (result != this && igvn != nullptr) { MemBarNode* trailing = trailing_membar(); - if (trailing != NULL) { + if (trailing != nullptr) { #ifdef ASSERT const TypeOopPtr* t_oop = phase->type(in(Address))->isa_oopptr(); - assert(t_oop == NULL || t_oop->is_known_instance_field(), "only for non escaping objects"); + assert(t_oop == nullptr || t_oop->is_known_instance_field(), "only for non escaping objects"); #endif trailing->remove(igvn); } @@ -2815,7 +2815,7 @@ Node *StoreNode::Ideal_masked_input(PhaseGVN *phase, uint mask) { return this; } } - return NULL; + return nullptr; } @@ -2839,7 +2839,7 @@ Node *StoreNode::Ideal_sign_extended_input(PhaseGVN *phase, int num_bits) { } } } - return NULL; + return nullptr; } //------------------------------value_never_loaded----------------------------------- @@ -2850,7 +2850,7 @@ Node 
*StoreNode::Ideal_sign_extended_input(PhaseGVN *phase, int num_bits) { bool StoreNode::value_never_loaded( PhaseTransform *phase) const { Node *adr = in(Address); const TypeOopPtr *adr_oop = phase->type(adr)->isa_oopptr(); - if (adr_oop == NULL) + if (adr_oop == nullptr) return false; if (!adr_oop->is_known_instance_field()) return false; // if not a distinct instance, there may be aliases of the address @@ -2865,13 +2865,13 @@ bool StoreNode::value_never_loaded( PhaseTransform *phase) const { MemBarNode* StoreNode::trailing_membar() const { if (is_release()) { - MemBarNode* trailing_mb = NULL; + MemBarNode* trailing_mb = nullptr; for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) { Node* u = fast_out(i); if (u->is_MemBar()) { if (u->as_MemBar()->trailing_store()) { assert(u->Opcode() == Op_MemBarVolatile, ""); - assert(trailing_mb == NULL, "only one"); + assert(trailing_mb == nullptr, "only one"); trailing_mb = u->as_MemBar(); #ifdef ASSERT Node* leading = u->as_MemBar()->leading_membar(); @@ -2886,7 +2886,7 @@ MemBarNode* StoreNode::trailing_membar() const { } return trailing_mb; } - return NULL; + return nullptr; } @@ -2897,10 +2897,10 @@ MemBarNode* StoreNode::trailing_membar() const { // (a left shift, then right shift) we can skip both. 
Node *StoreBNode::Ideal(PhaseGVN *phase, bool can_reshape){ Node *progress = StoreNode::Ideal_masked_input(phase, 0xFF); - if( progress != NULL ) return progress; + if( progress != nullptr ) return progress; progress = StoreNode::Ideal_sign_extended_input(phase, 24); - if( progress != NULL ) return progress; + if( progress != nullptr ) return progress; // Finally check the default case return StoreNode::Ideal(phase, can_reshape); @@ -2912,10 +2912,10 @@ Node *StoreBNode::Ideal(PhaseGVN *phase, bool can_reshape){ // we can skip the AND operation Node *StoreCNode::Ideal(PhaseGVN *phase, bool can_reshape){ Node *progress = StoreNode::Ideal_masked_input(phase, 0xFFFF); - if( progress != NULL ) return progress; + if( progress != nullptr ) return progress; progress = StoreNode::Ideal_sign_extended_input(phase, 16); - if( progress != NULL ) return progress; + if( progress != nullptr ) return progress; // Finally check the default case return StoreNode::Ideal(phase, can_reshape); @@ -2939,7 +2939,7 @@ Node* StoreCMNode::Identity(PhaseGVN* phase) { //------------------------------Ideal--------------------------------------- Node *StoreCMNode::Ideal(PhaseGVN *phase, bool can_reshape){ Node* progress = StoreNode::Ideal(phase, can_reshape); - if (progress != NULL) return progress; + if (progress != nullptr) return progress; Node* my_store = in(MemNode::OopStore); if (my_store->is_MergeMem()) { @@ -2948,7 +2948,7 @@ Node *StoreCMNode::Ideal(PhaseGVN *phase, bool can_reshape){ return this; } - return NULL; + return nullptr; } //------------------------------Value----------------------------------------- @@ -2967,7 +2967,7 @@ const Type* StoreCMNode::Value(PhaseGVN* phase) const { //----------------------------------SCMemProjNode------------------------------ const Type* SCMemProjNode::Value(PhaseGVN* phase) const { - if (in(0) == NULL || phase->type(in(0)) == Type::TOP) { + if (in(0) == nullptr || phase->type(in(0)) == Type::TOP) { return Type::TOP; } return bottom_type(); @@ 
-3023,13 +3023,13 @@ bool LoadStoreNode::result_not_used() const { } MemBarNode* LoadStoreNode::trailing_membar() const { - MemBarNode* trailing = NULL; + MemBarNode* trailing = nullptr; for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) { Node* u = fast_out(i); if (u->is_MemBar()) { if (u->as_MemBar()->trailing_load_store()) { assert(u->Opcode() == Op_MemBarAcquire, ""); - assert(trailing == NULL, "only one"); + assert(trailing == nullptr, "only one"); trailing = u->as_MemBar(); #ifdef ASSERT Node* leading = trailing->leading_membar(); @@ -3050,7 +3050,7 @@ uint LoadStoreNode::size_of() const { return sizeof(*this); } //============================================================================= //----------------------------------LoadStoreConditionalNode-------------------- -LoadStoreConditionalNode::LoadStoreConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex ) : LoadStoreNode(c, mem, adr, val, NULL, TypeInt::BOOL, 5) { +LoadStoreConditionalNode::LoadStoreConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex ) : LoadStoreNode(c, mem, adr, val, nullptr, TypeInt::BOOL, 5) { init_req(ExpectedIn, ex ); } @@ -3067,7 +3067,7 @@ const Type* LoadStoreConditionalNode::Value(PhaseGVN* phase) const { //-------------------------------adr_type-------------------------------------- const TypePtr* ClearArrayNode::adr_type() const { Node *adr = in(3); - if (adr == NULL) return NULL; // node is dead + if (adr == nullptr) return nullptr; // node is dead return MemNode::calculate_adr_type(adr->bottom_type()); } @@ -3087,36 +3087,36 @@ Node* ClearArrayNode::Identity(PhaseGVN* phase) { // Clearing a short array is faster with stores Node *ClearArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) { // Already know this is a large node, do not try to ideal it - if (_is_large) return NULL; + if (_is_large) return nullptr; const int unit = BytesPerLong; const TypeX* t = phase->type(in(2))->isa_intptr_t(); - if (!t) return NULL; - if 
(!t->is_con()) return NULL; + if (!t) return nullptr; + if (!t->is_con()) return nullptr; intptr_t raw_count = t->get_con(); intptr_t size = raw_count; if (!Matcher::init_array_count_is_in_bytes) size *= unit; // Clearing nothing uses the Identity call. // Negative clears are possible on dead ClearArrays // (see jck test stmt114.stmt11402.val). - if (size <= 0 || size % unit != 0) return NULL; + if (size <= 0 || size % unit != 0) return nullptr; intptr_t count = size / unit; // Length too long; communicate this to matchers and assemblers. // Assemblers are responsible to produce fast hardware clears for it. if (size > InitArrayShortSize) { return new ClearArrayNode(in(0), in(1), in(2), in(3), true); } else if (size > 2 && Matcher::match_rule_supported_vector(Op_ClearArray, 4, T_LONG)) { - return NULL; + return nullptr; } - if (!IdealizeClearArrayNode) return NULL; + if (!IdealizeClearArrayNode) return nullptr; Node *mem = in(1); - if( phase->type(mem)==Type::TOP ) return NULL; + if( phase->type(mem)==Type::TOP ) return nullptr; Node *adr = in(3); const Type* at = phase->type(adr); - if( at==Type::TOP ) return NULL; + if( at==Type::TOP ) return nullptr; const TypePtr* atp = at->isa_ptr(); // adjust atp to be the correct array element address type - if (atp == NULL) atp = TypePtr::BOTTOM; + if (atp == nullptr) atp = TypePtr::BOTTOM; else atp = atp->add_offset(Type::OffsetBot); // Get base for derived pointer purposes if( adr->Opcode() != Op_AddP ) Unimplemented(); @@ -3146,14 +3146,14 @@ bool ClearArrayNode::step_through(Node** np, uint instance_id, PhaseTransform* p // during macro nodes expansion. Before that ClearArray nodes are // only generated in PhaseMacroExpand::generate_arraycopy() (before // Allocate nodes are expanded) which follows allocations. - assert(alloc != NULL, "should have allocation"); + assert(alloc != nullptr, "should have allocation"); if (alloc->_idx == instance_id) { // Can not bypass initialization of the instance we are looking for. 
return false; } // Otherwise skip it. InitializeNode* init = alloc->initialization(); - if (init != NULL) + if (init != nullptr) *np = init->in(TypeFunc::Memory); else *np = alloc->in(TypeFunc::Memory); @@ -3242,7 +3242,7 @@ Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest, //============================================================================= MemBarNode::MemBarNode(Compile* C, int alias_idx, Node* precedent) - : MultiNode(TypeFunc::Parms + (precedent == NULL? 0: 1)), + : MultiNode(TypeFunc::Parms + (precedent == nullptr? 0: 1)), _adr_type(C->get_adr_type(alias_idx)), _kind(Standalone) #ifdef ASSERT , _pair_idx(0) @@ -3253,7 +3253,7 @@ MemBarNode::MemBarNode(Compile* C, int alias_idx, Node* precedent) init_req(TypeFunc::I_O,top); init_req(TypeFunc::FramePtr,top); init_req(TypeFunc::ReturnAdr,top); - if (precedent != NULL) + if (precedent != nullptr) init_req(TypeFunc::Parms, precedent); } @@ -3278,7 +3278,7 @@ MemBarNode* MemBarNode::make(Compile* C, int opcode, int atp, Node* pn) { case Op_MemBarCPUOrder: return new MemBarCPUOrderNode(C, atp, pn); case Op_OnSpinWait: return new OnSpinWaitNode(C, atp, pn); case Op_Initialize: return new InitializeNode(C, atp, pn); - default: ShouldNotReachHere(); return NULL; + default: ShouldNotReachHere(); return nullptr; } } @@ -3289,15 +3289,15 @@ void MemBarNode::remove(PhaseIterGVN *igvn) { } if (trailing_store() || trailing_load_store()) { MemBarNode* leading = leading_membar(); - if (leading != NULL) { + if (leading != nullptr) { assert(leading->trailing_membar() == this, "inconsistent leading/trailing membars"); leading->remove(igvn); } } - if (proj_out_or_null(TypeFunc::Memory) != NULL) { + if (proj_out_or_null(TypeFunc::Memory) != nullptr) { igvn->replace_node(proj_out(TypeFunc::Memory), in(TypeFunc::Memory)); } - if (proj_out_or_null(TypeFunc::Control) != NULL) { + if (proj_out_or_null(TypeFunc::Control) != nullptr) { igvn->replace_node(proj_out(TypeFunc::Control), in(TypeFunc::Control)); } } 
@@ -3309,7 +3309,7 @@ Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) { if (remove_dead_region(phase, can_reshape)) return this; // Don't bother trying to transform a dead node if (in(0) && in(0)->is_top()) { - return NULL; + return nullptr; } bool progress = false; @@ -3321,7 +3321,7 @@ Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) { // Volatile field loads and stores. Node* my_mem = in(MemBarNode::Precedent); // The MembarAquire may keep an unused LoadNode alive through the Precedent edge - if ((my_mem != NULL) && (opc == Op_MemBarAcquire) && (my_mem->outcnt() == 1)) { + if ((my_mem != nullptr) && (opc == Op_MemBarAcquire) && (my_mem->outcnt() == 1)) { // if the Precedent is a decodeN and its input (a Load) is used at more than one place, // replace this Precedent (decodeN) with the Load instead. if ((my_mem->Opcode() == Op_DecodeN) && (my_mem->in(1)->outcnt() > 1)) { @@ -3333,14 +3333,14 @@ Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) { assert(my_mem->unique_out() == this, "sanity"); del_req(Precedent); phase->is_IterGVN()->_worklist.push(my_mem); // remove dead node later - my_mem = NULL; + my_mem = nullptr; } progress = true; } - if (my_mem != NULL && my_mem->is_Mem()) { + if (my_mem != nullptr && my_mem->is_Mem()) { const TypeOopPtr* t_oop = my_mem->in(MemNode::Address)->bottom_type()->isa_oopptr(); // Check for scalar replaced object reference. - if( t_oop != NULL && t_oop->is_known_instance_field() && + if( t_oop != nullptr && t_oop->is_known_instance_field() && t_oop->offset() != Type::OffsetBot && t_oop->offset() != Type::OffsetTop) { eliminate = true; @@ -3349,7 +3349,7 @@ Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) { } else if (opc == Op_MemBarRelease) { // Final field stores. 
Node* alloc = AllocateNode::Ideal_allocation(in(MemBarNode::Precedent), phase); - if ((alloc != NULL) && alloc->is_Allocate() && + if ((alloc != nullptr) && alloc->is_Allocate() && alloc->as_Allocate()->does_not_escape_thread()) { // The allocated object does not escape. eliminate = true; @@ -3364,7 +3364,7 @@ Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) { return new ConINode(TypeInt::ZERO); } } - return progress ? this : NULL; + return progress ? this : nullptr; } //------------------------------Value------------------------------------------ @@ -3384,7 +3384,7 @@ Node *MemBarNode::match( const ProjNode *proj, const Matcher *m ) { return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj); } ShouldNotReachHere(); - return NULL; + return nullptr; } void MemBarNode::set_store_pair(MemBarNode* leading, MemBarNode* trailing) { @@ -3414,7 +3414,7 @@ MemBarNode* MemBarNode::trailing_membar() const { Node* c = trailing; uint i = 0; do { - trailing = NULL; + trailing = nullptr; for (; i < c->outcnt(); i++) { Node* next = c->raw_out(i); if (next != c && next->is_CFG()) { @@ -3429,7 +3429,7 @@ MemBarNode* MemBarNode::trailing_membar() const { break; } } - if (trailing != NULL && !seen.test_set(trailing->_idx)) { + if (trailing != nullptr && !seen.test_set(trailing->_idx)) { break; } while (multis.size() > 0) { @@ -3455,10 +3455,10 @@ MemBarNode* MemBarNode::leading_membar() const { VectorSet seen; Node_Stack regions(0); Node* leading = in(0); - while (leading != NULL && (!leading->is_MemBar() || !leading->as_MemBar()->leading())) { - while (leading == NULL || leading->is_top() || seen.test_set(leading->_idx)) { - leading = NULL; - while (regions.size() > 0 && leading == NULL) { + while (leading != nullptr && (!leading->is_MemBar() || !leading->as_MemBar()->leading())) { + while (leading == nullptr || leading->is_top() || seen.test_set(leading->_idx)) { + leading = nullptr; + while (regions.size() > 0 && leading == nullptr) { Node* r = 
regions.node(); uint i = regions.index(); if (i < r->req()) { @@ -3468,9 +3468,9 @@ MemBarNode* MemBarNode::leading_membar() const { regions.pop(); } } - if (leading == NULL) { + if (leading == nullptr) { assert(regions.size() == 0, "all paths should have been tried"); - return NULL; + return nullptr; } } if (leading->is_Region()) { @@ -3489,7 +3489,7 @@ MemBarNode* MemBarNode::leading_membar() const { if (n->is_Region()) { for (uint j = 1; j < n->req(); j++) { Node* in = n->in(j); - if (in != NULL && !in->is_top()) { + if (in != nullptr && !in->is_top()) { wq.push(in); } } @@ -3499,16 +3499,16 @@ MemBarNode* MemBarNode::leading_membar() const { found++; } else { Node* in = n->in(0); - if (in != NULL && !in->is_top()) { + if (in != nullptr && !in->is_top()) { wq.push(in); } } } } - assert(found == 1 || (found == 0 && leading == NULL), "consistency check failed"); + assert(found == 1 || (found == 0 && leading == nullptr), "consistency check failed"); #endif - if (leading == NULL) { - return NULL; + if (leading == nullptr) { + return nullptr; } MemBarNode* mb = leading->as_MemBar(); assert((mb->_kind == LeadingStore && _kind == TrailingStore) || @@ -3618,7 +3618,7 @@ InitializeNode::InitializeNode(Compile* C, int adr_type, Node* rawoop) assert(adr_type == Compile::AliasIdxRaw, "only valid atp"); assert(in(RawAddress) == rawoop, "proper init"); - // Note: allocation() can be NULL, for secondary initialization barriers + // Note: allocation() can be null, for secondary initialization barriers } // Since this node is not matched, it will be processed by the @@ -3664,7 +3664,7 @@ void InitializeNode::set_complete(PhaseGVN* phase) { // return false if the init contains any stores already bool AllocateNode::maybe_set_complete(PhaseGVN* phase) { InitializeNode* init = initialization(); - if (init == NULL || init->is_complete()) return false; + if (init == nullptr || init->is_complete()) return false; init->remove_extra_zeroes(); // for now, if this allocation has already 
collected any inits, bail: if (init->is_non_zero()) return false; @@ -3694,7 +3694,7 @@ intptr_t InitializeNode::get_store_offset(Node* st, PhaseTransform* phase) { intptr_t offset = -1; Node* base = AddPNode::Ideal_base_and_offset(st->in(MemNode::Address), phase, offset); - if (base == NULL) return -1; // something is dead, + if (base == nullptr) return -1; // something is dead, if (offset < 0) return -1; // dead, dead return offset; } @@ -3717,7 +3717,7 @@ bool InitializeNode::detect_init_independence(Node* value, PhaseGVN* phase) { } Node* n = worklist.at(j); - if (n == NULL) continue; // (can this really happen?) + if (n == nullptr) continue; // (can this really happen?) if (n->is_Proj()) n = n->in(0); if (n == this) return false; // found a cycle if (n->is_Con()) continue; @@ -3730,7 +3730,7 @@ bool InitializeNode::detect_init_independence(Node* value, PhaseGVN* phase) { } Node* ctl = n->in(0); - if (ctl != NULL && !ctl->is_top()) { + if (ctl != nullptr && !ctl->is_top()) { if (ctl->is_Proj()) ctl = ctl->in(0); if (ctl == this) return false; @@ -3746,7 +3746,7 @@ bool InitializeNode::detect_init_independence(Node* value, PhaseGVN* phase) { // Check data edges for possible dependencies on 'this'. for (uint i = 1; i < n->req(); i++) { Node* m = n->in(i); - if (m == NULL || m == n || m->is_top()) continue; + if (m == nullptr || m == n || m->is_top()) continue; // Only process data inputs once worklist.push(m); @@ -3765,7 +3765,7 @@ intptr_t InitializeNode::can_capture_store(StoreNode* st, PhaseGVN* phase, bool if (st->req() != MemNode::ValueIn + 1) return FAIL; // an inscrutable StoreNode (card mark?) 
Node* ctl = st->in(MemNode::Control); - if (!(ctl != NULL && ctl->is_Proj() && ctl->in(0) == this)) + if (!(ctl != nullptr && ctl->is_Proj() && ctl->in(0) == this)) return FAIL; // must be unconditional after the initialization Node* mem = st->in(MemNode::Memory); if (!(mem->is_Proj() && mem->in(0) == this)) @@ -3773,7 +3773,7 @@ intptr_t InitializeNode::can_capture_store(StoreNode* st, PhaseGVN* phase, bool Node* adr = st->in(MemNode::Address); intptr_t offset; AllocateNode* alloc = AllocateNode::Ideal_allocation(adr, phase, offset); - if (alloc == NULL) + if (alloc == nullptr) return FAIL; // inscrutable address if (alloc != allocation()) return FAIL; // wrong allocation! (store needs to float up) @@ -3801,7 +3801,7 @@ intptr_t InitializeNode::can_capture_store(StoreNode* st, PhaseGVN* phase, bool ResourceMark rm; Unique_Node_List mems; mems.push(mem); - Node* unique_merge = NULL; + Node* unique_merge = nullptr; for (uint next = 0; next < mems.size(); ++next) { Node *m = mems.at(next); for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) { @@ -3811,7 +3811,7 @@ intptr_t InitializeNode::can_capture_store(StoreNode* st, PhaseGVN* phase, bool } if (n == st) { continue; - } else if (n->in(0) != NULL && n->in(0) != ctl) { + } else if (n->in(0) != nullptr && n->in(0) != ctl) { // If the control of this use is different from the control // of the Store which is right after the InitializeNode then // this node cannot be between the InitializeNode and the @@ -3833,7 +3833,7 @@ intptr_t InitializeNode::can_capture_store(StoreNode* st, PhaseGVN* phase, bool break; } else { const TypePtr* other_t_adr = phase->type(other_adr)->isa_ptr(); - if (other_t_adr != NULL) { + if (other_t_adr != nullptr) { int other_alias_idx = phase->C->get_alias_index(other_t_adr); if (other_alias_idx == alias_idx) { // A load from the same memory slice as the store right @@ -3844,7 +3844,7 @@ intptr_t InitializeNode::can_capture_store(StoreNode* st, PhaseGVN* phase, bool Node* base = 
other_adr; assert(base->is_AddP(), "should be addp but is %s", base->Name()); base = base->in(AddPNode::Base); - if (base != NULL) { + if (base != nullptr) { base = base->uncast(); if (base->is_Proj() && base->in(0) == alloc) { failed = true; @@ -3889,7 +3889,7 @@ int InitializeNode::captured_store_insertion_point(intptr_t start, if (is_complete()) return FAIL; // arraycopy got here first; punt - assert(allocation() != NULL, "must be present"); + assert(allocation() != nullptr, "must be present"); // no negatives, no header fields: if (start < (intptr_t) allocation()->minimum_header_size()) return FAIL; @@ -3941,7 +3941,7 @@ Node* InitializeNode::find_captured_store(intptr_t start, int size_in_bytes, assert(stores_are_sane(phase), ""); int i = captured_store_insertion_point(start, size_in_bytes, phase); if (i == 0) { - return NULL; // something is dead + return nullptr; // something is dead } else if (i < 0) { return zero_memory(); // just primordial zero bits here } else { @@ -3985,14 +3985,14 @@ Node* InitializeNode::capture_store(StoreNode* st, intptr_t start, PhaseGVN* phase, bool can_reshape) { assert(stores_are_sane(phase), ""); - if (start < 0) return NULL; + if (start < 0) return nullptr; assert(can_capture_store(st, phase, can_reshape) == start, "sanity"); Compile* C = phase->C; int size_in_bytes = st->memory_size(); int i = captured_store_insertion_point(start, size_in_bytes, phase); - if (i == 0) return NULL; // bail out - Node* prev_mem = NULL; // raw memory for the captured store + if (i == 0) return nullptr; // bail out + Node* prev_mem = nullptr; // raw memory for the captured store if (i > 0) { prev_mem = in(i); // there is a pre-existing store under this one set_req(i, C->top()); // temporarily disconnect it @@ -4026,7 +4026,7 @@ Node* InitializeNode::capture_store(StoreNode* st, intptr_t start, // The caller may now kill the old guy. 
DEBUG_ONLY(Node* check_st = find_captured_store(start, size_in_bytes, phase)); - assert(check_st == new_st || check_st == NULL, "must be findable"); + assert(check_st == new_st || check_st == nullptr, "must be findable"); assert(!is_complete(), ""); return new_st; } @@ -4154,7 +4154,7 @@ InitializeNode::coalesce_subword_stores(intptr_t header_size, st = nodes[j]; st_off -= BytesPerInt; con = intcon[0]; - if (con != 0 && st != NULL && st->Opcode() == Op_StoreI) { + if (con != 0 && st != nullptr && st->Opcode() == Op_StoreI) { assert(st_off >= header_size, "still ignoring header"); assert(get_store_offset(st, phase) == st_off, "must be"); assert(in(i-1) == zmem, "must be"); @@ -4163,7 +4163,7 @@ InitializeNode::coalesce_subword_stores(intptr_t header_size, // Undo the effects of the previous loop trip, which swallowed st: intcon[0] = 0; // undo store_constant() set_req(i-1, st); // undo set_req(i, zmem) - nodes[j] = NULL; // undo nodes[j] = st + nodes[j] = nullptr; // undo nodes[j] = st --old_subword; // undo ++old_subword } continue; // This StoreI is already optimal. 
@@ -4200,7 +4200,7 @@ InitializeNode::coalesce_subword_stores(intptr_t header_size, } Node* old = nodes[j]; - assert(old != NULL, "need the prior store"); + assert(old != nullptr, "need the prior store"); intptr_t offset = (j * BytesPerLong); bool split = !Matcher::isSimpleConstant64(con); @@ -4277,7 +4277,7 @@ InitializeNode::coalesce_subword_stores(intptr_t header_size, if (PrintCompilation && WizardMode) tty->print_cr("Changed %d/%d subword/long constants into %d/%d int/long", old_subword, old_long, new_int, new_long); - if (C->log() != NULL) + if (C->log() != nullptr) C->log()->elem("comment that='%d/%d subword/long to %d/%d int/long'", old_subword, old_long, new_int, new_long); @@ -4348,7 +4348,7 @@ Node* InitializeNode::complete_stores(Node* rawctl, Node* rawmem, Node* rawptr, PhaseIterGVN* phase) { assert(!is_complete(), "not already complete"); assert(stores_are_sane(phase), ""); - assert(allocation() != NULL, "must be present"); + assert(allocation() != nullptr, "must be present"); remove_extra_zeroes(); @@ -4451,7 +4451,7 @@ Node* InitializeNode::complete_stores(Node* rawctl, Node* rawmem, Node* rawptr, // a large constant tile can be filled in by smaller non-constant stores. 
assert(st_off >= last_init_off, "inits do not reverse"); last_init_off = st_off; - const Type* val = NULL; + const Type* val = nullptr; if (st_size >= BytesPerInt && (val = phase->type(st->in(MemNode::ValueIn)))->singleton() && (int)val->basic_type() < (int)T_OBJECT) { @@ -4477,8 +4477,8 @@ Node* InitializeNode::complete_stores(Node* rawctl, Node* rawmem, Node* rawptr, intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, max_jint); if (zeroes_done + BytesPerLong >= size_limit) { AllocateNode* alloc = allocation(); - assert(alloc != NULL, "must be present"); - if (alloc != NULL && alloc->Opcode() == Op_Allocate) { + assert(alloc != nullptr, "must be present"); + if (alloc != nullptr && alloc->Opcode() == Op_Allocate) { Node* klass_node = alloc->in(AllocateNode::KlassNode); ciKlass* k = phase->type(klass_node)->is_instklassptr()->instance_klass(); if (zeroes_done == k->layout_helper()) @@ -4500,7 +4500,7 @@ Node* InitializeNode::complete_stores(Node* rawctl, Node* rawmem, Node* rawptr, bool InitializeNode::stores_are_sane(PhaseTransform* phase) { if (is_complete()) return true; // stores could be anything at this point - assert(allocation() != NULL, "must be present"); + assert(allocation() != nullptr, "must be present"); intptr_t last_off = allocation()->minimum_header_size(); for (uint i = InitializeNode::RawStores; i < req(); i++) { Node* st = in(i); @@ -4576,7 +4576,7 @@ bool InitializeNode::stores_are_sane(PhaseTransform* phase) { // memory state has an edge in(AliasIdxBot) which is a "wide" memory state, // containing all alias categories. // -// MergeMem nodes never (?) have control inputs, so in(0) is NULL. +// MergeMem nodes never (?) have control inputs, so in(0) is null. 
// // All other edges in(N) (including in(AliasIdxRaw), which is in(3)) are either // a memory state for the alias type , or else the top node, meaning that @@ -4629,7 +4629,7 @@ Node* MergeMemNode::make_empty_memory() { MergeMemNode::MergeMemNode(Node *new_base) : Node(1+Compile::AliasIdxRaw) { init_class_id(Class_MergeMem); // all inputs are nullified in Node::Node(int) - // set_input(0, NULL); // no control input + // set_input(0, nullptr); // no control input // Initialize the edges uniformly to top, for starters. Node* empty_mem = make_empty_memory(); @@ -4638,7 +4638,7 @@ MergeMemNode::MergeMemNode(Node *new_base) : Node(1+Compile::AliasIdxRaw) { } assert(empty_memory() == empty_mem, ""); - if( new_base != NULL && new_base->is_MergeMem() ) { + if( new_base != nullptr && new_base->is_MergeMem() ) { MergeMemNode* mdef = new_base->as_MergeMem(); assert(mdef->empty_memory() == empty_mem, "consistent sentinels"); for (MergeMemStream mms(this, mdef); mms.next_non_empty2(); ) { @@ -4688,19 +4688,19 @@ Node *MergeMemNode::Ideal(PhaseGVN *phase, bool can_reshape) { // relative to the "in(Bot)". Since we are patching both at the same time, // we have to be careful to read each "in(i)" relative to the old "in(Bot)", // but rewrite each "in(i)" relative to the new "in(Bot)". - Node *progress = NULL; + Node *progress = nullptr; Node* old_base = base_memory(); Node* empty_mem = empty_memory(); if (old_base == empty_mem) - return NULL; // Dead memory path. + return nullptr; // Dead memory path. 
MergeMemNode* old_mbase; - if (old_base != NULL && old_base->is_MergeMem()) + if (old_base != nullptr && old_base->is_MergeMem()) old_mbase = old_base->as_MergeMem(); else - old_mbase = NULL; + old_mbase = nullptr; Node* new_base = old_base; // simplify stacked MergeMems in base memory @@ -4728,10 +4728,10 @@ Node *MergeMemNode::Ideal(PhaseGVN *phase, bool can_reshape) { // simplify stacked MergeMems Node* new_mem = old_mem; MergeMemNode* old_mmem; - if (old_mem != NULL && old_mem->is_MergeMem()) + if (old_mem != nullptr && old_mem->is_MergeMem()) old_mmem = old_mem->as_MergeMem(); else - old_mmem = NULL; + old_mmem = nullptr; if (old_mmem == this) { // This can happen if loops break up and safepoints disappear. // A merge of BotPtr (default) with a RawPtr memory derived from a @@ -4746,7 +4746,7 @@ Node *MergeMemNode::Ideal(PhaseGVN *phase, bool can_reshape) { // from start. Update the input to TOP. new_mem = (new_base == this || new_base == empty_mem)? empty_mem : new_base; } - else if (old_mmem != NULL) { + else if (old_mmem != nullptr) { new_mem = old_mmem->memory_at(i); } // else preceding memory was not a MergeMem @@ -4781,7 +4781,7 @@ Node *MergeMemNode::Ideal(PhaseGVN *phase, bool can_reshape) { if( base_memory()->is_MergeMem() ) { MergeMemNode *new_mbase = base_memory()->as_MergeMem(); Node *m = phase->transform(new_mbase); // Rollup any cycles - if( m != NULL && + if( m != nullptr && (m->is_top() || (m->is_MergeMem() && m->as_MergeMem()->base_memory() == empty_mem)) ) { // propagate rollup of dead cycle to self @@ -4843,7 +4843,7 @@ void MergeMemNode::dump_spec(outputStream *st) const { st->print(" {"); Node* base_mem = base_memory(); for( uint i = Compile::AliasIdxRaw; i < req(); i++ ) { - Node* mem = (in(i) != NULL) ? memory_at(i) : base_mem; + Node* mem = (in(i) != nullptr) ? 
memory_at(i) : base_mem; if (mem == base_mem) { st->print(" -"); continue; } st->print( " N%d:", mem->_idx ); Compile::current()->get_adr_type(i)->dump_on(st); @@ -4867,7 +4867,7 @@ static void verify_memory_slice(const MergeMemNode* m, int alias_idx, Node* n) { if (VMError::is_error_reported()) return; // muzzle asserts when debugging an error if (Node::in_dump()) return; // muzzle asserts when printing assert(alias_idx >= Compile::AliasIdxRaw, "must not disturb base_memory or sentinel"); - assert(n != NULL, ""); + assert(n != nullptr, ""); // Elide intervening MergeMem's while (n->is_MergeMem()) { n = n->as_MergeMem()->memory_at(alias_idx); @@ -4877,7 +4877,7 @@ static void verify_memory_slice(const MergeMemNode* m, int alias_idx, Node* n) { if (n == m->empty_memory()) { // Implicit copy of base_memory() } else if (n_adr_type != TypePtr::BOTTOM) { - assert(n_adr_type != NULL, "new memory must have a well-defined adr_type"); + assert(n_adr_type != nullptr, "new memory must have a well-defined adr_type"); assert(C->must_alias(n_adr_type, alias_idx), "new memory must match selected slice"); } else { // A few places like make_runtime_call "know" that VM calls are narrow, @@ -4913,8 +4913,8 @@ Node* MergeMemNode::memory_at(uint alias_idx) const { // the array is sparse; empty slots are the "top" node n = base_memory(); assert(Node::in_dump() - || n == NULL || n->bottom_type() == Type::TOP - || n->adr_type() == NULL // address is TOP + || n == nullptr || n->bottom_type() == Type::TOP + || n->adr_type() == nullptr // address is TOP || n->adr_type() == TypePtr::BOTTOM || n->adr_type() == TypeRawPtr::BOTTOM || !Compile::current()->do_aliasing(), @@ -4956,7 +4956,7 @@ void MergeMemNode::set_memory_at(uint alias_idx, Node *n) { //--------------------------iteration_setup------------------------------------ void MergeMemNode::iteration_setup(const MergeMemNode* other) { - if (other != NULL) { + if (other != nullptr) { grow_to_match(other); // invariant: the finite support of 
mm2 is within mm->req() #ifdef ASSERT @@ -4967,7 +4967,7 @@ void MergeMemNode::iteration_setup(const MergeMemNode* other) { } // Replace spurious copies of base_memory by top. Node* base_mem = base_memory(); - if (base_mem != NULL && !base_mem->is_top()) { + if (base_mem != nullptr && !base_mem->is_top()) { for (uint i = Compile::AliasIdxBot+1, imax = req(); i < imax; i++) { if (in(i) == base_mem) set_req(i, empty_memory()); @@ -4997,7 +4997,7 @@ bool MergeMemNode::verify_sparse() const { // The following can happen in degenerate cases, since empty==top. if (is_empty_memory(base_mem)) return true; for (uint i = Compile::AliasIdxRaw; i < req(); i++) { - assert(in(i) != NULL, "sane slice"); + assert(in(i) != nullptr, "sane slice"); if (in(i) == base_mem) return false; // should have been the sentinel value! } return true; diff --git a/src/hotspot/share/opto/memnode.hpp b/src/hotspot/share/opto/memnode.hpp index 92040e81c4b..88b9a3a56b0 100644 --- a/src/hotspot/share/opto/memnode.hpp +++ b/src/hotspot/share/opto/memnode.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -37,7 +37,7 @@ class PhaseCCP; class PhaseTransform; //------------------------------MemNode---------------------------------------- -// Load or Store, possibly throwing a NULL pointer exception +// Load or Store, possibly throwing a null pointer exception class MemNode : public Node { private: bool _unaligned_access; // Unaligned access from unsafe @@ -92,7 +92,7 @@ protected: debug_only(_adr_type=at; adr_type();) } - virtual Node* find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const { return NULL; } + virtual Node* find_previous_arraycopy(PhaseTransform* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const { return nullptr; } ArrayCopyNode* find_array_copy_clone(PhaseTransform* phase, Node* ld_alloc, Node* mem) const; static bool check_if_adr_maybe_raw(Node* adr); @@ -111,10 +111,10 @@ public: virtual const class TypePtr *adr_type() const; // returns bottom_type of address // Shared code for Ideal methods: - Node *Ideal_common(PhaseGVN *phase, bool can_reshape); // Return -1 for short-circuit NULL. + Node *Ideal_common(PhaseGVN *phase, bool can_reshape); // Return -1 for short-circuit null. // Helper function for adr_type() implementations. - static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL); + static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = nullptr); // Raw access function, to allow copying of adr_type efficiently in // product builds and retain the debug info for debug builds. @@ -262,13 +262,13 @@ public: virtual const Type *bottom_type() const; // Following method is copied from TypeNode: void set_type(const Type* t) { - assert(t != NULL, "sanity"); + assert(t != nullptr, "sanity"); debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? 
hash() : NO_HASH); *(const Type**)&_type = t; // cast away const-ness // If this node is in the hash table, make sure it doesn't need a rehash. assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code"); } - const Type* type() const { assert(_type != NULL, "sanity"); return _type; }; + const Type* type() const { assert(_type != nullptr, "sanity"); return _type; }; // Do not match memory edge virtual uint match_edge(uint idx) const; @@ -808,7 +808,7 @@ public: virtual const Type *bottom_type() const {return Type::MEMORY;} virtual const TypePtr *adr_type() const { Node* ctrl = in(0); - if (ctrl == NULL) return NULL; // node is dead + if (ctrl == nullptr) return nullptr; // node is dead return ctrl->in(MemNode::Memory)->adr_type(); } virtual uint ideal_reg() const { return 0;} // memory projections don't have a register @@ -1168,7 +1168,7 @@ public: // Optional 'precedent' becomes an extra edge if not null. static MemBarNode* make(Compile* C, int opcode, int alias_idx = Compile::AliasIdxBot, - Node* precedent = NULL); + Node* precedent = nullptr); MemBarNode* trailing_membar() const; MemBarNode* leading_membar() const; @@ -1362,12 +1362,12 @@ public: intptr_t can_capture_store(StoreNode* st, PhaseGVN* phase, bool can_reshape); // Capture another store; reformat it to write my internal raw memory. - // Return the captured copy, else NULL if there is some sort of problem. + // Return the captured copy, else null if there is some sort of problem. Node* capture_store(StoreNode* st, intptr_t start, PhaseGVN* phase, bool can_reshape); // Find captured store which corresponds to the range [start..start+size). // Return my own memory projection (meaning the initial zero bits) - // if there is no such store. Return NULL if there is a problem. + // if there is no such store. Return null if there is a problem. 
Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase); // Called when the associated AllocateNode is expanded into CFG. @@ -1431,7 +1431,7 @@ public: static Node* make_empty_memory(); // where the sentinel comes from bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); } // hook for the iterator, to perform any necessary setup - void iteration_setup(const MergeMemNode* other = NULL); + void iteration_setup(const MergeMemNode* other = nullptr); // push sentinels until I am at least as long as the other (semantic no-op) void grow_to_match(const MergeMemNode* other); bool verify_sparse() const PRODUCT_RETURN0; @@ -1451,7 +1451,7 @@ class MergeMemStream : public StackObj { Node* _mem2; int _cnt2; - void init(MergeMemNode* mm, const MergeMemNode* mm2 = NULL) { + void init(MergeMemNode* mm, const MergeMemNode* mm2 = nullptr) { // subsume_node will break sparseness at times, whenever a memory slice // folds down to a copy of the base ("fat") memory. In such a case, // the raw edge will update to base, although it should be top. @@ -1465,15 +1465,15 @@ class MergeMemStream : public StackObj { // // Also, iteration_setup repairs sparseness. 
assert(mm->verify_sparse(), "please, no dups of base"); - assert(mm2==NULL || mm2->verify_sparse(), "please, no dups of base"); + assert(mm2==nullptr || mm2->verify_sparse(), "please, no dups of base"); _mm = mm; _mm_base = mm->base_memory(); _mm2 = mm2; _cnt = mm->req(); _idx = Compile::AliasIdxBot-1; // start at the base memory - _mem = NULL; - _mem2 = NULL; + _mem = nullptr; + _mem2 = nullptr; } #ifdef ASSERT @@ -1531,7 +1531,7 @@ class MergeMemStream : public StackObj { return _mm_base; } const MergeMemNode* all_memory2() const { - assert(_mm2 != NULL, ""); + assert(_mm2 != nullptr, ""); return _mm2; } bool at_base_memory() const { @@ -1602,7 +1602,7 @@ class MergeMemStream : public StackObj { private: // find the next item, which might be empty bool next(bool have_mm2) { - assert((_mm2 != NULL) == have_mm2, "use other next"); + assert((_mm2 != nullptr) == have_mm2, "use other next"); assert_synch(); if (++_idx < _cnt) { // Note: This iterator allows _mm to be non-sparse. diff --git a/src/hotspot/share/opto/movenode.cpp b/src/hotspot/share/opto/movenode.cpp index 6fdf49dc5ea..950d394d773 100644 --- a/src/hotspot/share/opto/movenode.cpp +++ b/src/hotspot/share/opto/movenode.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -75,12 +75,12 @@ // Return a node which is more "ideal" than the current node. // Move constants to the right. 
Node *CMoveNode::Ideal(PhaseGVN *phase, bool can_reshape) { - if (in(0) != NULL && remove_dead_region(phase, can_reshape)) { + if (in(0) != nullptr && remove_dead_region(phase, can_reshape)) { return this; } // Don't bother trying to transform a dead node - if (in(0) != NULL && in(0)->is_top()) { - return NULL; + if (in(0) != nullptr && in(0)->is_top()) { + return nullptr; } assert(in(Condition) != this && in(IfFalse) != this && @@ -88,14 +88,14 @@ Node *CMoveNode::Ideal(PhaseGVN *phase, bool can_reshape) { if (phase->type(in(Condition)) == Type::TOP || phase->type(in(IfFalse)) == Type::TOP || phase->type(in(IfTrue)) == Type::TOP) { - return NULL; + return nullptr; } // Canonicalize the node by moving constants to the right input. if (in(Condition)->is_Bool() && phase->type(in(IfFalse))->singleton() && !phase->type(in(IfTrue))->singleton()) { BoolNode* b = in(Condition)->as_Bool()->negate(phase); return make(in(Control), phase->transform(b), in(IfTrue), in(IfFalse), _type); } - return NULL; + return nullptr; } //------------------------------is_cmove_id------------------------------------ @@ -108,7 +108,7 @@ Node *CMoveNode::is_cmove_id( PhaseTransform *phase, Node *cmp, Node *t, Node *f // Give up this identity check for floating points because it may choose incorrect // value around 0.0 and -0.0 if ( cmp->Opcode()==Op_CmpF || cmp->Opcode()==Op_CmpD ) - return NULL; + return nullptr; // Check for "(t==f)?t:f;" and replace with "f" if( b->_test._test == BoolTest::eq ) return f; @@ -117,7 +117,7 @@ Node *CMoveNode::is_cmove_id( PhaseTransform *phase, Node *cmp, Node *t, Node *f if( b->_test._test == BoolTest::ne ) return t; } - return NULL; + return nullptr; } //------------------------------Identity--------------------------------------- @@ -180,7 +180,7 @@ CMoveNode *CMoveNode::make(Node *c, Node *bol, Node *left, Node *right, const Ty case T_NARROWOOP: return new CMoveNNode( c, bol, left, right, t ); default: ShouldNotReachHere(); - return NULL; + return nullptr; 
} } @@ -214,26 +214,26 @@ Node *CMoveINode::Ideal(PhaseGVN *phase, bool can_reshape) { if( phase->type(in(IfFalse)) == TypeInt::ZERO && phase->type(in(IfTrue)) == TypeInt::ONE ) { flip = 1 - flip; } else if( phase->type(in(IfFalse)) == TypeInt::ONE && phase->type(in(IfTrue)) == TypeInt::ZERO ) { - } else return NULL; + } else return nullptr; // Check for eq/ne test - if( !in(1)->is_Bool() ) return NULL; + if( !in(1)->is_Bool() ) return nullptr; BoolNode *bol = in(1)->as_Bool(); if( bol->_test._test == BoolTest::eq ) { } else if( bol->_test._test == BoolTest::ne ) { flip = 1-flip; - } else return NULL; + } else return nullptr; // Check for vs 0 or 1 - if( !bol->in(1)->is_Cmp() ) return NULL; + if( !bol->in(1)->is_Cmp() ) return nullptr; const CmpNode *cmp = bol->in(1)->as_Cmp(); if( phase->type(cmp->in(2)) == TypeInt::ZERO ) { } else if( phase->type(cmp->in(2)) == TypeInt::ONE ) { // Allow cmp-vs-1 if the other input is bounded by 0-1 if( phase->type(cmp->in(1)) != TypeInt::BOOL ) - return NULL; + return nullptr; flip = 1 - flip; - } else return NULL; + } else return nullptr; // Convert to a bool (flipped) // Build int->bool conversion @@ -258,7 +258,7 @@ Node *CMoveFNode::Ideal(PhaseGVN *phase, bool can_reshape) { int phi_x_idx = 0; // Index of phi input where to find naked x // Find the Bool - if( !in(1)->is_Bool() ) return NULL; + if( !in(1)->is_Bool() ) return nullptr; BoolNode *bol = in(1)->as_Bool(); // Check bool sense switch( bol->_test._test ) { @@ -266,13 +266,13 @@ Node *CMoveFNode::Ideal(PhaseGVN *phase, bool can_reshape) { case BoolTest::le: cmp_zero_idx = 2; phi_x_idx = IfFalse; break; case BoolTest::gt: cmp_zero_idx = 2; phi_x_idx = IfTrue; break; case BoolTest::ge: cmp_zero_idx = 1; phi_x_idx = IfFalse; break; - default: return NULL; break; + default: return nullptr; break; } // Find zero input of CmpF; the other input is being abs'd Node *cmpf = bol->in(1); - if( cmpf->Opcode() != Op_CmpF ) return NULL; - Node *X = NULL; + if( cmpf->Opcode() != 
Op_CmpF ) return nullptr; + Node *X = nullptr; bool flip = false; if( phase->type(cmpf->in(cmp_zero_idx)) == TypeF::ZERO ) { X = cmpf->in(3 - cmp_zero_idx); @@ -281,18 +281,18 @@ Node *CMoveFNode::Ideal(PhaseGVN *phase, bool can_reshape) { X = cmpf->in(cmp_zero_idx); flip = true; } else { - return NULL; + return nullptr; } // If X is found on the appropriate phi input, find the subtract on the other - if( X != in(phi_x_idx) ) return NULL; + if( X != in(phi_x_idx) ) return nullptr; int phi_sub_idx = phi_x_idx == IfTrue ? IfFalse : IfTrue; Node *sub = in(phi_sub_idx); // Allow only SubF(0,X) and fail out for all others; NegF is not OK if( sub->Opcode() != Op_SubF || sub->in(2) != X || - phase->type(sub->in(1)) != TypeF::ZERO ) return NULL; + phase->type(sub->in(1)) != TypeF::ZERO ) return nullptr; Node *abs = new AbsFNode( X ); if( flip ) @@ -314,7 +314,7 @@ Node *CMoveDNode::Ideal(PhaseGVN *phase, bool can_reshape) { int phi_x_idx = 0; // Index of phi input where to find naked x // Find the Bool - if( !in(1)->is_Bool() ) return NULL; + if( !in(1)->is_Bool() ) return nullptr; BoolNode *bol = in(1)->as_Bool(); // Check bool sense switch( bol->_test._test ) { @@ -322,13 +322,13 @@ Node *CMoveDNode::Ideal(PhaseGVN *phase, bool can_reshape) { case BoolTest::le: cmp_zero_idx = 2; phi_x_idx = IfFalse; break; case BoolTest::gt: cmp_zero_idx = 2; phi_x_idx = IfTrue; break; case BoolTest::ge: cmp_zero_idx = 1; phi_x_idx = IfFalse; break; - default: return NULL; break; + default: return nullptr; break; } // Find zero input of CmpD; the other input is being abs'd Node *cmpd = bol->in(1); - if( cmpd->Opcode() != Op_CmpD ) return NULL; - Node *X = NULL; + if( cmpd->Opcode() != Op_CmpD ) return nullptr; + Node *X = nullptr; bool flip = false; if( phase->type(cmpd->in(cmp_zero_idx)) == TypeD::ZERO ) { X = cmpd->in(3 - cmp_zero_idx); @@ -337,18 +337,18 @@ Node *CMoveDNode::Ideal(PhaseGVN *phase, bool can_reshape) { X = cmpd->in(cmp_zero_idx); flip = true; } else { - return NULL; + 
return nullptr; } // If X is found on the appropriate phi input, find the subtract on the other - if( X != in(phi_x_idx) ) return NULL; + if( X != in(phi_x_idx) ) return nullptr; int phi_sub_idx = phi_x_idx == IfTrue ? IfFalse : IfTrue; Node *sub = in(phi_sub_idx); // Allow only SubD(0,X) and fail out for all others; NegD is not OK if( sub->Opcode() != Op_SubD || sub->in(2) != X || - phase->type(sub->in(1)) != TypeD::ZERO ) return NULL; + phase->type(sub->in(1)) != TypeD::ZERO ) return nullptr; Node *abs = new AbsDNode( X ); if( flip ) @@ -364,7 +364,7 @@ Node* MoveNode::Ideal(PhaseGVN* phase, bool can_reshape) { // Fold reinterpret cast into memory operation: // MoveX2Y (LoadX mem) => LoadY mem LoadNode* ld = in(1)->isa_Load(); - if (ld != NULL && (ld->outcnt() == 1)) { // replace only + if (ld != nullptr && (ld->outcnt() == 1)) { // replace only const Type* rt = bottom_type(); if (ld->has_reinterpret_variant(rt)) { if (phase->C->post_loop_opts_phase()) { @@ -375,7 +375,7 @@ Node* MoveNode::Ideal(PhaseGVN* phase, bool can_reshape) { } } } - return NULL; + return nullptr; } Node* MoveNode::Identity(PhaseGVN* phase) { diff --git a/src/hotspot/share/opto/movenode.hpp b/src/hotspot/share/opto/movenode.hpp index 94e16c0147e..96189654649 100644 --- a/src/hotspot/share/opto/movenode.hpp +++ b/src/hotspot/share/opto/movenode.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -39,7 +39,7 @@ class CMoveNode : public TypeNode { { init_class_id(Class_CMove); // all inputs are nullified in Node::Node(int) - // init_req(Control,NULL); + // init_req(Control,nullptr); init_req(Condition,bol); init_req(IfFalse,left); init_req(IfTrue,right); @@ -100,7 +100,7 @@ class CMoveNNode : public CMoveNode { // class MoveNode : public Node { protected: - MoveNode(Node* value) : Node(NULL, value) { + MoveNode(Node* value) : Node(nullptr, value) { init_class_id(Class_Move); } diff --git a/src/hotspot/share/opto/mulnode.cpp b/src/hotspot/share/opto/mulnode.cpp index 99bb0ca6681..b66a0df2422 100644 --- a/src/hotspot/share/opto/mulnode.cpp +++ b/src/hotspot/share/opto/mulnode.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -61,7 +61,7 @@ Node* MulNode::Identity(PhaseGVN* phase) { Node *MulNode::Ideal(PhaseGVN *phase, bool can_reshape) { Node* in1 = in(1); Node* in2 = in(2); - Node* progress = NULL; // Progress flag + Node* progress = nullptr; // Progress flag // This code is used by And nodes too, but some conversions are // only valid for the actual Mul nodes. @@ -123,7 +123,7 @@ Node *MulNode::Ideal(PhaseGVN *phase, bool can_reshape) { if( t2->singleton() && // Right input is a constant? 
op != Op_MulF && // Float & double cannot reassociate op != Op_MulD ) { - if( t2 == Type::TOP ) return NULL; + if( t2 == Type::TOP ) return nullptr; Node *mul1 = in(1); #ifdef ASSERT // Check for dead loop @@ -221,7 +221,7 @@ MulNode* MulNode::make(Node* in1, Node* in2, BasicType bt) { default: fatal("Not implemented for %s", type2name(bt)); } - return NULL; + return nullptr; } @@ -239,7 +239,7 @@ Node *MulINode::Ideal(PhaseGVN *phase, bool can_reshape) { // Now we have a constant Node on the right and the constant in con. if (con == 1) { // By one is handled by Identity call - return NULL; + return nullptr; } // Check for negative constant; if so negate the final result @@ -251,7 +251,7 @@ Node *MulINode::Ideal(PhaseGVN *phase, bool can_reshape) { } // Get low bit; check for being the only bit - Node *res = NULL; + Node *res = nullptr; unsigned int bit1 = submultiple_power_of_2(abs_con); if (bit1 == abs_con) { // Found a power of 2? res = new LShiftINode(in(1), phase->intcon(log2i_exact(bit1))); @@ -334,7 +334,7 @@ Node *MulLNode::Ideal(PhaseGVN *phase, bool can_reshape) { // Now we have a constant Node on the right and the constant in con. if (con == 1) { // By one is handled by Identity call - return NULL; + return nullptr; } // Check for negative constant; if so negate the final result @@ -345,7 +345,7 @@ Node *MulLNode::Ideal(PhaseGVN *phase, bool can_reshape) { } // Get low bit; check for being the only bit - Node *res = NULL; + Node *res = nullptr; julong bit1 = submultiple_power_of_2(abs_con); if (bit1 == abs_con) { // Found a power of 2? 
res = new LShiftLNode(in(1), phase->intcon(log2i_exact(bit1))); @@ -429,7 +429,7 @@ Node* MulFNode::Ideal(PhaseGVN* phase, bool can_reshape) { const TypeF *t2 = phase->type(in(2))->isa_float_constant(); // x * 2 -> x + x - if (t2 != NULL && t2->getf() == 2) { + if (t2 != nullptr && t2->getf() == 2) { Node* base = in(1); return new AddFNode(base, base); } @@ -452,7 +452,7 @@ Node* MulDNode::Ideal(PhaseGVN* phase, bool can_reshape) { const TypeD *t2 = phase->type(in(2))->isa_double_constant(); // x * 2 -> x + x - if (t2 != NULL && t2->getd() == 2) { + if (t2 != nullptr && t2->getd() == 2) { Node* base = in(1); return new AddDNode(base, base); } @@ -548,7 +548,7 @@ Node* AndINode::Identity(PhaseGVN* phase) { int con = t2->get_con(); // Masking off high bits which are always zero is useless. const TypeInt* t1 = phase->type(in(1))->isa_int(); - if (t1 != NULL && t1->_lo >= 0) { + if (t1 != nullptr && t1->_lo >= 0) { jint t1_support = right_n_bits(1 + log2i_graceful(t1->_hi)); if ((t1_support & con) == t1_support) return in1; @@ -573,7 +573,7 @@ Node* AndINode::Identity(PhaseGVN* phase) { Node *AndINode::Ideal(PhaseGVN *phase, bool can_reshape) { // pattern similar to (v1 + (v2 << 2)) & 3 transformed to v1 & 3 Node* progress = AndIL_add_shift_and_mask(phase, T_INT); - if (progress != NULL) { + if (progress != nullptr) { return progress; } @@ -686,7 +686,7 @@ Node* AndLNode::Identity(PhaseGVN* phase) { jlong con = t2->get_con(); // Masking off high bits which are always zero is useless. 
const TypeLong* t1 = phase->type( in(1) )->isa_long(); - if (t1 != NULL && t1->_lo >= 0) { + if (t1 != nullptr && t1->_lo >= 0) { int bit_count = log2i_graceful(t1->_hi) + 1; jlong t1_support = jlong(max_julong >> (BitsPerJavaLong - bit_count)); if ((t1_support & con) == t1_support) @@ -713,7 +713,7 @@ Node* AndLNode::Identity(PhaseGVN* phase) { Node *AndLNode::Ideal(PhaseGVN *phase, bool can_reshape) { // pattern similar to (v1 + (v2 << 2)) & 3 transformed to v1 & 3 Node* progress = AndIL_add_shift_and_mask(phase, T_LONG); - if (progress != NULL) { + if (progress != nullptr) { return progress; } @@ -765,14 +765,14 @@ LShiftNode* LShiftNode::make(Node* in1, Node* in2, BasicType bt) { default: fatal("Not implemented for %s", type2name(bt)); } - return NULL; + return nullptr; } //============================================================================= static bool const_shift_count(PhaseGVN* phase, Node* shiftNode, int* count) { const TypeInt* tcount = phase->type(shiftNode->in(2))->isa_int(); - if (tcount != NULL && tcount->is_con()) { + if (tcount != nullptr && tcount->is_con()) { *count = tcount->get_con(); return true; } @@ -816,7 +816,7 @@ Node* LShiftINode::Identity(PhaseGVN* phase) { Node *LShiftINode::Ideal(PhaseGVN *phase, bool can_reshape) { int con = maskShiftAmount(phase, this, BitsPerJavaInteger); if (con == 0) { - return NULL; + return nullptr; } // Left input is an add? @@ -872,7 +872,7 @@ Node *LShiftINode::Ideal(PhaseGVN *phase, bool can_reshape) { phase->type(add1->in(2)) == TypeInt::make( bits_mask ) ) return new LShiftINode( add1->in(1), in(2) ); - return NULL; + return nullptr; } //------------------------------Value------------------------------------------ @@ -939,7 +939,7 @@ Node* LShiftLNode::Identity(PhaseGVN* phase) { Node *LShiftLNode::Ideal(PhaseGVN *phase, bool can_reshape) { int con = maskShiftAmount(phase, this, BitsPerJavaLong); if (con == 0) { - return NULL; + return nullptr; } // Left input is an add? 
@@ -995,7 +995,7 @@ Node *LShiftLNode::Ideal(PhaseGVN *phase, bool can_reshape) { phase->type(add1->in(2)) == TypeLong::make( bits_mask ) ) return new LShiftLNode( add1->in(1), in(2) ); - return NULL; + return nullptr; } //------------------------------Value------------------------------------------ @@ -1063,7 +1063,7 @@ Node* RShiftINode::Identity(PhaseGVN* phase) { int lo = (-1 << (BitsPerJavaInteger - ((uint)count)-1)); // FFFF8000 int hi = ~lo; // 00007FFF const TypeInt* t11 = phase->type(in(1)->in(1))->isa_int(); - if (t11 == NULL) { + if (t11 == nullptr) { return this; } // Does actual value fit inside of mask? @@ -1079,11 +1079,11 @@ Node* RShiftINode::Identity(PhaseGVN* phase) { Node *RShiftINode::Ideal(PhaseGVN *phase, bool can_reshape) { // Inputs may be TOP if they are dead. const TypeInt *t1 = phase->type(in(1))->isa_int(); - if (!t1) return NULL; // Left input is an integer + if (!t1) return nullptr; // Left input is an integer const TypeInt *t3; // type of in(1).in(2) int shift = maskShiftAmount(phase, this, BitsPerJavaInteger); if (shift == 0) { - return NULL; + return nullptr; } // Check for (x & 0xFF000000) >> 24, whose mask can be made smaller. 
@@ -1101,7 +1101,7 @@ Node *RShiftINode::Ideal(PhaseGVN *phase, bool can_reshape) { // Check for "(short[i] <<16)>>16" which simply sign-extends const Node *shl = in(1); - if( shl->Opcode() != Op_LShiftI ) return NULL; + if( shl->Opcode() != Op_LShiftI ) return nullptr; if( shift == 16 && (t3 = phase->type(shl->in(2))->isa_int()) && @@ -1137,7 +1137,7 @@ Node *RShiftINode::Ideal(PhaseGVN *phase, bool can_reshape) { } } - return NULL; + return nullptr; } //------------------------------Value------------------------------------------ @@ -1277,7 +1277,7 @@ Node* URShiftINode::Identity(PhaseGVN* phase) { t_lshift_count == phase->type(in(2))) { Node *x = add->in(1)->in(1); const TypeInt *t_x = phase->type(x)->isa_int(); - if (t_x != NULL && 0 <= t_x->_lo && t_x->_hi <= (max_jint>>LogBytesPerWord)) { + if (t_x != nullptr && 0 <= t_x->_lo && t_x->_hi <= (max_jint>>LogBytesPerWord)) { return x; } } @@ -1291,7 +1291,7 @@ Node* URShiftINode::Identity(PhaseGVN* phase) { Node *URShiftINode::Ideal(PhaseGVN *phase, bool can_reshape) { int con = maskShiftAmount(phase, this, BitsPerJavaInteger); if (con == 0) { - return NULL; + return nullptr; } // We'll be wanting the right-shift amount as a mask of that many bits @@ -1363,7 +1363,7 @@ Node *URShiftINode::Ideal(PhaseGVN *phase, bool can_reshape) { } } - return NULL; + return nullptr; } //------------------------------Value------------------------------------------ @@ -1455,7 +1455,7 @@ Node* URShiftLNode::Identity(PhaseGVN* phase) { Node *URShiftLNode::Ideal(PhaseGVN *phase, bool can_reshape) { int con = maskShiftAmount(phase, this, BitsPerJavaLong); if (con == 0) { - return NULL; + return nullptr; } // We'll be wanting the right-shift amount as a mask of that many bits @@ -1508,7 +1508,7 @@ Node *URShiftLNode::Ideal(PhaseGVN *phase, bool can_reshape) { return new URShiftLNode(in11, phase->intcon(63)); } } - return NULL; + return nullptr; } //------------------------------Value------------------------------------------ @@ -1702,7 
+1702,7 @@ Node* RotateLeftNode::Ideal(PhaseGVN *phase, bool can_reshape) { return new RotateRightNode(in(1), phase->intcon(64 - (lshift & 63)), TypeLong::LONG); } } - return NULL; + return nullptr; } Node* RotateRightNode::Identity(PhaseGVN* phase) { @@ -1781,27 +1781,27 @@ const Type* RotateRightNode::Value(PhaseGVN* phase) const { // Because the optimization might work for a non-constant // mask M, we check the AndX for both operand orders. bool MulNode::AndIL_shift_and_mask_is_always_zero(PhaseGVN* phase, Node* shift, Node* mask, BasicType bt, bool check_reverse) { - if (mask == NULL || shift == NULL) { + if (mask == nullptr || shift == nullptr) { return false; } shift = shift->uncast(); - if (shift == NULL) { + if (shift == nullptr) { return false; } const TypeInteger* mask_t = phase->type(mask)->isa_integer(bt); const TypeInteger* shift_t = phase->type(shift)->isa_integer(bt); - if (mask_t == NULL || shift_t == NULL) { + if (mask_t == nullptr || shift_t == nullptr) { return false; } BasicType shift_bt = bt; if (bt == T_LONG && shift->Opcode() == Op_ConvI2L) { bt = T_INT; Node* val = shift->in(1); - if (val == NULL) { + if (val == nullptr) { return false; } val = val->uncast(); - if (val == NULL) { + if (val == nullptr) { return false; } if (val->Opcode() == Op_LShiftI) { @@ -1819,7 +1819,7 @@ bool MulNode::AndIL_shift_and_mask_is_always_zero(PhaseGVN* phase, Node* shift, return false; } Node* shift2 = shift->in(2); - if (shift2 == NULL) { + if (shift2 == nullptr) { return false; } const Type* shift2_t = phase->type(shift2); @@ -1852,8 +1852,8 @@ bool MulNode::AndIL_shift_and_mask_is_always_zero(PhaseGVN* phase, Node* shift, Node* MulNode::AndIL_add_shift_and_mask(PhaseGVN* phase, BasicType bt) { Node* add = in(1); Node* mask = in(2); - if (add == NULL || mask == NULL) { - return NULL; + if (add == nullptr || mask == nullptr) { + return nullptr; } int addidx = 0; if (add->Opcode() == Op_Add(bt)) { @@ -1866,7 +1866,7 @@ Node* 
MulNode::AndIL_add_shift_and_mask(PhaseGVN* phase, BasicType bt) { if (addidx > 0) { Node* add1 = add->in(1); Node* add2 = add->in(2); - if (add1 != NULL && add2 != NULL) { + if (add1 != nullptr && add2 != nullptr) { if (AndIL_shift_and_mask_is_always_zero(phase, add1, mask, bt, false)) { set_req_X(addidx, add2, phase); return this; @@ -1876,5 +1876,5 @@ Node* MulNode::AndIL_add_shift_and_mask(PhaseGVN* phase, BasicType bt) { } } } - return NULL; + return nullptr; } diff --git a/src/hotspot/share/opto/mulnode.hpp b/src/hotspot/share/opto/mulnode.hpp index 933c71f290a..84307fb00fb 100644 --- a/src/hotspot/share/opto/mulnode.hpp +++ b/src/hotspot/share/opto/mulnode.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -41,7 +41,7 @@ class PhaseTransform; class MulNode : public Node { virtual uint hash() const; public: - MulNode(Node *in1, Node *in2): Node(NULL,in1,in2) { + MulNode(Node *in1, Node *in2): Node(nullptr,in1,in2) { init_class_id(Class_Mul); } @@ -227,7 +227,7 @@ public: class LShiftNode : public Node { public: - LShiftNode(Node *in1, Node *in2) : Node(NULL,in1,in2) { + LShiftNode(Node *in1, Node *in2) : Node(nullptr,in1,in2) { init_class_id(Class_LShift); } diff --git a/src/hotspot/share/opto/multnode.cpp b/src/hotspot/share/opto/multnode.cpp index a7f2d283c96..814f20ca224 100644 --- a/src/hotspot/share/opto/multnode.cpp +++ b/src/hotspot/share/opto/multnode.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -59,24 +59,24 @@ ProjNode* MultiNode::proj_out_or_null(uint which_proj) const { continue; } } - return NULL; + return nullptr; } ProjNode* MultiNode::proj_out_or_null(uint which_proj, bool is_io_use) const { for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) { ProjNode* proj = fast_out(i)->isa_Proj(); - if (proj != NULL && (proj->_con == which_proj) && (proj->_is_io_use == is_io_use)) { + if (proj != nullptr && (proj->_con == which_proj) && (proj->_is_io_use == is_io_use)) { return proj; } } - return NULL; + return nullptr; } // Get a named projection ProjNode* MultiNode::proj_out(uint which_proj) const { assert((Opcode() != Op_If && Opcode() != Op_RangeCheck) || outcnt() == 2, "bad if #1"); ProjNode* p = proj_out_or_null(which_proj); - assert(p != NULL, "named projection %u not found", which_proj); + assert(p != nullptr, "named projection %u not found", which_proj); return p; } @@ -113,7 +113,7 @@ const Type* ProjNode::proj_type(const Type* t) const { } const Type *ProjNode::bottom_type() const { - if (in(0) == NULL) return Type::TOP; + if (in(0) == nullptr) return Type::TOP; return proj_type(in(0)->bottom_type()); } @@ -121,16 +121,16 @@ const TypePtr *ProjNode::adr_type() const { if (bottom_type() == Type::MEMORY) { // in(0) might be a narrow MemBar; otherwise we will report TypePtr::BOTTOM Node* ctrl = in(0); - if (ctrl == NULL) return NULL; // node is dead + if (ctrl == nullptr) return nullptr; // node is dead const TypePtr* adr_type = ctrl->adr_type(); #ifdef ASSERT if (!VMError::is_error_reported() && !Node::in_dump()) - assert(adr_type != NULL, "source must have adr_type"); + assert(adr_type != nullptr, "source must have adr_type"); #endif return adr_type; } assert(bottom_type()->base() != Type::Memory, "no other memories?"); - return NULL; + return nullptr; } bool ProjNode::pinned() const { return in(0)->pinned(); } @@ -142,7 +142,7 @@ void 
ProjNode::dump_compact_spec(outputStream *st) const { Node* o = this->out(i); if (not_a_node(o)) { st->print("[?]"); - } else if (o == NULL) { + } else if (o == nullptr) { st->print("[_]"); } else { st->print("[%d]", o->_idx); @@ -155,7 +155,7 @@ void ProjNode::dump_compact_spec(outputStream *st) const { //----------------------------check_con---------------------------------------- void ProjNode::check_con() const { Node* n = in(0); - if (n == NULL) return; // should be assert, but NodeHash makes bogons + if (n == nullptr) return; // should be assert, but NodeHash makes bogons if (n->is_Mach()) return; // mach. projs. are not type-safe if (n->is_Start()) return; // alas, starts can have mach. projs. also if (_con == SCMemProjNode::SCMEMPROJCON ) return; @@ -166,7 +166,7 @@ void ProjNode::check_con() const { //------------------------------Value------------------------------------------ const Type* ProjNode::Value(PhaseGVN* phase) const { - if (in(0) == NULL) return Type::TOP; + if (in(0) == nullptr) return Type::TOP; return proj_type(phase->type(in(0))); } @@ -183,14 +183,14 @@ uint ProjNode::ideal_reg() const { //-------------------------------is_uncommon_trap_proj---------------------------- // Return uncommon trap call node if proj is for "proj->[region->..]call_uct" -// NULL otherwise +// null otherwise CallStaticJavaNode* ProjNode::is_uncommon_trap_proj(Deoptimization::DeoptReason reason) { int path_limit = 10; Node* out = this; for (int ct = 0; ct < path_limit; ct++) { out = out->unique_ctrl_out_or_null(); - if (out == NULL) - return NULL; + if (out == nullptr) + return nullptr; if (out->is_CallStaticJava()) { CallStaticJavaNode* call = out->as_CallStaticJava(); int req = call->uncommon_trap_request(); @@ -200,12 +200,12 @@ CallStaticJavaNode* ProjNode::is_uncommon_trap_proj(Deoptimization::DeoptReason return call; } } - return NULL; // don't do further after call + return nullptr; // don't do further after call } if (out->Opcode() != Op_Region) - return 
NULL; + return nullptr; } - return NULL; + return nullptr; } //-------------------------------is_uncommon_trap_if_pattern------------------------- @@ -213,31 +213,31 @@ CallStaticJavaNode* ProjNode::is_uncommon_trap_proj(Deoptimization::DeoptReason // | // V // other_proj->[region->..]call_uct" -// NULL otherwise +// null otherwise // "must_reason_predicate" means the uct reason must be Reason_predicate CallStaticJavaNode* ProjNode::is_uncommon_trap_if_pattern(Deoptimization::DeoptReason reason) { Node *in0 = in(0); - if (!in0->is_If()) return NULL; + if (!in0->is_If()) return nullptr; // Variation of a dead If node. - if (in0->outcnt() < 2) return NULL; + if (in0->outcnt() < 2) return nullptr; IfNode* iff = in0->as_If(); // we need "If(Conv2B(Opaque1(...)))" pattern for reason_predicate if (reason != Deoptimization::Reason_none) { if (iff->in(1)->Opcode() != Op_Conv2B || iff->in(1)->in(1)->Opcode() != Op_Opaque1) { - return NULL; + return nullptr; } } ProjNode* other_proj = iff->proj_out(1-_con); CallStaticJavaNode* call = other_proj->is_uncommon_trap_proj(reason); - if (call != NULL) { + if (call != nullptr) { assert(reason == Deoptimization::Reason_none || Compile::current()->is_predicate_opaq(iff->in(1)->in(1)), "should be on the list"); return call; } - return NULL; + return nullptr; } ProjNode* ProjNode::other_if_proj() const { diff --git a/src/hotspot/share/opto/multnode.hpp b/src/hotspot/share/opto/multnode.hpp index efd43bc03f2..09552508aa3 100644 --- a/src/hotspot/share/opto/multnode.hpp +++ b/src/hotspot/share/opto/multnode.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -92,13 +92,13 @@ public: #endif // Return uncommon trap call node if proj is for "proj->[region->..]call_uct" - // NULL otherwise + // null otherwise CallStaticJavaNode* is_uncommon_trap_proj(Deoptimization::DeoptReason reason); // Return uncommon trap call node for "if(test)-> proj -> ... // | // V // other_proj->[region->..]call_uct" - // NULL otherwise + // null otherwise CallStaticJavaNode* is_uncommon_trap_if_pattern(Deoptimization::DeoptReason reason); // Return other proj node when this is a If proj node diff --git a/src/hotspot/share/opto/narrowptrnode.hpp b/src/hotspot/share/opto/narrowptrnode.hpp index 91bf76140db..e7cd19cb424 100644 --- a/src/hotspot/share/opto/narrowptrnode.hpp +++ b/src/hotspot/share/opto/narrowptrnode.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -34,7 +34,7 @@ class EncodeNarrowPtrNode : public TypeNode { EncodeNarrowPtrNode(Node* value, const Type* type): TypeNode(type, 2) { init_class_id(Class_EncodeNarrowPtr); - init_req(0, NULL); + init_req(0, nullptr); init_req(1, value); } public: @@ -77,7 +77,7 @@ class DecodeNarrowPtrNode : public TypeNode { DecodeNarrowPtrNode(Node* value, const Type* type): TypeNode(type, 2) { init_class_id(Class_DecodeNarrowPtr); - init_req(0, NULL); + init_req(0, nullptr); init_req(1, value); } public: diff --git a/src/hotspot/share/opto/node.cpp b/src/hotspot/share/opto/node.cpp index 7bc749e7812..20892cfef73 100644 --- a/src/hotspot/share/opto/node.cpp +++ b/src/hotspot/share/opto/node.cpp @@ -67,7 +67,7 @@ extern int nodes_created; //-------------------------- construct_node------------------------------------ // Set a breakpoint here to identify where a particular node index is built. void Node::verify_construction() { - _debug_orig = NULL; + _debug_orig = nullptr; int old_debug_idx = Compile::debug_idx(); int new_debug_idx = old_debug_idx + 1; if (new_debug_idx > 0) { @@ -95,7 +95,7 @@ void Node::verify_construction() { BREAKPOINT; } #if OPTO_DU_ITERATOR_ASSERT - _last_del = NULL; + _last_del = nullptr; _del_tick = 0; #endif _hash_lock = 0; @@ -110,7 +110,7 @@ void DUIterator_Common::sample(const Node* node) { _node = node; _outcnt = node->_outcnt; _del_tick = node->_del_tick; - _last = NULL; + _last = nullptr; } void DUIterator_Common::verify(const Node* node, bool at_end_ok) { @@ -291,7 +291,7 @@ void DUIterator_Last::verify_step(uint num_edges) { // This constant used to initialize _out may be any non-null value. -// The value NULL is reserved for the top node only. +// The value null is reserved for the top node only. #define NO_OUT_ARRAY ((Node**)-1) // Out-of-line code from node constructors. 
@@ -313,7 +313,7 @@ inline int Node::Init(int req) { } // If there are default notes floating around, capture them: Node_Notes* nn = C->default_node_notes(); - if (nn != NULL) init_node_notes(C, idx, nn); + if (nn != nullptr) init_node_notes(C, idx, nn); // Note: At this point, C is dead, // and we begin to initialize the new Node. @@ -338,11 +338,11 @@ Node::Node(uint req) debug_only( verify_construction() ); NOT_PRODUCT(nodes_created++); if (req == 0) { - _in = NULL; + _in = nullptr; } else { Node** to = _in; for(uint i = 0; i < req; i++) { - to[i] = NULL; + to[i] = nullptr; } } } @@ -357,7 +357,7 @@ Node::Node(Node *n0) debug_only( verify_construction() ); NOT_PRODUCT(nodes_created++); assert( is_not_dead(n0), "can not use dead node"); - _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this); + _in[0] = n0; if (n0 != nullptr) n0->add_out((Node *)this); } //------------------------------Node------------------------------------------- @@ -371,8 +371,8 @@ Node::Node(Node *n0, Node *n1) NOT_PRODUCT(nodes_created++); assert( is_not_dead(n0), "can not use dead node"); assert( is_not_dead(n1), "can not use dead node"); - _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this); - _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this); + _in[0] = n0; if (n0 != nullptr) n0->add_out((Node *)this); + _in[1] = n1; if (n1 != nullptr) n1->add_out((Node *)this); } //------------------------------Node------------------------------------------- @@ -387,9 +387,9 @@ Node::Node(Node *n0, Node *n1, Node *n2) assert( is_not_dead(n0), "can not use dead node"); assert( is_not_dead(n1), "can not use dead node"); assert( is_not_dead(n2), "can not use dead node"); - _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this); - _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this); - _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this); + _in[0] = n0; if (n0 != nullptr) n0->add_out((Node *)this); + _in[1] = n1; if (n1 != nullptr) n1->add_out((Node *)this); + _in[2] = n2; if (n2 != nullptr) 
n2->add_out((Node *)this); } //------------------------------Node------------------------------------------- @@ -405,10 +405,10 @@ Node::Node(Node *n0, Node *n1, Node *n2, Node *n3) assert( is_not_dead(n1), "can not use dead node"); assert( is_not_dead(n2), "can not use dead node"); assert( is_not_dead(n3), "can not use dead node"); - _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this); - _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this); - _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this); - _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this); + _in[0] = n0; if (n0 != nullptr) n0->add_out((Node *)this); + _in[1] = n1; if (n1 != nullptr) n1->add_out((Node *)this); + _in[2] = n2; if (n2 != nullptr) n2->add_out((Node *)this); + _in[3] = n3; if (n3 != nullptr) n3->add_out((Node *)this); } //------------------------------Node------------------------------------------- @@ -425,11 +425,11 @@ Node::Node(Node *n0, Node *n1, Node *n2, Node *n3, Node *n4) assert( is_not_dead(n2), "can not use dead node"); assert( is_not_dead(n3), "can not use dead node"); assert( is_not_dead(n4), "can not use dead node"); - _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this); - _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this); - _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this); - _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this); - _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this); + _in[0] = n0; if (n0 != nullptr) n0->add_out((Node *)this); + _in[1] = n1; if (n1 != nullptr) n1->add_out((Node *)this); + _in[2] = n2; if (n2 != nullptr) n2->add_out((Node *)this); + _in[3] = n3; if (n3 != nullptr) n3->add_out((Node *)this); + _in[4] = n4; if (n4 != nullptr) n4->add_out((Node *)this); } //------------------------------Node------------------------------------------- @@ -448,12 +448,12 @@ Node::Node(Node *n0, Node *n1, Node *n2, Node *n3, assert( is_not_dead(n3), "can not use dead node"); assert( is_not_dead(n4), "can not use dead node"); assert( 
is_not_dead(n5), "can not use dead node"); - _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this); - _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this); - _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this); - _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this); - _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this); - _in[5] = n5; if (n5 != NULL) n5->add_out((Node *)this); + _in[0] = n0; if (n0 != nullptr) n0->add_out((Node *)this); + _in[1] = n1; if (n1 != nullptr) n1->add_out((Node *)this); + _in[2] = n2; if (n2 != nullptr) n2->add_out((Node *)this); + _in[3] = n3; if (n3 != nullptr) n3->add_out((Node *)this); + _in[4] = n4; if (n4 != nullptr) n4->add_out((Node *)this); + _in[5] = n5; if (n5 != nullptr) n5->add_out((Node *)this); } //------------------------------Node------------------------------------------- @@ -473,13 +473,13 @@ Node::Node(Node *n0, Node *n1, Node *n2, Node *n3, assert( is_not_dead(n4), "can not use dead node"); assert( is_not_dead(n5), "can not use dead node"); assert( is_not_dead(n6), "can not use dead node"); - _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this); - _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this); - _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this); - _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this); - _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this); - _in[5] = n5; if (n5 != NULL) n5->add_out((Node *)this); - _in[6] = n6; if (n6 != NULL) n6->add_out((Node *)this); + _in[0] = n0; if (n0 != nullptr) n0->add_out((Node *)this); + _in[1] = n1; if (n1 != nullptr) n1->add_out((Node *)this); + _in[2] = n2; if (n2 != nullptr) n2->add_out((Node *)this); + _in[3] = n3; if (n3 != nullptr) n3->add_out((Node *)this); + _in[4] = n4; if (n4 != nullptr) n4->add_out((Node *)this); + _in[5] = n5; if (n5 != nullptr) n5->add_out((Node *)this); + _in[6] = n6; if (n6 != nullptr) n6->add_out((Node *)this); } #ifdef __clang__ @@ -508,7 +508,7 @@ Node *Node::clone() const { for( i = 0; i < len(); i++ ) { 
Node *x = in(i); n->_in[i] = x; - if (x != NULL) x->add_out(n); + if (x != nullptr) x->add_out(n); } if (is_macro()) { C->add_macro_node(n); @@ -557,7 +557,7 @@ Node *Node::clone() const { if (n->is_Call()) { // CallGenerator is linked to the original node. CallGenerator* cg = n->as_Call()->generator(); - if (cg != NULL) { + if (cg != nullptr) { CallGenerator* cloned_cg = cg->with_call_node(n->as_Call()); n->as_Call()->set_generator(cloned_cg); @@ -583,10 +583,10 @@ void Node::setup_is_top() { if (this == (Node*)Compile::current()->top()) { // This node has just become top. Kill its out array. _outcnt = _outmax = 0; - _out = NULL; // marker value for top + _out = nullptr; // marker value for top assert(is_top(), "must be top"); } else { - if (_out == NULL) _out = NO_OUT_ARRAY; + if (_out == nullptr) _out = NO_OUT_ARRAY; assert(!is_top(), "must not be top"); } } @@ -594,8 +594,8 @@ void Node::setup_is_top() { //------------------------------~Node------------------------------------------ // Fancy destructor; eagerly attempt to reclaim Node numberings and storage void Node::destruct(PhaseValues* phase) { - Compile* compile = (phase != NULL) ? phase->C : Compile::current(); - if (phase != NULL && phase->is_IterGVN()) { + Compile* compile = (phase != nullptr) ? phase->C : Compile::current(); + if (phase != nullptr && phase->is_IterGVN()) { phase->is_IterGVN()->_worklist.remove(this); } // If this is the most recently created node, reclaim its index. 
Otherwise, @@ -607,12 +607,12 @@ void Node::destruct(PhaseValues* phase) { } // Clear debug info: Node_Notes* nn = compile->node_notes_at(_idx); - if (nn != NULL) nn->clear(); + if (nn != nullptr) nn->clear(); // Walk the input array, freeing the corresponding output edges _cnt = _max; // forget req/prec distinction uint i; for( i = 0; i < _max; i++ ) { - set_req(i, NULL); + set_req(i, nullptr); //assert(def->out(def->outcnt()-1) == (Node *)this,"bad def-use hacking in reclaim"); } assert(outcnt() == 0, "deleting a node must not leave a dangling use"); @@ -645,7 +645,7 @@ void Node::destruct(PhaseValues* phase) { int out_edge_size = _outmax*sizeof(void*); char *in_array = ((char*)_in); char *edge_end = in_array + edge_size; - char *out_array = (char*)(_out == NO_OUT_ARRAY? NULL: _out); + char *out_array = (char*)(_out == NO_OUT_ARRAY? nullptr: _out); int node_size = size_of(); #ifdef ASSERT @@ -687,10 +687,10 @@ void Node::grow(uint len) { _max = 4; _in = (Node**)arena->Amalloc(4*sizeof(Node*)); Node** to = _in; - to[0] = NULL; - to[1] = NULL; - to[2] = NULL; - to[3] = NULL; + to[0] = nullptr; + to[1] = nullptr; + to[2] = nullptr; + to[3] = nullptr; return; } new_max = next_power_of_2(len); @@ -698,7 +698,7 @@ void Node::grow(uint len) { // Previously I was using only powers-of-2 which peaked at 128 edges. //if( new_max >= limit ) new_max = limit-1; _in = (Node**)arena->Arealloc(_in, _max*sizeof(Node*), new_max*sizeof(Node*)); - Copy::zero_to_bytes(&_in[_max], (new_max-_max)*sizeof(Node*)); // NULL all new space + Copy::zero_to_bytes(&_in[_max], (new_max-_max)*sizeof(Node*)); // null all new space _max = new_max; // Record new max length // This assertion makes sure that Node::_max is wide enough to // represent the numerical value of new_max. @@ -720,9 +720,9 @@ void Node::out_grow( uint len ) { // Trimming to limit allows a uint8 to handle up to 255 edges. // Previously I was using only powers-of-2 which peaked at 128 edges. 
//if( new_max >= limit ) new_max = limit-1; - assert(_out != NULL && _out != NO_OUT_ARRAY, "out must have sensible value"); + assert(_out != nullptr && _out != NO_OUT_ARRAY, "out must have sensible value"); _out = (Node**)arena->Arealloc(_out,_outmax*sizeof(Node*),new_max*sizeof(Node*)); - //Copy::zero_to_bytes(&_out[_outmax], (new_max-_outmax)*sizeof(Node*)); // NULL all new space + //Copy::zero_to_bytes(&_out[_outmax], (new_max-_outmax)*sizeof(Node*)); // null all new space _outmax = new_max; // Record new max length // This assertion makes sure that Node::_max is wide enough to // represent the numerical value of new_max. @@ -736,7 +736,7 @@ bool Node::is_dead() const { if( is_top() || is_Mach() || (Opcode() == Op_Node && _outcnt > 0) ) return false; for( uint i = 0; i < _max; i++ ) - if( _in[i] != NULL ) + if( _in[i] != nullptr ) return false; return true; } @@ -767,7 +767,7 @@ bool Node::is_reachable_from_root() const { //------------------------------is_unreachable--------------------------------- bool Node::is_unreachable(PhaseIterGVN &igvn) const { assert(!is_Mach(), "doesn't work with MachNodes"); - return outcnt() == 0 || igvn.type(this) == Type::TOP || (in(0) != NULL && in(0)->is_top()); + return outcnt() == 0 || igvn.type(this) == Type::TOP || (in(0) != nullptr && in(0)->is_top()); } //------------------------------add_req---------------------------------------- @@ -776,19 +776,19 @@ void Node::add_req( Node *n ) { assert( is_not_dead(n), "can not use dead node"); // Look to see if I can move precedence down one without reallocating - if( (_cnt >= _max) || (in(_max-1) != NULL) ) + if( (_cnt >= _max) || (in(_max-1) != nullptr) ) grow( _max+1 ); // Find a precedence edge to move - if( in(_cnt) != NULL ) { // Next precedence edge is busy? + if( in(_cnt) != nullptr ) { // Next precedence edge is busy? 
uint i; for( i=_cnt; i<_max; i++ ) - if( in(i) == NULL ) // Find the NULL at end of prec edge list + if( in(i) == nullptr ) // Find the null at end of prec edge list break; // There must be one, since we grew the array _in[i] = in(_cnt); // Move prec over, making space for req edge } _in[_cnt++] = n; // Stuff over old prec edge - if (n != NULL) n->add_out((Node *)this); + if (n != nullptr) n->add_out((Node *)this); Compile::current()->record_modified_node(this); } @@ -808,10 +808,10 @@ void Node::add_req_batch( Node *n, uint m ) { grow( _max+m ); // Find a precedence edge to move - if( _in[_cnt] != NULL ) { // Next precedence edge is busy? + if( _in[_cnt] != nullptr ) { // Next precedence edge is busy? uint i; for( i=_cnt; i<_max; i++ ) - if( _in[i] == NULL ) // Find the NULL at end of prec edge list + if( _in[i] == nullptr ) // Find the null at end of prec edge list break; // There must be one, since we grew the array // Slide all the precs over by m positions (assume #prec << m). Copy::conjoint_words_to_higher((HeapWord*)&_in[_cnt], (HeapWord*)&_in[_cnt+m], ((i-_cnt)*sizeof(Node*))); @@ -823,7 +823,7 @@ void Node::add_req_batch( Node *n, uint m ) { } // Insert multiple out edges on the node. - if (n != NULL && !n->is_top()) { + if (n != nullptr && !n->is_top()) { for(uint i=0; iadd_out((Node *)this); } @@ -839,7 +839,7 @@ void Node::del_req( uint idx ) { "remove node from hash table before modifying it"); // First remove corresponding def-use edge Node *n = in(idx); - if (n != NULL) n->del_out((Node *)this); + if (n != nullptr) n->del_out((Node *)this); _in[idx] = in(--_cnt); // Compact the array // Avoid spec violation: Gap in prec edges. close_prec_gap_at(_cnt); @@ -854,7 +854,7 @@ void Node::del_req_ordered( uint idx ) { "remove node from hash table before modifying it"); // First remove corresponding def-use edge Node *n = in(idx); - if (n != NULL) n->del_out((Node *)this); + if (n != nullptr) n->del_out((Node *)this); if (idx < --_cnt) { // Not last edge ? 
Copy::conjoint_words_to_lower((HeapWord*)&_in[idx+1], (HeapWord*)&_in[idx], ((_cnt-idx)*sizeof(Node*))); } @@ -867,14 +867,14 @@ void Node::del_req_ordered( uint idx ) { // Insert a new required input at the end void Node::ins_req( uint idx, Node *n ) { assert( is_not_dead(n), "can not use dead node"); - add_req(NULL); // Make space + add_req(nullptr); // Make space assert( idx < _max, "Must have allocated enough space"); // Slide over if(_cnt-idx-1 > 0) { Copy::conjoint_words_to_higher((HeapWord*)&_in[idx], (HeapWord*)&_in[idx+1], ((_cnt-idx-1)*sizeof(Node*))); } _in[idx] = n; // Stuff over old required edge - if (n != NULL) n->add_out((Node *)this); // Add reciprocal def-use edge + if (n != nullptr) n->add_out((Node *)this); // Add reciprocal def-use edge Compile::current()->record_modified_node(this); } @@ -893,13 +893,13 @@ int Node::replace_edge(Node* old, Node* neww, PhaseGVN* gvn) { for (uint i = 0; i < len(); i++) { if (in(i) == old) { if (i < req()) { - if (gvn != NULL) { + if (gvn != nullptr) { set_req_X(i, neww, gvn); } else { set_req(i, neww); } } else { - assert(gvn == NULL || gvn->is_IterGVN() == NULL, "no support for igvn here"); + assert(gvn == nullptr || gvn->is_IterGVN() == nullptr, "no support for igvn here"); assert(find_prec_edge(neww) == -1, "spec violation: duplicated prec edge (node %d -> %d)", _idx, neww->_idx); set_prec(i, neww); } @@ -925,7 +925,7 @@ int Node::replace_edges_in_range(Node* old, Node* neww, int start, int end, Phas } //-------------------------disconnect_inputs----------------------------------- -// NULL out all inputs to eliminate incoming Def-Use edges. +// null out all inputs to eliminate incoming Def-Use edges. 
void Node::disconnect_inputs(Compile* C) { // the layout of Node::_in // r: a required input, null is allowed @@ -944,7 +944,7 @@ void Node::disconnect_inputs(Compile* C) { // Remove precedence edges if any exist // Note: Safepoints may have precedence edges, even during parsing for (uint i = len(); i > req(); ) { - rm_prec(--i); // no-op if _in[i] is nullptr + rm_prec(--i); // no-op if _in[i] is null } #ifdef ASSERT @@ -981,12 +981,12 @@ Node* Node::find_out_with(int opcode) { return use; } } - return NULL; + return nullptr; } // Return true if the current node has an out that matches opcode. bool Node::has_out_with(int opcode) { - return (find_out_with(opcode) != NULL); + return (find_out_with(opcode) != nullptr); } // Return true if the current node has an out that matches any of the opcodes. @@ -1017,7 +1017,7 @@ Node* Node::uncast_helper(const Node* p, bool keep_deps) { } assert(depth_count++ < K, "infinite loop in Node::uncast_helper"); #endif - if (p == NULL || p->req() != 2) { + if (p == nullptr || p->req() != 2) { break; } else if (p->is_ConstraintCast()) { if (keep_deps && p->as_ConstraintCast()->carry_dependency()) { @@ -1033,36 +1033,36 @@ Node* Node::uncast_helper(const Node* p, bool keep_deps) { //------------------------------add_prec--------------------------------------- // Add a new precedence input. Precedence inputs are unordered, with -// duplicates removed and NULLs packed down at the end. +// duplicates removed and nulls packed down at the end. void Node::add_prec( Node *n ) { assert( is_not_dead(n), "can not use dead node"); - // Check for NULL at end + // Check for null at end if( _cnt >= _max || in(_max-1) ) grow( _max+1 ); // Find a precedence edge to move uint i = _cnt; - while( in(i) != NULL ) { + while( in(i) != nullptr ) { if (in(i) == n) return; // Avoid spec violation: duplicated prec edge. 
i++; } - _in[i] = n; // Stuff prec edge over NULL - if ( n != NULL) n->add_out((Node *)this); // Add mirror edge + _in[i] = n; // Stuff prec edge over null + if ( n != nullptr) n->add_out((Node *)this); // Add mirror edge #ifdef ASSERT - while ((++i)<_max) { assert(_in[i] == NULL, "spec violation: Gap in prec edges (node %d)", _idx); } + while ((++i)<_max) { assert(_in[i] == nullptr, "spec violation: Gap in prec edges (node %d)", _idx); } #endif Compile::current()->record_modified_node(this); } //------------------------------rm_prec---------------------------------------- // Remove a precedence input. Precedence inputs are unordered, with -// duplicates removed and NULLs packed down at the end. +// duplicates removed and nulls packed down at the end. void Node::rm_prec( uint j ) { assert(j < _max, "oob: i=%d, _max=%d", j, _max); assert(j >= _cnt, "not a precedence edge"); - if (_in[j] == NULL) return; // Avoid spec violation: Gap in prec edges. + if (_in[j] == nullptr) return; // Avoid spec violation: Gap in prec edges. _in[j]->del_out((Node *)this); close_prec_gap_at(j); Compile::current()->record_modified_node(this); @@ -1075,12 +1075,12 @@ uint Node::size_of() const { return sizeof(*this); } uint Node::ideal_reg() const { return 0; } //------------------------------jvms------------------------------------------- -JVMState* Node::jvms() const { return NULL; } +JVMState* Node::jvms() const { return nullptr; } #ifdef ASSERT //------------------------------jvms------------------------------------------- bool Node::verify_jvms(const JVMState* using_jvms) const { - for (JVMState* jvms = this->jvms(); jvms != NULL; jvms = jvms->caller()) { + for (JVMState* jvms = this->jvms(); jvms != nullptr; jvms = jvms->caller()) { if (jvms == using_jvms) return true; } return false; @@ -1160,13 +1160,13 @@ const Type* Node::Value(PhaseGVN* phase) const { // pointer. If ANY change is made, it must return the root of the reshaped // graph - even if the root is the same Node. 
Example: swapping the inputs // to an AddINode gives the same answer and same root, but you still have to -// return the 'this' pointer instead of NULL. +// return the 'this' pointer instead of null. // // You cannot return an OLD Node, except for the 'this' pointer. Use the // Identity call to return an old Node; basically if Identity can find -// another Node have the Ideal call make no change and return NULL. +// another Node have the Ideal call make no change and return null. // Example: AddINode::Ideal must check for add of zero; in this case it -// returns NULL instead of doing any graph reshaping. +// returns null instead of doing any graph reshaping. // // You cannot modify any old Nodes except for the 'this' pointer. Due to // sharing there may be other users of the old Nodes relying on their current @@ -1201,7 +1201,7 @@ const Type* Node::Value(PhaseGVN* phase) const { // the same Opcode as the 'this' pointer use 'clone'. // Node *Node::Ideal(PhaseGVN *phase, bool can_reshape) { - return NULL; // Default to being Ideal already + return nullptr; // Default to being Ideal already } // Some nodes have specific Ideal subgraph transformations only if they are @@ -1237,17 +1237,17 @@ bool Node::has_special_unique_user() const { //--------------------------find_exact_control--------------------------------- // Skip Proj and CatchProj nodes chains. Check for Null and Top. 
Node* Node::find_exact_control(Node* ctrl) { - if (ctrl == NULL && this->is_Region()) + if (ctrl == nullptr && this->is_Region()) ctrl = this->as_Region()->is_copy(); - if (ctrl != NULL && ctrl->is_CatchProj()) { + if (ctrl != nullptr && ctrl->is_CatchProj()) { if (ctrl->as_CatchProj()->_con == CatchProjNode::fall_through_index) ctrl = ctrl->in(0); - if (ctrl != NULL && !ctrl->is_top()) + if (ctrl != nullptr && !ctrl->is_top()) ctrl = ctrl->in(0); } - if (ctrl != NULL && ctrl->is_Proj()) + if (ctrl != nullptr && ctrl->is_Proj()) ctrl = ctrl->in(0); return ctrl; @@ -1261,7 +1261,7 @@ Node* Node::find_exact_control(Node* ctrl) { // not an exhaustive search for a counterexample. bool Node::dominates(Node* sub, Node_List &nlist) { assert(this->is_CFG(), "expecting control"); - assert(sub != NULL && sub->is_CFG(), "expecting control"); + assert(sub != nullptr && sub->is_CFG(), "expecting control"); // detect dead cycle without regions int iterations_without_region_limit = DominatorSearchLimit; @@ -1278,7 +1278,7 @@ bool Node::dominates(Node* sub, Node_List &nlist) { // same region again, go through a different input. Eventually we // will either exit through the loop head, or give up. // (If we get confused, break out and return a conservative 'false'.) - while (sub != NULL) { + while (sub != nullptr) { if (sub->is_top()) break; // Conservative answer for dead code. if (sub == dom) { if (nlist.size() == 0) { @@ -1345,7 +1345,7 @@ bool Node::dominates(Node* sub, Node_List &nlist) { uint skip = region_was_visited_before ? 
1 : 0; for (uint i = 1; i < sub->req(); i++) { Node* in = sub->in(i); - if (in != NULL && !in->is_top() && in != sub) { + if (in != nullptr && !in->is_top() && in != sub) { if (skip == 0) { up = in; break; @@ -1426,7 +1426,7 @@ static void kill_dead_code( Node *dead, PhaseIterGVN *igvn ) { Node* in = use->in(j); if (in == dead) { // Turn all dead inputs into TOP use->set_req(j, top); - } else if (in != NULL && !in->is_top()) { + } else if (in != nullptr && !in->is_top()) { dead_use = false; } } @@ -1450,7 +1450,7 @@ static void kill_dead_code( Node *dead, PhaseIterGVN *igvn ) { // Kill all inputs to the dead guy for (uint i=0; i < dead->req(); i++) { Node *n = dead->in(i); // Get input to dead guy - if (n != NULL && !n->is_top()) { // Input is valid? + if (n != nullptr && !n->is_top()) { // Input is valid? dead->set_req(i, top); // Smash input away if (n->outcnt() == 0) { // Input also goes dead? if (!n->is_Con()) @@ -1499,7 +1499,7 @@ bool Node::remove_dead_region(PhaseGVN *phase, bool can_reshape) { uint Node::hash() const { uint sum = 0; for( uint i=0; i<_cnt; i++ ) // Add in all inputs - sum = (sum<<1)-(uintptr_t)in(i); // Ignore embedded NULLs + sum = (sum<<1)-(uintptr_t)in(i); // Ignore embedded nulls return (sum>>2) + _cnt + Opcode(); } @@ -1536,7 +1536,7 @@ const TypeInt* Node::find_int_type() const { assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode"); return this->bottom_type()->isa_int(); } - return NULL; + return nullptr; } const TypeInteger* Node::find_integer_type(BasicType bt) const { @@ -1546,7 +1546,7 @@ const TypeInteger* Node::find_integer_type(BasicType bt) const { assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode"); return this->bottom_type()->isa_integer(bt); } - return NULL; + return nullptr; } // Get a pointer constant from a ConstNode. 
@@ -1571,7 +1571,7 @@ const TypeLong* Node::find_long_type() const { assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode"); return this->bottom_type()->isa_long(); } - return NULL; + return nullptr; } @@ -1581,9 +1581,9 @@ const TypeLong* Node::find_long_type() const { const TypePtr* Node::get_ptr_type() const { const TypePtr* tp = this->bottom_type()->make_ptr(); #ifdef ASSERT - if (tp == NULL) { + if (tp == nullptr) { this->dump(1); - assert((tp != NULL), "unexpected node type"); + assert((tp != nullptr), "unexpected node type"); } #endif return tp; @@ -1766,8 +1766,8 @@ Node* Node::find_ctrl(int idx) { //------------------------------find------------------------------------------- // Tries to find the node with the index |idx| starting from this node. If idx is negative, -// the search also includes forward (out) edges. Returns NULL if not found. -// If only_ctrl is set, the search will only be done on control nodes. Returns NULL if +// the search also includes forward (out) edges. Returns null if not found. +// If only_ctrl is set, the search will only be done on control nodes. Returns null if // not found or if the node to be found is not a control node (search will not find it). 
Node* Node::find(const int idx, bool only_ctrl) { ResourceMark rm; @@ -2070,10 +2070,10 @@ void PrintBFS::print_options_help(bool print_examples) { tty->print("Arguments:\n"); tty->print(" this/start: staring point of BFS\n"); tty->print(" target:\n"); - tty->print(" if nullptr: simple BFS\n"); + tty->print(" if null: simple BFS\n"); tty->print(" else: shortest path or all paths between this/start and target\n"); tty->print(" options:\n"); - tty->print(" if nullptr: same as \"cdmox@B\"\n"); + tty->print(" if null: same as \"cdmox@B\"\n"); tty->print(" else: use combination of following characters\n"); tty->print(" h: display this help info\n"); tty->print(" H: display this help info, with examples\n"); @@ -2447,7 +2447,7 @@ const char *Node::Name() const { return NodeClassNames[Opcode()]; } static bool is_disconnected(const Node* n) { for (uint i = 0; i < n->req(); i++) { - if (n->in(i) != NULL) return false; + if (n->in(i) != nullptr) return false; } return true; } @@ -2456,15 +2456,15 @@ static bool is_disconnected(const Node* n) { void Node::dump_orig(outputStream *st, bool print_key) const { Compile* C = Compile::current(); Node* orig = _debug_orig; - if (not_a_node(orig)) orig = NULL; - if (orig != NULL && !C->node_arena()->contains(orig)) orig = NULL; - if (orig == NULL) return; + if (not_a_node(orig)) orig = nullptr; + if (orig != nullptr && !C->node_arena()->contains(orig)) orig = nullptr; + if (orig == nullptr) return; if (print_key) { st->print(" !orig="); } Node* fast = orig->debug_orig(); // tortoise & hare algorithm to detect loops - if (not_a_node(fast)) fast = NULL; - while (orig != NULL) { + if (not_a_node(fast)) fast = nullptr; + while (orig != nullptr) { bool discon = is_disconnected(orig); // if discon, print [123] else 123 if (discon) st->print("["); if (!Compile::current()->node_arena()->contains(orig)) @@ -2472,16 +2472,16 @@ void Node::dump_orig(outputStream *st, bool print_key) const { st->print("%d", orig->_idx); if (discon) st->print("]"); 
orig = orig->debug_orig(); - if (not_a_node(orig)) orig = NULL; - if (orig != NULL && !C->node_arena()->contains(orig)) orig = NULL; - if (orig != NULL) st->print(","); - if (fast != NULL) { + if (not_a_node(orig)) orig = nullptr; + if (orig != nullptr && !C->node_arena()->contains(orig)) orig = nullptr; + if (orig != nullptr) st->print(","); + if (fast != nullptr) { // Step fast twice for each single step of orig: fast = fast->debug_orig(); - if (not_a_node(fast)) fast = NULL; - if (fast != NULL && fast != orig) { + if (not_a_node(fast)) fast = nullptr; + if (fast != nullptr && fast != orig) { fast = fast->debug_orig(); - if (not_a_node(fast)) fast = NULL; + if (not_a_node(fast)) fast = nullptr; } if (fast == orig) { st->print("..."); @@ -2494,16 +2494,16 @@ void Node::dump_orig(outputStream *st, bool print_key) const { void Node::set_debug_orig(Node* orig) { _debug_orig = orig; if (BreakAtNode == 0) return; - if (not_a_node(orig)) orig = NULL; + if (not_a_node(orig)) orig = nullptr; int trip = 10; - while (orig != NULL) { + while (orig != nullptr) { if (orig->debug_idx() == BreakAtNode || (int)orig->_idx == BreakAtNode) { tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d orig._idx=%d orig._debug_idx=%d", this->_idx, this->debug_idx(), orig->_idx, orig->debug_idx()); BREAKPOINT; } orig = orig->debug_orig(); - if (not_a_node(orig)) orig = NULL; + if (not_a_node(orig)) orig = nullptr; if (trip-- <= 0) break; } } @@ -2552,7 +2552,7 @@ void Node::dump(const char* suffix, bool mark, outputStream* st, DumpConfig* dc) const Type *t = bottom_type(); - if (t != NULL && (t->isa_instptr() || t->isa_instklassptr())) { + if (t != nullptr && (t->isa_instptr() || t->isa_instklassptr())) { const TypeInstPtr *toop = t->isa_instptr(); const TypeInstKlassPtr *tkls = t->isa_instklassptr(); if (toop) { @@ -2578,8 +2578,8 @@ void Node::dump(const char* suffix, bool mark, outputStream* st, DumpConfig* dc) if (is_new) { DEBUG_ONLY(dump_orig(st)); Node_Notes* nn = C->node_notes_at(_idx); 
- if (nn != NULL && !nn->is_clear()) { - if (nn->jvms() != NULL) { + if (nn != nullptr && !nn->is_clear()) { + if (nn->jvms() != nullptr) { st->print(" !jvms:"); nn->jvms()->dump_spec(st); } @@ -2599,7 +2599,7 @@ void Node::dump_req(outputStream* st, DumpConfig* dc) const { // Dump the required input edges for (uint i = 0; i < req(); i++) { // For all required inputs Node* d = in(i); - if (d == NULL) { + if (d == nullptr) { st->print("_ "); } else if (not_a_node(d)) { st->print("not_a_node "); // uninitialized, sentinel, garbage, etc. @@ -2617,7 +2617,7 @@ void Node::dump_prec(outputStream* st, DumpConfig* dc) const { int any_prec = 0; for (uint i = req(); i < len(); i++) { // For all precedence inputs Node* p = in(i); - if (p != NULL) { + if (p != nullptr) { if (!any_prec++) st->print(" |"); if (not_a_node(p)) { st->print("not_a_node "); continue; } p->dump_idx(false, st, dc); @@ -2633,7 +2633,7 @@ void Node::dump_out(outputStream* st, DumpConfig* dc) const { // Dump the output edges for (uint i = 0; i < _outcnt; i++) { // For all outputs Node* u = _out[i]; - if (u == NULL) { + if (u == nullptr) { st->print("_ "); } else if (not_a_node(u)) { st->print("not_a_node "); @@ -2691,7 +2691,7 @@ void Node::verify(int verify_depth, VectorSet& visited, Node_List& worklist) { Node* n = worklist[list_index]; if (n->is_Con() && n->bottom_type() == Type::TOP) { - if (C->cached_top_node() == NULL) { + if (C->cached_top_node() == nullptr) { C->set_cached_top_node((Node*)n); } assert(C->cached_top_node() == n, "TOP node must be unique"); @@ -2787,14 +2787,14 @@ void Node_Array::insert(uint i, Node* n) { void Node_Array::remove(uint i) { Copy::conjoint_words_to_lower((HeapWord*)&_nodes[i + 1], (HeapWord*)&_nodes[i], ((_max - i - 1) * sizeof(Node*))); - _nodes[_max - 1] = NULL; + _nodes[_max - 1] = nullptr; } void Node_Array::dump() const { #ifndef PRODUCT for (uint i = 0; i < _max; i++) { Node* nn = _nodes[i]; - if (nn != NULL) { + if (nn != nullptr) { tty->print("%5d--> ",i); 
nn->dump(); } } @@ -2809,7 +2809,7 @@ bool Node::is_iteratively_computed() { if (ideal_reg()) { // does operation have a result register? for (uint i = 1; i < req(); i++) { Node* n = in(i); - if (n != NULL && n->is_Phi()) { + if (n != nullptr && n->is_Phi()) { for (uint j = 1; j < n->req(); j++) { if (n->in(j) == this) { return true; @@ -2823,7 +2823,7 @@ bool Node::is_iteratively_computed() { //--------------------------find_similar------------------------------ // Return a node with opcode "opc" and same inputs as "this" if one can -// be found; Otherwise return NULL; +// be found; Otherwise return null; Node* Node::find_similar(int opc) { if (req() >= 2) { Node* def = in(1); @@ -2846,19 +2846,19 @@ Node* Node::find_similar(int opc) { } } } - return NULL; + return nullptr; } //--------------------------unique_ctrl_out_or_null------------------------- // Return the unique control out if only one. Null if none or more than one. Node* Node::unique_ctrl_out_or_null() const { - Node* found = NULL; + Node* found = nullptr; for (uint i = 0; i < outcnt(); i++) { Node* use = raw_out(i); if (use->is_CFG() && use != this) { - if (found != NULL) { - return NULL; + if (found != nullptr) { + return nullptr; } found = use; } @@ -2870,12 +2870,12 @@ Node* Node::unique_ctrl_out_or_null() const { // Return the unique control out. Asserts if none or more than one control out. 
Node* Node::unique_ctrl_out() const { Node* ctrl = unique_ctrl_out_or_null(); - assert(ctrl != NULL, "control out is assumed to be unique"); + assert(ctrl != nullptr, "control out is assumed to be unique"); return ctrl; } void Node::ensure_control_or_add_prec(Node* c) { - if (in(0) == NULL) { + if (in(0) == nullptr) { set_req(0, c); } else if (in(0) != c) { add_prec(c); @@ -2886,7 +2886,7 @@ bool Node::is_dead_loop_safe() const { if (is_Phi()) { return true; } - if (is_Proj() && in(0) == NULL) { + if (is_Proj() && in(0) == nullptr) { return true; } if ((_flags & (Flag_is_dead_loop_safe | Flag_is_Con)) != 0) { @@ -2939,7 +2939,7 @@ void Node_List::dump_simple() const { if( _nodes[i] ) { tty->print(" %d", _nodes[i]->_idx); } else { - tty->print(" NULL"); + tty->print(" null"); } } #endif @@ -2965,7 +2965,7 @@ void Unique_Node_List::remove(Node* n) { void Unique_Node_List::remove_useless_nodes(VectorSet &useful) { for (uint i = 0; i < size(); ++i) { Node *n = at(i); - assert( n != NULL, "Did not expect null entries in worklist"); + assert( n != nullptr, "Did not expect null entries in worklist"); if (!useful.test(n->_idx)) { _in_worklist.remove(n->_idx); map(i, Node_List::pop()); @@ -2993,7 +2993,7 @@ Node* Node_Stack::find(uint idx) const { return node_at(i); } } - return NULL; + return nullptr; } //============================================================================= diff --git a/src/hotspot/share/opto/node.hpp b/src/hotspot/share/opto/node.hpp index 12fde6db48b..107654752bf 100644 --- a/src/hotspot/share/opto/node.hpp +++ b/src/hotspot/share/opto/node.hpp @@ -254,7 +254,7 @@ public: // Create a new Node with given input edges. // This version requires use of the "edge-count" new. - // E.g. new (C,3) FooNode( C, NULL, left, right ); + // E.g. 
new (C,3) FooNode( C, nullptr, left, right ); Node( Node *n0 ); Node( Node *n0, Node *n1 ); Node( Node *n0, Node *n1, Node *n2 ); @@ -270,10 +270,10 @@ public: // Clone a Node, immediately supplying one or two new edges. // The first and second arguments, if non-null, replace in(1) and in(2), // respectively. - Node* clone_with_data_edge(Node* in1, Node* in2 = NULL) const { + Node* clone_with_data_edge(Node* in1, Node* in2 = nullptr) const { Node* nn = clone(); - if (in1 != NULL) nn->set_req(1, in1); - if (in2 != NULL) nn->set_req(2, in2); + if (in1 != nullptr) nn->set_req(1, in1); + if (in2 != nullptr) nn->set_req(2, in2); return nn; } @@ -292,10 +292,10 @@ protected: Node **_out; // Array of def-use references to Nodes // Input edges are split into two categories. Required edges are required - // for semantic correctness; order is important and NULLs are allowed. + // for semantic correctness; order is important and nulls are allowed. // Precedence edges are used to help determine execution order and are // added, e.g., for scheduling purposes. They are unordered and not - // duplicated; they have no embedded NULLs. Edges from 0 to _cnt-1 + // duplicated; they have no embedded nulls. Edges from 0 to _cnt-1 // are required, from _cnt to _max-1 are precedence edges. node_idx_t _cnt; // Total number of required Node inputs. @@ -390,8 +390,8 @@ protected: // Reference to the i'th input Node. Error if out of bounds. Node* in(uint i) const { assert(i < _max, "oob: i=%d, _max=%d", i, _max); return _in[i]; } - // Reference to the i'th input Node. NULL if out of bounds. - Node* lookup(uint i) const { return ((i < _max) ? _in[i] : NULL); } + // Reference to the i'th input Node. null if out of bounds. + Node* lookup(uint i) const { return ((i < _max) ? _in[i] : nullptr); } // Reference to the i'th output Node. Error if out of bounds. // Use this accessor sparingly. We are going trying to use iterators instead. 
Node* raw_out(uint i) const { assert(i < _outcnt,"oob"); return _out[i]; } @@ -434,9 +434,9 @@ protected: assert( !VerifyHashTableKeys || _hash_lock == 0, "remove node from hash table before modifying it"); Node** p = &_in[i]; // cache this._in, across the del_out call - if (*p != NULL) (*p)->del_out((Node *)this); + if (*p != nullptr) (*p)->del_out((Node *)this); (*p) = n; - if (n != NULL) n->add_out((Node *)this); + if (n != nullptr) n->add_out((Node *)this); Compile::current()->record_modified_node(this); } // Light version of set_req() to init inputs after node creation. @@ -446,9 +446,9 @@ protected: assert( i < _cnt, "oob"); assert( !VerifyHashTableKeys || _hash_lock == 0, "remove node from hash table before modifying it"); - assert( _in[i] == NULL, "sanity"); + assert( _in[i] == nullptr, "sanity"); _in[i] = n; - if (n != NULL) n->add_out((Node *)this); + if (n != nullptr) n->add_out((Node *)this); Compile::current()->record_modified_node(this); } // Find first occurrence of n among my edges: @@ -456,22 +456,22 @@ protected: int find_prec_edge(Node* n) { for (uint i = req(); i < len(); i++) { if (_in[i] == n) return i; - if (_in[i] == NULL) { - DEBUG_ONLY( while ((++i) < len()) assert(_in[i] == NULL, "Gap in prec edges!"); ) + if (_in[i] == nullptr) { + DEBUG_ONLY( while ((++i) < len()) assert(_in[i] == nullptr, "Gap in prec edges!"); ) break; } } return -1; } - int replace_edge(Node* old, Node* neww, PhaseGVN* gvn = NULL); + int replace_edge(Node* old, Node* neww, PhaseGVN* gvn = nullptr); int replace_edges_in_range(Node* old, Node* neww, int start, int end, PhaseGVN* gvn); - // NULL out all inputs to eliminate incoming Def-Use edges. + // null out all inputs to eliminate incoming Def-Use edges. void disconnect_inputs(Compile* C); // Quickly, return true if and only if I am Compile::current()->top(). 
bool is_top() const { - assert((this == (Node*) Compile::current()->top()) == (_out == NULL), ""); - return (_out == NULL); + assert((this == (Node*) Compile::current()->top()) == (_out == nullptr), ""); + return (_out == nullptr); } // Reaffirm invariants for is_top. (Only from Compile::set_cached_top_node.) void setup_is_top(); @@ -519,14 +519,14 @@ private: void close_prec_gap_at(uint gap) { assert(_cnt <= gap && gap < _max, "no valid prec edge"); uint i = gap; - Node *last = NULL; + Node *last = nullptr; for (; i < _max-1; ++i) { Node *next = _in[i+1]; - if (next == NULL) break; + if (next == nullptr) break; last = next; } - _in[gap] = last; // Move last slot to empty one. - _in[i] = NULL; // NULL out last slot. + _in[gap] = last; // Move last slot to empty one. + _in[i] = nullptr; // null out last slot. } public: @@ -553,11 +553,11 @@ public: assert(i >= _cnt, "not a precedence edge"); // Avoid spec violation: duplicated prec edge. if (_in[i] == n) return; - if (n == NULL || find_prec_edge(n) != -1) { + if (n == nullptr || find_prec_edge(n) != -1) { rm_prec(i); return; } - if (_in[i] != NULL) _in[i]->del_out((Node *)this); + if (_in[i] != nullptr) _in[i]->del_out((Node *)this); _in[i] = n; n->add_out((Node *)this); Compile::current()->record_modified_node(this); @@ -582,7 +582,7 @@ public: // Iterators over input Nodes for a Node X are written as: // for( i = 0; i < X.req(); i++ ) ... X[i] ... - // NOTE: Required edges can contain embedded NULL pointers. + // NOTE: Required edges can contain embedded null pointers. //----------------- Other Node Properties @@ -837,11 +837,11 @@ public: return ((_class_id & ClassMask_##type) == Class_##type); \ } \ type##Node *as_##type() const { \ - assert(is_##type(), "invalid node class: %s", Name()); \ + assert(is_##type(), "invalid node class: %s", Name()); \ return (type##Node*)this; \ } \ type##Node* isa_##type() const { \ - return (is_##type()) ? as_##type() : NULL; \ + return (is_##type()) ? 
as_##type() : nullptr; \ } DEFINE_CLASS_QUERY(AbstractLock) @@ -1000,7 +1000,7 @@ public: // The node is a "macro" node which needs to be expanded before matching bool is_macro() const { return (_flags & Flag_is_macro) != 0; } // The node is expensive: the best control is set during loop opts - bool is_expensive() const { return (_flags & Flag_is_expensive) != 0 && in(0) != NULL; } + bool is_expensive() const { return (_flags & Flag_is_expensive) != 0 && in(0) != nullptr; } // An arithmetic node which accumulates a data in a loop. // It must have the loop's phi as input and provide a def to the phi. @@ -1026,10 +1026,10 @@ public: void raise_bottom_type(const Type* new_type); // Get the address type with which this node uses and/or defs memory, - // or NULL if none. The address type is conservatively wide. + // or null if none. The address type is conservatively wide. // Returns non-null for calls, membars, loads, stores, etc. // Returns TypePtr::BOTTOM if the node touches memory "broadly". - virtual const class TypePtr *adr_type() const { return NULL; } + virtual const class TypePtr *adr_type() const { return nullptr; } // Return an existing node which computes the same function as this node. // The optimistic combined algorithm requires this to return a Node which @@ -1087,7 +1087,7 @@ public: bool is_cloop_ind_var() const; // Return a node with opcode "opc" and same inputs as "this" if one can - // be found; Otherwise return NULL; + // be found; Otherwise return null; Node* find_similar(int opc); // Return the unique control out if only one. Null if none or more than one. @@ -1117,7 +1117,7 @@ public: // Should we clone rather than spill this instruction? bool rematerialize() const; - // Return JVM State Object if this Node carries debug info, or NULL otherwise + // Return JVM State Object if this Node carries debug info, or null otherwise virtual JVMState* jvms() const; // Print as assembly @@ -1133,12 +1133,12 @@ public: // return value_if_unknown. 
jint find_int_con(jint value_if_unknown) const { const TypeInt* t = find_int_type(); - return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown; + return (t != nullptr && t->is_con()) ? t->get_con() : value_if_unknown; } // Return the constant, knowing it is an integer constant already jint get_int() const { const TypeInt* t = find_int_type(); - guarantee(t != NULL, "must be con"); + guarantee(t != nullptr, "must be con"); return t->get_con(); } // Here's where the work is done. Can produce non-constant int types too. @@ -1148,23 +1148,23 @@ public: // Same thing for long (and intptr_t, via type.hpp): jlong get_long() const { const TypeLong* t = find_long_type(); - guarantee(t != NULL, "must be con"); + guarantee(t != nullptr, "must be con"); return t->get_con(); } jlong find_long_con(jint value_if_unknown) const { const TypeLong* t = find_long_type(); - return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown; + return (t != nullptr && t->is_con()) ? t->get_con() : value_if_unknown; } const TypeLong* find_long_type() const; jlong get_integer_as_long(BasicType bt) const { const TypeInteger* t = find_integer_type(bt); - guarantee(t != NULL && t->is_con(), "must be con"); + guarantee(t != nullptr && t->is_con(), "must be con"); return t->get_con_as_long(bt); } jlong find_integer_as_long(BasicType bt, jlong value_if_unknown) const { const TypeInteger* t = find_integer_type(bt); - if (t == NULL || !t->is_con()) return value_if_unknown; + if (t == nullptr || !t->is_con()) return value_if_unknown; return t->get_con_as_long(bt); } const TypePtr* get_ptr_type() const; @@ -1258,7 +1258,7 @@ public: }; inline bool not_a_node(const Node* n) { - if (n == NULL) return true; + if (n == nullptr) return true; if (((intptr_t)n & 1) != 0) return true; // uninitialized, etc. 
if (*(address*)n == badAddress) return true; // kill by Node::destruct return false; @@ -1518,7 +1518,7 @@ class SimpleDUIterator : public StackObj { //----------------------------------------------------------------------------- // Map dense integer indices to Nodes. Uses classic doubling-array trick. -// Abstractly provides an infinite array of Node*'s, initialized to NULL. +// Abstractly provides an infinite array of Node*'s, initialized to null. // Note that the constructor just zeros things, and since I use Arena // allocation I do not need a destructor to reclaim storage. class Node_Array : public AnyObj { @@ -1535,15 +1535,15 @@ public: } Node_Array(Node_Array* na) : _a(na->_a), _max(na->_max), _nodes(na->_nodes) {} - Node *operator[] ( uint i ) const // Lookup, or NULL for not mapped - { return (i<_max) ? _nodes[i] : (Node*)NULL; } + Node *operator[] ( uint i ) const // Lookup, or null for not mapped + { return (i<_max) ? _nodes[i] : (Node*)nullptr; } Node* at(uint i) const { assert(i<_max,"oob"); return _nodes[i]; } Node** adr() { return _nodes; } // Extend the mapping: index i maps to Node *n. void map( uint i, Node *n ) { if( i>=_max ) grow(i); _nodes[i] = n; } void insert( uint i, Node *n ); void remove( uint i ); // Remove, preserving order - // Clear all entries in _nodes to NULL but keep storage + // Clear all entries in _nodes to null but keep storage void clear() { Copy::zero_to_bytes(_nodes, _max * sizeof(Node*)); } @@ -1646,7 +1646,7 @@ public: void add(Node* node) { if (not_a_node(node)) { - return; // Gracefully handle NULL, -1, 0xabababab, etc. + return; // Gracefully handle null, -1, 0xabababab, etc. } if (_visited_set[node] == nullptr) { _visited_set.Insert(node, node); @@ -1756,7 +1756,7 @@ class Node_Notes { JVMState* _jvms; public: - Node_Notes(JVMState* jvms = NULL) { + Node_Notes(JVMState* jvms = nullptr) { _jvms = jvms; } @@ -1765,12 +1765,12 @@ public: // True if there is nothing here. 
bool is_clear() { - return (_jvms == NULL); + return (_jvms == nullptr); } // Make there be nothing here. void clear() { - _jvms = NULL; + _jvms = nullptr; } // Make a new, clean node notes. @@ -1789,8 +1789,8 @@ public: // Absorb any information from source. bool update_from(Node_Notes* source) { bool changed = false; - if (source != NULL) { - if (source->jvms() != NULL) { + if (source != nullptr) { + if (source->jvms() != nullptr) { set_jvms(source->jvms()); changed = true; } @@ -1805,22 +1805,22 @@ Compile::locate_node_notes(GrowableArray* arr, int idx, bool can_grow) { assert(idx >= 0, "oob"); int block_idx = (idx >> _log2_node_notes_block_size); - int grow_by = (block_idx - (arr == NULL? 0: arr->length())); + int grow_by = (block_idx - (arr == nullptr? 0: arr->length())); if (grow_by >= 0) { - if (!can_grow) return NULL; + if (!can_grow) return nullptr; grow_node_notes(arr, grow_by + 1); } - if (arr == NULL) return NULL; + if (arr == nullptr) return nullptr; // (Every element of arr is a sub-array of length _node_notes_block_size.) return arr->at(block_idx) + (idx & (_node_notes_block_size-1)); } inline bool Compile::set_node_notes_at(int idx, Node_Notes* value) { - if (value == NULL || value->is_clear()) + if (value == nullptr || value->is_clear()) return false; // nothing to write => write nothing Node_Notes* loc = locate_node_notes(_node_note_array, idx, true); - assert(loc != NULL, ""); + assert(loc != nullptr, ""); return loc->update_from(value); } @@ -1835,13 +1835,13 @@ protected: const Type* const _type; public: void set_type(const Type* t) { - assert(t != NULL, "sanity"); + assert(t != nullptr, "sanity"); debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH); *(const Type**)&_type = t; // cast away const-ness // If this node is in the hash table, make sure it doesn't need a rehash. 
assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code"); } - const Type* type() const { assert(_type != NULL, "sanity"); return _type; }; + const Type* type() const { assert(_type != nullptr, "sanity"); return _type; }; TypeNode( const Type *t, uint required ) : Node(required), _type(t) { init_class_id(Class_Type); } diff --git a/src/hotspot/share/opto/opaquenode.cpp b/src/hotspot/share/opto/opaquenode.cpp index 42e59cacd49..3a5ea42f44a 100644 --- a/src/hotspot/share/opto/opaquenode.cpp +++ b/src/hotspot/share/opto/opaquenode.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -76,7 +76,7 @@ Node *ProfileBooleanNode::Ideal(PhaseGVN *phase, bool can_reshape) { _delay_removal = false; return this; } else { - return NULL; + return nullptr; } } diff --git a/src/hotspot/share/opto/opaquenode.hpp b/src/hotspot/share/opto/opaquenode.hpp index a5722a705f5..1d880b6d996 100644 --- a/src/hotspot/share/opto/opaquenode.hpp +++ b/src/hotspot/share/opto/opaquenode.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,7 +36,7 @@ class Opaque1Node : public Node { virtual uint hash() const ; // { return NO_HASH; } virtual bool cmp( const Node &n ) const; public: - Opaque1Node(Compile* C, Node *n) : Node(NULL, n) { + Opaque1Node(Compile* C, Node *n) : Node(nullptr, n) { // Put it on the Macro nodes list to removed during macro nodes expansion. 
init_flags(Flag_is_macro); init_class_id(Class_Opaque1); @@ -44,13 +44,13 @@ class Opaque1Node : public Node { } // Special version for the pre-loop to hold the original loop limit // which is consumed by range check elimination. - Opaque1Node(Compile* C, Node *n, Node* orig_limit) : Node(NULL, n, orig_limit) { + Opaque1Node(Compile* C, Node *n, Node* orig_limit) : Node(nullptr, n, orig_limit) { // Put it on the Macro nodes list to removed during macro nodes expansion. init_flags(Flag_is_macro); init_class_id(Class_Opaque1); C->add_macro_node(this); } - Node* original_loop_limit() { return req()==3 ? in(2) : NULL; } + Node* original_loop_limit() { return req()==3 ? in(2) : nullptr; } virtual int Opcode() const; virtual const Type *bottom_type() const { return TypeInt::INT; } virtual Node* Identity(PhaseGVN* phase); @@ -115,7 +115,7 @@ class Opaque3Node : public Node { // GraphKit::must_be_not_null(). class Opaque4Node : public Node { public: - Opaque4Node(Compile* C, Node *tst, Node* final_tst) : Node(NULL, tst, final_tst) { + Opaque4Node(Compile* C, Node *tst, Node* final_tst) : Node(nullptr, tst, final_tst) { init_flags(Flag_is_macro); C->add_macro_node(this); } diff --git a/src/hotspot/share/opto/output.cpp b/src/hotspot/share/opto/output.cpp index 7ee4000efde..4802125959a 100644 --- a/src/hotspot/share/opto/output.cpp +++ b/src/hotspot/share/opto/output.cpp @@ -225,29 +225,29 @@ PhaseOutput::PhaseOutput() _handler_table(), _inc_table(), _stub_list(), - _oop_map_set(NULL), - _scratch_buffer_blob(NULL), - _scratch_locs_memory(NULL), + _oop_map_set(nullptr), + _scratch_buffer_blob(nullptr), + _scratch_locs_memory(nullptr), _scratch_const_size(-1), _in_scratch_emit_size(false), _frame_slots(0), _code_offsets(), _node_bundling_limit(0), - _node_bundling_base(NULL), + _node_bundling_base(nullptr), _orig_pc_slot(0), _orig_pc_slot_offset_in_bytes(0), _buf_sizes(), - _block(NULL), + _block(nullptr), _index(0) { C->set_output(this); - if (C->stub_name() == NULL) { + if 
(C->stub_name() == nullptr) { _orig_pc_slot = C->fixed_slots() - (sizeof(address) / VMRegImpl::stack_slot_size); } } PhaseOutput::~PhaseOutput() { - C->set_output(NULL); - if (_scratch_buffer_blob != NULL) { + C->set_output(nullptr); + if (_scratch_buffer_blob != nullptr) { BufferBlob::free(_scratch_buffer_blob); } } @@ -348,7 +348,7 @@ void PhaseOutput::Output() { // Complete sizing of codebuffer CodeBuffer* cb = init_buffer(); - if (cb == NULL || C->failing()) { + if (cb == nullptr || C->failing()) { return; } @@ -369,7 +369,7 @@ bool PhaseOutput::need_stack_bang(int frame_size_in_bytes) const { // unexpected stack overflow (compiled method stack banging should // guarantee it doesn't happen) so we always need the stack bang in // a debug VM. - return (C->stub_function() == NULL && + return (C->stub_function() == nullptr && (C->has_java_calls() || frame_size_in_bytes > (int)(os::vm_page_size())>>3 DEBUG_ONLY(|| true))); } @@ -379,7 +379,7 @@ bool PhaseOutput::need_register_stack_bang() const { // This is only used on architectures which have split register // and memory stacks (ie. IA64). // Bang if the method is not a stub function and has java calls - return (C->stub_function() == NULL && C->has_java_calls()); + return (C->stub_function() == nullptr && C->has_java_calls()); } @@ -585,8 +585,8 @@ void PhaseOutput::shorten_branches(uint* blk_starts) { for (uint i = 0; i < nblocks; i++) { Block* block = C->cfg()->get_block(i); int idx = jmp_nidx[i]; - MachNode* mach = (idx == -1) ? NULL: block->get_node(idx)->as_Mach(); - if (mach != NULL && mach->may_be_short_branch()) { + MachNode* mach = (idx == -1) ? 
nullptr: block->get_node(idx)->as_Mach(); + if (mach != nullptr && mach->may_be_short_branch()) { #ifdef ASSERT assert(jmp_size[i] > 0 && mach->is_MachBranch(), "sanity"); int j; @@ -650,7 +650,7 @@ void PhaseOutput::shorten_branches(uint* blk_starts) { has_short_branch_candidate = true; } } // (mach->may_be_short_branch()) - if (mach != NULL && (mach->may_be_short_branch() || + if (mach != nullptr && (mach->may_be_short_branch() || mach->avoid_back_to_back(MachNode::AVOID_AFTER))) { last_may_be_short_branch_adr = blk_starts[i] + jmp_offset[i] + jmp_size[i]; } @@ -716,12 +716,12 @@ PhaseOutput::sv_for_node_id(GrowableArray *objs, int id) { } } // Otherwise.. - return NULL; + return nullptr; } void PhaseOutput::set_sv_for_object_node(GrowableArray *objs, ObjectValue* sv ) { - assert(sv_for_node_id(objs, sv->id()) == NULL, "Precondition"); + assert(sv_for_node_id(objs, sv->id()) == nullptr, "Precondition"); objs->append(sv); } @@ -750,7 +750,7 @@ void PhaseOutput::FillLocArray( int idx, MachSafePointNode* sfpt, Node *local, SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject(); ObjectValue* sv = sv_for_node_id(objs, spobj->_idx); - if (sv == NULL) { + if (sv == nullptr) { ciKlass* cik = t->is_oopptr()->exact_klass(); assert(cik->is_instance_klass() || cik->is_array_klass(), "Not supported allocation."); @@ -845,7 +845,7 @@ void PhaseOutput::FillLocArray( int idx, MachSafePointNode* sfpt, Node *local, ShouldNotReachHere(); // Caller should skip 2nd halves break; case Type::AnyPtr: - array->append(new ConstantOopWriteValue(NULL)); + array->append(new ConstantOopWriteValue(nullptr)); break; case Type::AryPtr: case Type::InstPtr: // fall through @@ -853,7 +853,7 @@ void PhaseOutput::FillLocArray( int idx, MachSafePointNode* sfpt, Node *local, break; case Type::NarrowOop: if (t == TypeNarrowOop::NULL_PTR) { - array->append(new ConstantOopWriteValue(NULL)); + array->append(new ConstantOopWriteValue(nullptr)); } else { array->append(new 
ConstantOopWriteValue(t->make_ptr()->isa_oopptr()->const_oop()->constant_encoding())); } @@ -945,7 +945,7 @@ void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) { // Add the safepoint in the DebugInfoRecorder if( !mach->is_MachCall() ) { - mcall = NULL; + mcall = nullptr; C->debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map); } else { mcall = mach->as_MachCall(); @@ -968,7 +968,7 @@ void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) { } // Loop over the JVMState list to add scope information - // Do not skip safepoints with a NULL method, they need monitor info + // Do not skip safepoints with a null method, they need monitor info JVMState* youngest_jvms = sfn->jvms(); int max_depth = youngest_jvms->depth(); @@ -981,13 +981,13 @@ void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) { for (int depth = 1; depth <= max_depth; depth++) { JVMState* jvms = youngest_jvms->of_depth(depth); int idx; - ciMethod* method = jvms->has_method() ? jvms->method() : NULL; + ciMethod* method = jvms->has_method() ? jvms->method() : nullptr; // Safepoints that do not have method() set only provide oop-map and monitor info // to support GC; these do not support deoptimization. - int num_locs = (method == NULL) ? 0 : jvms->loc_size(); - int num_exps = (method == NULL) ? 0 : jvms->stk_size(); + int num_locs = (method == nullptr) ? 0 : jvms->loc_size(); + int num_exps = (method == nullptr) ? 
0 : jvms->stk_size(); int num_mon = jvms->nof_monitors(); - assert(method == NULL || jvms->bci() < 0 || num_locs == method->max_locals(), + assert(method == nullptr || jvms->bci() < 0 || num_locs == method->max_locals(), "JVMS local count must match that of the method"); // Add Local and Expression Stack Information @@ -1022,12 +1022,12 @@ void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) { Node* obj_node = sfn->monitor_obj(jvms, idx); // Create ScopeValue for object - ScopeValue *scval = NULL; + ScopeValue *scval = nullptr; if (obj_node->is_SafePointScalarObject()) { SafePointScalarObjectNode* spobj = obj_node->as_SafePointScalarObject(); scval = PhaseOutput::sv_for_node_id(objs, spobj->_idx); - if (scval == NULL) { + if (scval == nullptr) { const Type *t = spobj->bottom_type(); ciKlass* cik = t->is_oopptr()->exact_klass(); assert(cik->is_instance_klass() || @@ -1111,7 +1111,7 @@ class NonSafepointEmitter { public: NonSafepointEmitter(Compile* compile) { this->C = compile; - _pending_jvms = NULL; + _pending_jvms = nullptr; _pending_offset = 0; } @@ -1119,19 +1119,19 @@ class NonSafepointEmitter { if (!C->debug_info()->recording_non_safepoints()) return; Node_Notes* nn = C->node_notes_at(n->_idx); - if (nn == NULL || nn->jvms() == NULL) return; - if (_pending_jvms != NULL && + if (nn == nullptr || nn->jvms() == nullptr) return; + if (_pending_jvms != nullptr && _pending_jvms->same_calls_as(nn->jvms())) { // Repeated JVMS? Stretch it up here. 
_pending_offset = pc_offset; } else { - if (_pending_jvms != NULL && + if (_pending_jvms != nullptr && _pending_offset < pc_offset) { emit_non_safepoint(); } - _pending_jvms = NULL; + _pending_jvms = nullptr; if (pc_offset > C->debug_info()->last_pc_offset()) { - // This is the only way _pending_jvms can become non-NULL: + // This is the only way _pending_jvms can become non-null: _pending_jvms = nn->jvms(); _pending_offset = pc_offset; } @@ -1140,19 +1140,19 @@ class NonSafepointEmitter { // Stay out of the way of real safepoints: void observe_safepoint(JVMState* jvms, int pc_offset) { - if (_pending_jvms != NULL && + if (_pending_jvms != nullptr && !_pending_jvms->same_calls_as(jvms) && _pending_offset < pc_offset) { emit_non_safepoint(); } - _pending_jvms = NULL; + _pending_jvms = nullptr; } void flush_at_end() { - if (_pending_jvms != NULL) { + if (_pending_jvms != nullptr) { emit_non_safepoint(); } - _pending_jvms = NULL; + _pending_jvms = nullptr; } }; @@ -1161,7 +1161,7 @@ void NonSafepointEmitter::emit_non_safepoint() { int pc_offset = _pending_offset; // Clear it now: - _pending_jvms = NULL; + _pending_jvms = nullptr; DebugInformationRecorder* debug_info = C->debug_info(); assert(debug_info->recording_non_safepoints(), "sanity"); @@ -1172,7 +1172,7 @@ void NonSafepointEmitter::emit_non_safepoint() { // Visit scopes from oldest to youngest. for (int depth = 1; depth <= max_depth; depth++) { JVMState* jvms = youngest_jvms->of_depth(depth); - ciMethod* method = jvms->has_method() ? jvms->method() : NULL; + ciMethod* method = jvms->has_method() ? jvms->method() : nullptr; assert(!jvms->should_reexecute() || depth==max_depth, "reexecute allowed only for the youngest"); methodHandle null_mh; debug_info->describe_scope(pc_offset, null_mh, method, jvms->bci(), jvms->should_reexecute()); @@ -1270,9 +1270,9 @@ CodeBuffer* PhaseOutput::init_buffer() { cb->initialize(total_req, _buf_sizes._reloc); // Have we run out of code space? 
- if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) { + if ((cb->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) { C->record_failure("CodeCache is full"); - return NULL; + return nullptr; } // Configure the code buffer. cb->initialize_consts_size(const_req); @@ -1327,13 +1327,13 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) { // Create an array of unused labels, one for each basic block, if printing is enabled #if defined(SUPPORT_OPTO_ASSEMBLY) - int* node_offsets = NULL; + int* node_offsets = nullptr; uint node_offset_limit = C->unique(); if (C->print_assembly()) { node_offsets = NEW_RESOURCE_ARRAY(int, node_offset_limit); } - if (node_offsets != NULL) { + if (node_offsets != nullptr) { // We need to initialize. Unused array elements may contain garbage and mess up PrintOptoAssembly. memset(node_offsets, 0, node_offset_limit*sizeof(int)); } @@ -1356,7 +1356,7 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) { } // Now fill in the code buffer - Node* delay_slot = NULL; + Node* delay_slot = nullptr; for (uint i = 0; i < nblocks; i++) { Block* block = C->cfg()->get_block(i); _block = block; @@ -1397,7 +1397,7 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) { // See if delay slots are supported if (valid_bundle_info(n) && node_bundling(n)->used_in_unconditional_delay()) { - assert(delay_slot == NULL, "no use of delay slot node"); + assert(delay_slot == nullptr, "no use of delay slot node"); assert(n->size(C->regalloc()) == Pipeline::instr_unit_size(), "delay slot instruction wrong size"); delay_slot = n; @@ -1447,7 +1447,7 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) { C->cfg()->map_node_to_block(nop, block); // Ensure enough space. 
cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size); - if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) { + if ((cb->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) { C->record_failure("CodeCache is full"); return; } @@ -1476,7 +1476,7 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) { if (!is_mcall) { MachSafePointNode *sfn = mach->as_MachSafePoint(); // !!!!! Stubs only need an oopmap right now, so bail out - if (sfn->jvms()->method() == NULL) { + if (sfn->jvms()->method() == nullptr) { // Write the oopmap directly to the code blob??!! continue; } @@ -1503,7 +1503,7 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) { bool delay_slot_is_used = valid_bundle_info(n) && C->output()->node_bundling(n)->use_unconditional_delay(); if (!delay_slot_is_used && mach->may_be_short_branch()) { - assert(delay_slot == NULL, "not expecting delay slot node"); + assert(delay_slot == nullptr, "not expecting delay slot node"); int br_size = n->size(C->regalloc()); int offset = blk_starts[block_num] - current_offset; if (block_num >= i) { @@ -1568,7 +1568,7 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) { int count = 0; for (uint prec = mach->req(); prec < mach->len(); prec++) { Node *oop_store = mach->in(prec); // Precedence edge - if (oop_store == NULL) continue; + if (oop_store == nullptr) continue; count++; uint i4; for (i4 = 0; i4 < last_inst; ++i4) { @@ -1599,14 +1599,14 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) { // Verify that there is sufficient space remaining cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size); - if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) { + if ((cb->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) { C->record_failure("CodeCache is full"); return; } // Save the offset for the listing #if defined(SUPPORT_OPTO_ASSEMBLY) - if ((node_offsets != NULL) && (n->_idx < 
node_offset_limit)) { + if ((node_offsets != nullptr) && (n->_idx < node_offset_limit)) { node_offsets[n->_idx] = cb->insts_size(); } #endif @@ -1662,14 +1662,14 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) { // See if this instruction has a delay slot if (valid_bundle_info(n) && node_bundling(n)->use_unconditional_delay()) { - guarantee(delay_slot != NULL, "expecting delay slot node"); + guarantee(delay_slot != nullptr, "expecting delay slot node"); // Back up 1 instruction cb->set_insts_end(cb->insts_end() - Pipeline::instr_unit_size()); // Save the offset for the listing #if defined(SUPPORT_OPTO_ASSEMBLY) - if ((node_offsets != NULL) && (delay_slot->_idx < node_offset_limit)) { + if ((node_offsets != nullptr) && (delay_slot->_idx < node_offset_limit)) { node_offsets[delay_slot->_idx] = cb->insts_size(); } #endif @@ -1678,9 +1678,9 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) { if (delay_slot->is_MachSafePoint()) { MachNode *mach = delay_slot->as_Mach(); // !!!!! Stubs only need an oopmap right now, so bail out - if (!mach->is_MachCall() && mach->as_MachSafePoint()->jvms()->method() == NULL) { + if (!mach->is_MachCall() && mach->as_MachSafePoint()->jvms()->method() == nullptr) { // Write the oopmap directly to the code blob??!! 
- delay_slot = NULL; + delay_slot = nullptr; continue; } @@ -1695,7 +1695,7 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) { delay_slot->emit(*cb, C->regalloc()); // Don't reuse it - delay_slot = NULL; + delay_slot = nullptr; } } // End for all instructions in block @@ -1783,7 +1783,7 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) { } // One last check for failed CodeBuffer::expand: - if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) { + if ((cb->blob() == nullptr) || (!CompileBroker::should_compile_new_jobs())) { C->record_failure("CodeCache is full"); return; } @@ -1805,7 +1805,7 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) { // make sure the end tag is coherent, and that xmlStream->pop_tag is done thread safe. ResourceMark rm; stringStream method_metadata_str; - if (C->method() != NULL) { + if (C->method() != nullptr) { C->method()->print_metadata(&method_metadata_str); } stringStream dump_asm_str; @@ -1816,21 +1816,21 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) { // This output goes directly to the tty, not the compiler log. // To enable tools to match it up with the compilation activity, // be sure to tag this tty output with the compile ID. - if (xtty != NULL) { + if (xtty != nullptr) { xtty->head("opto_assembly compile_id='%d'%s", C->compile_id(), C->is_osr_compilation() ? 
" compile_kind='osr'" : ""); } - if (C->method() != NULL) { + if (C->method() != nullptr) { tty->print_cr("----------------------- MetaData before Compile_id = %d ------------------------", C->compile_id()); tty->print_raw(method_metadata_str.freeze()); - } else if (C->stub_name() != NULL) { + } else if (C->stub_name() != nullptr) { tty->print_cr("----------------------------- RuntimeStub %s -------------------------------", C->stub_name()); } tty->cr(); tty->print_cr("------------------------ OptoAssembly for Compile_id = %d -----------------------", C->compile_id()); tty->print_raw(dump_asm_str.freeze()); tty->print_cr("--------------------------------------------------------------------------------"); - if (xtty != NULL) { + if (xtty != nullptr) { xtty->tail("opto_assembly"); } } @@ -1844,7 +1844,7 @@ void PhaseOutput::FillExceptionTables(uint cnt, uint *call_returns, uint *inct_s uint inct_cnt = 0; for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) { Block* block = C->cfg()->get_block(i); - Node *n = NULL; + Node *n = nullptr; int j; // Find the branch; ignore trailing NOPs. 
@@ -1905,7 +1905,7 @@ void PhaseOutput::FillExceptionTables(uint cnt, uint *call_returns, uint *inct_s // Set the offset of the return from the call assert(handler_bcis.find(-1) != -1, "must have default handler"); - _handler_table.add_subtable(call_return, &handler_bcis, NULL, &handler_pcos); + _handler_table.add_subtable(call_return, &handler_bcis, nullptr, &handler_pcos); continue; } @@ -1943,7 +1943,7 @@ Scheduling::Scheduling(Arena *arena, Compile &compile) _available(arena), _reg_node(arena), _pinch_free_list(arena), - _next_node(NULL), + _next_node(nullptr), _bundle_instr_count(0), _bundle_cycle_number(0), _bundle_use(0, 0, resource_count, &_bundle_use_elements[0]) @@ -2155,9 +2155,9 @@ Node * Scheduling::ChooseNodeToBundle() { #ifndef PRODUCT if (_cfg->C->trace_opto_output()) - tty->print("# ChooseNodeToBundle: NULL\n"); + tty->print("# ChooseNodeToBundle: null\n"); #endif - return (NULL); + return (nullptr); } // Fast path, if only 1 instruction in the bundle @@ -2506,7 +2506,7 @@ void Scheduling::ComputeUseCount(const Block *bb) { _scheduled.clear(); // No delay slot specified - _unconditional_delay_slot = NULL; + _unconditional_delay_slot = nullptr; #ifdef ASSERT for( uint i=0; i < bb->number_of_nodes(); i++ ) @@ -2568,7 +2568,7 @@ void Scheduling::DoScheduling() { tty->print("# -> DoScheduling\n"); #endif - Block *succ_bb = NULL; + Block *succ_bb = nullptr; Block *bb; Compile* C = Compile::current(); @@ -2667,7 +2667,7 @@ void Scheduling::DoScheduling() { // Schedule the remaining instructions in the block while ( _available.size() > 0 ) { Node *n = ChooseNodeToBundle(); - guarantee(n != NULL, "no nodes available"); + guarantee(n != nullptr, "no nodes available"); AddNodeToBundle(n,bb); } @@ -2742,7 +2742,7 @@ void Scheduling::verify_do_def( Node *n, OptoReg::Name def, const char *msg ) { prior_use->dump(); assert(edge_from_to(prior_use,n), "%s", msg); } - _reg_node.map(def,NULL); // Kill live USEs + _reg_node.map(def,nullptr); // Kill live USEs } } @@ 
-2819,7 +2819,7 @@ void Scheduling::anti_do_def( Block *b, Node *def, OptoReg::Name def_reg, int is } Node *pinch = _reg_node[def_reg]; // Get pinch point - if ((pinch == NULL) || _cfg->get_block_for_node(pinch) != b || // No pinch-point yet? + if ((pinch == nullptr) || _cfg->get_block_for_node(pinch) != b || // No pinch-point yet? is_def ) { // Check for a true def (not a kill) _reg_node.map(def_reg,def); // Record def/kill as the optimistic pinch-point return; @@ -2829,7 +2829,7 @@ void Scheduling::anti_do_def( Block *b, Node *def, OptoReg::Name def_reg, int is debug_only( def = (Node*)((intptr_t)0xdeadbeef); ) // After some number of kills there _may_ be a later def - Node *later_def = NULL; + Node *later_def = nullptr; Compile* C = Compile::current(); @@ -2851,9 +2851,9 @@ void Scheduling::anti_do_def( Block *b, Node *def, OptoReg::Name def_reg, int is _reg_node.map(def_reg,pinch); // Record pinch-point //regalloc()->set_bad(pinch->_idx); // Already initialized this way. if( later_def->outcnt() == 0 || later_def->ideal_reg() == MachProjNode::fat_proj ) { // Distinguish def from kill - pinch->init_req(0, C->top()); // set not NULL for the next call + pinch->init_req(0, C->top()); // set not null for the next call add_prec_edge_from_to(later_def,pinch); // Add edge from kill to pinch - later_def = NULL; // and no later def + later_def = nullptr; // and no later def } pinch->set_req(0,later_def); // Hook later def so we can find it } else { // Else have valid pinch point @@ -2872,7 +2872,7 @@ void Scheduling::anti_do_def( Block *b, Node *def, OptoReg::Name def_reg, int is if( _regalloc->get_reg_first(uses->in(i)) == def_reg || _regalloc->get_reg_second(uses->in(i)) == def_reg ) { // Yes, found a use/kill pinch-point - pinch->set_req(0,NULL); // + pinch->set_req(0,nullptr); // pinch->replace_by(kill); // Move anti-dep edges up pinch = kill; _reg_node.map(def_reg,pinch); @@ -2890,7 +2890,7 @@ void Scheduling::anti_do_use( Block *b, Node *use, OptoReg::Name use_reg ) 
{ return; Node *pinch = _reg_node[use_reg]; // Get pinch point // Check for no later def_reg/kill in block - if ((pinch != NULL) && _cfg->get_block_for_node(pinch) == b && + if ((pinch != nullptr) && _cfg->get_block_for_node(pinch) == b && // Use has to be block-local as well _cfg->get_block_for_node(use) == b) { if( pinch->Opcode() == Op_Node && // Real pinch-point (not optimistic?) @@ -2942,14 +2942,14 @@ void Scheduling::ComputeRegisterAntidependencies(Block *b) { // put an edge from the pinch point to the USE. // To be expedient, the _reg_node array is pre-allocated for the whole - // compilation. _reg_node is lazily initialized; it either contains a NULL, + // compilation. _reg_node is lazily initialized; it either contains a null, // or a valid def/kill/pinch-point, or a leftover node from some prior - // block. Leftover node from some prior block is treated like a NULL (no + // block. Leftover node from some prior block is treated like a null (no // prior def, so no anti-dependence needed). Valid def is distinguished by // it being in the current block. bool fat_proj_seen = false; uint last_safept = _bb_end-1; - Node* end_node = (_bb_end-1 >= _bb_start) ? b->get_node(last_safept) : NULL; + Node* end_node = (_bb_end-1 >= _bb_start) ? 
b->get_node(last_safept) : nullptr; Node* last_safept_node = end_node; for( uint i = _bb_end-1; i >= _bb_start; i-- ) { Node *n = b->get_node(i); @@ -3067,12 +3067,12 @@ void Scheduling::garbage_collect_pinch_nodes() { int trace_cnt = 0; for (uint k = 0; k < _reg_node.Size(); k++) { Node* pinch = _reg_node[k]; - if ((pinch != NULL) && pinch->Opcode() == Op_Node && + if ((pinch != nullptr) && pinch->Opcode() == Op_Node && // no predecence input edges - (pinch->req() == pinch->len() || pinch->in(pinch->req()) == NULL) ) { + (pinch->req() == pinch->len() || pinch->in(pinch->req()) == nullptr) ) { cleanup_pinch(pinch); _pinch_free_list.push(pinch); - _reg_node.map(k, NULL); + _reg_node.map(k, nullptr); #ifndef PRODUCT if (_cfg->C->trace_opto_output()) { trace_cnt++; @@ -3107,7 +3107,7 @@ void Scheduling::cleanup_pinch( Node *pinch ) { i -= uses_found; // we deleted 1 or more copies of this edge } // May have a later_def entry - pinch->set_req(0, NULL); + pinch->set_req(0, nullptr); } #ifndef PRODUCT @@ -3160,10 +3160,10 @@ void PhaseOutput::init_scratch_buffer_blob(int const_size) { // constant section is big enough, use it. Otherwise free the // current and allocate a new one. BufferBlob* blob = scratch_buffer_blob(); - if ((blob != NULL) && (const_size <= _scratch_const_size)) { + if ((blob != nullptr) && (const_size <= _scratch_const_size)) { // Use the current blob. } else { - if (blob != NULL) { + if (blob != nullptr) { BufferBlob::free(blob); } @@ -3174,7 +3174,7 @@ void PhaseOutput::init_scratch_buffer_blob(int const_size) { // Record the buffer blob for next time. set_scratch_buffer_blob(blob); // Have we run out of code space? - if (scratch_buffer_blob() == NULL) { + if (scratch_buffer_blob() == nullptr) { // Let CompilerBroker disable further compilations. 
C->record_failure("Not enough space for scratch buffer in CodeCache"); return; @@ -3204,7 +3204,7 @@ uint PhaseOutput::scratch_emit_size(const Node* n) { // The allocation of the scratch buffer blob is particularly // expensive, since it has to grab the code cache lock. BufferBlob* blob = this->scratch_buffer_blob(); - assert(blob != NULL, "Initialize BufferBlob at start"); + assert(blob != nullptr, "Initialize BufferBlob at start"); assert(blob->size() > MAX_inst_size, "sanity"); relocInfo* locs_buf = scratch_locs_memory(); address blob_begin = blob->content_begin(); @@ -3213,7 +3213,7 @@ uint PhaseOutput::scratch_emit_size(const Node* n) { CodeBuffer buf(blob_begin, blob_end - blob_begin); buf.initialize_consts_size(_scratch_const_size); buf.initialize_stubs_size(MAX_stubs_size); - assert(locs_buf != NULL, "sanity"); + assert(locs_buf != nullptr, "sanity"); int lsize = MAX_locs_size / 3; buf.consts()->initialize_shared_locs(&locs_buf[lsize * 0], lsize); buf.insts()->initialize_shared_locs( &locs_buf[lsize * 1], lsize); @@ -3226,7 +3226,7 @@ uint PhaseOutput::scratch_emit_size(const Node* n) { // Do the emission. Label fakeL; // Fake label for branch instructions. 
- Label* saveL = NULL; + Label* saveL = nullptr; uint save_bnum = 0; bool is_branch = n->is_MachBranch(); if (is_branch) { @@ -3252,7 +3252,7 @@ uint PhaseOutput::scratch_emit_size(const Node* n) { void PhaseOutput::install() { if (!C->should_install_code()) { return; - } else if (C->stub_function() != NULL) { + } else if (C->stub_function() != nullptr) { install_stub(C->stub_name()); } else { install_code(C->method(), @@ -3304,14 +3304,14 @@ void PhaseOutput::install_code(ciMethod* target, 0, C->rtm_state()); - if (C->log() != NULL) { // Print code cache state into compiler log + if (C->log() != nullptr) { // Print code cache state into compiler log C->log()->code_cache_state(); } } } void PhaseOutput::install_stub(const char* stub_name) { // Entry point will be accessed using stub_entry_point(); - if (code_buffer() == NULL) { + if (code_buffer() == nullptr) { Matcher::soft_match_failure(); } else { if (PrintAssembly && (WizardMode || Verbose)) @@ -3329,7 +3329,7 @@ void PhaseOutput::install_stub(const char* stub_name) { frame_size_in_words(), oop_map_set(), false); - assert(rs != NULL && rs->is_runtime_stub(), "sanity check"); + assert(rs != nullptr && rs->is_runtime_stub(), "sanity check"); C->set_stub_entry_point(rs->entry_point()); } @@ -3372,7 +3372,7 @@ void PhaseOutput::dump_asm_on(outputStream* st, int* pcs, uint pc_limit) { int pc_digits = 3; // #chars required for pc int sb_chars = 3; // #chars for "start bundle" indicator int tab_size = 8; - if (pcs != NULL) { + if (pcs != nullptr) { int max_pc = 0; for (uint i = 0; i < pc_limit; i++) { max_pc = (max_pc < pcs[i]) ? 
pcs[i] : max_pc; @@ -3391,7 +3391,7 @@ void PhaseOutput::dump_asm_on(outputStream* st, int* pcs, uint pc_limit) { char starts_bundle = ' '; C->regalloc()->dump_frame(); - Node *n = NULL; + Node *n = nullptr; for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) { if (VMThread::should_terminate()) { cut_short = true; @@ -3402,7 +3402,7 @@ void PhaseOutput::dump_asm_on(outputStream* st, int* pcs, uint pc_limit) { continue; } n = block->head(); - if ((pcs != NULL) && (n->_idx < pc_limit)) { + if ((pcs != nullptr) && (n->_idx < pc_limit)) { pc = pcs[n->_idx]; st->print("%*.*x", pc_digits, pc_digits, pc); } @@ -3417,7 +3417,7 @@ void PhaseOutput::dump_asm_on(outputStream* st, int* pcs, uint pc_limit) { } // For all instructions - Node *delay = NULL; + Node *delay = nullptr; for (uint j = 0; j < block->number_of_nodes(); j++) { if (VMThread::should_terminate()) { cut_short = true; @@ -3449,7 +3449,7 @@ void PhaseOutput::dump_asm_on(outputStream* st, int* pcs, uint pc_limit) { !n->is_top() && // Debug info table constants !(n->is_Con() && !n->is_Mach())// Debug info table constants ) { - if ((pcs != NULL) && (n->_idx < pc_limit)) { + if ((pcs != nullptr) && (n->_idx < pc_limit)) { pc = pcs[n->_idx]; st->print("%*.*x", pc_digits, pc_digits, pc); } else { @@ -3466,12 +3466,12 @@ void PhaseOutput::dump_asm_on(outputStream* st, int* pcs, uint pc_limit) { // then back up and print it if (valid_bundle_info(n) && node_bundling(n)->use_unconditional_delay()) { // Coverity finding - Explicit null dereferenced. 
- guarantee(delay != NULL, "no unconditional delay instruction"); + guarantee(delay != nullptr, "no unconditional delay instruction"); if (WizardMode) delay->dump(); if (node_bundling(delay)->starts_bundle()) starts_bundle = '+'; - if ((pcs != NULL) && (n->_idx < pc_limit)) { + if ((pcs != nullptr) && (n->_idx < pc_limit)) { pc = pcs[n->_idx]; st->print("%*.*x", pc_digits, pc_digits, pc); } else { @@ -3482,7 +3482,7 @@ void PhaseOutput::dump_asm_on(outputStream* st, int* pcs, uint pc_limit) { st->fill_to(prefix_len); delay->format(C->regalloc(), st); st->cr(); - delay = NULL; + delay = nullptr; } // Dump the exception table as well @@ -3493,7 +3493,7 @@ void PhaseOutput::dump_asm_on(outputStream* st, int* pcs, uint pc_limit) { st->bol(); // Make sure we start on a new line } st->cr(); // one empty line between blocks - assert(cut_short || delay == NULL, "no unconditional delay branch"); + assert(cut_short || delay == nullptr, "no unconditional delay branch"); } // End of per-block dump if (cut_short) st->print_cr("*** disassembly is cut short ***"); diff --git a/src/hotspot/share/opto/output.hpp b/src/hotspot/share/opto/output.hpp index 09e012d34c3..e764e83df10 100644 --- a/src/hotspot/share/opto/output.hpp +++ b/src/hotspot/share/opto/output.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -140,7 +140,7 @@ public: // The architecture description provides short branch variants for some long // branch instructions. Replace eligible long branches with short branches. void shorten_branches(uint* blk_starts); - // If "objs" contains an ObjectValue whose id is "id", returns it, else NULL. + // If "objs" contains an ObjectValue whose id is "id", returns it, else null. 
static ObjectValue* sv_for_node_id(GrowableArray *objs, int id); static void set_sv_for_object_node(GrowableArray *objs, ObjectValue* sv); void FillLocArray( int idx, MachSafePointNode* sfpt, Node *local, diff --git a/src/hotspot/share/opto/parse.hpp b/src/hotspot/share/opto/parse.hpp index f06e242f444..72929729812 100644 --- a/src/hotspot/share/opto/parse.hpp +++ b/src/hotspot/share/opto/parse.hpp @@ -188,14 +188,14 @@ class Parse : public GraphKit { void set_start_map(SafePointNode* m) { assert(!is_merged(), ""); _start_map = m; } // True after any predecessor flows control into this block - bool is_merged() const { return _start_map != NULL; } + bool is_merged() const { return _start_map != nullptr; } #ifdef ASSERT // True after backedge predecessor flows control into this block bool has_merged_backedge() const { return _has_merged_backedge; } void mark_merged_backedge(Block* pred) { assert(is_SEL_head(), "should be loop head"); - if (pred != NULL && is_SEL_backedge(pred)) { + if (pred != nullptr && is_SEL_backedge(pred)) { assert(is_parsed(), "block should be parsed before merging backedges"); _has_merged_backedge = true; } @@ -285,7 +285,7 @@ class Parse : public GraphKit { // path number ("pnum"). int add_new_path(); - // Initialize me by recording the parser's map. My own map must be NULL. + // Initialize me by recording the parser's map. My own map must be null. 
void record_state(Parse* outer); }; @@ -405,7 +405,7 @@ class Parse : public GraphKit { void set_wrote_fields(bool z) { _wrote_fields = z; } Node* alloc_with_final() const { return _alloc_with_final; } void set_alloc_with_final(Node* n) { - assert((_alloc_with_final == NULL) || (_alloc_with_final == n), "different init objects?"); + assert((_alloc_with_final == nullptr) || (_alloc_with_final == n), "different init objects?"); _alloc_with_final = n; } @@ -432,7 +432,7 @@ class Parse : public GraphKit { Block* start_block() { return rpo_at(flow()->start_block()->rpo()); } - // Can return NULL if the flow pass did not complete a block. + // Can return null if the flow pass did not complete a block. Block* successor_for_bci(int bci) { return block()->successor_for_bci(bci); } @@ -631,7 +631,7 @@ class UnstableIfTrap { public: UnstableIfTrap(CallStaticJavaNode* call, Parse::Block* path): _unc(call), _modified(false) { - assert(_unc != NULL && Deoptimization::trap_request_reason(_unc->uncommon_trap_request()) == Deoptimization::Reason_unstable_if, + assert(_unc != nullptr && Deoptimization::trap_request_reason(_unc->uncommon_trap_request()) == Deoptimization::Reason_unstable_if, "invalid uncommon_trap call!"); _next_bci = path != nullptr ? 
path->start() : -1; } diff --git a/src/hotspot/share/opto/parse1.cpp b/src/hotspot/share/opto/parse1.cpp index 559653252d2..a17fe66ee02 100644 --- a/src/hotspot/share/opto/parse1.cpp +++ b/src/hotspot/share/opto/parse1.cpp @@ -78,7 +78,7 @@ void Parse::print_statistics() { tty->print_cr("Blocks parsed: %d Blocks seen: %d", blocks_parsed, blocks_seen); if (explicit_null_checks_inserted) { - tty->print_cr("%d original NULL checks - %d elided (%2d%%); optimizer leaves %d,", + tty->print_cr("%d original null checks - %d elided (%2d%%); optimizer leaves %d,", explicit_null_checks_inserted, explicit_null_checks_elided, (100*explicit_null_checks_elided)/explicit_null_checks_inserted, all_null_checks_found); @@ -112,7 +112,7 @@ Node *Parse::fetch_interpreter_state(int index, // Very similar to LoadNode::make, except we handle un-aligned longs and // doubles on Sparc. Intel can handle them just fine directly. - Node *l = NULL; + Node *l = nullptr; switch (bt) { // Signature is flattened case T_INT: l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT, MemNode::unordered); break; case T_FLOAT: l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT, MemNode::unordered); break; @@ -152,7 +152,7 @@ Node* Parse::check_interpreter_type(Node* l, const Type* type, // TypeFlow may assert null-ness if a type appears unloaded. if (type == TypePtr::NULL_PTR || - (tp != NULL && !tp->is_loaded())) { + (tp != nullptr && !tp->is_loaded())) { // Value must be null, not a real oop. Node* chk = _gvn.transform( new CmpPNode(l, null()) ); Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) ); @@ -168,9 +168,9 @@ Node* Parse::check_interpreter_type(Node* l, const Type* type, // When paths are cut off, values at later merge points can rise // toward more specific classes. Make sure these specific classes // are still in effect. 
- if (tp != NULL && !tp->is_same_java_type_as(TypeInstPtr::BOTTOM)) { + if (tp != nullptr && !tp->is_same_java_type_as(TypeInstPtr::BOTTOM)) { // TypeFlow asserted a specific object type. Value must have that type. - Node* bad_type_ctrl = NULL; + Node* bad_type_ctrl = nullptr; l = gen_checkcast(l, makecon(tp->as_klass_type()->cast_to_exactness(true)), &bad_type_ctrl); bad_type_exit->control()->add_req(bad_type_ctrl); } @@ -270,7 +270,7 @@ void Parse::load_interpreter_state(Node* osr_buf) { const Type *type = osr_block->local_type_at(index); - if (type->isa_oopptr() != NULL) { + if (type->isa_oopptr() != nullptr) { // 6403625: Verify that the interpreter oopMap thinks that the oop is live // else we might load a stale oop if the MethodLiveness disagrees with the @@ -279,7 +279,7 @@ void Parse::load_interpreter_state(Node* osr_buf) { // if (!live_oops.at(index)) { - if (C->log() != NULL) { + if (C->log() != nullptr) { C->log()->elem("OSR_mismatch local_index='%d'",index); } set_local(index, null()); @@ -301,7 +301,7 @@ void Parse::load_interpreter_state(Node* osr_buf) { // Construct code to access the appropriate local. BasicType bt = type->basic_type(); if (type == TypePtr::NULL_PTR) { - // Ptr types are mixed together with T_ADDRESS but NULL is + // Ptr types are mixed together with T_ADDRESS but null is // really for T_OBJECT types so correct it. 
bt = T_OBJECT; } @@ -336,7 +336,7 @@ void Parse::load_interpreter_state(Node* osr_buf) { Node* l = local(index); if (l->is_top()) continue; // nothing here const Type *type = osr_block->local_type_at(index); - if (type->isa_oopptr() != NULL) { + if (type->isa_oopptr() != nullptr) { if (!live_oops.at(index)) { // skip type check for dead oops continue; @@ -398,10 +398,10 @@ Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses) _wrote_volatile = false; _wrote_stable = false; _wrote_fields = false; - _alloc_with_final = NULL; + _alloc_with_final = nullptr; _entry_bci = InvocationEntryBci; - _tf = NULL; - _block = NULL; + _tf = nullptr; + _block = nullptr; _first_return = true; _replaced_nodes_for_exceptions = false; _new_idx = C->unique(); @@ -451,7 +451,7 @@ Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses) } CompileLog* log = C->log(); - if (log != NULL) { + if (log != nullptr) { log->begin_head("parse method='%d' uses='%f'", log->identify(parse_method), expected_uses); if (depth() == 1 && C->is_osr_compilation()) { @@ -477,7 +477,7 @@ Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses) if (total_count < old_count || total_count < md_count) total_count = (uint)-1; C->set_trap_count(reason, total_count); - if (log != NULL) + if (log != nullptr) log->elem("observe trap='%s' count='%d' total='%d'", Deoptimization::trap_reason_name(reason), md_count, total_count); @@ -486,11 +486,11 @@ Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses) // Accumulate total sum of decompilations, also. 
C->set_decompile_count(C->decompile_count() + md->decompile_count()); - if (log != NULL && method()->has_exception_handlers()) { + if (log != nullptr && method()->has_exception_handlers()) { log->elem("observe that='has_exception_handlers'"); } - assert(InlineTree::check_can_parse(method()) == NULL, "Can not parse this method, cutout earlier"); + assert(InlineTree::check_can_parse(method()) == nullptr, "Can not parse this method, cutout earlier"); assert(method()->has_balanced_monitors(), "Can not parse unbalanced monitors, cutout earlier"); // Always register dependence if JVMTI is enabled, because @@ -556,7 +556,7 @@ Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses) SafePointNode* entry_map = create_entry_map(); // Check for bailouts during map initialization - if (failing() || entry_map == NULL) { + if (failing() || entry_map == nullptr) { if (log) log->done("parse"); return; } @@ -812,7 +812,7 @@ void Parse::build_exits() { //----------------------------build_start_state------------------------------- // Construct a state which contains only the incoming arguments from an -// unknown caller. The method & bci will be NULL & InvocationEntryBci. +// unknown caller. The method & bci will be null & InvocationEntryBci. JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) { int arg_size = tf->domain()->cnt(); int max_size = MAX2(arg_size, (int)tf->range()->cnt()); @@ -821,7 +821,7 @@ JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) { record_for_igvn(map); assert(arg_size == TypeFunc::Parms + (is_osr_compilation() ? 
1 : method()->arg_size()), "correct arg_size"); Node_Notes* old_nn = default_node_notes(); - if (old_nn != NULL && has_method()) { + if (old_nn != nullptr && has_method()) { Node_Notes* entry_nn = old_nn->clone(this); JVMState* entry_jvms = new(this) JVMState(method(), old_nn->jvms()); entry_jvms->set_offsets(0); @@ -847,7 +847,7 @@ JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) { //-----------------------------make_node_notes--------------------------------- Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) { - if (caller_nn == NULL) return NULL; + if (caller_nn == nullptr) return nullptr; Node_Notes* nn = caller_nn->clone(C); JVMState* caller_jvms = nn->jvms(); JVMState* jvms = new (C) JVMState(method(), caller_jvms); @@ -909,14 +909,14 @@ void Parse::do_exceptions() { if (failing()) { // Pop them all off and throw them away. - while (pop_exception_state() != NULL) ; + while (pop_exception_state() != nullptr) ; return; } PreserveJVMState pjvms(this, false); SafePointNode* ex_map; - while ((ex_map = pop_exception_state()) != NULL) { + while ((ex_map = pop_exception_state()) != nullptr) { if (!method()->has_exception_handlers()) { // Common case: Transfer control outward. // Doing it this early allows the exceptions to common up @@ -1069,7 +1069,7 @@ void Parse::do_exits() { SafePointNode* normal_map = kit.map(); // keep this guy safe // Now re-collect the exceptions into _exits: SafePointNode* ex_map; - while ((ex_map = kit.pop_exception_state()) != NULL) { + while ((ex_map = kit.pop_exception_state()) != nullptr) { Node* ex_oop = kit.use_exception_state(ex_map); // Force the exiting JVM state to have this method at InvocationEntryBci. // The exiting JVM state is otherwise a copy of the calling JVMS. 
@@ -1104,7 +1104,7 @@ void Parse::do_exits() { // Capture very early exceptions (receiver null checks) from caller JVMS GraphKit caller(_caller); SafePointNode* ex_map; - while ((ex_map = caller.pop_exception_state()) != NULL) { + while ((ex_map = caller.pop_exception_state()) != nullptr) { _exits.add_exception_state(ex_map); } } @@ -1120,7 +1120,7 @@ SafePointNode* Parse::create_entry_map() { uint len = TypeFunc::Parms + method()->max_locals() + method()->max_stack(); if (len >= 32760) { C->record_method_not_compilable("too many local variables"); - return NULL; + return nullptr; } // clear current replaced nodes that are of no use from here on (map was cloned in build_exits). @@ -1134,21 +1134,21 @@ SafePointNode* Parse::create_entry_map() { if (kit.stopped()) { _exits.add_exception_states_from(_caller); _exits.set_jvms(_caller); - return NULL; + return nullptr; } } - assert(method() != NULL, "parser must have a method"); + assert(method() != nullptr, "parser must have a method"); // Create an initial safepoint to hold JVM state during parsing - JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : NULL); + JVMState* jvms = new (C) JVMState(method(), _caller->has_method() ? _caller : nullptr); set_map(new SafePointNode(len, jvms)); jvms->set_map(map()); record_for_igvn(map()); assert(jvms->endoff() == len, "correct jvms sizing"); SafePointNode* inmap = _caller->map(); - assert(inmap != NULL, "must have inmap"); + assert(inmap != nullptr, "must have inmap"); // In case of null check on receiver above map()->transfer_replaced_nodes_from(inmap, _new_idx); @@ -1204,7 +1204,7 @@ void Parse::do_method_entry() { Node* receiver_obj = local(0); const TypeInstPtr* receiver_type = _gvn.type(receiver_obj)->isa_instptr(); - if (receiver_type != NULL && !receiver_type->higher_equal(holder_type)) { + if (receiver_type != nullptr && !receiver_type->higher_equal(holder_type)) { // Receiver should always be a subtype of callee holder. 
// But, since C2 type system doesn't properly track interfaces, // the invariant can't be expressed in the type system for default methods. @@ -1234,7 +1234,7 @@ void Parse::do_method_entry() { // FastLockNode becomes the new control parent to pin it to the start. // Setup Object Pointer - Node *lock_obj = NULL; + Node *lock_obj = nullptr; if (method()->is_static()) { ciInstance* mirror = _method->holder()->java_mirror(); const TypeInstPtr *t_lock = TypeInstPtr::make(mirror); @@ -1282,11 +1282,11 @@ Parse::Block::Block(Parse* outer, int rpo) : _live_locals() { _is_parsed = false; _is_handler = false; _has_merged_backedge = false; - _start_map = NULL; + _start_map = nullptr; _has_predicates = false; _num_successors = 0; _all_successors = 0; - _successors = NULL; + _successors = nullptr; assert(pred_count() == 0 && preds_parsed() == 0, "sanity"); assert(!(is_merged() || is_parsed() || is_handler() || has_merged_backedge()), "sanity"); assert(_live_locals.size() == 0, "sanity"); @@ -1305,7 +1305,7 @@ void Parse::Block::init_graph(Parse* outer) { int ne = tfe->length(); _num_successors = ns; _all_successors = ns+ne; - _successors = (ns+ne == 0) ? NULL : NEW_RESOURCE_ARRAY(Block*, ns+ne); + _successors = (ns+ne == 0) ? nullptr : NEW_RESOURCE_ARRAY(Block*, ns+ne); int p = 0; for (int i = 0; i < ns+ne; i++) { ciTypeFlow::Block* tf2 = (i < ns) ? tfs->at(i) : tfe->at(i-ns); @@ -1348,7 +1348,7 @@ Parse::Block* Parse::Block::successor_for_bci(int bci) { // of bytecodes. For example, "obj.field = null" is executable even // if the field's type is an unloaded class; the flow pass used to // make a trap for such code. - return NULL; + return nullptr; } @@ -1533,7 +1533,7 @@ void Parse::do_one_block() { } assert(bci() < block()->limit(), "bci still in block"); - if (log != NULL) { + if (log != nullptr) { // Output an optional context marker, to help place actions // that occur during parsing of this BC. 
If there is no log // output until the next context string, this context string @@ -1568,7 +1568,7 @@ void Parse::do_one_block() { NOT_PRODUCT( parse_histogram()->record_change(); ); - if (log != NULL) + if (log != nullptr) log->clear_context(); // skip marker if nothing was printed // Fall into next bytecode. Each bytecode normally has 1 sequential @@ -1583,7 +1583,7 @@ void Parse::do_one_block() { void Parse::set_parse_bci(int bci) { set_bci(bci); Node_Notes* nn = C->default_node_notes(); - if (nn == NULL) return; + if (nn == nullptr) return; // Collect debug info for inlined calls unless -XX:-DebugInlinedCalls. if (!DebugInlinedCalls && depth() > 1) { @@ -1592,7 +1592,7 @@ void Parse::set_parse_bci(int bci) { // Update the JVMS annotation, if present. JVMState* jvms = nn->jvms(); - if (jvms != NULL && jvms->bci() != bci) { + if (jvms != nullptr && jvms->bci() != bci) { // Update the JVMS. jvms = jvms->clone_shallow(C); jvms->set_bci(bci); @@ -1604,7 +1604,7 @@ void Parse::set_parse_bci(int bci) { // Merge the current mapping into the basic block starting at bci void Parse::merge(int target_bci) { Block* target = successor_for_bci(target_bci); - if (target == NULL) { handle_missing_successor(target_bci); return; } + if (target == nullptr) { handle_missing_successor(target_bci); return; } assert(!target->is_ready(), "our arrival must be expected"); int pnum = target->next_path_num(); merge_common(target, pnum); @@ -1614,7 +1614,7 @@ void Parse::merge(int target_bci) { // Merge the current mapping into the basic block, using a new path void Parse::merge_new_path(int target_bci) { Block* target = successor_for_bci(target_bci); - if (target == NULL) { handle_missing_successor(target_bci); return; } + if (target == nullptr) { handle_missing_successor(target_bci); return; } assert(!target->is_ready(), "new path into frozen graph"); int pnum = target->add_new_path(); merge_common(target, pnum); @@ -1631,7 +1631,7 @@ void Parse::merge_exception(int target_bci) { #endif 
assert(sp() == 1, "must have only the throw exception on the stack"); Block* target = successor_for_bci(target_bci); - if (target == NULL) { handle_missing_successor(target_bci); return; } + if (target == nullptr) { handle_missing_successor(target_bci); return; } assert(target->is_handler(), "exceptions are handled by special blocks"); int pnum = target->add_new_path(); merge_common(target, pnum); @@ -1692,8 +1692,8 @@ void Parse::merge_common(Parse::Block* target, int pnum) { RegionNode *r = new RegionNode(edges+1); gvn().set_type(r, Type::CONTROL); record_for_igvn(r); - // zap all inputs to NULL for debugging (done in Node(uint) constructor) - // for (int j = 1; j < edges+1; j++) { r->init_req(j, NULL); } + // zap all inputs to null for debugging (done in Node(uint) constructor) + // for (int j = 1; j < edges+1; j++) { r->init_req(j, nullptr); } r->init_req(pnum, control()); set_control(r); target->copy_irreducible_status_to(r, jvms()); @@ -1753,7 +1753,7 @@ void Parse::merge_common(Parse::Block* target, int pnum) { if (m->is_Phi() && m->as_Phi()->region() == r) phi = m->as_Phi(); else - phi = NULL; + phi = nullptr; if (m != n) { // Different; must merge switch (j) { // Frame pointer and Return Address never changes @@ -1761,11 +1761,11 @@ void Parse::merge_common(Parse::Block* target, int pnum) { case TypeFunc::ReturnAdr: break; case TypeFunc::Memory: // Merge inputs to the MergeMem node - assert(phi == NULL, "the merge contains phis, not vice versa"); + assert(phi == nullptr, "the merge contains phis, not vice versa"); merge_memory_edges(n->as_MergeMem(), pnum, nophi); continue; default: // All normal stuff - if (phi == NULL) { + if (phi == nullptr) { const JVMState* jvms = map()->jvms(); if (EliminateNestedLocks && jvms->is_mon(j) && jvms->is_monitor_box(j)) { @@ -1787,7 +1787,7 @@ void Parse::merge_common(Parse::Block* target, int pnum) { // - the corresponding control edges is top (a dead incoming path) // It is a bug if we create a phi which sees a garbage 
value on a live path. - if (phi != NULL) { + if (phi != nullptr) { assert(n != top() || r->in(pnum) == top(), "live value must not be garbage"); assert(phi->region() == r, ""); phi->set_req(pnum, n); // Then add 'n' to the merge @@ -1833,15 +1833,15 @@ void Parse::merge_common(Parse::Block* target, int pnum) { //--------------------------merge_memory_edges--------------------------------- void Parse::merge_memory_edges(MergeMemNode* n, int pnum, bool nophi) { // (nophi means we must not create phis, because we already parsed here) - assert(n != NULL, ""); + assert(n != nullptr, ""); // Merge the inputs to the MergeMems MergeMemNode* m = merged_memory(); assert(control()->is_Region(), "must be merging to a region"); RegionNode* r = control()->as_Region(); - PhiNode* base = NULL; - MergeMemNode* remerge = NULL; + PhiNode* base = nullptr; + MergeMemNode* remerge = nullptr; for (MergeMemStream mms(m, n); mms.next_non_empty2(); ) { Node *p = mms.force_memory(); Node *q = mms.memory2(); @@ -1849,9 +1849,9 @@ void Parse::merge_memory_edges(MergeMemNode* n, int pnum, bool nophi) { // Trouble: No new splits allowed after a loop body is parsed. // Instead, wire the new split into a MergeMem on the backedge. // The optimizer will sort it out, slicing the phi. 
- if (remerge == NULL) { - guarantee(base != NULL, ""); - assert(base->in(0) != NULL, "should not be xformed away"); + if (remerge == nullptr) { + guarantee(base != nullptr, ""); + assert(base->in(0) != nullptr, "should not be xformed away"); remerge = MergeMemNode::make(base->in(pnum)); gvn().set_type(remerge, Type::MEMORY); base->set_req(pnum, remerge); @@ -1867,10 +1867,10 @@ void Parse::merge_memory_edges(MergeMemNode* n, int pnum, bool nophi) { if (p->is_Phi() && p->as_Phi()->region() == r) phi = p->as_Phi(); else - phi = NULL; + phi = nullptr; } // Insert q into local phi - if (phi != NULL) { + if (phi != nullptr) { assert(phi->region() == r, ""); p = phi; phi->set_req(pnum, q); @@ -1884,7 +1884,7 @@ void Parse::merge_memory_edges(MergeMemNode* n, int pnum, bool nophi) { } } // Transform base last, in case we must fiddle with remerging. - if (base != NULL && pnum == 1) { + if (base != nullptr && pnum == 1) { record_for_igvn(base); m->set_base_memory( _gvn.transform_no_reclaim(base) ); } @@ -1943,7 +1943,7 @@ int Parse::Block::add_new_path() { // Add new path to the region. 
uint pnum = r->req(); - r->add_req(NULL); + r->add_req(nullptr); for (uint i = 1; i < map->req(); i++) { Node* n = map->in(i); @@ -1953,13 +1953,13 @@ int Parse::Block::add_new_path() { Node* phi = mms.memory(); if (phi->is_Phi() && phi->as_Phi()->region() == r) { assert(phi->req() == pnum, "must be same size as region"); - phi->add_req(NULL); + phi->add_req(nullptr); } } } else { if (n->is_Phi() && n->as_Phi()->region() == r) { assert(n->req() == pnum, "must be same size as region"); - n->add_req(NULL); + n->add_req(nullptr); } } } @@ -1975,9 +1975,9 @@ PhiNode *Parse::ensure_phi(int idx, bool nocreate) { assert(region->is_Region(), ""); Node* o = map->in(idx); - assert(o != NULL, ""); + assert(o != nullptr, ""); - if (o == top()) return NULL; // TOP always merges into TOP + if (o == top()) return nullptr; // TOP always merges into TOP if (o->is_Phi() && o->as_Phi()->region() == region) { return o->as_Phi(); @@ -1986,7 +1986,7 @@ PhiNode *Parse::ensure_phi(int idx, bool nocreate) { // Now use a Phi here for merging assert(!nocreate, "Cannot build a phi for a block already parsed."); const JVMState* jvms = map->jvms(); - const Type* t = NULL; + const Type* t = nullptr; if (jvms->is_loc(idx)) { t = block()->local_type_at(idx - jvms->locoff()); } else if (jvms->is_stk(idx)) { @@ -2005,14 +2005,14 @@ PhiNode *Parse::ensure_phi(int idx, bool nocreate) { // makes it go dead. if (t == Type::BOTTOM) { map->set_req(idx, top()); - return NULL; + return nullptr; } // Do not create phis for top either. // A top on a non-null control flow must be an unused even after the.phi. if (t == Type::TOP || t == Type::HALF) { map->set_req(idx, top()); - return NULL; + return nullptr; } PhiNode* phi = PhiNode::make(region, o, t); @@ -2030,7 +2030,7 @@ PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) { assert(region->is_Region(), ""); Node *o = (idx == Compile::AliasIdxBot)? 
mem->base_memory(): mem->memory_at(idx); - assert(o != NULL && o != top(), ""); + assert(o != nullptr && o != top(), ""); PhiNode* phi; if (o->is_Phi() && o->as_Phi()->region() == region) { @@ -2064,11 +2064,11 @@ PhiNode *Parse::ensure_memory_phi(int idx, bool nocreate) { // class need finalization. void Parse::call_register_finalizer() { Node* receiver = local(0); - assert(receiver != NULL && receiver->bottom_type()->isa_instptr() != NULL, + assert(receiver != nullptr && receiver->bottom_type()->isa_instptr() != nullptr, "must have non-null instance type"); const TypeInstPtr *tinst = receiver->bottom_type()->isa_instptr(); - if (tinst != NULL && tinst->is_loaded() && !tinst->klass_is_exact()) { + if (tinst != nullptr && tinst->is_loaded() && !tinst->klass_is_exact()) { // The type isn't known exactly so see if CHA tells us anything. ciInstanceKlass* ik = tinst->instance_klass(); if (!Dependencies::has_finalizable_subclass(ik)) { @@ -2082,10 +2082,10 @@ void Parse::call_register_finalizer() { // finalization. In general this will fold up since the concrete // class is often visible so the access flags are constant. 
Node* klass_addr = basic_plus_adr( receiver, receiver, oopDesc::klass_offset_in_bytes() ); - Node* klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), klass_addr, TypeInstPtr::KLASS)); + Node* klass = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), klass_addr, TypeInstPtr::KLASS)); Node* access_flags_addr = basic_plus_adr(klass, klass, in_bytes(Klass::access_flags_offset())); - Node* access_flags = make_load(NULL, access_flags_addr, TypeInt::INT, T_INT, MemNode::unordered); + Node* access_flags = make_load(nullptr, access_flags_addr, TypeInt::INT, T_INT, MemNode::unordered); Node* mask = _gvn.transform(new AndINode(access_flags, intcon(JVM_ACC_HAS_FINALIZER))); Node* check = _gvn.transform(new CmpINode(mask, intcon(0))); @@ -2108,7 +2108,7 @@ void Parse::call_register_finalizer() { Node *call = make_runtime_call(RC_NO_LEAF, OptoRuntime::register_finalizer_Type(), OptoRuntime::register_finalizer_Java(), - NULL, TypePtr::BOTTOM, + nullptr, TypePtr::BOTTOM, receiver); make_slow_call_ex(call, env()->Throwable_klass(), true); @@ -2212,7 +2212,7 @@ void Parse::return_current(Node* value) { } // frame pointer is always same, already captured - if (value != NULL) { + if (value != nullptr) { // If returning oops to an interface-return, there is a silent free // cast from oop to interface allowed by the Verifier. Make it explicit // here. @@ -2239,7 +2239,7 @@ void Parse::add_safepoint() { kill_dead_locals(); // Clone the JVM State - SafePointNode *sfpnt = new SafePointNode(parms, NULL); + SafePointNode *sfpnt = new SafePointNode(parms, nullptr); // Capture memory state BEFORE a SafePoint. Since we can block at a // SafePoint we need our GC state to be safe; i.e. we need all our current @@ -2281,7 +2281,7 @@ void Parse::add_safepoint() { // Provide an edge from root to safepoint. This makes the safepoint // appear useful until the parse has completed. 
if (transformed_sfpnt->is_SafePoint()) { - assert(C->root() != NULL, "Expect parse is still valid"); + assert(C->root() != nullptr, "Expect parse is still valid"); C->root()->add_prec(transformed_sfpnt); } } @@ -2289,8 +2289,8 @@ void Parse::add_safepoint() { #ifndef PRODUCT //------------------------show_parse_info-------------------------------------- void Parse::show_parse_info() { - InlineTree* ilt = NULL; - if (C->ilt() != NULL) { + InlineTree* ilt = nullptr; + if (C->ilt() != nullptr) { JVMState* caller_jvms = is_osr_parse() ? caller()->caller() : caller(); ilt = InlineTree::find_subtree_from_root(C->ilt(), caller_jvms, method()); } @@ -2355,7 +2355,7 @@ void Parse::show_parse_info() { //------------------------------dump------------------------------------------- // Dump information associated with the bytecodes of current _method void Parse::dump() { - if( method() != NULL ) { + if( method() != nullptr ) { // Iterate over bytecodes ciBytecodeStream iter(method()); for( Bytecodes::Code bc = iter.next(); bc != ciBytecodeStream::EOBC() ; bc = iter.next() ) { diff --git a/src/hotspot/share/opto/parse2.cpp b/src/hotspot/share/opto/parse2.cpp index 4bf48143f00..a1cf2c3508e 100644 --- a/src/hotspot/share/opto/parse2.cpp +++ b/src/hotspot/share/opto/parse2.cpp @@ -140,7 +140,7 @@ Node* Parse::array_addressing(BasicType type, int vals, const Type*& elemtype) { bool need_range_check = true; if (idxtype->_hi < sizetype->_lo && idxtype->_lo >= 0) { need_range_check = false; - if (C->log() != NULL) C->log()->elem("observe that='!need_range_check'"); + if (C->log() != nullptr) C->log()->elem("observe that='!need_range_check'"); } if (!arytype->is_loaded()) { @@ -190,7 +190,7 @@ Node* Parse::array_addressing(BasicType type, int vals, const Type*& elemtype) { // See IfNode::Ideal, is_range_check, adjust_check. 
uncommon_trap(Deoptimization::Reason_range_check, Deoptimization::Action_make_not_entrant, - NULL, "range_check"); + nullptr, "range_check"); } else { // If we have already recompiled with the range-check-widening // heroic optimization turned off, then we must really be throwing @@ -234,7 +234,7 @@ void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, bool unc) { repush_if_args(); uncommon_trap(Deoptimization::Reason_unstable_if, Deoptimization::Action_reinterpret, - NULL, + nullptr, "taken always"); } else { assert(dest_bci_if_true != never_reached, "inconsistent dest"); @@ -256,7 +256,7 @@ void Parse::jump_if_false_fork(IfNode *iff, int dest_bci_if_true, bool unc) { repush_if_args(); uncommon_trap(Deoptimization::Reason_unstable_if, Deoptimization::Action_reinterpret, - NULL, + nullptr, "taken never"); } else { assert(dest_bci_if_true != never_reached, "inconsistent dest"); @@ -275,7 +275,7 @@ void Parse::jump_if_always_fork(int dest_bci, bool unc) { repush_if_args(); uncommon_trap(Deoptimization::Reason_unstable_if, Deoptimization::Action_reinterpret, - NULL, + nullptr, "taken never"); } else { assert(dest_bci != never_reached, "inconsistent dest"); @@ -424,10 +424,10 @@ void Parse::do_tableswitch() { } ciMethodData* methodData = method()->method_data(); - ciMultiBranchData* profile = NULL; + ciMultiBranchData* profile = nullptr; if (methodData->is_mature() && UseSwitchProfiling) { ciProfileData* data = methodData->bci_to_data(bci()); - if (data != NULL && data->is_MultiBranchData()) { + if (data != nullptr && data->is_MultiBranchData()) { profile = (ciMultiBranchData*)data; } } @@ -440,7 +440,7 @@ void Parse::do_tableswitch() { int rp = -1; if (lo_index != min_jint) { float cnt = 1.0F; - if (profile != NULL) { + if (profile != nullptr) { cnt = (float)profile->default_count() / (hi_index != max_jint ? 
2.0F : 1.0F); } ranges[++rp].setRange(min_jint, lo_index-1, default_dest, cnt); @@ -450,7 +450,7 @@ void Parse::do_tableswitch() { int dest = iter().get_dest_table(j+3); makes_backward_branch |= (dest <= bci()); float cnt = 1.0F; - if (profile != NULL) { + if (profile != nullptr) { cnt = (float)profile->count_at(j); } if (rp < 0 || !ranges[rp].adjoin(match_int, dest, cnt, trim_ranges)) { @@ -461,7 +461,7 @@ void Parse::do_tableswitch() { assert(ranges[rp].hi() == highest, ""); if (highest != max_jint) { float cnt = 1.0F; - if (profile != NULL) { + if (profile != nullptr) { cnt = (float)profile->default_count() / (lo_index != min_jint ? 2.0F : 1.0F); } if (!ranges[rp].adjoinRange(highest+1, max_jint, default_dest, cnt, trim_ranges)) { @@ -498,10 +498,10 @@ void Parse::do_lookupswitch() { } ciMethodData* methodData = method()->method_data(); - ciMultiBranchData* profile = NULL; + ciMultiBranchData* profile = nullptr; if (methodData->is_mature() && UseSwitchProfiling) { ciProfileData* data = methodData->bci_to_data(bci()); - if (data != NULL && data->is_MultiBranchData()) { + if (data != nullptr && data->is_MultiBranchData()) { profile = (ciMultiBranchData*)data; } } @@ -514,13 +514,13 @@ void Parse::do_lookupswitch() { table[3*j+0] = iter().get_int_table(2+2*j); table[3*j+1] = iter().get_dest_table(2+2*j+1); // Handle overflow when converting from uint to jint - table[3*j+2] = (profile == NULL) ? 1 : (jint)MIN2((uint)max_jint, profile->count_at(j)); + table[3*j+2] = (profile == nullptr) ? 
1 : (jint)MIN2((uint)max_jint, profile->count_at(j)); } qsort(table, len, 3*sizeof(table[0]), jint_cmp); } float default_cnt = 1.0F; - if (profile != NULL) { + if (profile != nullptr) { juint defaults = max_juint - len; default_cnt = (float)profile->default_count()/(float)defaults; } @@ -605,12 +605,12 @@ public: } _state; SwitchRanges(SwitchRange *lo, SwitchRange *hi) - : _lo(lo), _hi(hi), _mid(NULL), + : _lo(lo), _hi(hi), _mid(nullptr), _cost(0), _state(Start) { } SwitchRanges() - : _lo(NULL), _hi(NULL), _mid(NULL), + : _lo(nullptr), _hi(nullptr), _mid(nullptr), _cost(0), _state(Start) {} }; @@ -624,7 +624,7 @@ static float compute_tree_cost(SwitchRange *lo, SwitchRange *hi, float total_cnt do { SwitchRanges& r = *tree.adr_at(tree.length()-1); if (r._hi != r._lo) { - if (r._mid == NULL) { + if (r._mid == nullptr) { float r_cnt = sum_of_cnts(r._lo, r._hi); if (r_cnt == 0) { @@ -633,7 +633,7 @@ static float compute_tree_cost(SwitchRange *lo, SwitchRange *hi, float total_cnt continue; } - SwitchRange* mid = NULL; + SwitchRange* mid = nullptr; mid = r._lo; for (float cnt = 0; ; ) { assert(mid <= r._hi, "out of bounds"); @@ -682,7 +682,7 @@ void Parse::linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchR SwitchRange* array1 = lo; SwitchRange* array2 = NEW_RESOURCE_ARRAY(SwitchRange, nr); - SwitchRange* ranges = NULL; + SwitchRange* ranges = nullptr; while (nr >= 2) { assert(lo == array1 || lo == array2, "one the 2 already allocated arrays"); @@ -878,15 +878,15 @@ bool Parse::create_jump_tables(Node* key_val, SwitchRange* lo, SwitchRange* hi) } ciMethodData* methodData = method()->method_data(); - ciMultiBranchData* profile = NULL; + ciMultiBranchData* profile = nullptr; if (methodData->is_mature()) { ciProfileData* data = methodData->bci_to_data(bci()); - if (data != NULL && data->is_MultiBranchData()) { + if (data != nullptr && data->is_MultiBranchData()) { profile = (ciMultiBranchData*)data; } } - Node* jtn = _gvn.transform(new JumpNode(control(), 
key_val, num_cases, probs, profile == NULL ? COUNT_UNKNOWN : total)); + Node* jtn = _gvn.transform(new JumpNode(control(), key_val, num_cases, probs, profile == nullptr ? COUNT_UNKNOWN : total)); // These are the switch destinations hanging off the jumpnode i = 0; @@ -940,7 +940,7 @@ void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, jint min_val = min_jint; jint max_val = max_jint; const TypeInt* ti = key_val->bottom_type()->isa_int(); - if (ti != NULL) { + if (ti != nullptr) { min_val = ti->_lo; max_val = ti->_hi; assert(min_val <= max_val, "invalid int type"); @@ -977,7 +977,7 @@ void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, if (create_jump_tables(key_val, lo, hi)) return; - SwitchRange* mid = NULL; + SwitchRange* mid = nullptr; float total_cnt = sum_of_cnts(lo, hi); int nr = hi - lo + 1; @@ -1101,7 +1101,7 @@ void Parse::modf() { Node *f1 = pop(); Node* c = make_runtime_call(RC_LEAF, OptoRuntime::modf_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::frem), - "frem", NULL, //no memory effects + "frem", nullptr, //no memory effects f1, f2); Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0)); @@ -1113,7 +1113,7 @@ void Parse::modd() { Node *d1 = pop_pair(); Node* c = make_runtime_call(RC_LEAF, OptoRuntime::Math_DD_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::drem), - "drem", NULL, //no memory effects + "drem", nullptr, //no memory effects d1, top(), d2, top()); Node* res_d = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0)); @@ -1130,7 +1130,7 @@ void Parse::l2f() { Node* f1 = pop(); Node* c = make_runtime_call(RC_LEAF, OptoRuntime::l2f_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::l2f), - "l2f", NULL, //no memory effects + "l2f", nullptr, //no memory effects f1, f2); Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0)); @@ -1212,7 +1212,7 @@ float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* t ciMethodData* methodData = 
method()->method_data(); if (!methodData->is_mature()) return PROB_UNKNOWN; ciProfileData* data = methodData->bci_to_data(bci()); - if (data == NULL) { + if (data == nullptr) { return PROB_UNKNOWN; } if (!data->is_JumpData()) return PROB_UNKNOWN; @@ -1232,7 +1232,7 @@ float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* t // Give up if too few (or too many, in which case the sum will overflow) counts to be meaningful. // We also check that individual counters are positive first, otherwise the sum can become positive. if (taken < 0 || not_taken < 0 || taken + not_taken < 40) { - if (C->log() != NULL) { + if (C->log() != nullptr) { C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken); } return PROB_UNKNOWN; @@ -1262,12 +1262,12 @@ float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* t assert((cnt > 0.0f) && (prob > 0.0f), "Bad frequency assignment in if"); - if (C->log() != NULL) { - const char* prob_str = NULL; + if (C->log() != nullptr) { + const char* prob_str = nullptr; if (prob >= PROB_MAX) prob_str = (prob == PROB_MAX) ? "max" : "always"; if (prob <= PROB_MIN) prob_str = (prob == PROB_MIN) ? "min" : "never"; char prob_str_buf[30]; - if (prob_str == NULL) { + if (prob_str == nullptr) { jio_snprintf(prob_str_buf, sizeof(prob_str_buf), "%20.2f", prob); prob_str = prob_str_buf; } @@ -1306,7 +1306,7 @@ float Parse::branch_prediction(float& cnt, // of the OSR-ed method, and we want to deopt to gather more stats. // If you have ANY counts, then this loop is simply 'cold' relative // to the OSR loop. 
- if (data == NULL || + if (data == nullptr || (data->as_BranchData()->taken() + data->as_BranchData()->not_taken() == 0)) { // This is the only way to return PROB_UNKNOWN: return PROB_UNKNOWN; @@ -1356,8 +1356,8 @@ inline int Parse::repush_if_args() { int bc_depth = - Bytecodes::depth(iter().cur_bc()); assert(bc_depth == 1 || bc_depth == 2, "only two kinds of branches"); DEBUG_ONLY(sync_jvms()); // argument(n) requires a synced jvms - assert(argument(0) != NULL, "must exist"); - assert(bc_depth == 1 || argument(1) != NULL, "two must exist"); + assert(argument(0) != nullptr, "must exist"); + assert(bc_depth == 1 || argument(1) != nullptr, "two must exist"); inc_sp(bc_depth); return bc_depth; } @@ -1379,7 +1379,7 @@ void Parse::do_ifnull(BoolTest::mask btest, Node *c) { repush_if_args(); // to gather stats on loop uncommon_trap(Deoptimization::Reason_unreached, Deoptimization::Action_reinterpret, - NULL, "cold"); + nullptr, "cold"); if (C->eliminate_boxing()) { // Mark the successor blocks as parsed branch_block->next_path_num(); @@ -1450,7 +1450,7 @@ void Parse::do_if(BoolTest::mask btest, Node* c) { repush_if_args(); // to gather stats on loop uncommon_trap(Deoptimization::Reason_unreached, Deoptimization::Action_reinterpret, - NULL, "cold"); + nullptr, "cold"); if (C->eliminate_boxing()) { // Mark the successor blocks as parsed branch_block->next_path_num(); @@ -1480,7 +1480,7 @@ void Parse::do_if(BoolTest::mask btest, Node* c) { if (tst->is_Bool()) { // Refresh c from the transformed bool node, since it may be // simpler than the original c. Also re-canonicalize btest. - // This wins when (Bool ne (Conv2B p) 0) => (Bool ne (CmpP p NULL)). + // This wins when (Bool ne (Conv2B p) 0) => (Bool ne (CmpP p null)). // That can arise from statements like: if (x instanceof C) ... if (tst != tst0) { // Canonicalize one more time since transform can change it. 
@@ -1585,7 +1585,7 @@ void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block repush_if_args(); Node* call = uncommon_trap(Deoptimization::Reason_unstable_if, Deoptimization::Action_reinterpret, - NULL, + nullptr, (is_fallthrough ? "taken always" : "taken never")); if (call != nullptr) { @@ -1627,25 +1627,25 @@ static Node* extract_obj_from_klass_load(PhaseGVN* gvn, Node* n) { Node* ldk; if (n->is_DecodeNKlass()) { if (n->in(1)->Opcode() != Op_LoadNKlass) { - return NULL; + return nullptr; } else { ldk = n->in(1); } } else if (n->Opcode() != Op_LoadKlass) { - return NULL; + return nullptr; } else { ldk = n; } - assert(ldk != NULL && ldk->is_Load(), "should have found a LoadKlass or LoadNKlass node"); + assert(ldk != nullptr && ldk->is_Load(), "should have found a LoadKlass or LoadNKlass node"); Node* adr = ldk->in(MemNode::Address); intptr_t off = 0; Node* obj = AddPNode::Ideal_base_and_offset(adr, gvn, off); - if (obj == NULL || off != oopDesc::klass_offset_in_bytes()) // loading oopDesc::_klass? - return NULL; + if (obj == nullptr || off != oopDesc::klass_offset_in_bytes()) // loading oopDesc::_klass? + return nullptr; const TypePtr* tp = gvn->type(obj)->is_ptr(); - if (tp == NULL || !(tp->isa_instptr() || tp->isa_aryptr())) // is obj a Java object ptr? - return NULL; + if (tp == nullptr || !(tp->isa_instptr() || tp->isa_aryptr())) // is obj a Java object ptr? + return nullptr; return obj; } @@ -1658,13 +1658,13 @@ void Parse::sharpen_type_after_if(BoolTest::mask btest, if (btest == BoolTest::eq && tcon->isa_klassptr()) { Node* obj = extract_obj_from_klass_load(&_gvn, val); const TypeOopPtr* con_type = tcon->isa_klassptr()->as_instance_type(); - if (obj != NULL && (con_type->isa_instptr() || con_type->isa_aryptr())) { + if (obj != nullptr && (con_type->isa_instptr() || con_type->isa_aryptr())) { // Found: // Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq]) // or the narrowOop equivalent. 
const Type* obj_type = _gvn.type(obj); const TypeOopPtr* tboth = obj_type->join_speculative(con_type)->isa_oopptr(); - if (tboth != NULL && tboth->klass_is_exact() && tboth != obj_type && + if (tboth != nullptr && tboth->klass_is_exact() && tboth != obj_type && tboth->higher_equal(obj_type)) { // obj has to be of the exact type Foo if the CmpP succeeds. int obj_in_map = map()->find_edge(obj); @@ -1697,8 +1697,8 @@ void Parse::sharpen_type_after_if(BoolTest::mask btest, // Check for a comparison to a constant, and "know" that the compared // value is constrained on this path. assert(tcon->singleton(), ""); - ConstraintCastNode* ccast = NULL; - Node* cast = NULL; + ConstraintCastNode* ccast = nullptr; + Node* cast = nullptr; switch (btest) { case BoolTest::eq: // Constant test? @@ -1735,7 +1735,7 @@ void Parse::sharpen_type_after_if(BoolTest::mask btest, break; } - if (ccast != NULL) { + if (ccast != nullptr) { const Type* tcc = ccast->as_Type()->type(); assert(tcc != tval && tcc->higher_equal(tval), "must improve"); // Delay transform() call to allow recovery of pre-cast value @@ -1746,7 +1746,7 @@ void Parse::sharpen_type_after_if(BoolTest::mask btest, cast = ccast; } - if (cast != NULL) { // Here's the payoff. + if (cast != nullptr) { // Here's the payoff. 
replace_in_map(val, cast); } } @@ -1766,8 +1766,8 @@ Node* Parse::optimize_cmp_with_klass(Node* c) { if (c->Opcode() == Op_CmpP && (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) && c->in(2)->is_Con()) { - Node* load_klass = NULL; - Node* decode = NULL; + Node* load_klass = nullptr; + Node* decode = nullptr; if (c->in(1)->Opcode() == Op_DecodeNKlass) { decode = c->in(1); load_klass = c->in(1)->in(1); @@ -1778,7 +1778,7 @@ Node* Parse::optimize_cmp_with_klass(Node* c) { Node* addp = load_klass->in(2); Node* obj = addp->in(AddPNode::Address); const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr(); - if (obj_type->speculative_type_not_null() != NULL) { + if (obj_type->speculative_type_not_null() != nullptr) { ciKlass* k = obj_type->speculative_type(); inc_sp(2); obj = maybe_cast_profiled_obj(obj, k); @@ -1788,7 +1788,7 @@ Node* Parse::optimize_cmp_with_klass(Node* c) { load_klass = load_klass->clone(); load_klass->set_req(2, addp); load_klass = _gvn.transform(load_klass); - if (decode != NULL) { + if (decode != nullptr) { decode = decode->clone(); decode->set_req(1, load_klass); load_klass = _gvn.transform(decode); @@ -1875,7 +1875,7 @@ void Parse::do_one_bytecode() { ciConstant constant = iter().get_constant(); if (constant.is_loaded()) { const Type* con_type = Type::make_from_constant(constant); - if (con_type != NULL) { + if (con_type != nullptr) { push_node(con_type->basic_type(), makecon(con_type)); } } else { @@ -1883,14 +1883,14 @@ void Parse::do_one_bytecode() { if (iter().is_in_error()) { uncommon_trap(Deoptimization::make_trap_request(Deoptimization::Reason_unhandled, Deoptimization::Action_none), - NULL, "constant in error state", true /* must_throw */); + nullptr, "constant in error state", true /* must_throw */); } else { int index = iter().get_constant_pool_index(); uncommon_trap(Deoptimization::make_trap_request(Deoptimization::Reason_unloaded, Deoptimization::Action_reinterpret, index), - NULL, "unresolved constant", 
false /* must_throw */); + nullptr, "unresolved constant", false /* must_throw */); } } break; @@ -2531,17 +2531,17 @@ void Parse::do_one_bytecode() { case Bytecodes::_i2b: // Sign extend a = pop(); - a = Compile::narrow_value(T_BYTE, a, NULL, &_gvn, true); + a = Compile::narrow_value(T_BYTE, a, nullptr, &_gvn, true); push(a); break; case Bytecodes::_i2s: a = pop(); - a = Compile::narrow_value(T_SHORT, a, NULL, &_gvn, true); + a = Compile::narrow_value(T_SHORT, a, nullptr, &_gvn, true); push(a); break; case Bytecodes::_i2c: a = pop(); - a = Compile::narrow_value(T_CHAR, a, NULL, &_gvn, true); + a = Compile::narrow_value(T_CHAR, a, nullptr, &_gvn, true); push(a); break; @@ -2565,7 +2565,7 @@ void Parse::do_one_bytecode() { // Exit points of synchronized methods must have an unlock node case Bytecodes::_return: - return_current(NULL); + return_current(nullptr); break; case Bytecodes::_ireturn: @@ -2581,7 +2581,7 @@ void Parse::do_one_bytecode() { break; case Bytecodes::_athrow: - // null exception oop throws NULL pointer exception + // null exception oop throws null pointer exception null_check(peek()); if (stopped()) return; // Hook the thrown exception directly to subsequent handlers. @@ -2615,7 +2615,7 @@ void Parse::do_one_bytecode() { ciMethodData* methodData = method()->method_data(); if (!methodData->is_mature()) break; ciProfileData* data = methodData->bci_to_data(bci()); - assert(data != NULL && data->is_JumpData(), "need JumpData for taken branch"); + assert(data != nullptr && data->is_JumpData(), "need JumpData for taken branch"); int taken = ((ciJumpData*)data)->taken(); taken = method()->scale_count(taken); target_block->set_count(taken); diff --git a/src/hotspot/share/opto/parse3.cpp b/src/hotspot/share/opto/parse3.cpp index 1f8f476ba48..8bb55eed62d 100644 --- a/src/hotspot/share/opto/parse3.cpp +++ b/src/hotspot/share/opto/parse3.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -60,7 +60,7 @@ void Parse::do_field_access(bool is_get, bool is_field) { !(method()->holder() == field_holder && method()->is_object_initializer())) { uncommon_trap(Deoptimization::Reason_unhandled, Deoptimization::Action_reinterpret, - NULL, "put to call site target field"); + nullptr, "put to call site target field"); return; } @@ -118,7 +118,7 @@ void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) { (bt != T_OBJECT || field->type()->is_loaded())) { // final or stable field Node* con = make_constant_from_field(field, obj); - if (con != NULL) { + if (con != nullptr) { push_node(field->layout_type(), con); return; } @@ -156,7 +156,7 @@ void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) { } else { type = TypeOopPtr::make_from_constant(con)->isa_oopptr(); } - assert(type != NULL, "field singleton type must be consistent"); + assert(type != nullptr, "field singleton type must be consistent"); } else { type = TypeOopPtr::make_from_klass(field_klass->as_klass()); } @@ -186,7 +186,7 @@ void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) { if (PrintOpto && (Verbose || WizardMode)) { method()->print_name(); tty->print_cr(" asserting nullness of field at bci: %d", bci()); } - if (C->log() != NULL) { + if (C->log() != nullptr) { C->log()->elem("assert_null reason='field' klass='%d'", C->log()->identify(field->type())); } @@ -242,7 +242,7 @@ void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) { // Any method can write a @Stable field; insert memory barriers after those also. 
if (field->is_final()) { set_wrote_final(true); - if (AllocateNode::Ideal_allocation(obj, &_gvn) != NULL) { + if (AllocateNode::Ideal_allocation(obj, &_gvn) != nullptr) { // Preserve allocation ptr to create precedent edge to it in membar // generated on exit from constructor. // Can't bind stable with its allocation, only record allocation for final field. @@ -298,7 +298,7 @@ void Parse::do_newarray(BasicType elem_type) { // Also handle the degenerate 1-dimensional case of anewarray. Node* Parse::expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs) { Node* length = lengths[0]; - assert(length != NULL, ""); + assert(length != nullptr, ""); Node* array = new_array(makecon(TypeKlassPtr::make(array_klass, Type::trust_interfaces)), length, nargs); if (ndimensions > 1) { jint length_con = find_int_con(length, -1); @@ -331,7 +331,7 @@ void Parse::do_multianewarray() { // get the lengths from the stack (first dimension is on top) Node** length = NEW_RESOURCE_ARRAY(Node*, ndimensions + 1); - length[ndimensions] = NULL; // terminating null for make_runtime_call + length[ndimensions] = nullptr; // terminating null for make_runtime_call int j; for (j = ndimensions-1; j >= 0 ; j--) length[j] = pop(); @@ -356,7 +356,7 @@ void Parse::do_multianewarray() { // Can use multianewarray instead of [a]newarray if only one dimension, // or if all non-final dimensions are small constants. if (ndimensions == 1 || (1 <= expand_count && expand_count <= expand_limit)) { - Node* obj = NULL; + Node* obj = nullptr; // Set the original stack and the reexecute bit for the interpreter // to reexecute the multianewarray bytecode if deoptimization happens. // Do it unconditionally even for one dimension multianewarray. 
@@ -371,7 +371,7 @@ void Parse::do_multianewarray() { return; } - address fun = NULL; + address fun = nullptr; switch (ndimensions) { case 1: ShouldNotReachHere(); break; case 2: fun = OptoRuntime::multianewarray2_Java(); break; @@ -379,19 +379,19 @@ void Parse::do_multianewarray() { case 4: fun = OptoRuntime::multianewarray4_Java(); break; case 5: fun = OptoRuntime::multianewarray5_Java(); break; }; - Node* c = NULL; + Node* c = nullptr; - if (fun != NULL) { + if (fun != nullptr) { c = make_runtime_call(RC_NO_LEAF | RC_NO_IO, OptoRuntime::multianewarray_Type(ndimensions), - fun, NULL, TypeRawPtr::BOTTOM, + fun, nullptr, TypeRawPtr::BOTTOM, makecon(TypeKlassPtr::make(array_klass, Type::trust_interfaces)), length[0], length[1], length[2], - (ndimensions > 2) ? length[3] : NULL, - (ndimensions > 3) ? length[4] : NULL); + (ndimensions > 2) ? length[3] : nullptr, + (ndimensions > 3) ? length[4] : nullptr); } else { // Create a java array for dimension sizes - Node* dims = NULL; + Node* dims = nullptr; { PreserveReexecuteState preexecs(this); inc_sp(ndimensions); Node* dims_array_klass = makecon(TypeKlassPtr::make(ciArrayKlass::make(ciType::make(T_INT)))); @@ -406,7 +406,7 @@ void Parse::do_multianewarray() { c = make_runtime_call(RC_NO_LEAF | RC_NO_IO, OptoRuntime::multianewarrayN_Type(), - OptoRuntime::multianewarrayN_Java(), NULL, TypeRawPtr::BOTTOM, + OptoRuntime::multianewarrayN_Java(), nullptr, TypeRawPtr::BOTTOM, makecon(TypeKlassPtr::make(array_klass, Type::trust_interfaces)), dims); } @@ -421,7 +421,7 @@ void Parse::do_multianewarray() { type = type->is_aryptr()->cast_to_exactness(true); const TypeInt* ltype = _gvn.find_int_type(length[0]); - if (ltype != NULL) + if (ltype != nullptr) type = type->is_aryptr()->cast_to_size(ltype); // We cannot sharpen the nested sub-arrays, since the top level is mutable. 
diff --git a/src/hotspot/share/opto/parseHelper.cpp b/src/hotspot/share/opto/parseHelper.cpp index 19619595c74..ba4cc612cc3 100644 --- a/src/hotspot/share/opto/parseHelper.cpp +++ b/src/hotspot/share/opto/parseHelper.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -69,11 +69,11 @@ void Parse::do_checkcast() { Node *obj = peek(); // Throw uncommon trap if class is not loaded or the value we are casting - // _from_ is not loaded, and value is not null. If the value _is_ NULL, + // _from_ is not loaded, and value is not null. If the value _is_ null, // then the checkcast does nothing. const TypeOopPtr *tp = _gvn.type(obj)->isa_oopptr(); if (!will_link || (tp && !tp->is_loaded())) { - if (C->log() != NULL) { + if (C->log() != nullptr) { if (!will_link) { C->log()->elem("assert_null reason='checkcast' klass='%d'", C->log()->identify(klass)); @@ -113,7 +113,7 @@ void Parse::do_instanceof() { ciKlass* klass = iter().get_klass(will_link); if (!will_link) { - if (C->log() != NULL) { + if (C->log() != nullptr) { C->log()->elem("assert_null reason='instanceof' klass='%d'", C->log()->identify(klass)); } @@ -157,7 +157,7 @@ void Parse::array_store_check() { int klass_offset = oopDesc::klass_offset_in_bytes(); Node* p = basic_plus_adr( ary, ary, klass_offset ); // p's type is array-of-OOPS plus klass_offset - Node* array_klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS)); + Node* array_klass = _gvn.transform(LoadKlassNode::make(_gvn, nullptr, immutable_memory(), p, TypeInstPtr::KLASS)); // Get the array klass const TypeKlassPtr *tak = _gvn.type(array_klass)->is_klassptr(); @@ -197,7 +197,7 @@ void Parse::array_store_check() { // Make a constant out of the 
inexact array klass const TypeKlassPtr *extak = tak->cast_to_exactness(true); - if (extak->exact_klass(true) != NULL) { + if (extak->exact_klass(true) != nullptr) { Node* con = makecon(extak); Node* cmp = _gvn.transform(new CmpPNode( array_klass, con )); Node* bol = _gvn.transform(new BoolNode( cmp, BoolTest::eq )); @@ -213,7 +213,7 @@ void Parse::array_store_check() { // Use the exact constant value we know it is. replace_in_map(array_klass,con); CompileLog* log = C->log(); - if (log != NULL) { + if (log != nullptr) { log->elem("cast_up reason='monomorphic_array' from='%d' to='(exact)'", log->identify(extak->exact_klass())); } @@ -230,7 +230,7 @@ void Parse::array_store_check() { // We are allowed to use the constant type only if cast succeeded. If always_see_exact_class is true, // we must set a control edge from the IfTrue node created by the uncommon_trap above to the // LoadKlassNode. - Node* a_e_klass = _gvn.transform(LoadKlassNode::make(_gvn, always_see_exact_class ? control() : NULL, + Node* a_e_klass = _gvn.transform(LoadKlassNode::make(_gvn, always_see_exact_class ? control() : nullptr, immutable_memory(), p2, tak)); // Check (the hard way) and throw if not a subklass. @@ -287,8 +287,8 @@ void Parse::do_new() { // Debug dump of the mapping from address types to MergeMemNode indices. void Parse::dump_map_adr_mem() const { tty->print_cr("--- Mapping from address types to memory Nodes ---"); - MergeMemNode *mem = map() == NULL ? NULL : (map()->memory()->is_MergeMem() ? - map()->memory()->as_MergeMem() : NULL); + MergeMemNode *mem = map() == nullptr ? nullptr : (map()->memory()->is_MergeMem() ? 
+ map()->memory()->as_MergeMem() : nullptr); for (uint i = 0; i < (uint)C->num_alias_types(); i++) { C->alias_type(i)->print_on(tty); tty->print("\t"); diff --git a/src/hotspot/share/opto/phase.cpp b/src/hotspot/share/opto/phase.cpp index 4b0920558a2..90b9a7d527c 100644 --- a/src/hotspot/share/opto/phase.cpp +++ b/src/hotspot/share/opto/phase.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -40,7 +40,7 @@ elapsedTimer Phase::_t_stubCompilation; elapsedTimer Phase::timers[max_phase_timers]; //------------------------------Phase------------------------------------------ -Phase::Phase( PhaseNumber pnum ) : _pnum(pnum), C( pnum == Compiler ? NULL : Compile::current()) { +Phase::Phase( PhaseNumber pnum ) : _pnum(pnum), C( pnum == Compiler ? nullptr : Compile::current()) { // Poll for requests from shutdown mechanism to quiesce compiler (4448539, 4448544). // This is an effective place to poll, since the compiler is full of phases. // In particular, every inlining site uses a recursively created Parse phase. 
diff --git a/src/hotspot/share/opto/phaseX.cpp b/src/hotspot/share/opto/phaseX.cpp index ce0502bfb03..b00d5e2d29b 100644 --- a/src/hotspot/share/opto/phaseX.cpp +++ b/src/hotspot/share/opto/phaseX.cpp @@ -56,7 +56,7 @@ NodeHash::NodeHash(uint est_max_size) : #endif { // _sentinel must be in the current node space - _sentinel = new ProjNode(NULL, TypeFunc::Control); + _sentinel = new ProjNode(nullptr, TypeFunc::Control); memset(_table,0,sizeof(Node*)*_max); } @@ -73,7 +73,7 @@ NodeHash::NodeHash(Arena *arena, uint est_max_size) : #endif { // _sentinel must be in the current node space - _sentinel = new ProjNode(NULL, TypeFunc::Control); + _sentinel = new ProjNode(nullptr, TypeFunc::Control); memset(_table,0,sizeof(Node*)*_max); } @@ -99,7 +99,7 @@ Node *NodeHash::hash_find( const Node *n ) { uint hash = n->hash(); if (hash == Node::NO_HASH) { NOT_PRODUCT( _lookup_misses++ ); - return NULL; + return nullptr; } uint key = hash & (_max-1); uint stride = key | 0x01; @@ -107,7 +107,7 @@ Node *NodeHash::hash_find( const Node *n ) { Node *k = _table[key]; // Get hashed value if( !k ) { // ?Miss? NOT_PRODUCT( _lookup_misses++ ); - return NULL; // Miss! + return nullptr; // Miss! } int op = n->Opcode(); @@ -129,11 +129,11 @@ Node *NodeHash::hash_find( const Node *n ) { k = _table[key]; // Get hashed value if( !k ) { // ?Miss? NOT_PRODUCT( _lookup_misses++ ); - return NULL; // Miss! + return nullptr; // Miss! } } ShouldNotReachHere(); - return NULL; + return nullptr; } //------------------------------hash_find_insert------------------------------- @@ -144,7 +144,7 @@ Node *NodeHash::hash_find_insert( Node *n ) { uint hash = n->hash(); if (hash == Node::NO_HASH) { NOT_PRODUCT( _lookup_misses++ ); - return NULL; + return nullptr; } uint key = hash & (_max-1); uint stride = key | 0x01; // stride must be relatively prime to table siz @@ -156,7 +156,7 @@ Node *NodeHash::hash_find_insert( Node *n ) { _table[key] = n; // Insert into table! 
debug_only(n->enter_hash_lock()); // Lock down the node while in the table. check_grow(); // Grow table if insert hit limit - return NULL; // Miss! + return nullptr; // Miss! } else if( k == _sentinel ) { first_sentinel = key; // Can insert here @@ -185,7 +185,7 @@ Node *NodeHash::hash_find_insert( Node *n ) { _table[key] = n; // Insert into table! debug_only(n->enter_hash_lock()); // Lock down the node while in the table. check_grow(); // Grow table if insert hit limit - return NULL; // Miss! + return nullptr; // Miss! } else if( first_sentinel == 0 && k == _sentinel ) { first_sentinel = key; // Can insert here @@ -193,7 +193,7 @@ Node *NodeHash::hash_find_insert( Node *n ) { } ShouldNotReachHere(); - return NULL; + return nullptr; } //------------------------------hash_insert------------------------------------ @@ -235,7 +235,7 @@ bool NodeHash::hash_delete( const Node *n ) { uint key = hash & (_max-1); uint stride = key | 0x01; debug_only( uint counter = 0; ); - for( ; /* (k != NULL) && (k != _sentinel) */; ) { + for( ; /* (k != nullptr) && (k != _sentinel) */; ) { debug_only( counter++ ); NOT_PRODUCT( _delete_probes++ ); k = _table[key]; // Get hashed value @@ -294,7 +294,7 @@ void NodeHash::grow() { } //------------------------------clear------------------------------------------ -// Clear all entries in _table to NULL but keep storage +// Clear all entries in _table to null but keep storage void NodeHash::clear() { #ifdef ASSERT // Unlock all nodes upon removal from table. 
@@ -319,7 +319,7 @@ void NodeHash::remove_useless_nodes(VectorSet &useful) { Node *sentinel_node = sentinel(); for( uint i = 0; i < max; ++i ) { Node *n = at(i); - if(n != NULL && n != sentinel_node && !useful.test(n->_idx)) { + if(n != nullptr && n != sentinel_node && !useful.test(n->_idx)) { debug_only(n->exit_hash_lock()); // Unlock the node when removed _table[i] = sentinel_node; // Replace with placeholder } @@ -335,7 +335,7 @@ void NodeHash::check_no_speculative_types() { Node *sentinel_node = sentinel(); for (uint i = 0; i < max; ++i) { Node *n = at(i); - if (n != NULL && + if (n != nullptr && n != sentinel_node && n->is_Type() && live_nodes.member(n)) { @@ -379,7 +379,7 @@ Node *NodeHash::find_index(uint idx) { // For debugging if( !m || m == _sentinel ) continue; if( m->_idx == (uint)idx ) return m; } - return NULL; + return nullptr; } #endif @@ -567,7 +567,7 @@ int PhaseRenumberLive::update_embedded_ids(Node* n) { } const Type* type = _new_type_array.fast_lookup(n->_idx); - if (type != NULL && type->isa_oopptr() && type->is_oopptr()->is_known_instance()) { + if (type != nullptr && type->isa_oopptr() && type->is_oopptr()->is_known_instance()) { if (!_is_pass_finished) { return -1; // delay } @@ -595,7 +595,7 @@ PhaseTransform::PhaseTransform( PhaseNumber pnum ) : Phase(pnum), set_allow_progress(true); #endif // Force allocation for currently existing nodes - _types.map(C->unique(), NULL); + _types.map(C->unique(), nullptr); } //------------------------------PhaseTransform--------------------------------- @@ -611,7 +611,7 @@ PhaseTransform::PhaseTransform( Arena *arena, PhaseNumber pnum ) : Phase(pnum), set_allow_progress(true); #endif // Force allocation for currently existing nodes - _types.map(C->unique(), NULL); + _types.map(C->unique(), nullptr); } //------------------------------PhaseTransform--------------------------------- @@ -638,22 +638,22 @@ void PhaseTransform::init_con_caches() { 
//--------------------------------find_int_type-------------------------------- const TypeInt* PhaseTransform::find_int_type(Node* n) { - if (n == NULL) return NULL; + if (n == nullptr) return nullptr; // Call type_or_null(n) to determine node's type since we might be in // parse phase and call n->Value() may return wrong type. // (For example, a phi node at the beginning of loop parsing is not ready.) const Type* t = type_or_null(n); - if (t == NULL) return NULL; + if (t == nullptr) return nullptr; return t->isa_int(); } //-------------------------------find_long_type-------------------------------- const TypeLong* PhaseTransform::find_long_type(Node* n) { - if (n == NULL) return NULL; + if (n == nullptr) return nullptr; // (See comment above on type_or_null.) const Type* t = type_or_null(n); - if (t == NULL) return NULL; + if (t == nullptr) return nullptr; return t->isa_long(); } @@ -695,7 +695,7 @@ void PhaseTransform::dump_nodes_and_types_recur( const Node *n, uint depth, bool dump_nodes_and_types_recur( n->in(i), depth-1, only_ctrl, visited ); } n->dump(); - if (type_or_null(n) != NULL) { + if (type_or_null(n) != nullptr) { tty->print(" "); type(n)->dump(); tty->cr(); } } @@ -758,10 +758,10 @@ ConNode* PhaseValues::uncached_makecon(const Type *t) { assert(t->singleton(), "must be a constant"); ConNode* x = ConNode::make(t); ConNode* k = (ConNode*)hash_find_insert(x); // Value numbering - if (k == NULL) { + if (k == nullptr) { set_type(x, t); // Missed, provide type mapping GrowableArray* nna = C->node_note_array(); - if (nna != NULL) { + if (nna != nullptr) { Node_Notes* loc = C->locate_node_notes(nna, x->_idx, true); loc->clear(); // do not put debug info on constants } @@ -778,7 +778,7 @@ ConINode* PhaseTransform::intcon(jint i) { // Small integer? Check cache! 
Check that cached node is not dead if (i >= _icon_min && i <= _icon_max) { ConINode* icon = _icons[i-_icon_min]; - if (icon != NULL && icon->in(TypeFunc::Control) != NULL) + if (icon != nullptr && icon->in(TypeFunc::Control) != nullptr) return icon; } ConINode* icon = (ConINode*) uncached_makecon(TypeInt::make(i)); @@ -794,7 +794,7 @@ ConLNode* PhaseTransform::longcon(jlong l) { // Small integer? Check cache! Check that cached node is not dead if (l >= _lcon_min && l <= _lcon_max) { ConLNode* lcon = _lcons[l-_lcon_min]; - if (lcon != NULL && lcon->in(TypeFunc::Control) != NULL) + if (lcon != nullptr && lcon->in(TypeFunc::Control) != nullptr) return lcon; } ConLNode* lcon = (ConLNode*) uncached_makecon(TypeLong::make(l)); @@ -817,7 +817,7 @@ ConNode* PhaseTransform::integercon(jlong l, BasicType bt) { ConNode* PhaseTransform::zerocon(BasicType bt) { assert((uint)bt <= _zcon_max, "domain check"); ConNode* zcon = _zcons[bt]; - if (zcon != NULL && zcon->in(TypeFunc::Control) != NULL) + if (zcon != nullptr && zcon->in(TypeFunc::Control) != nullptr) return zcon; zcon = (ConNode*) uncached_makecon(Type::get_zero_type(bt)); _zcons[bt] = zcon; @@ -829,7 +829,7 @@ ConNode* PhaseTransform::zerocon(BasicType bt) { //============================================================================= Node* PhaseGVN::apply_ideal(Node* k, bool can_reshape) { Node* i = BarrierSet::barrier_set()->barrier_set_c2()->ideal_node(this, k, can_reshape); - if (i == NULL) { + if (i == nullptr) { i = k->Ideal(this, can_reshape); } return i; @@ -852,7 +852,7 @@ Node *PhaseGVN::transform_no_reclaim(Node *n) { Node* k = n; Node* i = apply_ideal(k, /*can_reshape=*/false); NOT_PRODUCT(uint loop_count = 1;) - while (i != NULL) { + while (i != nullptr) { assert(i->_idx >= k->_idx, "Idealize should return new nodes, use Identity to return old nodes" ); k = i; #ifdef ASSERT @@ -873,11 +873,11 @@ Node *PhaseGVN::transform_no_reclaim(Node *n) { // cache Value. 
Later requests for the local phase->type of this Node can // use the cached Value instead of suffering with 'bottom_type'. const Type* t = k->Value(this); // Get runtime Value set - assert(t != NULL, "value sanity"); + assert(t != nullptr, "value sanity"); if (type_or_null(k) != t) { #ifndef PRODUCT // Do not count initial visit to node as a transformation - if (type_or_null(k) == NULL) { + if (type_or_null(k) == nullptr) { inc_new_values(); set_progress(); } @@ -923,7 +923,7 @@ bool PhaseGVN::is_dominator_helper(Node *d, Node *n, bool linear_only) { while (d != n) { n = IfNode::up_one_dom(n, linear_only); i++; - if (n == NULL || i >= 100) { + if (n == nullptr || i >= 100) { return false; } } @@ -936,7 +936,7 @@ bool PhaseGVN::is_dominator_helper(Node *d, Node *n, bool linear_only) { // or through an other data node excluding cons and phis. void PhaseGVN::dead_loop_check( Node *n ) { // Phi may reference itself in a loop - if (n != NULL && !n->is_dead_loop_safe() && !n->is_CFG()) { + if (n != nullptr && !n->is_dead_loop_safe() && !n->is_CFG()) { // Do 2 levels check and only data inputs. bool no_dead_loop = true; uint cnt = n->req(); @@ -944,7 +944,7 @@ void PhaseGVN::dead_loop_check( Node *n ) { Node *in = n->in(i); if (in == n) { no_dead_loop = false; - } else if (in != NULL && !in->is_dead_loop_safe()) { + } else if (in != nullptr && !in->is_dead_loop_safe()) { uint icnt = in->req(); for (uint j = 1; j < icnt && no_dead_loop; j++) { if (in->in(j) == n || in->in(j) == in) @@ -999,7 +999,7 @@ PhaseIterGVN::PhaseIterGVN(PhaseGVN* gvn) : PhaseGVN(gvn), max = _table.size(); for( uint i = 0; i < max; ++i ) { Node *n = _table.at(i); - if(n != NULL && n != _table.sentinel() && n->outcnt() == 0) { + if(n != nullptr && n != _table.sentinel() && n->outcnt() == 0) { if( n->is_top() ) continue; // If remove_useless_nodes() has run, we expect no such nodes left. 
assert(false, "remove_useless_nodes missed this node"); @@ -1046,7 +1046,7 @@ void PhaseIterGVN::verify_step(Node* n) { } for (int i = 0; i < _verify_window_size; i++) { Node* n = _verify_window[i]; - if (n == NULL) { + if (n == nullptr) { continue; } if (n->in(0) == NodeSentinel) { // xform_idom @@ -1070,7 +1070,7 @@ void PhaseIterGVN::trace_PhaseIterGVN(Node* n, Node* nn, const Type* oldtype) { if (nn != n) { // print old node tty->print("< "); - if (oldtype != newtype && oldtype != NULL) { + if (oldtype != newtype && oldtype != nullptr) { oldtype->dump(); } do { tty->print("\t"); } while (tty->position() < 16); @@ -1079,14 +1079,14 @@ void PhaseIterGVN::trace_PhaseIterGVN(Node* n, Node* nn, const Type* oldtype) { } if (oldtype != newtype || nn != n) { // print new node and/or new type - if (oldtype == NULL) { + if (oldtype == nullptr) { tty->print("* "); } else if (nn != n) { tty->print("> "); } else { tty->print("= "); } - if (newtype == NULL) { + if (newtype == nullptr) { tty->print("null"); } else { newtype->dump(); @@ -1104,7 +1104,7 @@ void PhaseIterGVN::trace_PhaseIterGVN(Node* n, Node* nn, const Type* oldtype) { } if (nn != n) { // ignore n, it might be subsumed - verify_step((Node*) NULL); + verify_step((Node*) nullptr); } } } @@ -1113,12 +1113,12 @@ void PhaseIterGVN::init_verifyPhaseIterGVN() { _verify_counter = 0; _verify_full_passes = 0; for (int i = 0; i < _verify_window_size; i++) { - _verify_window[i] = NULL; + _verify_window[i] = nullptr; } #ifdef ASSERT // Verify that all modified nodes are on _worklist Unique_Node_List* modified_list = C->modified_nodes(); - while (modified_list != NULL && modified_list->size()) { + while (modified_list != nullptr && modified_list->size()) { Node* n = modified_list->pop(); if (!n->is_Con() && !_worklist.member(n)) { n->dump(); @@ -1132,7 +1132,7 @@ void PhaseIterGVN::verify_PhaseIterGVN() { #ifdef ASSERT // Verify nodes with changed inputs. 
Unique_Node_List* modified_list = C->modified_nodes(); - while (modified_list != NULL && modified_list->size()) { + while (modified_list != nullptr && modified_list->size()) { Node* n = modified_list->pop(); if (!n->is_Con()) { // skip Con nodes n->dump(); @@ -1153,7 +1153,7 @@ void PhaseIterGVN::verify_PhaseIterGVN() { } #ifdef ASSERT - if (modified_list != NULL) { + if (modified_list != nullptr) { while (modified_list->size() > 0) { Node* n = modified_list->pop(); n->dump(); @@ -1260,7 +1260,7 @@ void PhaseIterGVN::verify_optimize() { // (2) LoadNode performs deep traversals. Load is not notified for changes far away. // (3) CmpPNode performs deep traversals if it compares oopptr. CmpP is not notified for changes far away. bool PhaseIterGVN::verify_node_value(Node* n) { - // If we assert inside type(n), because the type is still a nullptr, then maybe + // If we assert inside type(n), because the type is still a null, then maybe // the node never went through gvn.transform, which would be a bug. const Type* told = type(n); const Type* tnew = n->Value(this); @@ -1324,7 +1324,7 @@ bool PhaseIterGVN::verify_node_value(Node* n) { Node* PhaseIterGVN::register_new_node_with_optimizer(Node* n, Node* orig) { set_type_bottom(n); _worklist.push(n); - if (orig != NULL) C->copy_node_notes_to(n, orig); + if (orig != nullptr) C->copy_node_notes_to(n, orig); return n; } @@ -1339,7 +1339,7 @@ Node *PhaseIterGVN::transform( Node *n ) { // If brand new node, make space in type array, and give it a type. 
ensure_type_or_null(n); - if (type_or_null(n) == NULL) { + if (type_or_null(n) == nullptr) { set_type_bottom(n); } @@ -1373,7 +1373,7 @@ Node *PhaseIterGVN::transform_old(Node* n) { #endif DEBUG_ONLY(uint loop_count = 1;) - while (i != NULL) { + while (i != nullptr) { #ifdef ASSERT if (loop_count >= K + C->live_nodes()) { dump_infinite_loop_info(i, "PhaseIterGVN::transform_old"); @@ -1405,7 +1405,7 @@ Node *PhaseIterGVN::transform_old(Node* n) { // See what kind of values 'k' takes on at runtime const Type* t = k->Value(this); - assert(t != NULL, "value sanity"); + assert(t != nullptr, "value sanity"); // Since I just called 'Value' to compute the set of run-time values // for this Node, and 'Value' is non-local (and therefore expensive) I'll @@ -1490,8 +1490,8 @@ void PhaseIterGVN::remove_globally_dead_node( Node *dead ) { // Smash all inputs to 'dead', isolating him completely for (uint i = 0; i < dead->req(); i++) { Node *in = dead->in(i); - if (in != NULL && in != C->top()) { // Points to something? - int nrep = dead->replace_edge(in, NULL, this); // Kill edges + if (in != nullptr && in != C->top()) { // Points to something? + int nrep = dead->replace_edge(in, nullptr, this); // Kill edges assert((nrep > 0), "sanity"); if (in->outcnt() == 0) { // Made input go dead? _stack.push(in, PROCESS_INPUTS); // Recursively remove @@ -1516,7 +1516,7 @@ void PhaseIterGVN::remove_globally_dead_node( Node *dead ) { BarrierSet::barrier_set()->barrier_set_c2()->enqueue_useful_gc_barrier(this, in); } if (ReduceFieldZeroing && dead->is_Load() && i == MemNode::Memory && - in->is_Proj() && in->in(0) != NULL && in->in(0)->is_Initialize()) { + in->is_Proj() && in->in(0) != nullptr && in->in(0)->is_Initialize()) { // A Load that directly follows an InitializeNode is // going away. The Stores that follow are candidates // again to be captured by the InitializeNode. 
@@ -1527,7 +1527,7 @@ void PhaseIterGVN::remove_globally_dead_node( Node *dead ) { } } } - } // if (in != NULL && in != C->top()) + } // if (in != nullptr && in != C->top()) } // for (uint i = 0; i < dead->req(); i++) if (recurse) { continue; @@ -1583,11 +1583,11 @@ void PhaseIterGVN::subsume_node( Node *old, Node *nn ) { // Search for instance field data PhiNodes in the same region pointing to the old // memory PhiNode and update their instance memory ids to point to the new node. - if (old->is_Phi() && old->as_Phi()->type()->has_memory() && old->in(0) != NULL) { + if (old->is_Phi() && old->as_Phi()->type()->has_memory() && old->in(0) != nullptr) { Node* region = old->in(0); for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) { PhiNode* phi = region->fast_out(i)->isa_Phi(); - if (phi != NULL && phi->inst_mem_id() == (int)old->_idx) { + if (phi != nullptr && phi->inst_mem_id() == (int)old->_idx) { phi->set_inst_mem_id((int)nn->_idx); } } @@ -1598,7 +1598,7 @@ void PhaseIterGVN::subsume_node( Node *old, Node *nn ) { temp->init_req(0,nn); // Add a use to nn to prevent him from dying remove_dead_node( old ); temp->del_req(0); // Yank bogus edge - if (nn != NULL && nn->outcnt() == 0) { + if (nn != nullptr && nn->outcnt() == 0) { _worklist.push(nn); } #ifndef PRODUCT @@ -1630,14 +1630,14 @@ static PhiNode* countedloop_phi_from_cmp(CmpNode* cmp, Node* n) { BaseCountedLoopEndNode* cle = iff->as_BaseCountedLoopEnd(); if (cle->limit() == n) { PhiNode* phi = cle->phi(); - if (phi != NULL) { + if (phi != nullptr) { return phi; } } } } } - return NULL; + return nullptr; } void PhaseIterGVN::add_users_to_worklist( Node *n ) { @@ -1652,12 +1652,12 @@ void PhaseIterGVN::add_users_to_worklist( Node *n ) { add_users_to_worklist0(use); // If we changed the receiver type to a call, we need to revisit - // the Catch following the call. It's looking for a non-NULL + // the Catch following the call. 
It's looking for a non-null // receiver to know when to enable the regular fall-through path // in addition to the NullPtrException path. if (use->is_CallDynamicJava() && n == use->in(TypeFunc::Parms)) { Node* p = use->as_CallDynamicJava()->proj_out_or_null(TypeFunc::Control); - if (p != NULL) { + if (p != nullptr) { add_users_to_worklist0(p); } } @@ -1685,7 +1685,7 @@ void PhaseIterGVN::add_users_to_worklist( Node *n ) { } if (use_op == Op_CmpI || use_op == Op_CmpL) { Node* phi = countedloop_phi_from_cmp(use->as_Cmp(), n); - if (phi != NULL) { + if (phi != nullptr) { // Input to the cmp of a loop exit check has changed, thus // the loop limit may have changed, which can then change the // range values of the trip-count Phi. @@ -1823,9 +1823,9 @@ void PhaseIterGVN::add_users_to_worklist( Node *n ) { // If changed initialization activity, check dependent Stores if (use_op == Op_Allocate || use_op == Op_AllocateArray) { InitializeNode* init = use->as_Allocate()->initialization(); - if (init != NULL) { + if (init != nullptr) { Node* imem = init->proj_out_or_null(TypeFunc::Memory); - if (imem != NULL) add_users_to_worklist0(imem); + if (imem != nullptr) add_users_to_worklist0(imem); } } // If the ValidLengthTest input changes then the fallthrough path out of the AllocateArray may have become dead. @@ -1833,14 +1833,14 @@ void PhaseIterGVN::add_users_to_worklist( Node *n ) { // to guarantee the change is not missed. if (use_op == Op_AllocateArray && n == use->in(AllocateNode::ValidLengthTest)) { Node* p = use->as_AllocateArray()->proj_out_or_null(TypeFunc::Control); - if (p != NULL) { + if (p != nullptr) { add_users_to_worklist0(p); } } if (use_op == Op_Initialize) { Node* imem = use->as_Initialize()->proj_out_or_null(TypeFunc::Memory); - if (imem != NULL) add_users_to_worklist0(imem); + if (imem != nullptr) add_users_to_worklist0(imem); } // Loading the java mirror from a Klass requires two loads and the type // of the mirror load depends on the type of 'n'. 
See LoadNode::Value(). @@ -1883,7 +1883,7 @@ void PhaseIterGVN::remove_speculative_types() { assert(UseTypeSpeculation, "speculation is off"); for (uint i = 0; i < _types.Size(); i++) { const Type* t = _types.fast_lookup(i); - if (t != NULL) { + if (t != nullptr) { _types.map(i, t->remove_speculative()); } } @@ -2073,7 +2073,7 @@ void PhaseCCP::push_phis(Unique_Node_List& worklist, const Node* use) const { } // If we changed the receiver type to a call, we need to revisit the Catch node following the call. It's looking for a -// non-NULL receiver to know when to enable the regular fall-through path in addition to the NullPtrException path. +// non-null receiver to know when to enable the regular fall-through path in addition to the NullPtrException path. // Same is true if the type of a ValidLengthTest input to an AllocateArrayNode changes. void PhaseCCP::push_catch(Unique_Node_List& worklist, const Node* use) { if (use->is_Call()) { @@ -2081,7 +2081,7 @@ void PhaseCCP::push_catch(Unique_Node_List& worklist, const Node* use) { Node* proj = use->fast_out(i); if (proj->is_Proj() && proj->as_Proj()->_con == TypeFunc::Control) { Node* catch_node = proj->find_out_with(Op_Catch); - if (catch_node != NULL) { + if (catch_node != nullptr) { worklist.push(catch_node); } } @@ -2111,7 +2111,7 @@ void PhaseCCP::push_counted_loop_phi(Unique_Node_List& worklist, Node* parent, c uint use_op = use->Opcode(); if (use_op == Op_CmpI || use_op == Op_CmpL) { PhiNode* phi = countedloop_phi_from_cmp(use->as_Cmp(), parent); - if (phi != NULL) { + if (phi != nullptr) { worklist.push(phi); } } @@ -2200,7 +2200,7 @@ void PhaseCCP::do_transform() { // Convert any of his old-space children into new-space children. 
Node *PhaseCCP::transform( Node *n ) { Node *new_node = _nodes[n->_idx]; // Check for transformed node - if( new_node != NULL ) + if( new_node != nullptr ) return new_node; // Been there, done that, return old answer assert(n->is_Root(), "traversal must start at root"); @@ -2221,7 +2221,7 @@ Node *PhaseCCP::transform( Node *n ) { for (uint i = 0; i < _root_and_safepoints.size(); ++i) { Node* nn = _root_and_safepoints.at(i); Node* new_node = _nodes[nn->_idx]; - assert(new_node == NULL, ""); + assert(new_node == nullptr, ""); new_node = transform_once(nn); // Check for constant _nodes.map(nn->_idx, new_node); // Flag as having been cloned transform_stack.push(new_node); // Process children of cloned node @@ -2233,9 +2233,9 @@ Node *PhaseCCP::transform( Node *n ) { uint cnt = clone->req(); for( uint i = 0; i < cnt; i++ ) { // For all inputs do Node *input = clone->in(i); - if( input != NULL ) { // Ignore NULLs + if( input != nullptr ) { // Ignore nulls Node *new_input = _nodes[input->_idx]; // Check for cloned input node - if( new_input == NULL ) { + if( new_input == nullptr ) { new_input = transform_once(input); // Check for constant _nodes.map( input->_idx, new_input );// Flag as having been cloned transform_stack.push(new_input); // Process children of cloned node @@ -2277,7 +2277,7 @@ Node *PhaseCCP::transform_once( Node *n ) { Node *nn = n; // Default is to return the original constant if( t == Type::TOP ) { // cache my top node on the Compile instance - if( C->cached_top_node() == NULL || C->cached_top_node()->in(0) == NULL ) { + if( C->cached_top_node() == nullptr || C->cached_top_node()->in(0) == nullptr ) { C->set_cached_top_node(ConNode::make(Type::TOP)); set_type(C->top(), Type::TOP); } @@ -2289,7 +2289,7 @@ Node *PhaseCCP::transform_once( Node *n ) { NOT_PRODUCT( inc_constants(); ) } else if( n->is_Region() ) { // Unreachable region // Note: nn == C->top() - n->set_req(0, NULL); // Cut selfreference + n->set_req(0, nullptr); // Cut selfreference bool 
progress = true; uint max = n->outcnt(); DUIterator i; @@ -2322,7 +2322,7 @@ Node *PhaseCCP::transform_once( Node *n ) { _worklist.push(n); // n re-enters the hash table via the worklist } - // TEMPORARY fix to ensure that 2nd GVN pass eliminates NULL checks + // TEMPORARY fix to ensure that 2nd GVN pass eliminates null checks switch( n->Opcode() ) { case Op_CallStaticJava: // Give post-parse call devirtualization a chance case Op_CallDynamicJava: @@ -2383,7 +2383,7 @@ PhasePeephole::~PhasePeephole() { //------------------------------transform-------------------------------------- Node *PhasePeephole::transform( Node *n ) { ShouldNotCallThis(); - return NULL; + return nullptr; } //------------------------------do_transform----------------------------------- @@ -2485,7 +2485,7 @@ void Node::set_req_X( uint i, Node *n, PhaseIterGVN *igvn ) { void Node::set_req_X(uint i, Node *n, PhaseGVN *gvn) { PhaseIterGVN* igvn = gvn->is_IterGVN(); - if (igvn == NULL) { + if (igvn == nullptr) { set_req(i, n); return; } @@ -2518,7 +2518,7 @@ void Type_Array::grow( uint i ) { if( !_max ) { _max = 1; _types = (const Type**)_a->Amalloc( _max * sizeof(Type*) ); - _types[0] = NULL; + _types[0] = nullptr; } uint old = _max; _max = next_power_of_2(i); @@ -2531,7 +2531,7 @@ void Type_Array::grow( uint i ) { void Type_Array::dump() const { uint max = Size(); for( uint i = 0; i < max; i++ ) { - if( _types[i] != NULL ) { + if( _types[i] != nullptr ) { tty->print(" %d\t== ", i); _types[i]->dump(); tty->cr(); } } diff --git a/src/hotspot/share/opto/phaseX.hpp b/src/hotspot/share/opto/phaseX.hpp index 5e3efcf5982..331feea6b34 100644 --- a/src/hotspot/share/opto/phaseX.hpp +++ b/src/hotspot/share/opto/phaseX.hpp @@ -48,7 +48,7 @@ class PhaseRegAlloc; //----------------------------------------------------------------------------- -// Expandable closed hash-table of nodes, initialized to NULL. +// Expandable closed hash-table of nodes, initialized to null. 
// Note that the constructor just zeros things // Storage is reclaimed when the Arena's lifetime is over. class NodeHash : public StackObj { @@ -83,7 +83,7 @@ public: // Return 75% of _max, rounded up. uint insert_limit() const { return _max - (_max>>2); } - void clear(); // Set all entries to NULL, keep storage. + void clear(); // Set all entries to null, keep storage. // Size of hash table uint size() const { return _max; } // Return Node* at index in table @@ -117,7 +117,7 @@ public: //----------------------------------------------------------------------------- // Map dense integer indices to Types. Uses classic doubling-array trick. -// Abstractly provides an infinite array of Type*'s, initialized to NULL. +// Abstractly provides an infinite array of Type*'s, initialized to null. // Note that the constructor just zeros things, and since I use Arena // allocation I do not need a destructor to reclaim storage. // Despite the general name, this class is customized for use by PhaseTransform. @@ -126,8 +126,8 @@ class Type_Array : public StackObj { uint _max; const Type **_types; void grow( uint i ); // Grow array node to fit - const Type *operator[] ( uint i ) const // Lookup, or NULL for not mapped - { return (i<_max) ? _types[i] : (Type*)NULL; } + const Type *operator[] ( uint i ) const // Lookup, or null for not mapped + { return (i<_max) ? _types[i] : (Type*)nullptr; } friend class PhaseTransform; public: Type_Array(Arena *a) : _a(a), _max(0), _types(0) {} @@ -220,28 +220,28 @@ public: // Get a previously recorded type for the node n. // This type must already have been recorded. // If you want the type of a very new (untransformed) node, - // you must use type_or_null, and test the result for NULL. + // you must use type_or_null, and test the result for null. 
const Type* type(const Node* n) const { assert(_pnum != Ideal_Loop, "should not be used from PhaseIdealLoop"); - assert(n != NULL, "must not be null"); + assert(n != nullptr, "must not be null"); const Type* t = _types.fast_lookup(n->_idx); - assert(t != NULL, "must set before get"); + assert(t != nullptr, "must set before get"); return t; } // Get a previously recorded type for the node n, - // or else return NULL if there is none. + // or else return null if there is none. const Type* type_or_null(const Node* n) const { assert(_pnum != Ideal_Loop, "should not be used from PhaseIdealLoop"); return _types.fast_lookup(n->_idx); } // Record a type for a node. void set_type(const Node* n, const Type *t) { - assert(t != NULL, "type must not be null"); + assert(t != nullptr, "type must not be null"); _types.map(n->_idx, t); } void clear_type(const Node* n) { if (n->_idx < _types.Size()) { - _types.map(n->_idx, NULL); + _types.map(n->_idx, nullptr); } } // Record an initial type for a node, the node's bottom type. @@ -249,14 +249,14 @@ public: // Use this for initialization when bottom_type() (or better) is not handy. // Usually the initialization should be to n->Value(this) instead, // or a hand-optimized value like Type::MEMORY or Type::CONTROL. - assert(_types[n->_idx] == NULL, "must set the initial type just once"); + assert(_types[n->_idx] == nullptr, "must set the initial type just once"); _types.map(n->_idx, n->bottom_type()); } // Make sure the types array is big enough to record a size for the node n. // (In product builds, we never want to do range checks on the types array!) void ensure_type_or_null(const Node* n) { if (n->_idx >= _types.Size()) - _types.map(n->_idx, NULL); // Grow the types array as needed. + _types.map(n->_idx, nullptr); // Grow the types array as needed. 
} // Utility functions: @@ -264,18 +264,18 @@ public: const TypeLong* find_long_type(Node* n); jint find_int_con( Node* n, jint value_if_unknown) { const TypeInt* t = find_int_type(n); - return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown; + return (t != nullptr && t->is_con()) ? t->get_con() : value_if_unknown; } jlong find_long_con(Node* n, jlong value_if_unknown) { const TypeLong* t = find_long_type(n); - return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown; + return (t != nullptr && t->is_con()) ? t->get_con() : value_if_unknown; } // Make an idealized constant, i.e., one of ConINode, ConPNode, ConFNode, etc. // Same as transform(ConNode::make(t)). ConNode* makecon(const Type* t); virtual ConNode* uncached_makecon(const Type* t) // override in PhaseValues - { ShouldNotCallThis(); return NULL; } + { ShouldNotCallThis(); return nullptr; } // Fast int or long constant. Same as TypeInt::make(i) or TypeLong::make(l). ConINode* intcon(jint i); @@ -339,7 +339,7 @@ public: // Caller guarantees that old_type and new_type are no higher than limit_type. virtual const Type* saturate(const Type* new_type, const Type* old_type, const Type* limit_type) const - { ShouldNotCallThis(); return NULL; } + { ShouldNotCallThis(); return nullptr; } virtual const Type* saturate_and_maybe_push_to_igvn_worklist(const TypeNode* n, const Type* new_type) { return saturate(new_type, type_or_null(n), n->type()); } @@ -380,7 +380,7 @@ public: PhaseValues(Arena* arena, uint est_max_size); PhaseValues(PhaseValues* pt); NOT_PRODUCT(~PhaseValues();) - PhaseIterGVN* is_IterGVN() { return (_iterGVN) ? (PhaseIterGVN*)this : NULL; } + PhaseIterGVN* is_IterGVN() { return (_iterGVN) ? (PhaseIterGVN*)this : nullptr; } // Some Ideal and other transforms delete --> modify --> insert values bool hash_delete(Node* n) { return _table.hash_delete(n); } @@ -506,7 +506,7 @@ public: // transforms can be triggered on the region. // Optional 'orig' is an earlier version of this node. 
// It is significant only for debugging and profiling. - Node* register_new_node_with_optimizer(Node* n, Node* orig = NULL); + Node* register_new_node_with_optimizer(Node* n, Node* orig = nullptr); // Kill a globally dead Node. All uses are also globally dead and are // aggressively trimmed. diff --git a/src/hotspot/share/opto/phasetype.hpp b/src/hotspot/share/opto/phasetype.hpp index c056e2819f2..af2e584ce6a 100644 --- a/src/hotspot/share/opto/phasetype.hpp +++ b/src/hotspot/share/opto/phasetype.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -131,7 +131,7 @@ class PhaseNameIter { const char* operator*() const { return _token; } PhaseNameIter& operator++() { - _token = strtok_r(NULL, ",", &_saved_ptr); + _token = strtok_r(nullptr, ",", &_saved_ptr); return *this; } @@ -159,13 +159,13 @@ class PhaseNameValidator { public: PhaseNameValidator(ccstrlist option, uint64_t& mask) : _valid(true), _bad(nullptr) { - for (PhaseNameIter iter(option); *iter != NULL && _valid; ++iter) { + for (PhaseNameIter iter(option); *iter != nullptr && _valid; ++iter) { CompilerPhaseType cpt = find_phase(*iter); if (PHASE_NONE == cpt) { const size_t len = MIN2(strlen(*iter), 63) + 1; // cap len to a value we know is enough for all phase descriptions _bad = NEW_C_HEAP_ARRAY(char, len, mtCompiler); - // strncpy always writes len characters. If the source string is shorter, the function fills the remaining bytes with NULLs. + // strncpy always writes len characters. If the source string is shorter, the function fills the remaining bytes with nulls. 
strncpy(_bad, *iter, len); _valid = false; } else if (PHASE_ALL == cpt) { @@ -178,7 +178,7 @@ class PhaseNameValidator { } ~PhaseNameValidator() { - if (_bad != NULL) { + if (_bad != nullptr) { FREE_C_HEAP_ARRAY(char, _bad); } } diff --git a/src/hotspot/share/opto/postaloc.cpp b/src/hotspot/share/opto/postaloc.cpp index 579d4cdec4d..bc336e1e0a9 100644 --- a/src/hotspot/share/opto/postaloc.cpp +++ b/src/hotspot/share/opto/postaloc.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -65,7 +65,7 @@ bool PhaseChaitin::may_be_copy_of_callee( Node *def ) const { def = def->in(1); else break; - guarantee(def != NULL, "must not resurrect dead copy"); + guarantee(def != nullptr, "must not resurrect dead copy"); } // If we reached the end and didn't find a callee save proj // then this may be a callee save proj so we return true @@ -87,10 +87,10 @@ int PhaseChaitin::yank(Node *old, Block *current_block, Node_List *value, Node_L } _cfg.unmap_node_from_block(old); OptoReg::Name old_reg = lrgs(_lrg_map.live_range_id(old)).reg(); - assert(value != NULL || regnd == NULL, "sanity"); - if (value != NULL && regnd != NULL && regnd->at(old_reg) == old) { // Instruction is currently available? - value->map(old_reg, NULL); // Yank from value/regnd maps - regnd->map(old_reg, NULL); // This register's value is now unknown + assert(value != nullptr || regnd == nullptr, "sanity"); + if (value != nullptr && regnd != nullptr && regnd->at(old_reg) == old) { // Instruction is currently available? 
+ value->map(old_reg, nullptr); // Yank from value/regnd maps + regnd->map(old_reg, nullptr); // This register's value is now unknown } return blk_adjust; } @@ -147,8 +147,8 @@ int PhaseChaitin::yank_if_dead_recurse(Node *old, Node *orig_old, Block *current for (uint i = 1; i < old->req(); i++) { Node* n = old->in(i); - if (n != NULL) { - old->set_req(i, NULL); + if (n != nullptr) { + old->set_req(i, nullptr); blk_adjust += yank_if_dead_recurse(n, orig_old, current_block, value, regnd); } } @@ -218,7 +218,7 @@ Node *PhaseChaitin::skip_copies( Node *c ) { int idx = c->is_Copy(); uint is_oop = lrgs(_lrg_map.live_range_id(c))._is_oop; while (idx != 0) { - guarantee(c->in(idx) != NULL, "must not resurrect dead copy"); + guarantee(c->in(idx) != nullptr, "must not resurrect dead copy"); if (lrgs(_lrg_map.live_range_id(c->in(idx)))._is_oop != is_oop) { break; // casting copy, not the same value } @@ -241,7 +241,7 @@ int PhaseChaitin::elide_copy( Node *n, int k, Block *current_block, Node_List *v int idx; while( (idx=x->is_Copy()) != 0 ) { Node *copy = x->in(idx); - guarantee(copy != NULL, "must not resurrect dead copy"); + guarantee(copy != nullptr, "must not resurrect dead copy"); if(lrgs(_lrg_map.live_range_id(copy)).reg() != nk_reg) { break; } @@ -258,8 +258,8 @@ int PhaseChaitin::elide_copy( Node *n, int k, Block *current_block, Node_List *v return blk_adjust; // Only check stupid copies! } // Loop backedges won't have a value-mapping yet - assert(regnd != NULL || value == NULL, "sanity"); - if (value == NULL || regnd == NULL) { + assert(regnd != nullptr || value == nullptr, "sanity"); + if (value == nullptr || regnd == nullptr) { return blk_adjust; } @@ -291,7 +291,7 @@ int PhaseChaitin::elide_copy( Node *n, int k, Block *current_block, Node_List *v // register. // Also handle duplicate copies here. - const Type *t = val->is_Con() ? val->bottom_type() : NULL; + const Type *t = val->is_Con() ? 
val->bottom_type() : nullptr; // Scan all registers to see if this value is around already for( uint reg = 0; reg < (uint)_max_reg; reg++ ) { @@ -360,7 +360,7 @@ bool PhaseChaitin::eliminate_copy_of_constant(Node* val, Node* n, Node_List& value, Node_List& regnd, OptoReg::Name nreg, OptoReg::Name nreg2) { if (value[nreg] != val && val->is_Con() && - value[nreg] != NULL && value[nreg]->is_Con() && + value[nreg] != nullptr && value[nreg]->is_Con() && (nreg2 == OptoReg::Bad || value[nreg] == value[nreg2]) && value[nreg]->bottom_type() == val->bottom_type() && value[nreg]->as_Mach()->rule() == val->as_Mach()->rule()) { @@ -440,7 +440,7 @@ int PhaseChaitin::possibly_merge_multidef(Node *n, uint k, Block *block, RegToDe OptoReg::Name reg = lrgs(lrg).reg(); Node* def = reg2defuse.at(reg).def(); - if (def != NULL && lrg == _lrg_map.live_range_id(def) && def != n->in(k)) { + if (def != nullptr && lrg == _lrg_map.live_range_id(def) && def != n->in(k)) { // Same lrg but different node, we have to merge. MachMergeNode* merge; if (def->is_MachMerge()) { // is it already a merge? @@ -464,7 +464,7 @@ int PhaseChaitin::possibly_merge_multidef(Node *n, uint k, Block *block, RegToDe if (use == n) { break; } - use->replace_edge(def, merge, NULL); + use->replace_edge(def, merge, nullptr); } } if (merge->find_edge(n->in(k)) == -1) { @@ -484,10 +484,10 @@ int PhaseChaitin::possibly_merge_multidef(Node *n, uint k, Block *block, RegToDe //------------------------------post_allocate_copy_removal--------------------- // Post-Allocation peephole copy removal. We do this in 1 pass over the // basic blocks. We maintain a mapping of registers to Nodes (an array of -// Nodes indexed by machine register or stack slot number). NULL means that a +// Nodes indexed by machine register or stack slot number). null means that a // register is not mapped to any Node. We can (want to have!) have several // registers map to the same Node. 
We walk forward over the instructions -// updating the mapping as we go. At merge points we force a NULL if we have +// updating the mapping as we go. At merge points we force a null if we have // to merge 2 different Nodes into the same register. Phi functions will give // us a new Node if there is a proper value merging. Since the blocks are // arranged in some RPO, we will visit all parent blocks before visiting any @@ -535,7 +535,7 @@ void PhaseChaitin::post_allocate_copy_removal() { // of registers at the start. Check for this, while updating copies // along Phi input edges bool missing_some_inputs = false; - Block *freed = NULL; + Block *freed = nullptr; for (j = 1; j < block->num_preds(); j++) { Block* pb = _cfg.get_block_for_node(block->pred(j)); // Remove copies along phi edges @@ -585,7 +585,7 @@ void PhaseChaitin::post_allocate_copy_removal() { value.copy(*blk2value[freed->_pre_order]); regnd.copy(*blk2regnd[freed->_pre_order]); } - // Merge all inputs together, setting to NULL any conflicts. + // Merge all inputs together, setting to null any conflicts. for (j = 1; j < block->num_preds(); j++) { Block* pb = _cfg.get_block_for_node(block->pred(j)); if (pb == freed) { @@ -594,8 +594,8 @@ void PhaseChaitin::post_allocate_copy_removal() { Node_List &p_regnd = *blk2regnd[pb->_pre_order]; for (uint k = 0; k < (uint)_max_reg; k++) { if (regnd[k] != p_regnd[k]) { // Conflict on reaching defs? - value.map(k, NULL); // Then no value handy - regnd.map(k, NULL); + value.map(k, nullptr); // Then no value handy + regnd.map(k, nullptr); } } } @@ -609,7 +609,7 @@ void PhaseChaitin::post_allocate_copy_removal() { OptoReg::Name preg = lrgs(pidx).reg(); // Remove copies remaining on edges. Check for junk phi. 
- Node *u = NULL; + Node *u = nullptr; for (k = 1; k < phi->req(); k++) { Node *x = phi->in(k); if( phi != x && u != x ) // Found a different input @@ -662,7 +662,7 @@ void PhaseChaitin::post_allocate_copy_removal() { uint k; for (k = 1; k < n->req(); k++) { Node *def = n->in(k); // n->in(k) is a USE; def is the DEF for this USE - guarantee(def != NULL, "no disconnected nodes at this point"); + guarantee(def != nullptr, "no disconnected nodes at this point"); uint useidx = _lrg_map.live_range_id(def); // useidx is the live range index for this USE if( useidx ) { @@ -670,7 +670,7 @@ void PhaseChaitin::post_allocate_copy_removal() { if( !value[ureg] ) { int idx; // Skip occasional useless copy while( (idx=def->is_Copy()) != 0 && - def->in(idx) != NULL && // NULL should not happen + def->in(idx) != nullptr && // null should not happen ureg == lrgs(_lrg_map.live_range_id(def->in(idx))).reg()) def = def->in(idx); Node *valdef = skip_copies(def); // tighten up val through non-useless copies @@ -716,9 +716,9 @@ void PhaseChaitin::post_allocate_copy_removal() { // definition could in fact be a kill projection with a count of // 0 which is safe but since those are uninteresting for copy // elimination just delete them as well. - if (regnd[nreg] != NULL && regnd[nreg]->outcnt() == 0) { - regnd.map(nreg, NULL); - value.map(nreg, NULL); + if (regnd[nreg] != nullptr && regnd[nreg]->outcnt() == 0) { + regnd.map(nreg, nullptr); + value.map(nreg, nullptr); } uint n_ideal_reg = n->ideal_reg(); diff --git a/src/hotspot/share/opto/reg_split.cpp b/src/hotspot/share/opto/reg_split.cpp index e5469bfecca..27ba9838a89 100644 --- a/src/hotspot/share/opto/reg_split.cpp +++ b/src/hotspot/share/opto/reg_split.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -66,10 +66,10 @@ Node *PhaseChaitin::get_spillcopy_wide(MachSpillCopyNode::SpillType spill_type, def->_idx, def->Name(), use->_idx, use->Name(), ireg, MachSpillCopyNode::spill_type(spill_type)); C->record_method_not_compilable("attempted to spill a non-spillable item"); - return NULL; + return nullptr; } if (C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) { - return NULL; + return nullptr; } const RegMask *i_mask = &def->out_RegMask(); const RegMask *w_mask = C->matcher()->idealreg2spillmask[ireg]; @@ -164,7 +164,7 @@ uint PhaseChaitin::split_DEF( Node *def, Block *b, int loc, uint maxlrg, Node ** assert( loc >= 0, "must insert past block head" ); // Get a def-side SpillCopy - Node *spill = get_spillcopy_wide(MachSpillCopyNode::Definition, def, NULL, 0); + Node *spill = get_spillcopy_wide(MachSpillCopyNode::Definition, def, nullptr, 0); // Did we fail to split?, then bail if (!spill) { return 0; @@ -363,7 +363,7 @@ Node *PhaseChaitin::split_Rematerialize(Node *def, Block *b, uint insidx, uint & } Node *spill = clone_node(def, b, C); - if (spill == NULL || C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) { + if (spill == nullptr || C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) { // Check when generating nodes return 0; } @@ -561,7 +561,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { bool *UPblock = UP[bidx]; for( slidx = 0; slidx < spill_cnt; slidx++ ) { UPblock[slidx] = true; // Assume they start in registers - Reachblock[slidx] = NULL; // Assume that no def is present + Reachblock[slidx] = nullptr; // Assume that no def is present } } @@ -652,8 +652,8 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { // Move n2/u2 to n1/u1 for next iteration n1 = n2; u1 = u2; - // Preserve a non-NULL predecessor for later type referencing - if( (n3 == NULL) && (n2 != NULL) ){ + // Preserve a non-null predecessor for later type 
referencing + if( (n3 == nullptr) && (n2 != nullptr) ){ n3 = n2; u3 = u2; } @@ -663,8 +663,8 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { for( insidx = 1; insidx <= b->end_idx(); insidx++ ) { n1 = b->get_node(insidx); // bail if this is not a phi - phi = n1->is_Phi() ? n1->as_Phi() : NULL; - if( phi == NULL ) { + phi = n1->is_Phi() ? n1->as_Phi() : nullptr; + if( phi == nullptr ) { // Keep track of index of first non-PhiNode instruction in block non_phi = insidx; // break out of the for loop as we have handled all phi nodes @@ -687,7 +687,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { if( needs_phi ) { // create a new phi node and insert it into the block // type is taken from left over pointer to a predecessor - guarantee(n3, "No non-NULL reaching DEF for a Phi"); + guarantee(n3, "No non-null reaching DEF for a Phi"); phi = new PhiNode(b->head(), n3->bottom_type()); // initialize the Reaches entry for this LRG Reachblock[slidx] = phi; @@ -700,7 +700,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { assert(_lrg_map.find_id(phi) == lidx, "Bad update on Union-Find mapping"); } // end if not found correct phi // Here you have either found or created the Phi, so record it - assert(phi != NULL,"Must have a Phi Node here"); + assert(phi != nullptr,"Must have a Phi Node here"); phis->push(phi); // PhiNodes should either force the LRG UP or DOWN depending // on its inputs and the register pressure in the Phi's block. @@ -753,7 +753,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { // Memoize any DOWN reaching definitions for use as DEBUG info for( insidx = 0; insidx < spill_cnt; insidx++ ) { - debug_defs[insidx] = (UPblock[insidx]) ? NULL : Reachblock[insidx]; + debug_defs[insidx] = (UPblock[insidx]) ? 
nullptr : Reachblock[insidx]; if( UPblock[insidx] ) // Memoize UP decision at block start UP_entry[insidx]->set( b->_pre_order ); } @@ -774,13 +774,13 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { // ranges; they are busy getting modified in this pass. if( lrgs(defidx).reg() < LRG::SPILL_REG ) { uint i; - Node *u = NULL; + Node *u = nullptr; // Look for the Phi merging 2 unique inputs for( i = 1; i < cnt; i++ ) { // Ignore repeats and self if( n->in(i) != u && n->in(i) != n ) { // Found a unique input - if( u != NULL ) // If it's the 2nd, bail out + if( u != nullptr ) // If it's the 2nd, bail out break; u = n->in(i); // Else record it } @@ -816,7 +816,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { // Check for need to split at HRP boundary - split if UP n1 = Reachblock[slidx]; // bail out if no reaching DEF - if( n1 == NULL ) continue; + if( n1 == nullptr ) continue; // bail out if live range is 'isolated' around inner loop uint lidx = lidxs.at(slidx); // If live range is currently UP @@ -826,7 +826,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { if( is_high_pressure( b, &lrgs(lidx), insidx ) && !n1->rematerialize() ) { // If there is already a valid stack definition available, use it - if( debug_defs[slidx] != NULL ) { + if( debug_defs[slidx] != nullptr ) { Reachblock[slidx] = debug_defs[slidx]; } else { @@ -861,7 +861,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { if (!maxlrg) { return 0; } - // Spill of NULL check mem op goes into the following block. + // Spill of null check mem op goes into the following block. 
if (b->end_idx() > orig_eidx) { insidx++; } @@ -891,7 +891,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { // Remove coalesced copy from CFG if (copyidx && defidx == _lrg_map.live_range_id(n->in(copyidx))) { n->replace_by( n->in(copyidx) ); - n->set_req( copyidx, NULL ); + n->set_req( copyidx, nullptr ); b->remove_node(insidx--); b->_ihrp_index--; // Adjust the point where we go hi-pressure b->_fhrp_index--; @@ -925,7 +925,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { // Check for valid reaching DEF slidx = lrg2reach[useidx]; Node *def = Reachblock[slidx]; - assert( def != NULL, "Using Undefined Value in Split()\n"); + assert( def != nullptr, "Using Undefined Value in Split()\n"); // (+++) %%%% remove this in favor of pre-pass in matcher.cpp // monitor references do not care where they live, so just hook @@ -934,7 +934,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { // so that the allocator does not see it anymore, and therefore // does not attempt to assign it a register. def = clone_node(def, b, C); - if (def == NULL || C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) { + if (def == nullptr || C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) { return 0; } _lrg_map.extend(def->_idx, 0); @@ -952,7 +952,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { insidx += b->number_of_nodes()-old_size; } - MachNode *mach = n->is_Mach() ? n->as_Mach() : NULL; + MachNode *mach = n->is_Mach() ? n->as_Mach() : nullptr; // Base pointers and oopmap references do not care where they live. if ((inpidx >= oopoff) || (mach && mach->ideal_Opcode() == Op_AddP && inpidx == AddPNode::Base)) { @@ -981,7 +981,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { Node *derived_debug = debug_defs[slidx]; if( ((inpidx - oopoff) & 1) == DERIVED && // derived vs base? 
mach && mach->ideal_Opcode() != Op_Halt && - derived_debug != NULL && + derived_debug != nullptr && derived_debug != def ) { // Actual 2nd value appears // We have already set 'def' as a derived value. // Also set debug_defs[slidx] as a derived value. @@ -1009,7 +1009,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { if( jvms && b->_freq > BLOCK_FREQUENCY(0.5) ) { uint debug_start = jvms->debug_start(); // If this is debug info use & there is a reaching DOWN def - if ((debug_start <= inpidx) && (debug_defs[slidx] != NULL)) { + if ((debug_start <= inpidx) && (debug_defs[slidx] != nullptr)) { assert(inpidx < oopoff, "handle only debug info here"); // Just hook it in & move on n->set_req(inpidx, debug_defs[slidx]); @@ -1230,7 +1230,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { // UP should come from the outRegmask() of the DEF UPblock[slidx] = defup; // Update debug list of reaching down definitions, kill if DEF is UP - debug_defs[slidx] = defup ? NULL : n; + debug_defs[slidx] = defup ? nullptr : n; #ifndef PRODUCT // DEBUG if( trace_spilling() ) { @@ -1294,9 +1294,9 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { } } #endif - Reachblock[slidx] = NULL; + Reachblock[slidx] = nullptr; } else { - assert(Reachblock[slidx] != NULL,"No reaching definition for liveout value"); + assert(Reachblock[slidx] != nullptr,"No reaching definition for liveout value"); } } #ifndef PRODUCT diff --git a/src/hotspot/share/opto/replacednodes.cpp b/src/hotspot/share/opto/replacednodes.cpp index 83d8abc8362..78c1703799e 100644 --- a/src/hotspot/share/opto/replacednodes.cpp +++ b/src/hotspot/share/opto/replacednodes.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -29,13 +29,13 @@ #include "opto/replacednodes.hpp" void ReplacedNodes::allocate_if_necessary() { - if (_replaced_nodes == NULL) { + if (_replaced_nodes == nullptr) { _replaced_nodes = new GrowableArray(); } } bool ReplacedNodes::is_empty() const { - return _replaced_nodes == NULL || _replaced_nodes->length() == 0; + return _replaced_nodes == nullptr || _replaced_nodes->length() == 0; } bool ReplacedNodes::has_node(const ReplacedNode& r) const { @@ -78,7 +78,7 @@ void ReplacedNodes::transfer_from(const ReplacedNodes& other, uint idx) { } void ReplacedNodes::clone() { - if (_replaced_nodes != NULL) { + if (_replaced_nodes != nullptr) { GrowableArray* replaced_nodes_clone = new GrowableArray(); replaced_nodes_clone->appendAll(_replaced_nodes); _replaced_nodes = replaced_nodes_clone; @@ -86,7 +86,7 @@ void ReplacedNodes::clone() { } void ReplacedNodes::reset() { - if (_replaced_nodes != NULL) { + if (_replaced_nodes != nullptr) { _replaced_nodes->clear(); } } @@ -130,7 +130,7 @@ void ReplacedNodes::apply(Compile* C, Node* ctl) { ReplacedNode replaced = _replaced_nodes->at(i); Node* initial = replaced.initial(); Node* improved = replaced.improved(); - assert (ctl != NULL && !ctl->is_top(), "replaced node should have actual control"); + assert (ctl != nullptr && !ctl->is_top(), "replaced node should have actual control"); ResourceMark rm; Unique_Node_List work; @@ -150,7 +150,7 @@ void ReplacedNodes::apply(Compile* C, Node* ctl) { if (use->outcnt() == 0) { continue; } - if (n->is_CFG() || (n->in(0) != NULL && !n->in(0)->is_top())) { + if (n->is_CFG() || (n->in(0) != nullptr && !n->in(0)->is_top())) { // Skip projections, since some of the multi nodes aren't CFG (e.g., LoadStore and SCMemProj). 
if (n->is_Proj()) { n = n->in(0); @@ -164,7 +164,7 @@ void ReplacedNodes::apply(Compile* C, Node* ctl) { n = IfNode::up_one_dom(n); depth++; // limit search depth - if (depth >= 100 || n == NULL) { + if (depth >= 100 || n == nullptr) { replace = false; break; } diff --git a/src/hotspot/share/opto/replacednodes.hpp b/src/hotspot/share/opto/replacednodes.hpp index ebd3363fc58..c569e55ce5f 100644 --- a/src/hotspot/share/opto/replacednodes.hpp +++ b/src/hotspot/share/opto/replacednodes.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -48,7 +48,7 @@ class ReplacedNodes { Node* _initial; Node* _improved; public: - ReplacedNode() : _initial(NULL), _improved(NULL) {} + ReplacedNode() : _initial(nullptr), _improved(nullptr) {} ReplacedNode(Node* initial, Node* improved) : _initial(initial), _improved(improved) {} Node* initial() const { return _initial; } Node* improved() const { return _improved; } @@ -65,7 +65,7 @@ class ReplacedNodes { public: ReplacedNodes() - : _replaced_nodes(NULL) {} + : _replaced_nodes(nullptr) {} void clone(); void record(Node* initial, Node* improved); diff --git a/src/hotspot/share/opto/rootnode.cpp b/src/hotspot/share/opto/rootnode.cpp index ba74215eb61..87bb0539678 100644 --- a/src/hotspot/share/opto/rootnode.cpp +++ b/src/hotspot/share/opto/rootnode.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -58,7 +58,7 @@ Node *RootNode::Ideal(PhaseGVN *phase, bool can_reshape) { // If we want to get the rest of the win later, we should pattern match // simple recursive call trees to closed-form solutions. - return modified ? this : NULL; + return modified ? this : nullptr; } //============================================================================= @@ -78,7 +78,7 @@ uint HaltNode::size_of() const { return sizeof(*this); } //------------------------------Ideal------------------------------------------ Node *HaltNode::Ideal(PhaseGVN *phase, bool can_reshape) { - return remove_dead_region(phase, can_reshape) ? this : NULL; + return remove_dead_region(phase, can_reshape) ? this : nullptr; } //------------------------------Value------------------------------------------ diff --git a/src/hotspot/share/opto/runtime.cpp b/src/hotspot/share/opto/runtime.cpp index aed280fd4e5..e60dc00ea9d 100644 --- a/src/hotspot/share/opto/runtime.cpp +++ b/src/hotspot/share/opto/runtime.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -93,22 +93,22 @@ // Compiled code entry points -address OptoRuntime::_new_instance_Java = NULL; -address OptoRuntime::_new_array_Java = NULL; -address OptoRuntime::_new_array_nozero_Java = NULL; -address OptoRuntime::_multianewarray2_Java = NULL; -address OptoRuntime::_multianewarray3_Java = NULL; -address OptoRuntime::_multianewarray4_Java = NULL; -address OptoRuntime::_multianewarray5_Java = NULL; -address OptoRuntime::_multianewarrayN_Java = NULL; -address OptoRuntime::_vtable_must_compile_Java = NULL; -address OptoRuntime::_complete_monitor_locking_Java = NULL; -address OptoRuntime::_monitor_notify_Java = NULL; -address OptoRuntime::_monitor_notifyAll_Java = NULL; -address OptoRuntime::_rethrow_Java = NULL; +address OptoRuntime::_new_instance_Java = nullptr; +address OptoRuntime::_new_array_Java = nullptr; +address OptoRuntime::_new_array_nozero_Java = nullptr; +address OptoRuntime::_multianewarray2_Java = nullptr; +address OptoRuntime::_multianewarray3_Java = nullptr; +address OptoRuntime::_multianewarray4_Java = nullptr; +address OptoRuntime::_multianewarray5_Java = nullptr; +address OptoRuntime::_multianewarrayN_Java = nullptr; +address OptoRuntime::_vtable_must_compile_Java = nullptr; +address OptoRuntime::_complete_monitor_locking_Java = nullptr; +address OptoRuntime::_monitor_notify_Java = nullptr; +address OptoRuntime::_monitor_notifyAll_Java = nullptr; +address OptoRuntime::_rethrow_Java = nullptr; -address OptoRuntime::_slow_arraycopy_Java = NULL; -address OptoRuntime::_register_finalizer_Java = NULL; +address OptoRuntime::_slow_arraycopy_Java = nullptr; +address OptoRuntime::_register_finalizer_Java = nullptr; ExceptionBlob* OptoRuntime::_exception_blob; @@ -130,7 +130,7 @@ static bool check_compiled_frame(JavaThread* thread) { #define gen(env, var, type_func_gen, c_func, fancy_jump, pass_tls, return_pc) \ var = generate_stub(env, type_func_gen, CAST_FROM_FN_PTR(address, 
c_func), #var, fancy_jump, pass_tls, return_pc); \ - if (var == NULL) { return false; } + if (var == nullptr) { return false; } bool OptoRuntime::generate(ciEnv* env) { @@ -181,7 +181,7 @@ const char* OptoRuntime::stub_name(address entry) { #ifndef PRODUCT CodeBlob* cb = CodeCache::find_blob(entry); RuntimeStub* rs =(RuntimeStub *)cb; - assert(rs != NULL && rs->is_runtime_stub(), "not a runtime stub"); + assert(rs != nullptr && rs->is_runtime_stub(), "not a runtime stub"); return rs->name(); #else // Fast implementation for product mode (maybe it should be inlined too) @@ -303,7 +303,7 @@ JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_nozero_C(Klass* array_type, int len SharedRuntime::on_slowpath_allocation_exit(current); oop result = current->vm_result(); - if ((len > 0) && (result != NULL) && + if ((len > 0) && (result != nullptr) && is_deoptimized_caller_frame(current)) { // Zero array here if the caller is deoptimized. const size_t size = TypeArrayKlass::cast(array_type)->oop_size(result); @@ -609,12 +609,12 @@ const TypeFunc *OptoRuntime::monitor_notify_Type() { const TypeFunc* OptoRuntime::flush_windows_Type() { // create input type (domain) const Type** fields = TypeTuple::fields(1); - fields[TypeFunc::Parms+0] = NULL; // void + fields[TypeFunc::Parms+0] = nullptr; // void const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms, fields); // create result type fields = TypeTuple::fields(1); - fields[TypeFunc::Parms+0] = NULL; // void + fields[TypeFunc::Parms+0] = nullptr; // void const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields); return TypeFunc::make(domain, range); @@ -781,7 +781,7 @@ static const TypeFunc* make_arraycopy_Type(ArrayCopyType act) { int retcnt = (act == ac_checkcast || act == ac_generic ? 
1 : 0); fields = TypeTuple::fields(1); if (retcnt == 0) - fields[TypeFunc::Parms+0] = NULL; // void + fields[TypeFunc::Parms+0] = nullptr; // void else fields[TypeFunc::Parms+0] = TypeInt::INT; // status result, if needed const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+retcnt, fields); @@ -823,7 +823,7 @@ const TypeFunc* OptoRuntime::array_fill_Type() { // create result type fields = TypeTuple::fields(1); - fields[TypeFunc::Parms+0] = NULL; // void + fields[TypeFunc::Parms+0] = nullptr; // void const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields); return TypeFunc::make(domain, range); @@ -844,7 +844,7 @@ const TypeFunc* OptoRuntime::aescrypt_block_Type() { // no result type needed fields = TypeTuple::fields(1); - fields[TypeFunc::Parms+0] = NULL; // void + fields[TypeFunc::Parms+0] = nullptr; // void const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); return TypeFunc::make(domain, range); } @@ -1024,7 +1024,7 @@ const TypeFunc* OptoRuntime::digestBase_implCompress_Type(bool is_sha3) { // no result type needed fields = TypeTuple::fields(1); - fields[TypeFunc::Parms+0] = NULL; // void + fields[TypeFunc::Parms+0] = nullptr; // void const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); return TypeFunc::make(domain, range); } @@ -1070,7 +1070,7 @@ const TypeFunc* OptoRuntime::multiplyToLen_Type() { // no result type needed fields = TypeTuple::fields(1); - fields[TypeFunc::Parms+0] = NULL; + fields[TypeFunc::Parms+0] = nullptr; const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); return TypeFunc::make(domain, range); } @@ -1090,7 +1090,7 @@ const TypeFunc* OptoRuntime::squareToLen_Type() { // no result type needed fields = TypeTuple::fields(1); - fields[TypeFunc::Parms+0] = NULL; + fields[TypeFunc::Parms+0] = nullptr; const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); return TypeFunc::make(domain, range); } @@ -1178,7 +1178,7 @@ const TypeFunc * OptoRuntime::bigIntegerShift_Type() { // no 
result type needed fields = TypeTuple::fields(1); - fields[TypeFunc::Parms + 0] = NULL; + fields[TypeFunc::Parms + 0] = nullptr; const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); return TypeFunc::make(domain, range); } @@ -1218,7 +1218,7 @@ const TypeFunc* OptoRuntime::ghash_processBlocks_Type() { // result type needed fields = TypeTuple::fields(1); - fields[TypeFunc::Parms+0] = NULL; // void + fields[TypeFunc::Parms+0] = nullptr; // void const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); return TypeFunc::make(domain, range); } @@ -1259,7 +1259,7 @@ const TypeFunc* OptoRuntime::base64_encodeBlock_Type() { // result type needed fields = TypeTuple::fields(1); - fields[TypeFunc::Parms + 0] = NULL; // void + fields[TypeFunc::Parms + 0] = nullptr; // void const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); return TypeFunc::make(domain, range); } @@ -1301,7 +1301,7 @@ const TypeFunc* OptoRuntime::poly1305_processBlocks_Type() { // result type needed fields = TypeTuple::fields(1); - fields[TypeFunc::Parms + 0] = NULL; // void + fields[TypeFunc::Parms + 0] = nullptr; // void const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); return TypeFunc::make(domain, range); } @@ -1316,7 +1316,7 @@ const TypeFunc* OptoRuntime::osr_end_Type() { // create result type fields = TypeTuple::fields(1); // fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // locked oop - fields[TypeFunc::Parms+0] = NULL; // void + fields[TypeFunc::Parms+0] = nullptr; // void const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields); return TypeFunc::make(domain, range); } @@ -1355,8 +1355,8 @@ JRT_ENTRY_NO_ASYNC(address, OptoRuntime::handle_exception_C_helper(JavaThread* c // is only used to pass arguments into the method. Not for general // exception handling. DO NOT CHANGE IT to use pending_exception, since // the runtime stubs checks this on exit. 
- assert(current->exception_oop() != NULL, "exception oop is found"); - address handler_address = NULL; + assert(current->exception_oop() != nullptr, "exception oop is found"); + address handler_address = nullptr; Handle exception(current, current->exception_oop()); address pc = current->exception_pc(); @@ -1389,7 +1389,7 @@ JRT_ENTRY_NO_ASYNC(address, OptoRuntime::handle_exception_C_helper(JavaThread* c // using rethrow node nm = CodeCache::find_nmethod(pc); - assert(nm != NULL, "No NMethod found"); + assert(nm != nullptr, "No NMethod found"); if (nm->is_native_method()) { fatal("Native method should not have path to exception handling"); } else { @@ -1430,12 +1430,12 @@ JRT_ENTRY_NO_ASYNC(address, OptoRuntime::handle_exception_C_helper(JavaThread* c } else { handler_address = - force_unwind ? NULL : nm->handler_for_exception_and_pc(exception, pc); + force_unwind ? nullptr : nm->handler_for_exception_and_pc(exception, pc); - if (handler_address == NULL) { + if (handler_address == nullptr) { bool recursive_exception = false; handler_address = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, force_unwind, true, recursive_exception); - assert (handler_address != NULL, "must have compiled handler"); + assert (handler_address != nullptr, "must have compiled handler"); // Update the exception cache only when the unwind was not forced // and there didn't happen another exception during the computation of the // compiled exception handler. 
Checking for exception oop equality is not @@ -1481,8 +1481,8 @@ address OptoRuntime::handle_exception_C(JavaThread* current) { SharedRuntime::_find_handler_ctr++; // find exception handler #endif debug_only(NoHandleMark __hm;) - nmethod* nm = NULL; - address handler_address = NULL; + nmethod* nm = nullptr; + address handler_address = nullptr; { // Enter the VM @@ -1495,7 +1495,7 @@ address OptoRuntime::handle_exception_C(JavaThread* current) { // Now check to see if the handler we are returning is in a now // deoptimized frame - if (nm != NULL) { + if (nm != nullptr) { RegisterMap map(current, RegisterMap::UpdateMap::skip, RegisterMap::ProcessFrames::skip, @@ -1547,7 +1547,7 @@ address OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address r #ifndef PRODUCT SharedRuntime::_rethrow_ctr++; // count rethrows #endif - assert (exception != NULL, "should have thrown a NULLPointerException"); + assert (exception != nullptr, "should have thrown a NullPointerException"); #ifdef ASSERT if (!(exception->is_a(vmClasses::Throwable_klass()))) { // should throw an exception here @@ -1686,7 +1686,7 @@ JRT_END //----------------------------------------------------------------------------- -NamedCounter * volatile OptoRuntime::_named_counters = NULL; +NamedCounter * volatile OptoRuntime::_named_counters = nullptr; // // dump the collected NamedCounters. @@ -1742,7 +1742,7 @@ NamedCounter* OptoRuntime::new_named_counter(JVMState* youngest_jvms, NamedCount stringStream st; for (int depth = max_depth; depth >= 1; depth--) { JVMState* jvms = youngest_jvms->of_depth(depth); - ciMethod* m = jvms->has_method() ? jvms->method() : NULL; + ciMethod* m = jvms->has_method() ? 
jvms->method() : nullptr; if (!first) { st.print(" "); } else { @@ -1750,7 +1750,7 @@ NamedCounter* OptoRuntime::new_named_counter(JVMState* youngest_jvms, NamedCount } int bci = jvms->bci(); if (bci < 0) bci = 0; - if (m != NULL) { + if (m != nullptr) { st.print("%s.%s", m->holder()->name()->as_utf8(), m->name()->as_utf8()); } else { st.print("no method"); @@ -1769,7 +1769,7 @@ NamedCounter* OptoRuntime::new_named_counter(JVMState* youngest_jvms, NamedCount // add counters so this is safe. NamedCounter* head; do { - c->set_next(NULL); + c->set_next(nullptr); head = _named_counters; c->set_next(head); } while (Atomic::cmpxchg(&_named_counters, head, c) != head); diff --git a/src/hotspot/share/opto/runtime.hpp b/src/hotspot/share/opto/runtime.hpp index 6ddf50a7b6d..53bb3737902 100644 --- a/src/hotspot/share/opto/runtime.hpp +++ b/src/hotspot/share/opto/runtime.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -73,13 +73,13 @@ private: public: NamedCounter(const char *n, CounterTag tag = NoTag): - _name(n == NULL ? NULL : os::strdup(n)), + _name(n == nullptr ? 
nullptr : os::strdup(n)), _count(0), _tag(tag), - _next(NULL) {} + _next(nullptr) {} ~NamedCounter() { - if (_name != NULL) { + if (_name != nullptr) { os::free((void*)_name); } } @@ -92,7 +92,7 @@ private: NamedCounter* next() const { return _next; } void set_next(NamedCounter* next) { - assert(_next == NULL || next == NULL, "already set"); + assert(_next == nullptr || next == nullptr, "already set"); _next = next; } diff --git a/src/hotspot/share/opto/split_if.cpp b/src/hotspot/share/opto/split_if.cpp index 9bfb7c0c77b..d1b5608ce4f 100644 --- a/src/hotspot/share/opto/split_if.cpp +++ b/src/hotspot/share/opto/split_if.cpp @@ -102,7 +102,7 @@ bool PhaseIdealLoop::split_up( Node *n, Node *blk1, Node *blk2 ) { Node* m = wq.at(i); if (m->is_If()) { assert(skeleton_predicate_has_opaque(m->as_If()), "opaque node not reachable from if?"); - Node* bol = clone_skeleton_predicate_bool(m, NULL, NULL, m->in(0)); + Node* bol = clone_skeleton_predicate_bool(m, nullptr, nullptr, m->in(0)); _igvn.replace_input_of(m, 1, bol); } else { assert(!m->is_CFG(), "not CFG expected"); @@ -164,7 +164,7 @@ bool PhaseIdealLoop::split_up( Node *n, Node *blk1, Node *blk2 ) { // ConvI2L may have type information on it which becomes invalid if // it moves up in the graph so change any clones so widen the type // to TypeLong::INT when pushing it up. - const Type* rtype = NULL; + const Type* rtype = nullptr; if (n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::INT) { rtype = TypeLong::INT; } @@ -174,7 +174,7 @@ bool PhaseIdealLoop::split_up( Node *n, Node *blk1, Node *blk2 ) { for( uint j = 1; j < blk1->req(); j++ ) { Node *x = n->clone(); // Widen the type of the ConvI2L when pushing up. 
- if (rtype != NULL) x->as_Type()->set_type(rtype); + if (rtype != nullptr) x->as_Type()->set_type(rtype); if( n->in(0) && n->in(0) == blk1 ) x->set_req( 0, blk1->in(j) ); for( uint i = 1; i < n->req(); i++ ) { @@ -550,7 +550,7 @@ Node *PhaseIdealLoop::find_use_block( Node *use, Node *def, Node *old_false, Nod set_ctrl(use, new_true); } - if (use_blk == NULL) { // He's dead, Jim + if (use_blk == nullptr) { // He's dead, Jim _igvn.replace_node(use, C->top()); } @@ -628,7 +628,7 @@ void PhaseIdealLoop::do_split_if(Node* iff, RegionNode** new_false_region, Regio for (j = n->outs(); n->has_out(j); j++) { Node* m = n->out(j); // If m is dead, throw it away, and declare progress - if (_nodes[m->_idx] == NULL) { + if (_nodes[m->_idx] == nullptr) { _igvn.remove_dead_node(m); // fall through } @@ -652,9 +652,9 @@ void PhaseIdealLoop::do_split_if(Node* iff, RegionNode** new_false_region, Regio // Replace both uses of 'new_iff' with Regions merging True/False // paths. This makes 'new_iff' go dead. - Node *old_false = NULL, *old_true = NULL; - RegionNode* new_false = NULL; - RegionNode* new_true = NULL; + Node *old_false = nullptr, *old_true = nullptr; + RegionNode* new_false = nullptr; + RegionNode* new_true = nullptr; for (DUIterator_Last j2min, j2 = iff->last_outs(j2min); j2 >= j2min; --j2) { Node *ifp = iff->last_out(j2); assert( ifp->Opcode() == Op_IfFalse || ifp->Opcode() == Op_IfTrue, "" ); @@ -690,7 +690,7 @@ void PhaseIdealLoop::do_split_if(Node* iff, RegionNode** new_false_region, Regio // Lazy replace IDOM info with the region's dominator lazy_replace(iff, region_dom); lazy_update(region, region_dom); // idom must be update before handle_uses - region->set_req(0, NULL); // Break the self-cycle. Required for lazy_update to work on region + region->set_req(0, nullptr); // Break the self-cycle. Required for lazy_update to work on region // Now make the original merge point go dead, by handling all its uses. 
small_cache region_cache; @@ -735,10 +735,10 @@ void PhaseIdealLoop::do_split_if(Node* iff, RegionNode** new_false_region, Regio _igvn.remove_dead_node(region); - if (new_false_region != NULL) { + if (new_false_region != nullptr) { *new_false_region = new_false; } - if (new_true_region != NULL) { + if (new_true_region != nullptr) { *new_true_region = new_true; } diff --git a/src/hotspot/share/opto/stringopts.cpp b/src/hotspot/share/opto/stringopts.cpp index 6285d0bb82e..399f18ce9aa 100644 --- a/src/hotspot/share/opto/stringopts.cpp +++ b/src/hotspot/share/opto/stringopts.cpp @@ -66,7 +66,7 @@ class StringConcat : public ResourceObj { StringConcat(PhaseStringOpts* stringopts, CallStaticJavaNode* end): _stringopts(stringopts), - _begin(NULL), + _begin(nullptr), _end(end), _multiple(false) { _arguments = new Node(1); @@ -115,7 +115,7 @@ class StringConcat : public ResourceObj { if (call->is_CallStaticJava()) { CallStaticJavaNode* csj = call->as_CallStaticJava(); ciMethod* m = csj->method(); - if (m != NULL && + if (m != nullptr && (m->intrinsic_id() == vmIntrinsics::_StringBuilder_toString || m->intrinsic_id() == vmIntrinsics::_StringBuffer_toString)) { return true; @@ -127,7 +127,7 @@ class StringConcat : public ResourceObj { static Node* skip_string_null_check(Node* value) { // Look for a diamond shaped Null check of toString() result // (could be code from String.valueOf()): - // (Proj == NULL) ? "null":"CastPP(Proj)#NotNULL + // (Proj == nullptr) ? 
"null":"CastPP(Proj)#Notnull if (value->is_Phi()) { int true_path = value->as_Phi()->is_diamond_phi(); if (true_path != 0) { @@ -187,10 +187,10 @@ class StringConcat : public ResourceObj { void maybe_log_transform() { CompileLog* log = _stringopts->C->log(); - if (log != NULL) { + if (log != nullptr) { log->head("replace_string_concat arguments='%d' multiple='%d'", num_arguments(), _multiple); JVMState* p = _begin->jvms(); - while (p != NULL) { + while (p != nullptr) { log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method())); p = p->caller(); } @@ -205,7 +205,7 @@ class StringConcat : public ResourceObj { // Build a new call using the jvms state of the allocate address call_addr = SharedRuntime::uncommon_trap_blob()->entry_point(); const TypeFunc* call_type = OptoRuntime::uncommon_trap_Type(); - const TypePtr* no_memory_effects = NULL; + const TypePtr* no_memory_effects = nullptr; Compile* C = _stringopts->C; CallStaticJavaNode* call = new CallStaticJavaNode(call_type, call_addr, "uncommon_trap", no_memory_effects); @@ -317,22 +317,22 @@ void StringConcat::eliminate_call(CallNode* call) { Compile* C = _stringopts->C; CallProjections projs; call->extract_projections(&projs, false); - if (projs.fallthrough_catchproj != NULL) { + if (projs.fallthrough_catchproj != nullptr) { C->gvn_replace_by(projs.fallthrough_catchproj, call->in(TypeFunc::Control)); } - if (projs.fallthrough_memproj != NULL) { + if (projs.fallthrough_memproj != nullptr) { C->gvn_replace_by(projs.fallthrough_memproj, call->in(TypeFunc::Memory)); } - if (projs.catchall_memproj != NULL) { + if (projs.catchall_memproj != nullptr) { C->gvn_replace_by(projs.catchall_memproj, C->top()); } - if (projs.fallthrough_ioproj != NULL) { + if (projs.fallthrough_ioproj != nullptr) { C->gvn_replace_by(projs.fallthrough_ioproj, call->in(TypeFunc::I_O)); } - if (projs.catchall_ioproj != NULL) { + if (projs.catchall_ioproj != nullptr) { C->gvn_replace_by(projs.catchall_ioproj, C->top()); } - if 
(projs.catchall_catchproj != NULL) { + if (projs.catchall_catchproj != nullptr) { // EA can't cope with the partially collapsed graph this // creates so put it on the worklist to be collapsed later. for (SimpleDUIterator i(projs.catchall_catchproj); i.has_next(); i.next()) { @@ -344,7 +344,7 @@ void StringConcat::eliminate_call(CallNode* call) { } C->gvn_replace_by(projs.catchall_catchproj, C->top()); } - if (projs.resproj != NULL) { + if (projs.resproj != nullptr) { C->gvn_replace_by(projs.resproj, C->top()); } C->gvn_replace_by(call, C->top()); @@ -357,11 +357,11 @@ void StringConcat::eliminate_initialize(InitializeNode* init) { assert(init->outcnt() <= 2, "only a control and memory projection expected"); assert(init->req() <= InitializeNode::RawStores, "no pending inits"); Node *ctrl_proj = init->proj_out_or_null(TypeFunc::Control); - if (ctrl_proj != NULL) { + if (ctrl_proj != nullptr) { C->gvn_replace_by(ctrl_proj, init->in(TypeFunc::Control)); } Node *mem_proj = init->proj_out_or_null(TypeFunc::Memory); - if (mem_proj != NULL) { + if (mem_proj != nullptr) { Node *mem = init->in(TypeFunc::Memory); C->gvn_replace_by(mem_proj, mem); } @@ -378,7 +378,7 @@ Node_List PhaseStringOpts::collect_toString_calls() { // Prime the worklist for (uint i = 1; i < C->root()->len(); i++) { Node* n = C->root()->in(i); - if (n != NULL && !_visited.test_set(n->_idx)) { + if (n != nullptr && !_visited.test_set(n->_idx)) { worklist.push(n); } } @@ -391,12 +391,12 @@ Node_List PhaseStringOpts::collect_toString_calls() { string_calls.push(csj); encountered++; } - if (ctrl->in(0) != NULL && !_visited.test_set(ctrl->in(0)->_idx)) { + if (ctrl->in(0) != nullptr && !_visited.test_set(ctrl->in(0)->_idx)) { worklist.push(ctrl->in(0)); } if (ctrl->is_Region()) { for (uint i = 1; i < ctrl->len(); i++) { - if (ctrl->in(i) != NULL && !_visited.test_set(ctrl->in(i)->_idx)) { + if (ctrl->in(i) != nullptr && !_visited.test_set(ctrl->in(i)->_idx)) { worklist.push(ctrl->in(i)); } } @@ -444,7 +444,7 
@@ StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) { int_sig = ciSymbols::int_StringBuffer_signature(); char_sig = ciSymbols::char_StringBuffer_signature(); } else { - return NULL; + return nullptr; } #ifndef PRODUCT if (PrintOptimizeStringConcat) { @@ -454,7 +454,7 @@ StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) { #endif StringConcat* sc = new StringConcat(this, call); - AllocateNode* alloc = NULL; + AllocateNode* alloc = nullptr; // possible opportunity for StringBuilder fusion CallStaticJavaNode* cnode = call; @@ -464,14 +464,14 @@ StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) { recv = recv->in(0); } cnode = recv->isa_CallStaticJava(); - if (cnode == NULL) { + if (cnode == nullptr) { alloc = recv->isa_Allocate(); - if (alloc == NULL) { + if (alloc == nullptr) { break; } // Find the constructor call Node* result = alloc->result_cast(); - if (result == NULL || !result->is_CheckCastPP() || alloc->in(TypeFunc::Memory)->is_top()) { + if (result == nullptr || !result->is_CheckCastPP() || alloc->in(TypeFunc::Memory)->is_top()) { // strange looking allocation #ifndef PRODUCT if (PrintOptimizeStringConcat) { @@ -481,11 +481,11 @@ StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) { #endif break; } - Node* constructor = NULL; + Node* constructor = nullptr; for (SimpleDUIterator i(result); i.has_next(); i.next()) { CallStaticJavaNode *use = i.get()->isa_CallStaticJava(); - if (use != NULL && - use->method() != NULL && + if (use != nullptr && + use->method() != nullptr && !use->method()->is_static() && use->method()->name() == ciSymbols::object_initializer_name() && use->method()->holder() == m->holder()) { @@ -496,7 +496,7 @@ StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) { sig == ciSymbols::string_void_signature()) { if (sig == ciSymbols::string_void_signature()) { // StringBuilder(String) so pick this up as the first argument - 
assert(use->in(TypeFunc::Parms + 1) != NULL, "what?"); + assert(use->in(TypeFunc::Parms + 1) != nullptr, "what?"); const Type* type = _gvn->type(use->in(TypeFunc::Parms + 1)); if (type == TypePtr::NULL_PTR) { // StringBuilder(null) throws exception. @@ -507,14 +507,14 @@ StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) { tty->cr(); } #endif - return NULL; + return nullptr; } // StringBuilder(str) argument needs null check. sc->push_string_null_check(use->in(TypeFunc::Parms + 1)); } else if (sig == ciSymbols::int_void_signature()) { // StringBuilder(int) case. Node* parm = use->in(TypeFunc::Parms + 1); - assert(parm != NULL, "must exist"); + assert(parm != nullptr, "must exist"); const TypeInt* type = _gvn->type(parm)->is_int(); if (type->_hi < 0) { // Initial capacity argument is always negative in which case StringBuilder(int) throws @@ -527,7 +527,7 @@ StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) { tty->cr(); } #endif - return NULL; + return nullptr; } else if (type->_lo < 0) { // Argument could be negative: We need a runtime check to throw NegativeArraySizeException in that case. 
sc->push_negative_int_check(parm); @@ -546,7 +546,7 @@ StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) { break; } } - if (constructor == NULL) { + if (constructor == nullptr) { // couldn't find constructor #ifndef PRODUCT if (PrintOptimizeStringConcat) { @@ -567,9 +567,9 @@ StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) { if (sc->validate_control_flow() && sc->validate_mem_flow()) { return sc; } else { - return NULL; + return nullptr; } - } else if (cnode->method() == NULL) { + } else if (cnode->method() == nullptr) { break; } else if (!cnode->method()->is_static() && cnode->method()->holder() == m->holder() && @@ -579,7 +579,7 @@ StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) { cnode->method()->signature()->as_symbol() == int_sig)) { sc->add_control(cnode); Node* arg = cnode->in(TypeFunc::Parms + 1); - if (arg == NULL || arg->is_top()) { + if (arg == nullptr || arg->is_top()) { #ifndef PRODUCT if (PrintOptimizeStringConcat) { tty->print("giving up because the call is effectively dead"); @@ -595,7 +595,7 @@ StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) { } else { if (arg->is_Proj() && arg->in(0)->is_CallStaticJava()) { CallStaticJavaNode* csj = arg->in(0)->as_CallStaticJava(); - if (csj->method() != NULL && + if (csj->method() != nullptr && csj->method()->intrinsic_id() == vmIntrinsics::_Integer_toString && arg->outcnt() == 1) { // _control is the list of StringBuilder calls nodes which @@ -627,7 +627,7 @@ StringConcat* PhaseStringOpts::build_candidate(CallStaticJavaNode* call) { break; } } - return NULL; + return nullptr; } @@ -650,7 +650,7 @@ PhaseStringOpts::PhaseStringOpts(PhaseGVN* gvn): Node_List toStrings = collect_toString_calls(); while (toStrings.size() > 0) { StringConcat* sc = build_candidate(toStrings.pop()->as_CallStaticJava()); - if (sc != NULL) { + if (sc != nullptr) { concats.push(sc); } } @@ -778,7 +778,7 @@ bool 
StringConcat::validate_mem_flow() { if (!_constructors.contains(curr)) { NOT_PRODUCT(path.push(curr);) Node* mem = curr->in(TypeFunc::Memory); - assert(mem != NULL, "calls should have memory edge"); + assert(mem != nullptr, "calls should have memory edge"); assert(!mem->is_Phi(), "should be handled by control flow validation"); NOT_PRODUCT(path.push(mem);) while (mem->is_MergeMem()) { @@ -835,8 +835,8 @@ bool StringConcat::validate_mem_flow() { assert(curr->is_Call(), "constructor should be a call"); // Go up the control starting from the constructor call Node* ctrl = curr->in(0); - IfNode* iff = NULL; - RegionNode* copy = NULL; + IfNode* iff = nullptr; + RegionNode* copy = nullptr; while (true) { // skip known check patterns @@ -846,10 +846,10 @@ bool StringConcat::validate_mem_flow() { ctrl = copy->is_copy(); } else { // a cast assert(ctrl->req() == 3 && - ctrl->in(1) != NULL && ctrl->in(1)->is_Proj() && - ctrl->in(2) != NULL && ctrl->in(2)->is_Proj() && + ctrl->in(1) != nullptr && ctrl->in(1)->is_Proj() && + ctrl->in(2) != nullptr && ctrl->in(2)->is_Proj() && ctrl->in(1)->in(0) == ctrl->in(2)->in(0) && - ctrl->in(1)->in(0) != NULL && ctrl->in(1)->in(0)->is_If(), + ctrl->in(1)->in(0) != nullptr && ctrl->in(1)->in(0)->is_If(), "must be a simple diamond"); Node* true_proj = ctrl->in(1)->is_IfTrue() ? 
ctrl->in(1) : ctrl->in(2); for (SimpleDUIterator i(true_proj); i.has_next(); i.next()) { @@ -933,7 +933,7 @@ bool StringConcat::validate_control_flow() { ctrl_path.push(cn->proj_out(0)); ctrl_path.push(cn->proj_out(0)->unique_out()); Node* catchproj = cn->proj_out(0)->unique_out()->as_Catch()->proj_out_or_null(0); - if (catchproj != NULL) { + if (catchproj != nullptr) { ctrl_path.push(catchproj); } } else { @@ -954,7 +954,7 @@ bool StringConcat::validate_control_flow() { IfNode* iff = ptr->in(0)->as_If(); BoolNode* b = iff->in(1)->isa_Bool(); - if (b == NULL) { + if (b == nullptr) { #ifndef PRODUCT if (PrintOptimizeStringConcat) { tty->print_cr("unexpected input to IfNode"); @@ -975,11 +975,11 @@ bool StringConcat::validate_control_flow() { if (b->_test._test == BoolTest::ne && v2->bottom_type() == TypePtr::NULL_PTR && v1->is_Proj() && ctrl_path.member(v1->in(0))) { - // NULL check of the return value of the append + // null check of the return value of the append null_check_count++; if (otherproj->outcnt() == 1) { CallStaticJavaNode* call = otherproj->unique_out()->isa_CallStaticJava(); - if (call != NULL && call->_name != NULL && strcmp(call->_name, "uncommon_trap") == 0) { + if (call != nullptr && call->_name != nullptr && strcmp(call->_name, "uncommon_trap") == 0) { ctrl_path.push(call); } } @@ -993,7 +993,7 @@ bool StringConcat::validate_control_flow() { // at the beginning. 
if (otherproj->outcnt() == 1) { CallStaticJavaNode* call = otherproj->unique_out()->isa_CallStaticJava(); - if (call != NULL && call->_name != NULL && strcmp(call->_name, "uncommon_trap") == 0) { + if (call != nullptr && call->_name != nullptr && strcmp(call->_name, "uncommon_trap") == 0) { // control flow leads to uct so should be ok _uncommon_traps.push(call); ctrl_path.push(call); @@ -1034,15 +1034,15 @@ bool StringConcat::validate_control_flow() { ptr = ptr->in(0)->in(0); } else if (ptr->is_Region()) { Node* copy = ptr->as_Region()->is_copy(); - if (copy != NULL) { + if (copy != nullptr) { ptr = copy; continue; } if (ptr->req() == 3 && - ptr->in(1) != NULL && ptr->in(1)->is_Proj() && - ptr->in(2) != NULL && ptr->in(2)->is_Proj() && + ptr->in(1) != nullptr && ptr->in(1)->is_Proj() && + ptr->in(2) != nullptr && ptr->in(2)->is_Proj() && ptr->in(1)->in(0) == ptr->in(2)->in(0) && - ptr->in(1)->in(0) != NULL && ptr->in(1)->in(0)->is_If()) { + ptr->in(1)->in(0) != nullptr && ptr->in(1)->in(0)->is_If()) { // Simple diamond. // XXX should check for possibly merging stores. simple data merges are ok. // The IGVN will make this simple diamond go away when it @@ -1095,16 +1095,16 @@ bool StringConcat::validate_control_flow() { Node* final_result = _end->proj_out_or_null(TypeFunc::Parms); for (uint i = 0; i < _control.size(); i++) { CallNode* cnode = _control.at(i)->isa_Call(); - if (cnode != NULL) { + if (cnode != nullptr) { _stringopts->_visited.test_set(cnode->_idx); } - Node* result = cnode != NULL ? cnode->proj_out_or_null(TypeFunc::Parms) : NULL; - if (result != NULL && result != final_result) { + Node* result = cnode != nullptr ? 
cnode->proj_out_or_null(TypeFunc::Parms) : nullptr; + if (result != nullptr && result != final_result) { worklist.push(result); } } - Node* last_result = NULL; + Node* last_result = nullptr; while (worklist.size() > 0) { Node* result = worklist.pop(); if (_stringopts->_visited.test_set(result->_idx)) @@ -1450,7 +1450,7 @@ void PhaseStringOpts::arraycopy(GraphKit& kit, IdealKit& ideal, Node* src_array, count = __ RShiftI(count, __ intcon(1)); } - Node* extra = NULL; + Node* extra = nullptr; #ifdef _LP64 count = __ ConvI2L(count); extra = C->top(); @@ -1627,7 +1627,7 @@ Node* PhaseStringOpts::copy_string(GraphKit& kit, Node* str, Node* dst_array, No // Compress copy the char into dst_array at index start. Node* PhaseStringOpts::copy_char(GraphKit& kit, Node* val, Node* dst_array, Node* dst_coder, Node* start) { - bool dcon = (dst_coder != NULL) && dst_coder->is_Con(); + bool dcon = (dst_coder != nullptr) && dst_coder->is_Con(); bool dbyte = dcon ? (dst_coder->get_int() == java_lang_String::CODER_LATIN1) : false; IdealKit ideal(&kit, true, true); @@ -1663,11 +1663,11 @@ Node* PhaseStringOpts::copy_char(GraphKit& kit, Node* val, Node* dst_array, Node // Allocate a byte array of specified length. Node* PhaseStringOpts::allocate_byte_array(GraphKit& kit, IdealKit* ideal, Node* length) { - if (ideal != NULL) { + if (ideal != nullptr) { // Sync IdealKit and graphKit. kit.sync_kit(*ideal); } - Node* byte_array = NULL; + Node* byte_array = nullptr; { PreserveReexecuteState preexecs(&kit); // The original jvms is for an allocation of either a String or @@ -1684,7 +1684,7 @@ Node* PhaseStringOpts::allocate_byte_array(GraphKit& kit, IdealKit* ideal, Node* AllocateArrayNode* byte_alloc = AllocateArrayNode::Ideal_array_allocation(byte_array, _gvn); byte_alloc->maybe_set_complete(_gvn); - if (ideal != NULL) { + if (ideal != nullptr) { // Sync IdealKit and graphKit. 
ideal->sync_kit(&kit); } @@ -1834,8 +1834,8 @@ void PhaseStringOpts::replace_string_concat(StringConcat* sc) { } case StringConcat::StringMode: { const Type* type = kit.gvn().type(arg); - Node* count = NULL; - Node* arg_coder = NULL; + Node* count = nullptr; + Node* arg_coder = nullptr; if (type == TypePtr::NULL_PTR) { // replace the argument with the null checked version arg = null_string; @@ -1893,7 +1893,7 @@ void PhaseStringOpts::replace_string_concat(StringConcat* sc) { coder = __ OrI(coder, arg_coder); } length = __ AddI(length, count); - string_sizes->init_req(argi, NULL); + string_sizes->init_req(argi, nullptr); break; } case StringConcat::CharMode: { @@ -1956,7 +1956,7 @@ void PhaseStringOpts::replace_string_concat(StringConcat* sc) { assert(CompactStrings || (coder->is_Con() && coder->get_int() == java_lang_String::CODER_UTF16), "Result string must be UTF16 encoded if CompactStrings is disabled"); - Node* dst_array = NULL; + Node* dst_array = nullptr; if (sc->num_arguments() == 1 && (sc->mode(0) == StringConcat::StringMode || sc->mode(0) == StringConcat::StringNullCheckMode)) { @@ -1965,7 +1965,7 @@ void PhaseStringOpts::replace_string_concat(StringConcat* sc) { dst_array = kit.load_String_value(sc->argument(0), true); } else { // Allocate destination byte array according to coder - dst_array = allocate_byte_array(kit, NULL, __ LShiftI(length, coder)); + dst_array = allocate_byte_array(kit, nullptr, __ LShiftI(length, coder)); // Now copy the string representations into the final byte[] Node* start = __ intcon(0); @@ -2009,7 +2009,7 @@ void PhaseStringOpts::replace_string_concat(StringConcat* sc) { // The value field is final. Emit a barrier here to ensure that the effect // of the initialization is committed to memory before any code publishes // a reference to the newly constructed object (see Parse::do_exits()). 
- assert(AllocateNode::Ideal_allocation(result, _gvn) != NULL, "should be newly allocated"); + assert(AllocateNode::Ideal_allocation(result, _gvn) != nullptr, "should be newly allocated"); kit.insert_mem_bar(Op_MemBarRelease, result); } else { result = C->top(); diff --git a/src/hotspot/share/opto/subnode.cpp b/src/hotspot/share/opto/subnode.cpp index 285cd388aec..a0811c926d7 100644 --- a/src/hotspot/share/opto/subnode.cpp +++ b/src/hotspot/share/opto/subnode.cpp @@ -94,12 +94,12 @@ const Type* SubNode::Value_common(PhaseTransform *phase) const { if( t1 == Type::BOTTOM || t2 == Type::BOTTOM ) return bottom_type(); - return NULL; + return nullptr; } const Type* SubNode::Value(PhaseGVN* phase) const { const Type* t = Value_common(phase); - if (t != NULL) { + if (t != nullptr) { return t; } const Type* t1 = phase->type(in(1)); @@ -117,7 +117,7 @@ SubNode* SubNode::make(Node* in1, Node* in2, BasicType bt) { default: fatal("Not implemented for %s", type2name(bt)); } - return NULL; + return nullptr; } //============================================================================= @@ -170,7 +170,7 @@ Node *SubINode::Ideal(PhaseGVN *phase, bool can_reshape){ #endif const Type *t2 = phase->type( in2 ); - if( t2 == Type::TOP ) return NULL; + if( t2 == Type::TOP ) return nullptr; // Convert "x-c0" into "x+ -c0". if( t2->base() == Type::Int ){ // Might be bottom or top... 
const TypeInt *i = t2->is_int(); @@ -211,7 +211,7 @@ Node *SubINode::Ideal(PhaseGVN *phase, bool can_reshape){ } const Type *t1 = phase->type( in1 ); - if( t1 == Type::TOP ) return NULL; + if( t1 == Type::TOP ) return nullptr; #ifdef ASSERT // Check for dead loop @@ -271,9 +271,9 @@ Node *SubINode::Ideal(PhaseGVN *phase, bool can_reshape){ // Associative if (op1 == Op_MulI && op2 == Op_MulI) { - Node* sub_in1 = NULL; - Node* sub_in2 = NULL; - Node* mul_in = NULL; + Node* sub_in1 = nullptr; + Node* sub_in2 = nullptr; + Node* mul_in = nullptr; if (in1->in(1) == in2->in(1)) { // Convert "a*b-a*c into a*(b-c) @@ -297,7 +297,7 @@ Node *SubINode::Ideal(PhaseGVN *phase, bool can_reshape){ mul_in = in1->in(1); } - if (mul_in != NULL) { + if (mul_in != nullptr) { Node* sub = phase->transform(new SubINode(sub_in1, sub_in2)); return new MulINode(mul_in, sub); } @@ -315,7 +315,7 @@ Node *SubINode::Ideal(PhaseGVN *phase, bool can_reshape){ } } - return NULL; + return nullptr; } //------------------------------sub-------------------------------------------- @@ -355,7 +355,7 @@ Node *SubLNode::Ideal(PhaseGVN *phase, bool can_reshape) { } #endif - if( phase->type( in2 ) == Type::TOP ) return NULL; + if( phase->type( in2 ) == Type::TOP ) return nullptr; const TypeLong *i = phase->type( in2 )->isa_long(); // Convert "x-c0" into "x+ -c0". if( i && // Might be bottom or top... 
@@ -395,7 +395,7 @@ Node *SubLNode::Ideal(PhaseGVN *phase, bool can_reshape) { } const Type *t1 = phase->type( in1 ); - if( t1 == Type::TOP ) return NULL; + if( t1 == Type::TOP ) return nullptr; #ifdef ASSERT // Check for dead loop @@ -448,9 +448,9 @@ Node *SubLNode::Ideal(PhaseGVN *phase, bool can_reshape) { // Associative if (op1 == Op_MulL && op2 == Op_MulL) { - Node* sub_in1 = NULL; - Node* sub_in2 = NULL; - Node* mul_in = NULL; + Node* sub_in1 = nullptr; + Node* sub_in2 = nullptr; + Node* mul_in = nullptr; if (in1->in(1) == in2->in(1)) { // Convert "a*b-a*c into a*(b+c) @@ -474,7 +474,7 @@ Node *SubLNode::Ideal(PhaseGVN *phase, bool can_reshape) { mul_in = in1->in(1); } - if (mul_in != NULL) { + if (mul_in != nullptr) { Node* sub = phase->transform(new SubLNode(sub_in1, sub_in2)); return new MulLNode(mul_in, sub); } @@ -492,7 +492,7 @@ Node *SubLNode::Ideal(PhaseGVN *phase, bool can_reshape) { } } - return NULL; + return nullptr; } //------------------------------sub-------------------------------------------- @@ -556,7 +556,7 @@ Node *SubFNode::Ideal(PhaseGVN *phase, bool can_reshape) { //if( phase->type(in(1)) == TypeF::ZERO ) //return new (phase->C, 2) NegFNode(in(2)); - return NULL; + return nullptr; } //------------------------------sub-------------------------------------------- @@ -591,7 +591,7 @@ Node *SubDNode::Ideal(PhaseGVN *phase, bool can_reshape){ //if( phase->type(in(1)) == TypeD::ZERO ) //return new (phase->C, 2) NegDNode(in(2)); - return NULL; + return nullptr; } //------------------------------sub-------------------------------------------- @@ -636,7 +636,7 @@ CmpNode *CmpNode::make(Node *in1, Node *in2, BasicType bt, bool unsigned_comp) { default: fatal("Not implemented for %s", type2name(bt)); } - return NULL; + return nullptr; } //============================================================================= @@ -676,20 +676,20 @@ const Type* CmpINode::Value(PhaseGVN* phase) const { // - the post loop is initially not reachable but as long 
as there's a main loop, the zero trip guard for the post // loop takes a phi that merges the pre and main loop's iv and can't constant fold the zero trip guard. Once, the main // loop is removed, there's no need to preserve the zero trip guard for the post loop anymore. - if (in1 != NULL && in2 != NULL) { + if (in1 != nullptr && in2 != nullptr) { uint input = 0; - Node* cmp = NULL; + Node* cmp = nullptr; BoolTest::mask test; if (in1->Opcode() == Op_OpaqueZeroTripGuard && phase->type(in1) != Type::TOP) { cmp = new CmpINode(in1->in(1), in2); test = ((OpaqueZeroTripGuardNode*)in1)->_loop_entered_mask; } if (in2->Opcode() == Op_OpaqueZeroTripGuard && phase->type(in2) != Type::TOP) { - assert(cmp == NULL, "A cmp with 2 OpaqueZeroTripGuard inputs"); + assert(cmp == nullptr, "A cmp with 2 OpaqueZeroTripGuard inputs"); cmp = new CmpINode(in1, in2->in(1)); test = ((OpaqueZeroTripGuardNode*)in2)->_loop_entered_mask; } - if (cmp != NULL) { + if (cmp != nullptr) { const Type* cmp_t = cmp->Value(phase); const Type* t = BoolTest(test).cc2logical(cmp_t); cmp->destruct(phase); @@ -768,7 +768,7 @@ const Type *CmpUNode::sub( const Type *t1, const Type *t2 ) const { const Type* CmpUNode::Value(PhaseGVN* phase) const { const Type* t = SubNode::Value_common(phase); - if (t != NULL) { + if (t != nullptr) { return t; } const Node* in1 = in(1); @@ -871,7 +871,7 @@ Node *CmpINode::Ideal( PhaseGVN *phase, bool can_reshape ) { // This is handled (with more general cases) by Ideal_sub_algebra. 
} } - return NULL; // No change + return nullptr; // No change } Node *CmpLNode::Ideal( PhaseGVN *phase, bool can_reshape ) { @@ -882,7 +882,7 @@ Node *CmpLNode::Ideal( PhaseGVN *phase, bool can_reshape ) { return new CmpINode(in(1)->in(1), phase->intcon((jint)con)); } } - return NULL; + return nullptr; } //============================================================================= @@ -988,9 +988,9 @@ const Type *CmpPNode::sub( const Type *t1, const Type *t2 ) const { if (p0 && p1) { Node* in1 = in(1)->uncast(); Node* in2 = in(2)->uncast(); - AllocateNode* alloc1 = AllocateNode::Ideal_allocation(in1, NULL); - AllocateNode* alloc2 = AllocateNode::Ideal_allocation(in2, NULL); - if (MemNode::detect_ptr_independence(in1, alloc1, in2, alloc2, NULL)) { + AllocateNode* alloc1 = AllocateNode::Ideal_allocation(in1, nullptr); + AllocateNode* alloc2 = AllocateNode::Ideal_allocation(in2, nullptr); + if (MemNode::detect_ptr_independence(in1, alloc1, in2, alloc2, nullptr)) { return TypeInt::CC_GT; // different pointers } } @@ -1040,25 +1040,25 @@ const Type *CmpPNode::sub( const Type *t1, const Type *t2 ) const { static inline Node* isa_java_mirror_load(PhaseGVN* phase, Node* n) { // Return the klass node for (indirect load from OopHandle) // LoadBarrier?(LoadP(LoadP(AddP(foo:Klass, #java_mirror)))) - // or NULL if not matching. + // or null if not matching. BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); n = bs->step_over_gc_barrier(n); - if (n->Opcode() != Op_LoadP) return NULL; + if (n->Opcode() != Op_LoadP) return nullptr; const TypeInstPtr* tp = phase->type(n)->isa_instptr(); - if (!tp || tp->instance_klass() != phase->C->env()->Class_klass()) return NULL; + if (!tp || tp->instance_klass() != phase->C->env()->Class_klass()) return nullptr; Node* adr = n->in(MemNode::Address); // First load from OopHandle: ((OopHandle)mirror)->resolve(); may need barrier. 
- if (adr->Opcode() != Op_LoadP || !phase->type(adr)->isa_rawptr()) return NULL; + if (adr->Opcode() != Op_LoadP || !phase->type(adr)->isa_rawptr()) return nullptr; adr = adr->in(MemNode::Address); intptr_t off = 0; Node* k = AddPNode::Ideal_base_and_offset(adr, phase, off); - if (k == NULL) return NULL; + if (k == nullptr) return nullptr; const TypeKlassPtr* tkp = phase->type(k)->isa_klassptr(); - if (!tkp || off != in_bytes(Klass::java_mirror_offset())) return NULL; + if (!tkp || off != in_bytes(Klass::java_mirror_offset())) return nullptr; // We've found the klass node of a Java mirror load. return k; @@ -1066,19 +1066,19 @@ static inline Node* isa_java_mirror_load(PhaseGVN* phase, Node* n) { static inline Node* isa_const_java_mirror(PhaseGVN* phase, Node* n) { // for ConP(Foo.class) return ConP(Foo.klass) - // otherwise return NULL - if (!n->is_Con()) return NULL; + // otherwise return null + if (!n->is_Con()) return nullptr; const TypeInstPtr* tp = phase->type(n)->isa_instptr(); - if (!tp) return NULL; + if (!tp) return nullptr; ciType* mirror_type = tp->java_mirror_type(); - // TypeInstPtr::java_mirror_type() returns non-NULL for compile- + // TypeInstPtr::java_mirror_type() returns non-null for compile- // time Class constants only. - if (!mirror_type) return NULL; + if (!mirror_type) return nullptr; // x.getClass() == int.class can never be true (for all primitive types) - // Return a ConP(NULL) node for this case. + // Return a ConP(null) node for this case. if (mirror_type->is_classless()) { return phase->makecon(TypePtr::NULL_PTR); } @@ -1115,7 +1115,7 @@ Node *CmpPNode::Ideal( PhaseGVN *phase, bool can_reshape ) { if (k1 && (k2 || conk2)) { Node* lhs = k1; - Node* rhs = (k2 != NULL) ? k2 : conk2; + Node* rhs = (k2 != nullptr) ? k2 : conk2; set_req_X(1, lhs, phase); set_req_X(2, rhs, phase); return this; @@ -1124,8 +1124,8 @@ Node *CmpPNode::Ideal( PhaseGVN *phase, bool can_reshape ) { // Constant pointer on right? 
const TypeKlassPtr* t2 = phase->type(in(2))->isa_klassptr(); - if (t2 == NULL || !t2->klass_is_exact()) - return NULL; + if (t2 == nullptr || !t2->klass_is_exact()) + return nullptr; // Get the constant klass we are comparing to. ciKlass* superklass = t2->exact_klass(); @@ -1134,15 +1134,15 @@ Node *CmpPNode::Ideal( PhaseGVN *phase, bool can_reshape ) { if (ldk1->is_DecodeNKlass()) { ldk1 = ldk1->in(1); if (ldk1->Opcode() != Op_LoadNKlass ) - return NULL; + return nullptr; } else if (ldk1->Opcode() != Op_LoadKlass ) - return NULL; + return nullptr; // Take apart the address of the LoadKlass: Node* adr1 = ldk1->in(MemNode::Address); intptr_t con2 = 0; Node* ldk2 = AddPNode::Ideal_base_and_offset(adr1, phase, con2); - if (ldk2 == NULL) - return NULL; + if (ldk2 == nullptr) + return nullptr; if (con2 == oopDesc::klass_offset_in_bytes()) { // We are inspecting an object's concrete class. // Short-circuit the check if the query is abstract. @@ -1159,13 +1159,13 @@ Node *CmpPNode::Ideal( PhaseGVN *phase, bool can_reshape ) { if (ldk2->is_DecodeNKlass()) { // Keep ldk2 as DecodeN since it could be used in CmpP below. 
if (ldk2->in(1)->Opcode() != Op_LoadNKlass ) - return NULL; + return nullptr; } else if (ldk2->Opcode() != Op_LoadKlass) - return NULL; + return nullptr; // Verify that we understand the situation if (con2 != (intptr_t) superklass->super_check_offset()) - return NULL; // Might be element-klass loading from array klass + return nullptr; // Might be element-klass loading from array klass // If 'superklass' has no subklasses and is not an interface, then we are // assured that the only input which will pass the type check is @@ -1183,7 +1183,7 @@ Node *CmpPNode::Ideal( PhaseGVN *phase, bool can_reshape ) { } if (superklass->is_instance_klass()) { ciInstanceKlass* ik = superklass->as_instance_klass(); - if (ik->has_subklass() || ik->is_interface()) return NULL; + if (ik->has_subklass() || ik->is_interface()) return nullptr; // Add a dependency if there is a chance that a subclass will be added later. if (!ik->is_final()) { phase->C->dependencies()->assert_leaf_type(ik); @@ -1207,7 +1207,7 @@ const Type *CmpNNode::sub( const Type *t1, const Type *t2 ) const { //------------------------------Ideal------------------------------------------ Node *CmpNNode::Ideal( PhaseGVN *phase, bool can_reshape ) { - return NULL; + return nullptr; } //============================================================================= @@ -1307,7 +1307,7 @@ Node *CmpDNode::Ideal(PhaseGVN *phase, bool can_reshape){ } // Testing value required the precision of a double } - return NULL; // No change + return nullptr; // No change } @@ -1398,7 +1398,7 @@ Node* BoolNode::make_predicate(Node* test_value, PhaseGVN* phase) { //--------------------------------as_int_value--------------------------------- Node* BoolNode::as_int_value(PhaseGVN* phase) { // Inverse to make_predicate. The CMove probably boils down to a Conv2B. 
- Node* cmov = CMoveNode::make(NULL, this, + Node* cmov = CMoveNode::make(nullptr, this, phase->intcon(0), phase->intcon(1), TypeInt::BOOL); return phase->transform(cmov); @@ -1420,8 +1420,8 @@ Node* BoolNode::fold_cmpI(PhaseGVN* phase, SubNode* cmp, Node* cmp1, int cmp_op, // Skip cases were inputs of add/sub are not integers or of bottom type const TypeInt* r0 = phase->type(cmp1->in(1))->isa_int(); const TypeInt* r1 = phase->type(cmp1->in(2))->isa_int(); - if ((r0 != NULL) && (r0 != TypeInt::INT) && - (r1 != NULL) && (r1 != TypeInt::INT) && + if ((r0 != nullptr) && (r0 != TypeInt::INT) && + (r1 != nullptr) && (r1 != TypeInt::INT) && (cmp2_type != TypeInt::INT)) { // Compute exact (long) type range of add/sub result jlong lo_long = r0->_lo; @@ -1455,14 +1455,14 @@ Node* BoolNode::fold_cmpI(PhaseGVN* phase, SubNode* cmp, Node* cmp1, int cmp_op, } } } - return NULL; + return nullptr; } static bool is_counted_loop_cmp(Node *cmp) { Node *n = cmp->in(1)->in(1); - return n != NULL && + return n != nullptr && n->is_Phi() && - n->in(0) != NULL && + n->in(0) != nullptr && n->in(0)->is_CountedLoop() && n->in(0)->as_CountedLoop()->phi() == n; } @@ -1472,18 +1472,18 @@ Node *BoolNode::Ideal(PhaseGVN *phase, bool can_reshape) { // Change "bool tst (cmp con x)" into "bool ~tst (cmp x con)". // This moves the constant to the right. Helps value-numbering. 
Node *cmp = in(1); - if( !cmp->is_Sub() ) return NULL; + if( !cmp->is_Sub() ) return nullptr; int cop = cmp->Opcode(); if( cop == Op_FastLock || cop == Op_FastUnlock || cmp->is_SubTypeCheck() || cop == Op_VectorTest ) { - return NULL; + return nullptr; } Node *cmp1 = cmp->in(1); Node *cmp2 = cmp->in(2); - if( !cmp1 ) return NULL; + if( !cmp1 ) return nullptr; if (_test._test == BoolTest::overflow || _test._test == BoolTest::no_overflow) { - return NULL; + return nullptr; } const int cmp1_op = cmp1->Opcode(); @@ -1588,7 +1588,7 @@ Node *BoolNode::Ideal(PhaseGVN *phase, bool can_reshape) { // The XOR-1 is an idiom used to flip the sense of a bool. We flip the // test instead. const TypeInt* cmp2_type = phase->type(cmp2)->isa_int(); - if (cmp2_type == NULL) return NULL; + if (cmp2_type == nullptr) return nullptr; Node* j_xor = cmp1; if( cmp2_type == TypeInt::ZERO && cmp1_op == Op_XorI && @@ -1605,7 +1605,7 @@ Node *BoolNode::Ideal(PhaseGVN *phase, bool can_reshape) { // Same with ((x & m) u< m+1) and ((m & x) u< m+1) if (cop == Op_CmpU && cmp1_op == Op_AndI) { - Node* bound = NULL; + Node* bound = nullptr; if (_test._test == BoolTest::le) { bound = cmp2; } else if (_test._test == BoolTest::lt && @@ -1701,7 +1701,7 @@ Node *BoolNode::Ideal(PhaseGVN *phase, bool can_reshape) { if ((_test._test == BoolTest::eq || _test._test == BoolTest::ne) && cop == Op_CmpI && cmp1_op == Op_AddI && - cmp1->in(2) != NULL && + cmp1->in(2) != nullptr && phase->type(cmp1->in(2))->isa_int() && phase->type(cmp1->in(2))->is_int()->is_con() && cmp2_type == TypeInt::ZERO && @@ -1774,18 +1774,18 @@ Node *BoolNode::Ideal(PhaseGVN *phase, bool can_reshape) { // // counter. If they use the PRE-incremented counter, then the counter has // // to be incremented in a private block on a loop backedge. // if( du && du->cnt(this) && du->out(this)[0]->Opcode() == Op_CountedLoopEnd ) - // return NULL; + // return nullptr; // #ifndef PRODUCT // // Do not do this in a wash GVN pass during verification. 
// // Gets triggered by too many simple optimizations to be bothered with // // re-trying it again and again. - // if( !phase->allow_progress() ) return NULL; + // if( !phase->allow_progress() ) return nullptr; // #endif // // Not valid for unsigned compare because of corner cases in involving zero. // // For example, replacing "X-1 Opcode() == Op_CmpU ) return NULL; + // if( cmp->Opcode() == Op_CmpU ) return nullptr; // int cmp2_op = cmp2->Opcode(); // if( _test._test == BoolTest::le ) { // if( cmp1_op == Op_AddI && @@ -1888,7 +1888,7 @@ Node* AbsNode::Ideal(PhaseGVN* phase, bool can_reshape) { set_req_X(1, in1->in(2), phase); return this; } - return NULL; + return nullptr; } //============================================================================= diff --git a/src/hotspot/share/opto/subnode.hpp b/src/hotspot/share/opto/subnode.hpp index 8a7bd72b170..986327e5776 100644 --- a/src/hotspot/share/opto/subnode.hpp +++ b/src/hotspot/share/opto/subnode.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -250,7 +250,7 @@ class CmpFNode : public CmpNode { public: CmpFNode( Node *in1, Node *in2 ) : CmpNode(in1,in2) {} virtual int Opcode() const; - virtual const Type *sub( const Type *, const Type * ) const { ShouldNotReachHere(); return NULL; } + virtual const Type *sub( const Type *, const Type * ) const { ShouldNotReachHere(); return nullptr; } const Type* Value(PhaseGVN* phase) const; }; @@ -278,7 +278,7 @@ class CmpDNode : public CmpNode { public: CmpDNode( Node *in1, Node *in2 ) : CmpNode(in1,in2) {} virtual int Opcode() const; - virtual const Type *sub( const Type *, const Type * ) const { ShouldNotReachHere(); return NULL; } + virtual const Type *sub( const Type *, const Type * ) const { ShouldNotReachHere(); return nullptr; } const Type* Value(PhaseGVN* phase) const; virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); }; @@ -336,7 +336,7 @@ class BoolNode : public Node { int cmp1_op, const TypeInt* cmp2_type); public: const BoolTest _test; - BoolNode(Node *cc, BoolTest::mask t): Node(NULL,cc), _test(t) { + BoolNode(Node *cc, BoolTest::mask t): Node(nullptr,cc), _test(t) { init_class_id(Class_Bool); } // Convert an arbitrary int value to a Bool or other suitable predicate. @@ -512,7 +512,7 @@ class SqrtFNode : public Node { public: SqrtFNode(Compile* C, Node *c, Node *in1) : Node(c, in1) { init_flags(Flag_is_expensive); - if (c != NULL) { + if (c != nullptr) { // Treat node only as expensive if a control input is set because it might // be created from a SqrtDNode in ConvD2FNode::Ideal() that was found to // be unique and therefore has no control input. diff --git a/src/hotspot/share/opto/subtypenode.cpp b/src/hotspot/share/opto/subtypenode.cpp index 6f5aa54c318..1e8265f3a5a 100644 --- a/src/hotspot/share/opto/subtypenode.cpp +++ b/src/hotspot/share/opto/subtypenode.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -46,7 +46,7 @@ const Type* SubTypeCheckNode::sub(const Type* sub_t, const Type* super_t) const } } - if (subk != NULL) { + if (subk != nullptr) { switch (Compile::current()->static_subtype_check(superk, subk, false)) { case Compile::SSC_always_false: return TypeInt::CC_GT; @@ -67,9 +67,9 @@ Node *SubTypeCheckNode::Ideal(PhaseGVN* phase, bool can_reshape) { Node* obj_or_subklass = in(ObjOrSubKlass); Node* superklass = in(SuperKlass); - if (obj_or_subklass == NULL || - superklass == NULL) { - return NULL; + if (obj_or_subklass == nullptr || + superklass == nullptr) { + return nullptr; } const Type* sub_t = phase->type(obj_or_subklass); @@ -77,12 +77,12 @@ Node *SubTypeCheckNode::Ideal(PhaseGVN* phase, bool can_reshape) { if (!super_t->isa_klassptr() || (!sub_t->isa_klassptr() && !sub_t->isa_oopptr())) { - return NULL; + return nullptr; } - Node* addr = NULL; + Node* addr = nullptr; if (obj_or_subklass->is_DecodeNKlass()) { - if (obj_or_subklass->in(1) != NULL && + if (obj_or_subklass->in(1) != nullptr && obj_or_subklass->in(1)->Opcode() == Op_LoadNKlass) { addr = obj_or_subklass->in(1)->in(MemNode::Address); } @@ -90,10 +90,10 @@ Node *SubTypeCheckNode::Ideal(PhaseGVN* phase, bool can_reshape) { addr = obj_or_subklass->in(MemNode::Address); } - if (addr != NULL) { + if (addr != nullptr) { intptr_t con = 0; Node* obj = AddPNode::Ideal_base_and_offset(addr, phase, con); - if (con == oopDesc::klass_offset_in_bytes() && obj != NULL) { + if (con == oopDesc::klass_offset_in_bytes() && obj != nullptr) { assert(is_oop(phase, obj), "only for oop input"); set_req_X(ObjOrSubKlass, obj, phase); return this; @@ -102,7 +102,7 @@ Node *SubTypeCheckNode::Ideal(PhaseGVN* phase, bool can_reshape) { // AllocateNode might have more accurate klass input Node* 
allocated_klass = AllocateNode::Ideal_klass(obj_or_subklass, phase); - if (allocated_klass != NULL) { + if (allocated_klass != nullptr) { assert(is_oop(phase, obj_or_subklass), "only for oop input"); set_req_X(ObjOrSubKlass, allocated_klass, phase); return this; @@ -112,7 +112,7 @@ Node *SubTypeCheckNode::Ideal(PhaseGVN* phase, bool can_reshape) { // when possible would not constant fold better assert(verify(phase), "missing Value() optimization"); - return NULL; + return nullptr; } #ifdef ASSERT @@ -166,7 +166,7 @@ bool SubTypeCheckNode::verify(PhaseGVN* phase) { const TypeKlassPtr* superk = super_t->isa_klassptr(); const TypeKlassPtr* subk = sub_t->isa_klassptr() ? sub_t->is_klassptr() : sub_t->is_oopptr()->as_klass_type(); - if (super_t->singleton() && subk != NULL) { + if (super_t->singleton() && subk != nullptr) { const Type* cached_t = Value(phase); // cache the type to validate consistency switch (C->static_subtype_check(superk, subk)) { case Compile::SSC_easy_test: { @@ -174,7 +174,7 @@ bool SubTypeCheckNode::verify(PhaseGVN* phase) { } case Compile::SSC_full_test: { Node* p1 = phase->transform(new AddPNode(superklass, superklass, phase->MakeConX(in_bytes(Klass::super_check_offset_offset())))); - Node* chk_off = phase->transform(new LoadINode(NULL, C->immutable_memory(), p1, phase->type(p1)->is_ptr(), TypeInt::INT, MemNode::unordered)); + Node* chk_off = phase->transform(new LoadINode(nullptr, C->immutable_memory(), p1, phase->type(p1)->is_ptr(), TypeInt::INT, MemNode::unordered)); record_for_cleanup(chk_off, phase); int cacheoff_con = in_bytes(Klass::secondary_super_cache_offset()); @@ -186,7 +186,7 @@ bool SubTypeCheckNode::verify(PhaseGVN* phase) { chk_off_X = phase->transform(new ConvI2LNode(chk_off_X)); #endif Node* p2 = phase->transform(new AddPNode(subklass, subklass, chk_off_X)); - Node* nkls = phase->transform(LoadKlassNode::make(*phase, NULL, C->immutable_memory(), p2, phase->type(p2)->is_ptr(), TypeInstKlassPtr::OBJECT_OR_NULL)); + Node* nkls = 
phase->transform(LoadKlassNode::make(*phase, nullptr, C->immutable_memory(), p2, phase->type(p2)->is_ptr(), TypeInstKlassPtr::OBJECT_OR_NULL)); return verify_helper(phase, nkls, cached_t); } @@ -206,10 +206,10 @@ bool SubTypeCheckNode::verify(PhaseGVN* phase) { Node* SubTypeCheckNode::load_klass(PhaseGVN* phase) const { Node* obj_or_subklass = in(ObjOrSubKlass); const Type* sub_t = phase->type(obj_or_subklass); - Node* subklass = NULL; + Node* subklass = nullptr; if (sub_t->isa_oopptr()) { Node* adr = phase->transform(new AddPNode(obj_or_subklass, obj_or_subklass, phase->MakeConX(oopDesc::klass_offset_in_bytes()))); - subklass = phase->transform(LoadKlassNode::make(*phase, NULL, phase->C->immutable_memory(), adr, TypeInstPtr::KLASS)); + subklass = phase->transform(LoadKlassNode::make(*phase, nullptr, phase->C->immutable_memory(), adr, TypeInstPtr::KLASS)); record_for_cleanup(subklass, phase); } else { subklass = obj_or_subklass; diff --git a/src/hotspot/share/opto/superword.cpp b/src/hotspot/share/opto/superword.cpp index e714e70f92a..baf880aac20 100644 --- a/src/hotspot/share/opto/superword.cpp +++ b/src/hotspot/share/opto/superword.cpp @@ -50,42 +50,42 @@ SuperWord::SuperWord(PhaseIdealLoop* phase) : _phase(phase), _arena(phase->C->comp_arena()), _igvn(phase->_igvn), - _packset(arena(), 8, 0, NULL), // packs for the current block + _packset(arena(), 8, 0, nullptr), // packs for the current block _bb_idx(arena(), (int)(1.10 * phase->C->unique()), 0, 0), // node idx to index in bb - _block(arena(), 8, 0, NULL), // nodes in current block - _post_block(arena(), 8, 0, NULL), // nodes common to current block which are marked as post loop vectorizable - _data_entry(arena(), 8, 0, NULL), // nodes with all inputs from outside - _mem_slice_head(arena(), 8, 0, NULL), // memory slice heads - _mem_slice_tail(arena(), 8, 0, NULL), // memory slice tails - _node_info(arena(), 8, 0, SWNodeInfo::initial), // info needed per node - _clone_map(phase->C->clone_map()), // map of nodes 
created in cloning - _cmovev_kit(_arena, this), // map to facilitate CMoveV creation - _align_to_ref(NULL), // memory reference to align vectors to - _disjoint_ptrs(arena(), 8, 0, OrderedPair::initial), // runtime disambiguated pointer pairs - _dg(_arena), // dependence graph - _visited(arena()), // visited node set - _post_visited(arena()), // post visited node set - _n_idx_list(arena(), 8), // scratch list of (node,index) pairs - _nlist(arena(), 8, 0, NULL), // scratch list of nodes - _stk(arena(), 8, 0, NULL), // scratch stack of nodes - _lpt(NULL), // loop tree node - _lp(NULL), // CountedLoopNode - _pre_loop_end(NULL), // Pre loop CountedLoopEndNode - _bb(NULL), // basic block - _iv(NULL), // induction var - _race_possible(false), // cases where SDMU is true - _early_return(true), // analysis evaluations routine - _do_vector_loop(phase->C->do_vector_loop()), // whether to do vectorization/simd style + _block(arena(), 8, 0, nullptr), // nodes in current block + _post_block(arena(), 8, 0, nullptr), // nodes common to current block which are marked as post loop vectorizable + _data_entry(arena(), 8, 0, nullptr), // nodes with all inputs from outside + _mem_slice_head(arena(), 8, 0, nullptr), // memory slice heads + _mem_slice_tail(arena(), 8, 0, nullptr), // memory slice tails + _node_info(arena(), 8, 0, SWNodeInfo::initial), // info needed per node + _clone_map(phase->C->clone_map()), // map of nodes created in cloning + _cmovev_kit(_arena, this), // map to facilitate CMoveV creation + _align_to_ref(nullptr), // memory reference to align vectors to + _disjoint_ptrs(arena(), 8, 0, OrderedPair::initial), // runtime disambiguated pointer pairs + _dg(_arena), // dependence graph + _visited(arena()), // visited node set + _post_visited(arena()), // post visited node set + _n_idx_list(arena(), 8), // scratch list of (node,index) pairs + _nlist(arena(), 8, 0, nullptr), // scratch list of nodes + _stk(arena(), 8, 0, nullptr), // scratch stack of nodes + _lpt(nullptr), 
// loop tree node + _lp(nullptr), // CountedLoopNode + _pre_loop_end(nullptr), // Pre loop CountedLoopEndNode + _bb(nullptr), // basic block + _iv(nullptr), // induction var + _race_possible(false), // cases where SDMU is true + _early_return(true), // analysis evaluations routine + _do_vector_loop(phase->C->do_vector_loop()), // whether to do vectorization/simd style _do_reserve_copy(DoReserveCopyInSuperWord), - _num_work_vecs(0), // amount of vector work we have - _num_reductions(0), // amount of reduction work we have - _ii_first(-1), // first loop generation index - only if do_vector_loop() - _ii_last(-1), // last loop generation index - only if do_vector_loop() + _num_work_vecs(0), // amount of vector work we have + _num_reductions(0), // amount of reduction work we have + _ii_first(-1), // first loop generation index - only if do_vector_loop() + _ii_last(-1), // last loop generation index - only if do_vector_loop() _ii_order(arena(), 8, 0, 0) { #ifndef PRODUCT _vector_loop_debug = 0; - if (_phase->C->method() != NULL) { + if (_phase->C->method() != nullptr) { _vector_loop_debug = phase->C->directive()->VectorizeDebugOption; } @@ -154,7 +154,7 @@ bool SuperWord::transform_loop(IdealLoopTree* lpt, bool do_optimization) { if (cl->is_main_loop()) { // Check for pre-loop ending with CountedLoopEnd(Bool(Cmp(x,Opaque1(limit)))) CountedLoopEndNode* pre_end = find_pre_loop_end(cl); - if (pre_end == NULL) { + if (pre_end == nullptr) { return false; } Node* pre_opaq1 = pre_end->limit(); @@ -294,7 +294,7 @@ void SuperWord::unrolling_analysis(int &local_loop_unroll_factor) { Node* n_ctrl = _phase->get_ctrl(adr); // save a queue of post process nodes - if (n_ctrl != NULL && lpt()->is_member(_phase->get_loop(n_ctrl))) { + if (n_ctrl != nullptr && lpt()->is_member(_phase->get_loop(n_ctrl))) { // Process the memory expression int stack_idx = 0; bool have_side_effects = true; @@ -513,7 +513,7 @@ bool SuperWord::SLP_extract() { find_adjacent_refs(); - if (align_to_ref() == 
NULL) { + if (align_to_ref() == nullptr) { return false; // Did not find memory reference to align vectors } @@ -608,16 +608,16 @@ void SuperWord::find_adjacent_refs() { Node_List align_to_refs; int max_idx; int best_iv_adjustment = 0; - MemNode* best_align_to_mem_ref = NULL; + MemNode* best_align_to_mem_ref = nullptr; while (memops.size() != 0) { // Find a memory reference to align to. MemNode* mem_ref = find_align_to_ref(memops, max_idx); - if (mem_ref == NULL) break; + if (mem_ref == nullptr) break; align_to_refs.push(mem_ref); int iv_adjustment = get_iv_adjustment(mem_ref); - if (best_align_to_mem_ref == NULL) { + if (best_align_to_mem_ref == nullptr) { // Set memory reference which is the best from all memory operations // to be used for alignment. The pre-loop trip count is modified to align // this reference to a vector-aligned address. @@ -626,13 +626,13 @@ void SuperWord::find_adjacent_refs() { NOT_PRODUCT(find_adjacent_refs_trace_1(best_align_to_mem_ref, best_iv_adjustment);) } - SWPointer align_to_ref_p(mem_ref, this, NULL, false); + SWPointer align_to_ref_p(mem_ref, this, nullptr, false); // Set alignment relative to "align_to_ref" for all related memory operations. for (int i = memops.size() - 1; i >= 0; i--) { MemNode* s = memops.at(i)->as_Mem(); if (isomorphic(s, mem_ref) && (!_do_vector_loop || same_origin_idx(s, mem_ref))) { - SWPointer p2(s, this, NULL, false); + SWPointer p2(s, this, nullptr, false); if (p2.comparable(align_to_ref_p)) { int align = memory_alignment(s, iv_adjustment); set_alignment(s, align); @@ -653,7 +653,7 @@ void SuperWord::find_adjacent_refs() { // iterations in pre-loop will be not enough to align it. create_pack = false; } else { - SWPointer p2(best_align_to_mem_ref, this, NULL, false); + SWPointer p2(best_align_to_mem_ref, this, nullptr, false); if (!align_to_ref_p.invar_equals(p2)) { // Do not vectorize memory accesses with different invariants // if unaligned memory accesses are not allowed. 
@@ -737,9 +737,9 @@ void SuperWord::find_adjacent_refs() { memops.push(s); } best_align_to_mem_ref = find_align_to_ref(memops, max_idx); - if (best_align_to_mem_ref == NULL) { + if (best_align_to_mem_ref == nullptr) { if (TraceSuperWord) { - tty->print_cr("SuperWord::find_adjacent_refs(): best_align_to_mem_ref == NULL"); + tty->print_cr("SuperWord::find_adjacent_refs(): best_align_to_mem_ref == nullptr"); } // best_align_to_mem_ref will be used for adjusting the pre-loop limit in // SuperWord::align_initial_loop_index. Find one with the biggest vector size, @@ -752,10 +752,10 @@ void SuperWord::find_adjacent_refs() { memops.remove(0); } best_align_to_mem_ref = find_align_to_ref(memops, max_idx); - assert(best_align_to_mem_ref == NULL, "sanity"); + assert(best_align_to_mem_ref == nullptr, "sanity"); best_align_to_mem_ref = memops.at(max_idx)->as_Mem(); } - assert(best_align_to_mem_ref != NULL, "sanity"); + assert(best_align_to_mem_ref != nullptr, "sanity"); } break; } @@ -804,7 +804,7 @@ MemNode* SuperWord::find_align_to_ref(Node_List &memops, int &idx) { // Count number of comparable memory ops for (uint i = 0; i < memops.size(); i++) { MemNode* s1 = memops.at(i)->as_Mem(); - SWPointer p1(s1, this, NULL, false); + SWPointer p1(s1, this, nullptr, false); // Only discard unalignable memory references if vector memory references // should be aligned on this platform. 
if (vectors_should_be_aligned() && !ref_is_alignable(p1)) { @@ -814,7 +814,7 @@ MemNode* SuperWord::find_align_to_ref(Node_List &memops, int &idx) { for (uint j = i+1; j < memops.size(); j++) { MemNode* s2 = memops.at(j)->as_Mem(); if (isomorphic(s1, s2)) { - SWPointer p2(s2, this, NULL, false); + SWPointer p2(s2, this, nullptr, false); if (p1.comparable(p2)) { (*cmp_ct.adr_at(i))++; (*cmp_ct.adr_at(j))++; @@ -835,7 +835,7 @@ MemNode* SuperWord::find_align_to_ref(Node_List &memops, int &idx) { if (s->is_Store()) { int vw = vector_width_in_bytes(s); assert(vw > 1, "sanity"); - SWPointer p(s, this, NULL, false); + SWPointer p(s, this, nullptr, false); if ( cmp_ct.at(j) > max_ct || (cmp_ct.at(j) == max_ct && ( vw > max_vw || @@ -858,7 +858,7 @@ MemNode* SuperWord::find_align_to_ref(Node_List &memops, int &idx) { if (s->is_Load()) { int vw = vector_width_in_bytes(s); assert(vw > 1, "sanity"); - SWPointer p(s, this, NULL, false); + SWPointer p(s, this, nullptr, false); if ( cmp_ct.at(j) > max_ct || (cmp_ct.at(j) == max_ct && ( vw > max_vw || @@ -896,7 +896,7 @@ MemNode* SuperWord::find_align_to_ref(Node_List &memops, int &idx) { #endif return memops.at(max_idx)->as_Mem(); } - return NULL; + return nullptr; } //------------------span_works_for_memory_size----------------------------- @@ -958,7 +958,7 @@ bool SuperWord::ref_is_alignable(SWPointer& p) { int vw = vector_width_in_bytes(p.mem()); assert(vw > 1, "sanity"); Node* init_nd = pre_end->init_trip(); - if (init_nd->is_Con() && p.invar() == NULL) { + if (init_nd->is_Con() && p.invar() == nullptr) { int init = init_nd->bottom_type()->is_int()->get_con(); int init_offset = init * p.scale_in_bytes() + offset; if (init_offset < 0) { // negative offset from object start? @@ -1020,7 +1020,7 @@ int SuperWord::get_vw_bytes_special(MemNode* s) { //---------------------------get_iv_adjustment--------------------------- // Calculate loop's iv adjustment for this memory ops. 
int SuperWord::get_iv_adjustment(MemNode* mem_ref) { - SWPointer align_to_ref_p(mem_ref, this, NULL, false); + SWPointer align_to_ref_p(mem_ref, this, nullptr, false); int offset = align_to_ref_p.offset_in_bytes(); int scale = align_to_ref_p.scale_in_bytes(); int elt_size = align_to_ref_p.memory_size(); @@ -1090,7 +1090,7 @@ void SuperWord::dependence_graph() { _dg.make_edge(_dg.root(), slice); // Create a sink for the slice - DepMem* slice_sink = _dg.make_node(NULL); + DepMem* slice_sink = _dg.make_node(nullptr); _dg.make_edge(slice_sink, _dg.tail()); // Now visit each pair of memory ops, creating the edges @@ -1101,13 +1101,13 @@ void SuperWord::dependence_graph() { if (_dg.dep(s1)->in_cnt() == 0) { _dg.make_edge(slice, s1); } - SWPointer p1(s1->as_Mem(), this, NULL, false); + SWPointer p1(s1->as_Mem(), this, nullptr, false); bool sink_dependent = true; for (int k = j - 1; k >= 0; k--) { Node* s2 = _nlist.at(k); if (s1->is_Load() && s2->is_Load()) continue; - SWPointer p2(s2->as_Mem(), this, NULL, false); + SWPointer p2(s2->as_Mem(), this, nullptr, false); int cmp = p1.cmp(p2); if (SuperWordRTDepCheck && @@ -1154,7 +1154,7 @@ void SuperWord::dependence_graph() { void SuperWord::mem_slice_preds(Node* start, Node* stop, GrowableArray &preds) { assert(preds.length() == 0, "start empty"); Node* n = start; - Node* prev = NULL; + Node* prev = nullptr; while (true) { NOT_PRODUCT( if(is_trace_mem_slice()) tty->print_cr("SuperWord::mem_slice_preds: n %d", n->_idx);) assert(in_bb(n), "must be in block"); @@ -1178,7 +1178,7 @@ void SuperWord::mem_slice_preds(Node* start, Node* stop, GrowableArray &p // StoreCM has an input edge used as a precedence edge. // Maybe an issue when oop stores are vectorized. 
} else { - assert(out == prev || prev == NULL, "no branches off of store slice"); + assert(out == prev || prev == nullptr, "no branches off of store slice"); } }//else }//for @@ -1256,8 +1256,8 @@ bool SuperWord::are_adjacent_refs(Node* s1, Node* s2) { if (!same_memory_slice(s1->as_Mem(), s2->as_Mem())) { return false; } - SWPointer p1(s1->as_Mem(), this, NULL, false); - SWPointer p2(s2->as_Mem(), this, NULL, false); + SWPointer p1(s1->as_Mem(), this, nullptr, false); + SWPointer p2(s2->as_Mem(), this, nullptr, false); if (p1.base() != p2.base() || !p1.comparable(p2)) return false; int diff = p2.offset_in_bytes() - p1.offset_in_bytes(); return diff == data_size(s1); @@ -1275,13 +1275,13 @@ bool SuperWord::isomorphic(Node* s1, Node* s2) { if (s1_ctrl == s2_ctrl) { return true; } else { - bool s1_ctrl_inv = ((s1_ctrl == NULL) ? true : lpt()->is_invariant(s1_ctrl)); - bool s2_ctrl_inv = ((s2_ctrl == NULL) ? true : lpt()->is_invariant(s2_ctrl)); + bool s1_ctrl_inv = ((s1_ctrl == nullptr) ? true : lpt()->is_invariant(s1_ctrl)); + bool s2_ctrl_inv = ((s2_ctrl == nullptr) ? true : lpt()->is_invariant(s2_ctrl)); // If the control nodes are not invariant for the loop, fail isomorphism test. 
if (!s1_ctrl_inv || !s2_ctrl_inv) { return false; } - if(s1_ctrl != NULL && s2_ctrl != NULL) { + if(s1_ctrl != nullptr && s2_ctrl != nullptr) { if (s1_ctrl->is_Proj()) { s1_ctrl = s1_ctrl->in(0); assert(lpt()->is_invariant(s1_ctrl), "must be invariant"); @@ -1404,14 +1404,14 @@ void SuperWord::set_alignment(Node* s1, Node* s2, int align) { //------------------------------data_size--------------------------- int SuperWord::data_size(Node* s) { - Node* use = NULL; //test if the node is a candidate for CMoveV optimization, then return the size of CMov + Node* use = nullptr; //test if the node is a candidate for CMoveV optimization, then return the size of CMov if (UseVectorCmov) { use = _cmovev_kit.is_Bool_candidate(s); - if (use != NULL) { + if (use != nullptr) { return data_size(use); } use = _cmovev_kit.is_Cmp_candidate(s); - if (use != NULL) { + if (use != nullptr) { return data_size(use); } } @@ -1517,8 +1517,8 @@ bool SuperWord::follow_def_uses(Node_List* p) { NOT_PRODUCT(if(is_trace_alignment()) tty->print_cr("SuperWord::follow_def_uses: s1 %d, align %d", s1->_idx, align);) int savings = -1; int num_s1_uses = 0; - Node* u1 = NULL; - Node* u2 = NULL; + Node* u1 = nullptr; + Node* u2 = nullptr; for (DUIterator_Fast imax, i = s1->fast_outs(imax); i < imax; i++) { Node* t1 = s1->fast_out(i); num_s1_uses++; @@ -1576,17 +1576,17 @@ void SuperWord::order_def_uses(Node_List* p) { } // Now find t1's packset - Node_List* p2 = NULL; + Node_List* p2 = nullptr; for (int j = 0; j < _packset.length(); j++) { p2 = _packset.at(j); Node* first = p2->at(0); if (t1 == first) { break; } - p2 = NULL; + p2 = nullptr; } // Arrange all sub components by the major component - if (p2 != NULL) { + if (p2 != nullptr) { for (uint j = 1; j < p->size(); j++) { Node* d1 = p->at(j); Node* u1 = p2->at(j); @@ -1709,17 +1709,17 @@ void SuperWord::combine_packs() { changed = false; for (int i = 0; i < _packset.length(); i++) { Node_List* p1 = _packset.at(i); - if (p1 == NULL) continue; + if (p1 == 
nullptr) continue; // Because of sorting we can start at i + 1 for (int j = i + 1; j < _packset.length(); j++) { Node_List* p2 = _packset.at(j); - if (p2 == NULL) continue; + if (p2 == nullptr) continue; if (i == j) continue; if (p1->at(p1->size()-1) == p2->at(0)) { for (uint k = 1; k < p2->size(); k++) { p1->push(p2->at(k)); } - _packset.at_put(j, NULL); + _packset.at_put(j, nullptr); changed = true; } } @@ -1729,7 +1729,7 @@ void SuperWord::combine_packs() { // Split packs which have size greater then max vector size. for (int i = 0; i < _packset.length(); i++) { Node_List* p1 = _packset.at(i); - if (p1 != NULL) { + if (p1 != nullptr) { uint max_vlen = max_vector_size_in_def_use_chain(p1->at(0)); // Max elements in vector assert(is_power_of_2(max_vlen), "sanity"); uint psize = p1->size(); @@ -1737,7 +1737,7 @@ void SuperWord::combine_packs() { // Skip pack which can't be vector. // case1: for(...) { a[i] = i; } elements values are different (i+x) // case2: for(...) { a[i] = b[i+1]; } can't align both, load and store - _packset.at_put(i, NULL); + _packset.at_put(i, nullptr); continue; } if (psize > max_vlen) { @@ -1750,7 +1750,7 @@ void SuperWord::combine_packs() { pack = new Node_List(); } } - _packset.at_put(i, NULL); + _packset.at_put(i, nullptr); } } } @@ -1758,7 +1758,7 @@ void SuperWord::combine_packs() { // Compress list. for (int i = _packset.length() - 1; i >= 0; i--) { Node_List* p1 = _packset.at(i); - if (p1 == NULL) { + if (p1 == nullptr) { _packset.remove_at(i); } } @@ -1773,13 +1773,13 @@ void SuperWord::combine_packs() { // Construct the map from nodes to packs. Only valid after the // point where a node is only in one pack (after combine_packs). 
void SuperWord::construct_my_pack_map() { - Node_List* rslt = NULL; + Node_List* rslt = nullptr; for (int i = 0; i < _packset.length(); i++) { Node_List* p = _packset.at(i); for (uint j = 0; j < p->size(); j++) { Node* s = p->at(j); #ifdef ASSERT - if (my_pack(s) != NULL) { + if (my_pack(s) != nullptr) { s->dump(1); tty->print_cr("packs[%d]:", i); print_pack(p); @@ -1886,28 +1886,28 @@ void SuperWord::merge_packs_to_cmove() { } Node* CMoveKit::is_Bool_candidate(Node* def) const { - Node* use = NULL; - if (!def->is_Bool() || def->in(0) != NULL || def->outcnt() != 1) { - return NULL; + Node* use = nullptr; + if (!def->is_Bool() || def->in(0) != nullptr || def->outcnt() != 1) { + return nullptr; } for (DUIterator_Fast jmax, j = def->fast_outs(jmax); j < jmax; j++) { use = def->fast_out(j); if (!_sw->same_generation(def, use) || !use->is_CMove()) { - return NULL; + return nullptr; } } return use; } Node* CMoveKit::is_Cmp_candidate(Node* def) const { - Node* use = NULL; - if (!def->is_Cmp() || def->in(0) != NULL || def->outcnt() != 1) { - return NULL; + Node* use = nullptr; + if (!def->is_Cmp() || def->in(0) != nullptr || def->outcnt() != 1) { + return nullptr; } for (DUIterator_Fast jmax, j = def->fast_outs(jmax); j < jmax; j++) { use = def->fast_out(j); - if (!_sw->same_generation(def, use) || (use = is_Bool_candidate(use)) == NULL || !_sw->same_generation(def, use)) { - return NULL; + if (!_sw->same_generation(def, use) || (use = is_Bool_candidate(use)) == nullptr || !_sw->same_generation(def, use)) { + return nullptr; } } return use; @@ -1919,11 +1919,11 @@ bool CMoveKit::can_merge_cmove_pack(Node_List* cmove_pk) { Node* cmove = cmove_pk->at(0); if (!SuperWord::is_cmove_fp_opcode(cmove->Opcode()) || - pack(cmove) != NULL /* already in the cmove pack */) { + pack(cmove) != nullptr /* already in the cmove pack */) { return false; } - if (cmove->in(0) != NULL) { + if (cmove->in(0) != nullptr) { NOT_PRODUCT(if(_sw->is_trace_cmov()) 
{tty->print("CMoveKit::can_merge_cmove_pack: CMove %d has control flow, escaping...", cmove->_idx); cmove->dump();}) return false; } @@ -1932,8 +1932,8 @@ bool CMoveKit::can_merge_cmove_pack(Node_List* cmove_pk) { if (!bol->is_Bool() || bol->outcnt() != 1 || !_sw->same_generation(bol, cmove) || - bol->in(0) != NULL || // Bool node has control flow!! - _sw->my_pack(bol) == NULL) { + bol->in(0) != nullptr || // Bool node has control flow!! + _sw->my_pack(bol) == nullptr) { NOT_PRODUCT(if(_sw->is_trace_cmov()) {tty->print("CMoveKit::can_merge_cmove_pack: Bool %d does not fit CMove %d for building vector, escaping...", bol->_idx, cmove->_idx); bol->dump();}) return false; } @@ -1946,8 +1946,8 @@ bool CMoveKit::can_merge_cmove_pack(Node_List* cmove_pk) { if (!cmp->is_Cmp() || cmp->outcnt() != 1 || !_sw->same_generation(cmp, cmove) || - cmp->in(0) != NULL || // Cmp node has control flow!! - _sw->my_pack(cmp) == NULL) { + cmp->in(0) != nullptr || // Cmp node has control flow!! + _sw->my_pack(cmp) == nullptr) { NOT_PRODUCT(if(_sw->is_trace_cmov()) {tty->print("CMoveKit::can_merge_cmove_pack: Cmp %d does not fit CMove %d for building vector, escaping...", cmp->_idx, cmove->_idx); cmp->dump();}) return false; } @@ -2005,13 +2005,13 @@ bool CMoveKit::test_cmp_pack(Node_List* cmp_pk, Node_List* cmove_pk) { Node_List* in1_pk = _sw->my_pack(in1); Node_List* in2_pk = _sw->my_pack(in2); - if ( (in1_pk != NULL && in1_pk->size() != cmp_pk->size()) - || (in2_pk != NULL && in2_pk->size() != cmp_pk->size()) ) { + if ( (in1_pk != nullptr && in1_pk->size() != cmp_pk->size()) + || (in2_pk != nullptr && in2_pk->size() != cmp_pk->size()) ) { return false; } // test if "all" in1 are in the same pack or the same node - if (in1_pk == NULL) { + if (in1_pk == nullptr) { for (uint j = 1; j < cmp_pk->size(); j++) { if (cmp_pk->at(j)->in(1) != in1) { return false; @@ -2019,7 +2019,7 @@ bool CMoveKit::test_cmp_pack(Node_List* cmp_pk, Node_List* cmove_pk) { }//for: in1_pk is not pack but all Cmp 
nodes in the pack have the same in(1) } // test if "all" in2 are in the same pack or the same node - if (in2_pk == NULL) { + if (in2_pk == nullptr) { for (uint j = 1; j < cmp_pk->size(); j++) { if (cmp_pk->at(j)->in(2) != in2) { return false; @@ -2056,7 +2056,7 @@ bool CMoveKit::test_cmp_pack(Node_List* cmp_pk, Node_List* cmove_pk) { bool SuperWord::implemented(Node_List* p) { bool retValue = false; Node* p0 = p->at(0); - if (p0 != NULL) { + if (p0 != nullptr) { int opc = p0->Opcode(); uint size = p->size(); if (p0->is_reduction()) { @@ -2101,7 +2101,7 @@ bool SuperWord::implemented(Node_List* p) { } bool SuperWord::is_cmov_pack(Node_List* p) { - return _cmovev_kit.pack(p->at(0)) != NULL; + return _cmovev_kit.pack(p->at(0)) != nullptr; } bool SuperWord::requires_long_to_int_conversion(int opc) { @@ -2152,7 +2152,7 @@ bool SuperWord::profitable(Node_List* p) { if (p0->is_reduction()) { Node* second_in = p0->in(2); Node_List* second_pk = my_pack(second_in); - if ((second_pk == NULL) || (_num_work_vecs == _num_reductions)) { + if ((second_pk == nullptr) || (_num_work_vecs == _num_reductions)) { // Remove reduction flag if no parent pack or if not enough work // to cover reduction expansion overhead p0->remove_flag(Node::Flag_is_reduction); @@ -2166,7 +2166,7 @@ bool SuperWord::profitable(Node_List* p) { // case (different shift counts) because it is not supported yet. 
Node* cnt = p0->in(2); Node_List* cnt_pk = my_pack(cnt); - if (cnt_pk != NULL) + if (cnt_pk != nullptr) return false; if (!same_inputs(p, 2)) return false; @@ -2315,7 +2315,7 @@ void SuperWord::co_locate_pack(Node_List* pk) { if (in_pack(s2, pk) || schedule_before_pack.member(s2)) { schedule_before_pack.push(s1); // s1 must be scheduled before Node_List* mem_pk = my_pack(s1); - if (mem_pk != NULL) { + if (mem_pk != nullptr) { for (uint ii = 0; ii < mem_pk->size(); ii++) { Node* s = mem_pk->at(ii); // follow partner if (memops.member(s) && !schedule_before_pack.member(s)) @@ -2415,7 +2415,7 @@ Node* SuperWord::pick_mem_state(Node_List* pk) { // of the last load as the remaining unvectorized stores could interfere since they have a dependency to the loads. // Some stores could be executed before the load vector resulting in a wrong result. We need to take the // memory state of the first load to prevent this. - if (my_pack(current) != NULL && is_dependent) { + if (my_pack(current) != nullptr && is_dependent) { // For vectorized store pack, when the load pack depends on // some memory operations locating after first_mem, we still // take the memory state of the last load. 
@@ -2528,11 +2528,11 @@ bool SuperWord::output() { return false; } - Node* vmask = NULL; + Node* vmask = nullptr; if (cl->is_rce_post_loop() && do_reserve_copy()) { // Create a vector mask node for post loop, bail out if not created vmask = create_post_loop_vmask(); - if (vmask == NULL) { + if (vmask == nullptr) { return false; // and reverse to backup IG } } @@ -2543,7 +2543,7 @@ bool SuperWord::output() { if (p && n == executed_last(p)) { uint vlen = p->size(); uint vlen_in_bytes = 0; - Node* vn = NULL; + Node* vn = nullptr; Node* low_adr = p->at(0); Node* first = executed_first(p); if (cl->is_rce_post_loop()) { @@ -2555,13 +2555,13 @@ bool SuperWord::output() { if (n->is_Load()) { Node* ctl = n->in(MemNode::Control); Node* mem = first->in(MemNode::Memory); - SWPointer p1(n->as_Mem(), this, NULL, false); + SWPointer p1(n->as_Mem(), this, nullptr, false); // Identify the memory dependency for the new loadVector node by // walking up through memory chain. // This is done to give flexibility to the new loadVector node so that // it can move above independent storeVector nodes. 
while (mem->is_StoreVector()) { - SWPointer p2(mem->as_Mem(), this, NULL, false); + SWPointer p2(mem->as_Mem(), this, nullptr, false); int cmp = p1.cmp(p2); if (SWPointer::not_equal(cmp) || !SWPointer::comparable(cmp)) { mem = mem->in(MemNode::Memory); @@ -2572,7 +2572,7 @@ bool SuperWord::output() { Node* adr = low_adr->in(MemNode::Address); const TypePtr* atyp = n->adr_type(); if (cl->is_rce_post_loop()) { - assert(vmask != NULL, "vector mask should be generated"); + assert(vmask != nullptr, "vector mask should be generated"); const TypeVect* vt = TypeVect::make(velt_basic_type(n), vlen); vn = new LoadVectorMaskedNode(ctl, mem, adr, atyp, vt, vmask); } else { @@ -2582,9 +2582,9 @@ bool SuperWord::output() { } else if (n->is_Store()) { // Promote value to be stored to vector Node* val = vector_opd(p, MemNode::ValueIn); - if (val == NULL) { + if (val == nullptr) { if (do_reserve_copy()) { - NOT_PRODUCT(if(is_trace_loop_reverse() || TraceLoopOpts) {tty->print_cr("SWPointer::output: val should not be NULL, exiting SuperWord");}) + NOT_PRODUCT(if(is_trace_loop_reverse() || TraceLoopOpts) {tty->print_cr("SWPointer::output: val should not be null, exiting SuperWord");}) return false; //and reverse to backup IG } ShouldNotReachHere(); @@ -2595,7 +2595,7 @@ bool SuperWord::output() { Node* adr = low_adr->in(MemNode::Address); const TypePtr* atyp = n->adr_type(); if (cl->is_rce_post_loop()) { - assert(vmask != NULL, "vector mask should be generated"); + assert(vmask != nullptr, "vector mask should be generated"); const TypeVect* vt = TypeVect::make(velt_basic_type(n), vlen); vn = new StoreVectorMaskedNode(ctl, mem, adr, val, atyp, vmask); } else { @@ -2632,25 +2632,25 @@ bool SuperWord::output() { vlen_in_bytes = vn->as_Vector()->length_in_bytes(); } else if (n->req() == 3 && !is_cmov_pack(p)) { // Promote operands to vector - Node* in1 = NULL; + Node* in1 = nullptr; bool node_isa_reduction = n->is_reduction(); if (node_isa_reduction) { // the input to the first reduction 
operation is retained in1 = low_adr->in(1); } else { in1 = vector_opd(p, 1); - if (in1 == NULL) { + if (in1 == nullptr) { if (do_reserve_copy()) { - NOT_PRODUCT(if(is_trace_loop_reverse() || TraceLoopOpts) {tty->print_cr("SWPointer::output: in1 should not be NULL, exiting SuperWord");}) + NOT_PRODUCT(if(is_trace_loop_reverse() || TraceLoopOpts) {tty->print_cr("SWPointer::output: in1 should not be null, exiting SuperWord");}) return false; //and reverse to backup IG } ShouldNotReachHere(); } } Node* in2 = vector_opd(p, 2); - if (in2 == NULL) { + if (in2 == nullptr) { if (do_reserve_copy()) { - NOT_PRODUCT(if(is_trace_loop_reverse() || TraceLoopOpts) {tty->print_cr("SWPointer::output: in2 should not be NULL, exiting SuperWord");}) + NOT_PRODUCT(if(is_trace_loop_reverse() || TraceLoopOpts) {tty->print_cr("SWPointer::output: in2 should not be null, exiting SuperWord");}) return false; //and reverse to backup IG } ShouldNotReachHere(); @@ -2663,7 +2663,7 @@ bool SuperWord::output() { } if (node_isa_reduction) { const Type *arith_type = n->bottom_type(); - vn = ReductionNode::make(opc, NULL, in1, in2, arith_type->basic_type()); + vn = ReductionNode::make(opc, nullptr, in1, in2, arith_type->basic_type()); if (in2->is_Load()) { vlen_in_bytes = in2->as_LoadVector()->memory_size(); } else { @@ -2692,7 +2692,7 @@ bool SuperWord::output() { opc == Op_CountTrailingZerosI) { assert(n->req() == 2, "only one input expected"); Node* in = vector_opd(p, 1); - vn = VectorNode::make(opc, in, NULL, vlen, velt_basic_type(n)); + vn = VectorNode::make(opc, in, nullptr, vlen, velt_basic_type(n)); vlen_in_bytes = vn->as_Vector()->length_in_bytes(); } else if (requires_long_to_int_conversion(opc)) { // Java API for Long.bitCount/numberOfLeadingZeros/numberOfTrailingZeros @@ -2702,7 +2702,7 @@ bool SuperWord::output() { // converting long to int. 
assert(n->req() == 2, "only one input expected"); Node* in = vector_opd(p, 1); - Node* longval = VectorNode::make(opc, in, NULL, vlen, T_LONG); + Node* longval = VectorNode::make(opc, in, nullptr, vlen, T_LONG); _igvn.register_new_node_with_optimizer(longval); _phase->set_ctrl(longval, _phase->get_ctrl(p->at(0))); vn = VectorCastNode::make(Op_VectorCastL2X, longval, T_INT, vlen); @@ -2768,17 +2768,17 @@ bool SuperWord::output() { NOT_PRODUCT(if(is_trace_cmov()) {tty->print("SWPointer::output: created intcon in_cc node %d", cc->_idx); cc->dump();}) Node* src1 = vector_opd(p, 2); //2=CMoveNode::IfFalse - if (src1 == NULL) { + if (src1 == nullptr) { if (do_reserve_copy()) { - NOT_PRODUCT(if(is_trace_loop_reverse() || TraceLoopOpts) {tty->print_cr("SWPointer::output: src1 should not be NULL, exiting SuperWord");}) + NOT_PRODUCT(if(is_trace_loop_reverse() || TraceLoopOpts) {tty->print_cr("SWPointer::output: src1 should not be null, exiting SuperWord");}) return false; //and reverse to backup IG } ShouldNotReachHere(); } Node* src2 = vector_opd(p, 3); //3=CMoveNode::IfTrue - if (src2 == NULL) { + if (src2 == nullptr) { if (do_reserve_copy()) { - NOT_PRODUCT(if(is_trace_loop_reverse() || TraceLoopOpts) {tty->print_cr("SWPointer::output: src2 should not be NULL, exiting SuperWord");}) + NOT_PRODUCT(if(is_trace_loop_reverse() || TraceLoopOpts) {tty->print_cr("SWPointer::output: src2 should not be null, exiting SuperWord");}) return false; //and reverse to backup IG } ShouldNotReachHere(); @@ -2808,10 +2808,10 @@ bool SuperWord::output() { ShouldNotReachHere(); } - assert(vn != NULL, "sanity"); - if (vn == NULL) { + assert(vn != nullptr, "sanity"); + if (vn == nullptr) { if (do_reserve_copy()){ - NOT_PRODUCT(if(is_trace_loop_reverse() || TraceLoopOpts) {tty->print_cr("SWPointer::output: got NULL node, cannot proceed, exiting SuperWord");}) + NOT_PRODUCT(if(is_trace_loop_reverse() || TraceLoopOpts) {tty->print_cr("SWPointer::output: got null node, cannot proceed, exiting 
SuperWord");}) return false; //and reverse to backup IG } ShouldNotReachHere(); @@ -2883,7 +2883,7 @@ bool SuperWord::output() { //-------------------------create_post_loop_vmask------------------------- // Check the post loop vectorizability and create a vector mask if yes. -// Return NULL to bail out if post loop is not vectorizable. +// Return null to bail out if post loop is not vectorizable. Node* SuperWord::create_post_loop_vmask() { CountedLoopNode *cl = lpt()->_head->as_CountedLoop(); assert(cl->is_rce_post_loop(), "Must be an rce post loop"); @@ -2897,23 +2897,23 @@ Node* SuperWord::create_post_loop_vmask() { // counting-down loop.) Collected SWPointer(s) are also used for data // dependence check next. VectorElementSizeStats stats(_arena); - GrowableArray swptrs(_arena, _packset.length(), 0, NULL); + GrowableArray swptrs(_arena, _packset.length(), 0, nullptr); for (int i = 0; i < _packset.length(); i++) { Node_List* p = _packset.at(i); assert(p->size() == 1, "all post loop packs should be singleton"); Node* n = p->at(0); BasicType bt = velt_basic_type(n); if (!is_java_primitive(bt)) { - return NULL; + return nullptr; } if (n->is_Mem()) { - SWPointer* mem_p = new (_arena) SWPointer(n->as_Mem(), this, NULL, false); + SWPointer* mem_p = new (_arena) SWPointer(n->as_Mem(), this, nullptr, false); // For each memory access, we check if the scale (in bytes) in its // address expression is equal to the data size times loop stride. // With this, Only positive scales exist in counting-up loops and // negative scales exist in counting-down loops. if (mem_p->scale_in_bytes() != type2aelembytes(bt) * cl->stride_con()) { - return NULL; + return nullptr; } swptrs.append(mem_p); } @@ -2929,7 +2929,7 @@ Node* SuperWord::create_post_loop_vmask() { case 2: vmask_bt = T_SHORT; break; case 4: vmask_bt = T_INT; break; case 8: vmask_bt = T_LONG; break; - default: return NULL; + default: return nullptr; } // Currently we can't remove this MaxVectorSize constraint. 
Without it, @@ -2939,19 +2939,19 @@ Node* SuperWord::create_post_loop_vmask() { // to fix this problem. int vlen = cl->slp_max_unroll(); if (unique_size * vlen != MaxVectorSize) { - return NULL; + return nullptr; } // Bail out if target doesn't support mask generator or masked load/store if (!Matcher::match_rule_supported_vector(Op_LoadVectorMasked, vlen, vmask_bt) || !Matcher::match_rule_supported_vector(Op_StoreVectorMasked, vlen, vmask_bt) || !Matcher::match_rule_supported_vector(Op_VectorMaskGen, vlen, vmask_bt)) { - return NULL; + return nullptr; } // Bail out if potential data dependence exists between memory accesses if (SWPointer::has_potential_dependence(swptrs)) { - return NULL; + return nullptr; } // Create vector mask with the post loop trip count. Note there's another @@ -3025,7 +3025,7 @@ Node* SuperWord::vector_opd(Node_List* p, int opd_idx) { assert(((opd_idx != 2) || !VectorNode::is_shift(p0)), "shift's count can't be vector"); if (opd_idx == 2 && VectorNode::is_shift(p0)) { NOT_PRODUCT(if(is_trace_loop_reverse() || TraceLoopOpts) {tty->print_cr("shift's count can't be vector");}) - return NULL; + return nullptr; } return opd; // input is matching vector } @@ -3034,14 +3034,14 @@ Node* SuperWord::vector_opd(Node_List* p, int opd_idx) { // Vector instructions do not mask shift count, do it here. juint mask = (p0->bottom_type() == TypeInt::INT) ? 
(BitsPerInt - 1) : (BitsPerLong - 1); const TypeInt* t = opd->find_int_type(); - if (t != NULL && t->is_con()) { + if (t != nullptr && t->is_con()) { juint shift = t->get_con(); if (shift > mask) { // Unsigned cmp cnt = ConNode::make(TypeInt::make(shift & mask)); _igvn.register_new_node_with_optimizer(cnt); } } else { - if (t == NULL || t->_lo < 0 || t->_hi > (int)mask) { + if (t == nullptr || t->_lo < 0 || t->_hi > (int)mask) { cnt = ConNode::make(TypeInt::make(mask)); _igvn.register_new_node_with_optimizer(cnt); cnt = new AndINode(opd, cnt); @@ -3051,7 +3051,7 @@ Node* SuperWord::vector_opd(Node_List* p, int opd_idx) { assert(opd->bottom_type()->isa_int(), "int type only"); if (!opd->bottom_type()->isa_int()) { NOT_PRODUCT(if(is_trace_loop_reverse() || TraceLoopOpts) {tty->print_cr("Should be int type only");}) - return NULL; + return nullptr; } } // Move shift count into vector register. @@ -3063,13 +3063,13 @@ Node* SuperWord::vector_opd(Node_List* p, int opd_idx) { assert(!opd->is_StoreVector(), "such vector is not expected here"); if (opd->is_StoreVector()) { NOT_PRODUCT(if(is_trace_loop_reverse() || TraceLoopOpts) {tty->print_cr("StoreVector is not expected here");}) - return NULL; + return nullptr; } // Convert scalar input to vector with the same number of elements as // p0's vector. Use p0's type because size of operand's container in // vector should match p0's size regardless operand's size. 
- const Type* p0_t = NULL; - VectorNode* vn = NULL; + const Type* p0_t = nullptr; + VectorNode* vn = nullptr; if (opd_idx == 2 && VectorNode::is_scalar_rotate(p0)) { Node* conv = opd; p0_t = TypeInt::INT; @@ -3104,19 +3104,19 @@ Node* SuperWord::vector_opd(Node_List* p, int opd_idx) { for (uint i = 1; i < vlen; i++) { Node* pi = p->at(i); Node* in = pi->in(opd_idx); - assert(my_pack(in) == NULL, "Should already have been unpacked"); - if (my_pack(in) != NULL) { + assert(my_pack(in) == nullptr, "Should already have been unpacked"); + if (my_pack(in) != nullptr) { NOT_PRODUCT(if(is_trace_loop_reverse() || TraceLoopOpts) {tty->print_cr("Should already have been unpacked");}) - return NULL; + return nullptr; } assert(opd_bt == in->bottom_type()->basic_type(), "all same type"); pk->add_opd(in); if (VectorNode::is_muladds2i(pi)) { Node* in2 = pi->in(opd_idx + 2); - assert(my_pack(in2) == NULL, "Should already have been unpacked"); - if (my_pack(in2) != NULL) { + assert(my_pack(in2) == nullptr, "Should already have been unpacked"); + if (my_pack(in2) != nullptr) { NOT_PRODUCT(if (is_trace_loop_reverse() || TraceLoopOpts) { tty->print_cr("Should already have been unpacked"); }) - return NULL; + return nullptr; } assert(opd_bt == in2->bottom_type()->basic_type(), "all same type"); pk->add_opd(in2); @@ -3151,7 +3151,7 @@ void SuperWord::insert_extracts(Node_List* p) { Node* n = use->in(k); if (def == n) { Node_List* u_pk = my_pack(use); - if ((u_pk == NULL || !is_cmov_pack(u_pk) || use->is_CMove()) && !is_vector_use(use, k)) { + if ((u_pk == nullptr || !is_cmov_pack(u_pk) || use->is_CMove()) && !is_vector_use(use, k)) { _n_idx_list.push(use, k); } } @@ -3187,11 +3187,11 @@ void SuperWord::insert_extracts(Node_List* p) { // Is use->in(u_idx) a vector use? 
bool SuperWord::is_vector_use(Node* use, int u_idx) { Node_List* u_pk = my_pack(use); - if (u_pk == NULL) return false; + if (u_pk == nullptr) return false; if (use->is_reduction()) return true; Node* def = use->in(u_idx); Node_List* d_pk = my_pack(def); - if (d_pk == NULL) { + if (d_pk == nullptr) { Node* n = u_pk->at(0)->in(u_idx); if (n == iv()) { // check for index population @@ -3203,7 +3203,7 @@ bool SuperWord::is_vector_use(Node* use, int u_idx) { Node* use_in = u_pk->at(i)->in(u_idx); if (!use_in->is_Add() || use_in->in(1) != n) return false; const TypeInt* offset_t = use_in->in(2)->bottom_type()->is_int(); - if (offset_t == NULL || !offset_t->is_con() || + if (offset_t == nullptr || !offset_t->is_con() || offset_t->get_con() != (jint) i) return false; } } else { @@ -3621,7 +3621,7 @@ int SuperWord::memory_alignment(MemNode* s, int iv_adjust) { } #endif NOT_PRODUCT(SWPointer::Tracer::Depth ddd(0);) - SWPointer p(s, this, NULL, false); + SWPointer p(s, this, nullptr, false); if (!p.valid()) { NOT_PRODUCT(if(is_trace_alignment()) tty->print_cr("SWPointer::memory_alignment: SWPointer p invalid, return bottom_align");) return bottom_align; @@ -3706,7 +3706,7 @@ Node_List* SuperWord::in_pack(Node* s, Node_List* p) { return p; } } - return NULL; + return nullptr; } //------------------------------remove_pack_at--------------------------- @@ -3715,7 +3715,7 @@ void SuperWord::remove_pack_at(int pos) { Node_List* p = _packset.at(pos); for (uint i = 0; i < p->size(); i++) { Node* s = p->at(i); - set_my_pack(s, NULL); + set_my_pack(s, nullptr); } _packset.remove_at(pos); } @@ -3812,9 +3812,9 @@ void SuperWord::align_initial_loop_index(MemNode* align_to_ref) { // Ensure the original loop limit is available from the // pre-loop Opaque1 node. 
Node* orig_limit = pre_opaq->original_loop_limit(); - assert(orig_limit != NULL && _igvn.type(orig_limit) != Type::TOP, ""); + assert(orig_limit != nullptr && _igvn.type(orig_limit) != Type::TOP, ""); - SWPointer align_to_ref_p(align_to_ref, this, NULL, false); + SWPointer align_to_ref_p(align_to_ref, this, nullptr, false); assert(align_to_ref_p.valid(), "sanity"); // Given: @@ -3878,7 +3878,7 @@ void SuperWord::align_initial_loop_index(MemNode* align_to_ref) { Node *offsn = _igvn.intcon(offset); Node *e = offsn; - if (align_to_ref_p.invar() != NULL) { + if (align_to_ref_p.invar() != nullptr) { // incorporate any extra invariant piece producing (offset +/- invar) >>> log2(elt) Node* log2_elt = _igvn.intcon(exact_log2(elt_size)); Node* invar = align_to_ref_p.invar(); @@ -3890,7 +3890,7 @@ void SuperWord::align_initial_loop_index(MemNode* align_to_ref) { _igvn.register_new_node_with_optimizer(invar); } Node* invar_scale = align_to_ref_p.invar_scale(); - if (invar_scale != NULL) { + if (invar_scale != nullptr) { invar = new LShiftINode(invar, invar_scale); _igvn.register_new_node_with_optimizer(invar); } @@ -3907,7 +3907,7 @@ void SuperWord::align_initial_loop_index(MemNode* align_to_ref) { } if (vw > ObjectAlignmentInBytes || align_to_ref_p.base()->is_top()) { // incorporate base e +/- base && Mask >>> log2(elt) - Node* xbase = new CastP2XNode(NULL, align_to_ref_p.adr()); + Node* xbase = new CastP2XNode(nullptr, align_to_ref_p.adr()); _igvn.register_new_node_with_optimizer(xbase); #ifdef _LP64 xbase = new ConvL2INode(xbase); @@ -3970,16 +3970,16 @@ void SuperWord::align_initial_loop_index(MemNode* align_to_ref) { CountedLoopEndNode* SuperWord::find_pre_loop_end(CountedLoopNode* cl) const { // The loop cannot be optimized if the graph shape at // the loop entry is inappropriate. 
- if (cl->is_canonical_loop_entry() == NULL) { - return NULL; + if (cl->is_canonical_loop_entry() == nullptr) { + return nullptr; } Node* p_f = cl->skip_predicates()->in(0)->in(0); - if (!p_f->is_IfFalse()) return NULL; - if (!p_f->in(0)->is_CountedLoopEnd()) return NULL; + if (!p_f->is_IfFalse()) return nullptr; + if (!p_f->in(0)->is_CountedLoopEnd()) return nullptr; CountedLoopEndNode* pre_end = p_f->in(0)->as_CountedLoopEnd(); CountedLoopNode* loop_node = pre_end->loopnode(); - if (loop_node == NULL || !loop_node->is_pre_loop()) return NULL; + if (loop_node == nullptr || !loop_node->is_pre_loop()) return nullptr; return pre_end; } @@ -3996,11 +3996,11 @@ void SuperWord::init() { _iteration_first.clear(); _iteration_last.clear(); _node_info.clear(); - _align_to_ref = NULL; - _lpt = NULL; - _lp = NULL; - _bb = NULL; - _iv = NULL; + _align_to_ref = nullptr; + _lpt = nullptr; + _lp = nullptr; + _bb = nullptr; + _iv = nullptr; _race_possible = 0; _early_return = false; _num_work_vecs = 0; @@ -4077,9 +4077,9 @@ int SWPointer::Tracer::_depth = 0; #endif //----------------------------SWPointer------------------------ SWPointer::SWPointer(MemNode* mem, SuperWord* slp, Node_Stack *nstack, bool analyze_only) : - _mem(mem), _slp(slp), _base(NULL), _adr(NULL), - _scale(0), _offset(0), _invar(NULL), _negate_invar(false), - _invar_scale(NULL), + _mem(mem), _slp(slp), _base(nullptr), _adr(nullptr), + _scale(0), _offset(0), _invar(nullptr), _negate_invar(false), + _invar_scale(nullptr), _nstack(nstack), _analyze_only(analyze_only), _stack_idx(0) #ifndef PRODUCT @@ -4146,9 +4146,9 @@ SWPointer::SWPointer(MemNode* mem, SuperWord* slp, Node_Stack *nstack, bool anal // Following is used to create a temporary object during // the pattern match of an address expression. 
SWPointer::SWPointer(SWPointer* p) : - _mem(p->_mem), _slp(p->_slp), _base(NULL), _adr(NULL), - _scale(0), _offset(0), _invar(NULL), _negate_invar(false), - _invar_scale(NULL), + _mem(p->_mem), _slp(p->_slp), _base(nullptr), _adr(nullptr), + _scale(0), _offset(0), _invar(nullptr), _negate_invar(false), + _invar_scale(nullptr), _nstack(p->_nstack), _analyze_only(p->_analyze_only), _stack_idx(p->_stack_idx) #ifndef PRODUCT @@ -4262,7 +4262,7 @@ bool SWPointer::scaled_iv(Node* n) { return true; } } else if (opc == Op_LShiftL && n->in(2)->is_Con()) { - if (!has_iv() && _invar == NULL) { + if (!has_iv() && _invar == nullptr) { // Need to preserve the current _offset value, so // create a temporary object for this expression subtree. // Hacky, so should re-engineer the address pattern match. @@ -4275,7 +4275,7 @@ bool SWPointer::scaled_iv(Node* n) { _scale = tmp._scale << scale; _offset += tmp._offset << scale; _invar = tmp._invar; - if (_invar != NULL) { + if (_invar != nullptr) { _negate_invar = tmp._negate_invar; _invar_scale = n->in(2); } @@ -4313,7 +4313,7 @@ bool SWPointer::offset_plus_k(Node* n, bool negate) { NOT_PRODUCT(_tracer.offset_plus_k_4(n);) return false; } - if (_invar != NULL) { // already has an invariant + if (_invar != nullptr) { // already has an invariant NOT_PRODUCT(_tracer.offset_plus_k_5(n, _invar);) return false; } @@ -4407,10 +4407,10 @@ bool SWPointer::has_potential_dependence(GrowableArray swptrs) { void SWPointer::print() { #ifndef PRODUCT tty->print("base: [%d] adr: [%d] scale: %d offset: %d", - _base != NULL ? _base->_idx : 0, - _adr != NULL ? _adr->_idx : 0, + _base != nullptr ? _base->_idx : 0, + _adr != nullptr ? 
_adr->_idx : 0, _scale, _offset); - if (_invar != NULL) { + if (_invar != nullptr) { tty->print(" invar: %c[%d] << [%d]", _negate_invar?'-':'+', _invar->_idx, _invar_scale->_idx); } tty->cr(); @@ -4606,13 +4606,13 @@ void SWPointer::Tracer::scaled_iv_9(Node* n, int scale, int offset, Node* invar, print_depth(); tty->print_cr(" %d SWPointer::scaled_iv: Op_LShiftL PASSED, setting _scale = %d, _offset = %d", n->_idx, scale, offset); print_depth(); tty->print_cr(" \\ SWPointer::scaled_iv: in(1) [%d] is scaled_iv_plus_offset, in(2) [%d] used to scale: _scale = %d, _offset = %d", n->in(1)->_idx, n->in(2)->_idx, scale, offset); - if (invar != NULL) { + if (invar != nullptr) { print_depth(); tty->print_cr(" \\ SWPointer::scaled_iv: scaled invariant: %c[%d]", (negate_invar?'-':'+'), invar->_idx); } inc_depth(); inc_depth(); print_depth(); n->in(1)->dump(); print_depth(); n->in(2)->dump(); - if (invar != NULL) { + if (invar != nullptr) { print_depth(); invar->dump(); } dec_depth(); dec_depth(); @@ -4653,7 +4653,7 @@ void SWPointer::Tracer::offset_plus_k_4(Node* n) { void SWPointer::Tracer::offset_plus_k_5(Node* n, Node* _invar) { if(_slp->is_trace_alignment()) { print_depth(); tty->print_cr(" %d SWPointer::offset_plus_k: FAILED since another invariant has been detected before", n->_idx); - print_depth(); tty->print(" \\ %d SWPointer::offset_plus_k: _invar != NULL: ", _invar->_idx); _invar->dump(); + print_depth(); tty->print(" \\ %d SWPointer::offset_plus_k: _invar is not null: ", _invar->_idx); _invar->dump(); } } @@ -4721,8 +4721,8 @@ const SWNodeInfo SWNodeInfo::initial; // Make a new dependence graph node for an ideal node. 
DepMem* DepGraph::make_node(Node* node) { DepMem* m = new (_arena) DepMem(node); - if (node != NULL) { - assert(_map.at_grow(node->_idx) == NULL, "one init only"); + if (node != nullptr) { + assert(_map.at_grow(node->_idx) == nullptr, "one init only"); _map.at_put_grow(node->_idx, m); } return m; @@ -4742,14 +4742,14 @@ DepEdge* DepGraph::make_edge(DepMem* dpred, DepMem* dsucc) { //------------------------------in_cnt--------------------------- int DepMem::in_cnt() { int ct = 0; - for (DepEdge* e = _in_head; e != NULL; e = e->next_in()) ct++; + for (DepEdge* e = _in_head; e != nullptr; e = e->next_in()) ct++; return ct; } //------------------------------out_cnt--------------------------- int DepMem::out_cnt() { int ct = 0; - for (DepEdge* e = _out_head; e != NULL; e = e->next_out()) ct++; + for (DepEdge* e = _out_head; e != nullptr; e = e->next_out()) ct++; return ct; } @@ -4757,14 +4757,14 @@ int DepMem::out_cnt() { void DepMem::print() { #ifndef PRODUCT tty->print(" DepNode %d (", _node->_idx); - for (DepEdge* p = _in_head; p != NULL; p = p->next_in()) { + for (DepEdge* p = _in_head; p != nullptr; p = p->next_in()) { Node* pred = p->pred()->node(); - tty->print(" %d", pred != NULL ? pred->_idx : 0); + tty->print(" %d", pred != nullptr ? pred->_idx : 0); } tty->print(") ["); - for (DepEdge* s = _out_head; s != NULL; s = s->next_out()) { + for (DepEdge* s = _out_head; s != nullptr; s = s->next_out()) { Node* succ = s->succ()->node(); - tty->print(" %d", succ != NULL ? succ->_idx : 0); + tty->print(" %d", succ != nullptr ? 
succ->_idx : 0); } tty->print_cr(" ]"); #endif @@ -4797,14 +4797,14 @@ DepPreds::DepPreds(Node* n, DepGraph& dg) { } else { _next_idx = 1; _end_idx = _n->req(); - _dep_next = NULL; + _dep_next = nullptr; } next(); } //------------------------------next--------------------------- void DepPreds::next() { - if (_dep_next != NULL) { + if (_dep_next != nullptr) { _current = _dep_next->pred()->node(); _dep_next = _dep_next->next_in(); } else if (_next_idx < _end_idx) { @@ -4832,14 +4832,14 @@ DepSuccs::DepSuccs(Node* n, DepGraph& dg) { } else { _next_idx = 0; _end_idx = _n->outcnt(); - _dep_next = NULL; + _dep_next = nullptr; } next(); } //-------------------------------next--------------------------- void DepSuccs::next() { - if (_dep_next != NULL) { + if (_dep_next != nullptr) { _current = _dep_next->succ()->node(); _dep_next = _dep_next->next_out(); } else if (_next_idx < _end_idx) { @@ -4853,10 +4853,10 @@ void DepSuccs::next() { // --------------------------------- vectorization/simd ----------------------------------- // bool SuperWord::same_origin_idx(Node* a, Node* b) const { - return a != NULL && b != NULL && _clone_map.same_idx(a->_idx, b->_idx); + return a != nullptr && b != nullptr && _clone_map.same_idx(a->_idx, b->_idx); } bool SuperWord::same_generation(Node* a, Node* b) const { - return a != NULL && b != NULL && _clone_map.same_gen(a->_idx, b->_idx); + return a != nullptr && b != nullptr && _clone_map.same_gen(a->_idx, b->_idx); } Node* SuperWord::find_phi_for_mem_dep(LoadNode* ld) { @@ -4868,7 +4868,7 @@ Node* SuperWord::find_phi_for_mem_dep(LoadNode* ld) { _clone_map.gen(ld->_idx)); } #endif - return NULL; //we think that any ld in the first gen being vectorizable + return nullptr; //we think that any ld in the first gen being vectorizable } Node* mem = ld->in(MemNode::Memory); @@ -4882,7 +4882,7 @@ Node* SuperWord::find_phi_for_mem_dep(LoadNode* ld) { mem->dump(); } #endif - return NULL; + return nullptr; } if (!in_bb(mem) || same_generation(mem, ld)) 
{ #ifndef PRODUCT @@ -4891,7 +4891,7 @@ Node* SuperWord::find_phi_for_mem_dep(LoadNode* ld) { _clone_map.gen(mem->_idx)); } #endif - return NULL; // does not depend on loop volatile node or depends on the same generation + return nullptr; // does not depend on loop volatile node or depends on the same generation } //otherwise first node should depend on mem-phi @@ -4906,7 +4906,7 @@ Node* SuperWord::find_phi_for_mem_dep(LoadNode* ld) { first->dump(); } #endif - return NULL; + return nullptr; } Node* tail = 0; @@ -4924,7 +4924,7 @@ Node* SuperWord::find_phi_for_mem_dep(LoadNode* ld) { phi->dump(); } #endif - return NULL; + return nullptr; } // now all conditions are met @@ -4971,7 +4971,7 @@ Node* SuperWord::last_node(Node* nd) { } int SuperWord::mark_generations() { - Node *ii_err = NULL, *tail_err = NULL; + Node *ii_err = nullptr, *tail_err = nullptr; for (int i = 0; i < _mem_slice_head.length(); i++) { Node* phi = _mem_slice_head.at(i); assert(phi->is_Phi(), "must be phi"); @@ -5234,7 +5234,7 @@ bool SuperWord::hoist_loads_in_graph() { for (int i = 0; i < loads.length(); i++) { LoadNode* ld = loads.at(i)->as_Load(); Node* phi = find_phi_for_mem_dep(ld); - if (phi != NULL) { + if (phi != nullptr) { #ifndef PRODUCT if (_vector_loop_debug) { tty->print_cr("SuperWord::hoist_loads_in_graph replacing MemNode::Memory(%d) edge in %d with one from %d", diff --git a/src/hotspot/share/opto/superword.hpp b/src/hotspot/share/opto/superword.hpp index cfa9e2db5f6..6d24e528be5 100644 --- a/src/hotspot/share/opto/superword.hpp +++ b/src/hotspot/share/opto/superword.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2007, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -99,7 +99,7 @@ class DepMem : public ArenaObj { DepEdge* _out_head; // Head of list of out edges, null terminated public: - DepMem(Node* node) : _node(node), _in_head(NULL), _out_head(NULL) {} + DepMem(Node* node) : _node(node), _in_head(nullptr), _out_head(nullptr) {} Node* node() { return _node; } DepEdge* in_head() { return _in_head; } @@ -122,9 +122,9 @@ class DepGraph { DepMem* _tail; public: - DepGraph(Arena* a) : _arena(a), _map(a, 8, 0, NULL) { - _root = new (_arena) DepMem(NULL); - _tail = new (_arena) DepMem(NULL); + DepGraph(Arena* a) : _arena(a), _map(a, 8, 0, nullptr) { + _root = new (_arena) DepMem(nullptr); + _tail = new (_arena) DepMem(nullptr); } DepMem* root() { return _root; } @@ -197,7 +197,7 @@ class SWNodeInfo { const Type* _velt_type; // vector element type Node_List* _my_pack; // pack containing this node - SWNodeInfo() : _alignment(-1), _depth(0), _velt_type(NULL), _my_pack(NULL) {} + SWNodeInfo() : _alignment(-1), _depth(0), _velt_type(nullptr), _my_pack(nullptr) {} static const SWNodeInfo initial; }; @@ -210,11 +210,11 @@ class CMoveKit { CMoveKit(Arena* a, SuperWord* sw) : _sw(sw) {_dict = new Dict(cmpkey, hashkey, a);} void* _2p(Node* key) const { return (void*)(intptr_t)key; } // 2 conversion functions to make gcc happy Dict* dict() const { return _dict; } - void map(Node* key, Node_List* val) { assert(_dict->operator[](_2p(key)) == NULL, "key existed"); _dict->Insert(_2p(key), (void*)val); } + void map(Node* key, Node_List* val) { assert(_dict->operator[](_2p(key)) == nullptr, "key existed"); _dict->Insert(_2p(key), (void*)val); } void unmap(Node* key) { _dict->Delete(_2p(key)); } Node_List* pack(Node* key) const { return (Node_List*)_dict->operator[](_2p(key)); } Node* is_Bool_candidate(Node* nd) const; // if it is the right candidate return corresponding CMove* , - Node* is_Cmp_candidate(Node* nd) const; // otherwise return NULL + Node* 
is_Cmp_candidate(Node* nd) const; // otherwise return null // Determine if the current pack is a cmove candidate that can be vectorized. bool can_merge_cmove_pack(Node_List* cmove_pk); void make_cmove_pack(Node_List* cmove_pk); @@ -229,7 +229,7 @@ class OrderedPair { Node* _p1; Node* _p2; public: - OrderedPair() : _p1(NULL), _p2(NULL) {} + OrderedPair() : _p1(nullptr), _p2(nullptr) {} OrderedPair(Node* p1, Node* p2) { if (p1->_idx < p2->_idx) { _p1 = p1; _p2 = p2; @@ -384,7 +384,7 @@ class SuperWord : public ResourceObj { int iv_stride() const { return lp()->stride_con(); } CountedLoopNode* pre_loop_head() const { - assert(_pre_loop_end != NULL && _pre_loop_end->loopnode() != NULL, "should find head from pre loop end"); + assert(_pre_loop_end != nullptr && _pre_loop_end->loopnode() != nullptr, "should find head from pre loop end"); return _pre_loop_end->loopnode(); } void set_pre_loop_end(CountedLoopEndNode* pre_loop_end) { @@ -393,8 +393,8 @@ class SuperWord : public ResourceObj { } CountedLoopEndNode* pre_loop_end() const { #ifdef ASSERT - assert(_lp != NULL, "sanity"); - assert(_pre_loop_end != NULL, "should be set when fetched"); + assert(_lp != nullptr, "sanity"); + assert(_pre_loop_end != nullptr, "should be set when fetched"); Node* found_pre_end = find_pre_loop_end(_lp); assert(_pre_loop_end == found_pre_end && _pre_loop_end == pre_loop_head()->loopexit(), "should find the pre loop end and must be the same result"); @@ -417,7 +417,7 @@ class SuperWord : public ResourceObj { Node* ctrl(Node* n) const { return _phase->has_ctrl(n) ? 
_phase->get_ctrl(n) : n; } // block accessors - bool in_bb(Node* n) { return n != NULL && n->outcnt() > 0 && ctrl(n) == _bb; } + bool in_bb(Node* n) { return n != nullptr && n->outcnt() > 0 && ctrl(n) == _bb; } int bb_idx(Node* n) { assert(in_bb(n), "must be"); return _bb_idx.at(n->_idx); } void set_bb_idx(Node* n, int i) { _bb_idx.at_put_grow(n->_idx, i); } @@ -452,7 +452,7 @@ class SuperWord : public ResourceObj { bool same_memory_slice(MemNode* best_align_to_mem_ref, MemNode* mem_ref) const; // my_pack - Node_List* my_pack(Node* n) { return !in_bb(n) ? NULL : _node_info.adr_at(bb_idx(n))->_my_pack; } + Node_List* my_pack(Node* n) { return !in_bb(n) ? nullptr : _node_info.adr_at(bb_idx(n))->_my_pack; } void set_my_pack(Node* n, Node_List* p) { int i = bb_idx(n); grow_node_info(i); _node_info.adr_at(i)->_my_pack = p; } // is pack good for converting into one vector node replacing bunches of Cmp, Bool, CMov nodes. bool is_cmov_pack(Node_List* p); @@ -630,12 +630,12 @@ class SWPointer : public ArenaObj { MemNode* _mem; // My memory reference node SuperWord* _slp; // SuperWord class - Node* _base; // NULL if unsafe nonheap reference + Node* _base; // null if unsafe nonheap reference Node* _adr; // address pointer int _scale; // multiplier for iv (in bytes), 0 if no loop iv int _offset; // constant offset (in bytes) - Node* _invar; // invariant offset (in bytes), NULL if none + Node* _invar; // invariant offset (in bytes), null if none bool _negate_invar; // if true then use: (0 - _invar) Node* _invar_scale; // multiplier for invariant @@ -671,7 +671,7 @@ class SWPointer : public ArenaObj { // the pattern match of an address expression. 
SWPointer(SWPointer* p); - bool valid() { return _adr != NULL; } + bool valid() { return _adr != nullptr; } bool has_iv() { return _scale != 0; } Node* base() { return _base; } diff --git a/src/hotspot/share/opto/type.cpp b/src/hotspot/share/opto/type.cpp index 39d2cf9786a..bd9d7b1e4d0 100644 --- a/src/hotspot/share/opto/type.cpp +++ b/src/hotspot/share/opto/type.cpp @@ -47,7 +47,7 @@ // Optimization - Graph Style // Dictionary of types shared among compilations. -Dict* Type::_shared_type_dict = NULL; +Dict* Type::_shared_type_dict = nullptr; // Array which maps compiler types to Basic Types const Type::TypeInfo Type::_type_info[Type::lastype] = { @@ -120,8 +120,8 @@ const Type* Type:: _zero_type[T_CONFLICT+1]; // Map basic types to array-body alias types. const TypeAryPtr* TypeAryPtr::_array_body_type[T_CONFLICT+1]; -const TypePtr::InterfaceSet* TypeAryPtr::_array_interfaces = NULL; -const TypePtr::InterfaceSet* TypeAryKlassPtr::_array_interfaces = NULL; +const TypePtr::InterfaceSet* TypeAryPtr::_array_interfaces = nullptr; +const TypePtr::InterfaceSet* TypeAryKlassPtr::_array_interfaces = nullptr; //============================================================================= // Convenience common pre-built types. @@ -137,8 +137,8 @@ const Type *Type::TOP; // No values in set //------------------------------get_const_type--------------------------- const Type* Type::get_const_type(ciType* type, InterfaceHandling interface_handling) { - if (type == NULL) { - return NULL; + if (type == nullptr) { + return nullptr; } else if (type->is_primitive_type()) { return get_const_basic_type(type->basic_type()); } else { @@ -162,16 +162,16 @@ BasicType Type::array_element_basic_type() const { } // For two instance arrays of same dimension, return the base element types. -// Otherwise or if the arrays have different dimensions, return NULL. +// Otherwise or if the arrays have different dimensions, return null. 
void Type::get_arrays_base_elements(const Type *a1, const Type *a2, const TypeInstPtr **e1, const TypeInstPtr **e2) { - if (e1) *e1 = NULL; - if (e2) *e2 = NULL; - const TypeAryPtr* a1tap = (a1 == NULL) ? NULL : a1->isa_aryptr(); - const TypeAryPtr* a2tap = (a2 == NULL) ? NULL : a2->isa_aryptr(); + if (e1) *e1 = nullptr; + if (e2) *e2 = nullptr; + const TypeAryPtr* a1tap = (a1 == nullptr) ? nullptr : a1->isa_aryptr(); + const TypeAryPtr* a2tap = (a2 == nullptr) ? nullptr : a2->isa_aryptr(); - if (a1tap != NULL && a2tap != NULL) { + if (a1tap != nullptr && a2tap != nullptr) { // Handle multidimensional arrays const TypePtr* a1tp = a1tap->elem()->make_ptr(); const TypePtr* a2tp = a2tap->elem()->make_ptr(); @@ -250,7 +250,7 @@ const Type* Type::make_from_constant(ciConstant constant, bool require_constant, case T_DOUBLE: return TypeD::make(constant.as_double()); case T_ARRAY: case T_OBJECT: { - const Type* con_type = NULL; + const Type* con_type = nullptr; ciObject* oop_constant = constant.as_object(); if (oop_constant->is_null_object()) { con_type = Type::get_zero_type(T_OBJECT); @@ -274,10 +274,10 @@ const Type* Type::make_from_constant(ciConstant constant, bool require_constant, case T_ILLEGAL: // Invalid ciConstant returned due to OutOfMemoryError in the CI assert(Compile::current()->env()->failing(), "otherwise should not see this"); - return NULL; + return nullptr; default: // Fall through to failure - return NULL; + return nullptr; } } @@ -316,7 +316,7 @@ const Type* Type::make_constant_from_array_element(ciArray* array, int off, int // Decode the results of GraphKit::array_element_address. 
ciConstant element_value = array->element_value_by_offset(off); if (element_value.basic_type() == T_ILLEGAL) { - return NULL; // wrong offset + return nullptr; // wrong offset } ciConstant con = check_mismatched_access(element_value, loadbt, is_unsigned_load); @@ -328,21 +328,21 @@ const Type* Type::make_constant_from_array_element(ciArray* array, int off, int bool is_narrow_oop = (loadbt == T_NARROWOOP); return Type::make_from_constant(con, /*require_constant=*/true, stable_dimension, is_narrow_oop, /*is_autobox_cache=*/false); } - return NULL; + return nullptr; } const Type* Type::make_constant_from_field(ciInstance* holder, int off, bool is_unsigned_load, BasicType loadbt) { ciField* field; ciType* type = holder->java_mirror_type(); - if (type != NULL && type->is_instance_klass() && off >= InstanceMirrorKlass::offset_of_static_fields()) { + if (type != nullptr && type->is_instance_klass() && off >= InstanceMirrorKlass::offset_of_static_fields()) { // Static field field = type->as_instance_klass()->get_field_by_offset(off, /*is_static=*/true); } else { // Instance field field = holder->klass()->as_instance_klass()->get_field_by_offset(off, /*is_static=*/false); } - if (field == NULL) { - return NULL; // Wrong offset + if (field == nullptr) { + return nullptr; // Wrong offset } return Type::make_constant_from_field(field, holder, loadbt, is_unsigned_load); } @@ -350,13 +350,13 @@ const Type* Type::make_constant_from_field(ciInstance* holder, int off, bool is_ const Type* Type::make_constant_from_field(ciField* field, ciInstance* holder, BasicType loadbt, bool is_unsigned_load) { if (!field->is_constant()) { - return NULL; // Non-constant field + return nullptr; // Non-constant field } ciConstant field_value; if (field->is_static()) { // final static field field_value = field->constant_value(); - } else if (holder != NULL) { + } else if (holder != nullptr) { // final or stable non-static field // Treat final non-static fields of trusted classes (classes in // 
java.lang.invoke and sun.invoke packages and subpackages) as @@ -364,7 +364,7 @@ const Type* Type::make_constant_from_field(ciField* field, ciInstance* holder, field_value = field->constant_value_of(holder); } if (!field_value.is_valid()) { - return NULL; // Not a constant + return nullptr; // Not a constant } ciConstant con = check_mismatched_access(field_value, loadbt, is_unsigned_load); @@ -379,7 +379,7 @@ const Type* Type::make_constant_from_field(ciField* field, ciInstance* holder, const Type* con_type = make_from_constant(con, /*require_constant=*/ true, stable_dimension, is_narrow_oop, field->is_autobox_cache()); - if (con_type != NULL && field->is_call_site_target()) { + if (con_type != nullptr && field->is_call_site_target()) { ciCallSite* call_site = holder->as_call_site(); if (!call_site->is_fully_initialized_constant_call_site()) { ciMethodHandle* target = con.as_object()->as_method_handle(); @@ -551,7 +551,7 @@ void Type::Initialize_shared(Compile* current) { false, 0, oopDesc::klass_offset_in_bytes()); TypeOopPtr::BOTTOM = TypeOopPtr::make(TypePtr::BotPTR, OffsetBot, TypeOopPtr::InstanceBot); - TypeMetadataPtr::BOTTOM = TypeMetadataPtr::make(TypePtr::BotPTR, NULL, OffsetBot); + TypeMetadataPtr::BOTTOM = TypeMetadataPtr::make(TypePtr::BotPTR, nullptr, OffsetBot); TypeNarrowOop::NULL_PTR = TypeNarrowOop::make( TypePtr::NULL_PTR ); TypeNarrowOop::BOTTOM = TypeNarrowOop::make( TypeInstPtr::BOTTOM ); @@ -574,9 +574,9 @@ void Type::Initialize_shared(Compile* current) { TypeAryPtr::_array_interfaces = new TypePtr::InterfaceSet(&array_interfaces); TypeAryKlassPtr::_array_interfaces = TypeAryPtr::_array_interfaces; - TypeAryPtr::RANGE = TypeAryPtr::make( TypePtr::BotPTR, TypeAry::make(Type::BOTTOM,TypeInt::POS), NULL /* current->env()->Object_klass() */, false, arrayOopDesc::length_offset_in_bytes()); + TypeAryPtr::RANGE = TypeAryPtr::make( TypePtr::BotPTR, TypeAry::make(Type::BOTTOM,TypeInt::POS), nullptr /* current->env()->Object_klass() */, false, 
arrayOopDesc::length_offset_in_bytes()); - TypeAryPtr::NARROWOOPS = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeNarrowOop::BOTTOM, TypeInt::POS), NULL /*ciArrayKlass::make(o)*/, false, Type::OffsetBot); + TypeAryPtr::NARROWOOPS = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeNarrowOop::BOTTOM, TypeInt::POS), nullptr /*ciArrayKlass::make(o)*/, false, Type::OffsetBot); #ifdef _LP64 if (UseCompressedOops) { @@ -586,7 +586,7 @@ void Type::Initialize_shared(Compile* current) { #endif { // There is no shared klass for Object[]. See note in TypeAryPtr::klass(). - TypeAryPtr::OOPS = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeInstPtr::BOTTOM,TypeInt::POS), NULL /*ciArrayKlass::make(o)*/, false, Type::OffsetBot); + TypeAryPtr::OOPS = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeInstPtr::BOTTOM,TypeInt::POS), nullptr /*ciArrayKlass::make(o)*/, false, Type::OffsetBot); } TypeAryPtr::BYTES = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeInt::BYTE ,TypeInt::POS), ciTypeArrayKlass::make(T_BYTE), true, Type::OffsetBot); TypeAryPtr::SHORTS = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(TypeInt::SHORT ,TypeInt::POS), ciTypeArrayKlass::make(T_SHORT), true, Type::OffsetBot); @@ -596,8 +596,8 @@ void Type::Initialize_shared(Compile* current) { TypeAryPtr::FLOATS = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(Type::FLOAT ,TypeInt::POS), ciTypeArrayKlass::make(T_FLOAT), true, Type::OffsetBot); TypeAryPtr::DOUBLES = TypeAryPtr::make(TypePtr::BotPTR, TypeAry::make(Type::DOUBLE ,TypeInt::POS), ciTypeArrayKlass::make(T_DOUBLE), true, Type::OffsetBot); - // Nobody should ask _array_body_type[T_NARROWOOP]. Use NULL as assert. - TypeAryPtr::_array_body_type[T_NARROWOOP] = NULL; + // Nobody should ask _array_body_type[T_NARROWOOP]. Use null as assert. 
+ TypeAryPtr::_array_body_type[T_NARROWOOP] = nullptr; TypeAryPtr::_array_body_type[T_OBJECT] = TypeAryPtr::OOPS; TypeAryPtr::_array_body_type[T_ARRAY] = TypeAryPtr::OOPS; // arrays are stored in oop arrays TypeAryPtr::_array_body_type[T_BYTE] = TypeAryPtr::BYTES; @@ -669,7 +669,7 @@ void Type::Initialize_shared(Compile* current) { _zero_type[T_VOID] = Type::TOP; // the only void value is no value at all // get_zero_type() should not happen for T_CONFLICT - _zero_type[T_CONFLICT]= NULL; + _zero_type[T_CONFLICT]= nullptr; TypeVect::VECTMASK = (TypeVect*)(new TypeVectMask(TypeInt::BOOL, MaxVectorSize))->hashcons(); mreg2type[Op_RegVectMask] = TypeVect::VECTMASK; @@ -704,14 +704,14 @@ void Type::Initialize_shared(Compile* current) { // Restore working type arena. current->set_type_arena(save); - current->set_type_dict(NULL); + current->set_type_dict(nullptr); } //------------------------------Initialize------------------------------------- void Type::Initialize(Compile* current) { - assert(current->type_arena() != NULL, "must have created type arena"); + assert(current->type_arena() != nullptr, "must have created type arena"); - if (_shared_type_dict == NULL) { + if (_shared_type_dict == nullptr) { Initialize_shared(current); } @@ -802,7 +802,7 @@ private: _in1(in1), _in2(in2), _res(res) { } VerifyMeetResultEntry(): - _in1(NULL), _in2(NULL), _res(NULL) { + _in1(nullptr), _in2(nullptr), _res(nullptr) { } bool operator==(const VerifyMeetResultEntry& rhs) const { @@ -822,7 +822,7 @@ private: if ((intptr_t) v1._in2 < (intptr_t) v2._in2) { return -1; } else if (v1._in2 == v2._in2) { - assert(v1._res == v2._res || v1._res == NULL || v2._res == NULL, "same inputs should lead to same result"); + assert(v1._res == v2._res || v1._res == nullptr || v2._res == nullptr, "same inputs should lead to same result"); return 0; } return 1; @@ -872,9 +872,9 @@ private: // const Type* meet(const Type* t1, const Type* t2) { bool found = false; - const VerifyMeetResultEntry meet(t1, t2, 
NULL); + const VerifyMeetResultEntry meet(t1, t2, nullptr); int pos = _cache.find_sorted(meet, found); - const Type* res = NULL; + const Type* res = nullptr; if (found) { res = _cache.at(pos).res(); } else { @@ -901,7 +901,7 @@ public: }; void Type::assert_type_verify_empty() const { - assert(Compile::current()->_type_verify == NULL || Compile::current()->_type_verify->empty_cache(), "cache should have been discarded"); + assert(Compile::current()->_type_verify == nullptr || Compile::current()->_type_verify->empty_cache(), "cache should have been discarded"); } class VerifyMeet { @@ -909,7 +909,7 @@ private: Compile* _C; public: VerifyMeet(Compile* C) : _C(C) { - if (C->_type_verify == NULL) { + if (C->_type_verify == nullptr) { C->_type_verify = new (C->comp_arena())VerifyMeetResult(C); } _C->_type_verify->_depth++; @@ -1714,9 +1714,9 @@ const Type *TypeInt::widen( const Type *old, const Type* limit ) const { // Only happens for pessimistic optimizations. const Type *TypeInt::narrow( const Type *old ) const { if (_lo >= _hi) return this; // already narrow enough - if (old == NULL) return this; + if (old == nullptr) return this; const TypeInt* ot = old->isa_int(); - if (ot == NULL) return this; + if (ot == nullptr) return this; jint olo = ot->_lo; jint ohi = ot->_hi; @@ -1745,7 +1745,7 @@ const Type *TypeInt::narrow( const Type *old ) const { //-----------------------------filter------------------------------------------ const Type *TypeInt::filter_helper(const Type *kills, bool include_speculative) const { const TypeInt* ft = join_helper(kills, include_speculative)->isa_int(); - if (ft == NULL || ft->empty()) + if (ft == nullptr || ft->empty()) return Type::TOP; // Canonical empty value if (ft->_widen < this->_widen) { // Do not allow the value of kill->_widen to affect the outcome. @@ -1980,9 +1980,9 @@ const Type *TypeLong::widen( const Type *old, const Type* limit ) const { // Only happens for pessimistic optimizations. 
const Type *TypeLong::narrow( const Type *old ) const { if (_lo >= _hi) return this; // already narrow enough - if (old == NULL) return this; + if (old == nullptr) return this; const TypeLong* ot = old->isa_long(); - if (ot == NULL) return this; + if (ot == nullptr) return this; jlong olo = ot->_lo; jlong ohi = ot->_hi; @@ -2011,7 +2011,7 @@ const Type *TypeLong::narrow( const Type *old ) const { //-----------------------------filter------------------------------------------ const Type *TypeLong::filter_helper(const Type *kills, bool include_speculative) const { const TypeLong* ft = join_helper(kills, include_speculative)->isa_long(); - if (ft == NULL || ft->empty()) + if (ft == nullptr || ft->empty()) return Type::TOP; // Canonical empty value if (ft->_widen < this->_widen) { // Do not allow the value of kill->_widen to affect the outcome. @@ -2045,10 +2045,10 @@ bool TypeLong::is_finite() const { #ifndef PRODUCT static const char* longnamenear(jlong x, const char* xname, char* buf, size_t buf_size, jlong n) { if (n > x) { - if (n >= x + 10000) return NULL; + if (n >= x + 10000) return nullptr; os::snprintf_checked(buf, buf_size, "%s+" JLONG_FORMAT, xname, n - x); } else if (n < x) { - if (n <= x - 10000) return NULL; + if (n <= x - 10000) return nullptr; os::snprintf_checked(buf, buf_size, "%s-" JLONG_FORMAT, xname, x - n); } else { return xname; @@ -2066,11 +2066,11 @@ static const char* longname(char* buf, size_t buf_size, jlong n) { return "max"; else if (n > max_jlong - 10000) os::snprintf_checked(buf, buf_size, "max-" JLONG_FORMAT, max_jlong - n); - else if ((str = longnamenear(max_juint, "maxuint", buf, buf_size, n)) != NULL) + else if ((str = longnamenear(max_juint, "maxuint", buf, buf_size, n)) != nullptr) return str; - else if ((str = longnamenear(max_jint, "maxint", buf, buf_size, n)) != NULL) + else if ((str = longnamenear(max_jint, "maxint", buf, buf_size, n)) != nullptr) return str; - else if ((str = longnamenear(min_jint, "minint", buf, buf_size, 
n)) != NULL) + else if ((str = longnamenear(min_jint, "minint", buf, buf_size, n)) != nullptr) return str; else os::snprintf_checked(buf, buf_size, JLONG_FORMAT, n); @@ -2160,7 +2160,7 @@ const TypeTuple *TypeTuple::make_domain(ciInstanceKlass* recv, ciSignature* sig, uint pos = TypeFunc::Parms; const Type **field_array; - if (recv != NULL) { + if (recv != nullptr) { arg_cnt++; field_array = fields(arg_cnt); // Use get_const_type here because it respects UseUniqueSubclasses: @@ -2447,7 +2447,7 @@ bool TypeAry::ary_must_be_exact() const { // In such cases, an array built on this ary must have no subclasses. if (_elem == BOTTOM) return false; // general array not exact if (_elem == TOP ) return false; // inverted general array not exact - const TypeOopPtr* toop = NULL; + const TypeOopPtr* toop = nullptr; if (UseCompressedOops && _elem->isa_narrowoop()) { toop = _elem->make_ptr()->isa_oopptr(); } else { @@ -2474,13 +2474,13 @@ bool TypeAry::ary_must_be_exact() const { //==============================TypeVect======================================= // Convenience common pre-built types. 
-const TypeVect *TypeVect::VECTA = NULL; // vector length agnostic -const TypeVect *TypeVect::VECTS = NULL; // 32-bit vectors -const TypeVect *TypeVect::VECTD = NULL; // 64-bit vectors -const TypeVect *TypeVect::VECTX = NULL; // 128-bit vectors -const TypeVect *TypeVect::VECTY = NULL; // 256-bit vectors -const TypeVect *TypeVect::VECTZ = NULL; // 512-bit vectors -const TypeVect *TypeVect::VECTMASK = NULL; // predicate/mask vector +const TypeVect *TypeVect::VECTA = nullptr; // vector length agnostic +const TypeVect *TypeVect::VECTS = nullptr; // 32-bit vectors +const TypeVect *TypeVect::VECTD = nullptr; // 64-bit vectors +const TypeVect *TypeVect::VECTX = nullptr; // 128-bit vectors +const TypeVect *TypeVect::VECTY = nullptr; // 256-bit vectors +const TypeVect *TypeVect::VECTZ = nullptr; // 512-bit vectors +const TypeVect *TypeVect::VECTMASK = nullptr; // predicate/mask vector //------------------------------make------------------------------------------- const TypeVect* TypeVect::make(const Type *elem, uint length, bool is_mask) { @@ -2508,7 +2508,7 @@ const TypeVect* TypeVect::make(const Type *elem, uint length, bool is_mask) { return (TypeVect*)(new TypeVectZ(elem, length))->hashcons(); } ShouldNotReachHere(); - return NULL; + return nullptr; } const TypeVect *TypeVect::makemask(const Type* elem, uint length) { @@ -2678,18 +2678,18 @@ intptr_t TypePtr::get_con() const { // Compute the MEET of two types. It returns a new Type object. const Type *TypePtr::xmeet(const Type *t) const { const Type* res = xmeet_helper(t); - if (res->isa_ptr() == NULL) { + if (res->isa_ptr() == nullptr) { return res; } const TypePtr* res_ptr = res->is_ptr(); - if (res_ptr->speculative() != NULL) { - // type->speculative() == NULL means that speculation is no better + if (res_ptr->speculative() != nullptr) { + // type->speculative() is null means that speculation is no better // than type, i.e. type->speculative() == type. 
So there are 2 // ways to represent the fact that we have no useful speculative // data and we should use a single one to be able to test for // equality between types. Check whether type->speculative() == - // type and set speculative to NULL if it is the case. + // type and set speculative to null if it is the case. if (res_ptr->remove_speculative() == res_ptr->speculative()) { return res_ptr->remove_speculative(); } @@ -2810,11 +2810,11 @@ int TypePtr::hash(void) const { * Return same type without a speculative part */ const TypePtr* TypePtr::remove_speculative() const { - if (_speculative == NULL) { + if (_speculative == nullptr) { return this; } assert(_inline_depth == InlineDepthTop || _inline_depth == InlineDepthBottom, "non speculative type shouldn't have inline depth"); - return make(AnyPtr, _ptr, _offset, NULL, _inline_depth); + return make(AnyPtr, _ptr, _offset, nullptr, _inline_depth); } /** @@ -2822,7 +2822,7 @@ const TypePtr* TypePtr::remove_speculative() const { * it */ const Type* TypePtr::cleanup_speculative() const { - if (speculative() == NULL) { + if (speculative() == nullptr) { return this; } const Type* no_spec = remove_speculative(); @@ -2839,7 +2839,7 @@ const Type* TypePtr::cleanup_speculative() const { // If the speculative may be null and is an inexact klass then it // doesn't help if (speculative() != TypePtr::NULL_PTR && speculative()->maybe_null() && - (spec_oopptr == NULL || !spec_oopptr->klass_is_exact())) { + (spec_oopptr == nullptr || !spec_oopptr->klass_is_exact())) { return no_spec; } return this; @@ -2849,8 +2849,8 @@ const Type* TypePtr::cleanup_speculative() const { * dual of the speculative part of the type */ const TypePtr* TypePtr::dual_speculative() const { - if (_speculative == NULL) { - return NULL; + if (_speculative == nullptr) { + return nullptr; } return _speculative->dual()->is_ptr(); } @@ -2861,11 +2861,11 @@ const TypePtr* TypePtr::dual_speculative() const { * @param other type to meet with */ const TypePtr* 
TypePtr::xmeet_speculative(const TypePtr* other) const { - bool this_has_spec = (_speculative != NULL); - bool other_has_spec = (other->speculative() != NULL); + bool this_has_spec = (_speculative != nullptr); + bool other_has_spec = (other->speculative() != nullptr); if (!this_has_spec && !other_has_spec) { - return NULL; + return nullptr; } // If we are at a point where control flow meets and one branch has @@ -2910,7 +2910,7 @@ int TypePtr::meet_inline_depth(int depth) const { * @param other type to compare this one to */ bool TypePtr::eq_speculative(const TypePtr* other) const { - if (_speculative == NULL || other->speculative() == NULL) { + if (_speculative == nullptr || other->speculative() == nullptr) { return _speculative == other->speculative(); } @@ -2925,7 +2925,7 @@ bool TypePtr::eq_speculative(const TypePtr* other) const { * Hash of the speculative part of the type */ int TypePtr::hash_speculative() const { - if (_speculative == NULL) { + if (_speculative == nullptr) { return 0; } @@ -2938,15 +2938,15 @@ int TypePtr::hash_speculative() const { * @param offset offset to add */ const TypePtr* TypePtr::add_offset_speculative(intptr_t offset) const { - if (_speculative == NULL) { - return NULL; + if (_speculative == nullptr) { + return nullptr; } return _speculative->add_offset(offset)->is_ptr(); } const TypePtr* TypePtr::with_offset_speculative(intptr_t offset) const { - if (_speculative == NULL) { - return NULL; + if (_speculative == nullptr) { + return nullptr; } return _speculative->with_offset(offset)->is_ptr(); } @@ -2955,20 +2955,20 @@ const TypePtr* TypePtr::with_offset_speculative(intptr_t offset) const { * return exact klass from the speculative type if there's one */ ciKlass* TypePtr::speculative_type() const { - if (_speculative != NULL && _speculative->isa_oopptr()) { + if (_speculative != nullptr && _speculative->isa_oopptr()) { const TypeOopPtr* speculative = _speculative->join(this)->is_oopptr(); if (speculative->klass_is_exact()) { return 
speculative->exact_klass(); } } - return NULL; + return nullptr; } /** * return true if speculative type may be null */ bool TypePtr::speculative_maybe_null() const { - if (_speculative != NULL) { + if (_speculative != nullptr) { const TypePtr* speculative = _speculative->join(this)->is_ptr(); return speculative->maybe_null(); } @@ -2976,7 +2976,7 @@ bool TypePtr::speculative_maybe_null() const { } bool TypePtr::speculative_always_null() const { - if (_speculative != NULL) { + if (_speculative != nullptr) { const TypePtr* speculative = _speculative->join(this)->is_ptr(); return speculative == TypePtr::NULL_PTR; } @@ -2989,7 +2989,7 @@ bool TypePtr::speculative_always_null() const { */ ciKlass* TypePtr::speculative_type_not_null() const { if (speculative_maybe_null()) { - return NULL; + return nullptr; } return speculative_type(); } @@ -3004,14 +3004,14 @@ ciKlass* TypePtr::speculative_type_not_null() const { */ bool TypePtr::would_improve_type(ciKlass* exact_kls, int inline_depth) const { // no profiling? - if (exact_kls == NULL) { + if (exact_kls == nullptr) { return false; } if (speculative() == TypePtr::NULL_PTR) { return false; } // no speculative type or non exact speculative type? 
- if (speculative_type() == NULL) { + if (speculative_type() == nullptr) { return true; } // If the node already has an exact speculative type keep it, @@ -3054,7 +3054,7 @@ bool TypePtr::would_improve_ptr(ProfilePtrKind ptr_kind) const { if (speculative_always_null()) { return false; } - if (ptr_kind == ProfileAlwaysNull && speculative() != NULL && speculative()->isa_oopptr()) { + if (ptr_kind == ProfileAlwaysNull && speculative() != nullptr && speculative()->isa_oopptr()) { return false; } return true; @@ -3062,12 +3062,12 @@ bool TypePtr::would_improve_ptr(ProfilePtrKind ptr_kind) const { //------------------------------dump2------------------------------------------ const char *const TypePtr::ptr_msg[TypePtr::lastPTR] = { - "TopPTR","AnyNull","Constant","NULL","NotNull","BotPTR" + "TopPTR","AnyNull","Constant","null","NotNull","BotPTR" }; #ifndef PRODUCT void TypePtr::dump2( Dict &d, uint depth, outputStream *st ) const { - if( _ptr == Null ) st->print("NULL"); + if( _ptr == Null ) st->print("null"); else st->print("%s *", ptr_msg[_ptr]); if( _offset == OffsetTop ) st->print("+top"); else if( _offset == OffsetBot ) st->print("+bot"); @@ -3080,7 +3080,7 @@ void TypePtr::dump2( Dict &d, uint depth, outputStream *st ) const { *dump the speculative part of the type */ void TypePtr::dump_speculative(outputStream *st) const { - if (_speculative != NULL) { + if (_speculative != nullptr) { st->print(" (speculative="); _speculative->dump_on(st); st->print(")"); @@ -3121,19 +3121,19 @@ const TypeRawPtr *TypeRawPtr::NOTNULL; //------------------------------make------------------------------------------- const TypeRawPtr *TypeRawPtr::make( enum PTR ptr ) { assert( ptr != Constant, "what is the constant?" 
); - assert( ptr != Null, "Use TypePtr for NULL" ); + assert( ptr != Null, "Use TypePtr for null" ); return (TypeRawPtr*)(new TypeRawPtr(ptr,0))->hashcons(); } const TypeRawPtr *TypeRawPtr::make( address bits ) { - assert( bits, "Use TypePtr for NULL" ); + assert( bits, "Use TypePtr for null" ); return (TypeRawPtr*)(new TypeRawPtr(Constant,bits))->hashcons(); } //------------------------------cast_to_ptr_type------------------------------- const TypeRawPtr* TypeRawPtr::cast_to_ptr_type(PTR ptr) const { assert( ptr != Constant, "what is the constant?" ); - assert( ptr != Null, "Use TypePtr for NULL" ); + assert( ptr != Null, "Use TypePtr for null" ); assert( _bits==0, "Why cast a constant address?"); if( ptr == _ptr ) return this; return make(ptr); @@ -3223,7 +3223,7 @@ const TypePtr* TypeRawPtr::add_offset(intptr_t offset) const { } default: ShouldNotReachHere(); } - return NULL; // Lint noise + return nullptr; // Lint noise } //------------------------------eq--------------------------------------------- @@ -3254,12 +3254,12 @@ void TypeRawPtr::dump2( Dict &d, uint depth, outputStream *st ) const { const TypeOopPtr *TypeOopPtr::BOTTOM; TypePtr::InterfaceSet::InterfaceSet() - : _list(Compile::current()->type_arena(), 0, 0, NULL), + : _list(Compile::current()->type_arena(), 0, 0, nullptr), _hash_computed(0), _exact_klass_computed(0), _is_loaded_computed(0) { } TypePtr::InterfaceSet::InterfaceSet(GrowableArray* interfaces) - : _list(Compile::current()->type_arena(), interfaces->length(), 0, NULL), + : _list(Compile::current()->type_arena(), interfaces->length(), 0, nullptr), _hash_computed(0), _exact_klass_computed(0), _is_loaded_computed(0) { for (int i = 0; i < interfaces->length(); i++) { add(interfaces->at(i)); @@ -3446,14 +3446,14 @@ ciKlass* TypePtr::InterfaceSet::exact_klass() const { void TypePtr::InterfaceSet::compute_exact_klass() { if (_list.length() == 0) { _exact_klass_computed = 1; - _exact_klass = NULL; + _exact_klass = nullptr; return; } - ciKlass* 
res = NULL; + ciKlass* res = nullptr; for (int i = 0; i < _list.length(); i++) { ciKlass* interface = _list.at(i); if (eq(interfaces(interface, false, true, false, trust_interfaces))) { - assert(res == NULL, ""); + assert(res == nullptr, ""); res = _list.at(i); } } @@ -3501,7 +3501,7 @@ TypeOopPtr::TypeOopPtr(TYPES t, PTR ptr, ciKlass* k, const InterfaceSet& interfa if (_offset > 0 || _offset == Type::OffsetTop || _offset == Type::OffsetBot) { if (_offset == oopDesc::klass_offset_in_bytes()) { _is_ptr_to_narrowklass = UseCompressedClassPointers; - } else if (klass() == NULL) { + } else if (klass() == nullptr) { // Array with unknown body type assert(this->isa_aryptr(), "only arrays without klass"); _is_ptr_to_narrowoop = UseCompressedOops; @@ -3527,12 +3527,12 @@ TypeOopPtr::TypeOopPtr(TYPES t, PTR ptr, ciKlass* k, const InterfaceSet& interfa } else if (klass() == ciEnv::current()->Class_klass() && _offset >= InstanceMirrorKlass::offset_of_static_fields()) { // Static fields - ciField* field = NULL; - if (const_oop() != NULL) { + ciField* field = nullptr; + if (const_oop() != nullptr) { ciInstanceKlass* k = const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass(); field = k->get_field_by_offset(_offset, true); } - if (field != NULL) { + if (field != nullptr) { BasicType basic_elem_type = field->layout_type(); _is_ptr_to_narrowoop = UseCompressedOops && ::is_reference_type(basic_elem_type); } else { @@ -3542,7 +3542,7 @@ TypeOopPtr::TypeOopPtr(TYPES t, PTR ptr, ciKlass* k, const InterfaceSet& interfa } else { // Instance fields which contains a compressed oop references. 
ciField* field = ik->get_field_by_offset(_offset, false); - if (field != NULL) { + if (field != nullptr) { BasicType basic_elem_type = field->layout_type(); _is_ptr_to_narrowoop = UseCompressedOops && ::is_reference_type(basic_elem_type); } else if (klass()->equals(ciEnv::current()->Object_klass())) { @@ -3566,7 +3566,7 @@ const TypeOopPtr *TypeOopPtr::make(PTR ptr, int offset, int instance_id, assert(ptr != Constant, "no constant generic pointers"); ciKlass* k = Compile::current()->env()->Object_klass(); bool xk = false; - ciObject* o = NULL; + ciObject* o = nullptr; return (TypeOopPtr*)(new TypeOopPtr(OopPtr, ptr, k, InterfaceSet(), xk, o, offset, instance_id, speculative, inline_depth))->hashcons(); } @@ -3598,7 +3598,7 @@ const TypeOopPtr* TypeOopPtr::cast_to_exactness(bool klass_is_exact) const { // It is the type that is loaded from an object of this type. const TypeKlassPtr* TypeOopPtr::as_klass_type(bool try_for_exact) const { ShouldNotReachHere(); - return NULL; + return nullptr; } //------------------------------meet------------------------------------------- @@ -3679,7 +3679,7 @@ const Type *TypeOopPtr::xmeet_helper(const Type *t) const { // Dual of a pure heap pointer. No relevant klass or oop information. 
const Type *TypeOopPtr::xdual() const { assert(klass() == Compile::current()->env()->Object_klass(), "no klasses here"); - assert(const_oop() == NULL, "no constants here"); + assert(const_oop() == nullptr, "no constants here"); return new TypeOopPtr(_base, dual_ptr(), klass(), _interfaces, klass_is_exact(), const_oop(), dual_offset(), dual_instance_id(), dual_speculative(), dual_inline_depth()); } @@ -3689,7 +3689,7 @@ const TypeOopPtr* TypeOopPtr::make_from_klass_common(ciKlass* klass, bool klass_ if (klass->is_instance_klass()) { Compile* C = Compile::current(); Dependencies* deps = C->dependencies(); - assert((deps != NULL) == (C->method() != NULL && C->method()->code_size() > 0), "sanity"); + assert((deps != nullptr) == (C->method() != nullptr && C->method()->code_size() > 0), "sanity"); // Element is an instance bool klass_is_exact = false; if (klass->is_loaded()) { @@ -3697,15 +3697,15 @@ const TypeOopPtr* TypeOopPtr::make_from_klass_common(ciKlass* klass, bool klass_ ciInstanceKlass* ik = klass->as_instance_klass(); klass_is_exact = ik->is_final(); if (!klass_is_exact && klass_change - && deps != NULL && UseUniqueSubclasses) { + && deps != nullptr && UseUniqueSubclasses) { ciInstanceKlass* sub = ik->unique_concrete_subklass(); - if (sub != NULL) { + if (sub != nullptr) { deps->assert_abstract_with_unique_concrete_subtype(ik, sub); klass = ik = sub; klass_is_exact = sub->is_final(); } } - if (!klass_is_exact && try_for_exact && deps != NULL && + if (!klass_is_exact && try_for_exact && deps != nullptr && !ik->is_interface() && !ik->has_subklass()) { // Add a dependence; if concrete subclass added we need to recompile deps->assert_leaf_type(ik); @@ -3713,7 +3713,7 @@ const TypeOopPtr* TypeOopPtr::make_from_klass_common(ciKlass* klass, bool klass_ } } const TypePtr::InterfaceSet interfaces = TypePtr::interfaces(klass, true, true, false, interface_handling); - return TypeInstPtr::make(TypePtr::BotPTR, klass, interfaces, klass_is_exact, NULL, 0); + return 
TypeInstPtr::make(TypePtr::BotPTR, klass, interfaces, klass_is_exact, nullptr, 0); } else if (klass->is_obj_array_klass()) { // Element is an object array. Recursively call ourself. ciKlass* eklass = klass->as_obj_array_klass()->element_klass(); @@ -3722,8 +3722,8 @@ const TypeOopPtr* TypeOopPtr::make_from_klass_common(ciKlass* klass, bool klass_ const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS); // We used to pass NotNull in here, asserting that the sub-arrays // are all not-null. This is not true in generally, as code can - // slam NULLs down in the subarrays. - const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::BotPTR, arr0, NULL, xk, 0); + // slam nulls down in the subarrays. + const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::BotPTR, arr0, nullptr, xk, 0); return arr; } else if (klass->is_type_array_klass()) { // Element is an typeArray @@ -3735,7 +3735,7 @@ const TypeOopPtr* TypeOopPtr::make_from_klass_common(ciKlass* klass, bool klass_ return arr; } else { ShouldNotReachHere(); - return NULL; + return nullptr; } } @@ -3752,7 +3752,7 @@ const TypeOopPtr* TypeOopPtr::make_from_constant(ciObject* o, bool require_const if (make_constant) { return TypeInstPtr::make(o); } else { - return TypeInstPtr::make(TypePtr::NotNull, klass, true, NULL, 0); + return TypeInstPtr::make(TypePtr::NotNull, klass, true, nullptr, 0); } } else if (klass->is_obj_array_klass()) { // Element is an object array. Recursively call ourself. @@ -3761,7 +3761,7 @@ const TypeOopPtr* TypeOopPtr::make_from_constant(ciObject* o, bool require_const const TypeAry* arr0 = TypeAry::make(etype, TypeInt::make(o->as_array()->length())); // We used to pass NotNull in here, asserting that the sub-arrays // are all not-null. This is not true in generally, as code can - // slam NULLs down in the subarrays. + // slam nulls down in the subarrays. 
if (make_constant) { return TypeAryPtr::make(TypePtr::Constant, o, arr0, klass, true, 0); } else { @@ -3782,7 +3782,7 @@ const TypeOopPtr* TypeOopPtr::make_from_constant(ciObject* o, bool require_const } fatal("unhandled object type"); - return NULL; + return nullptr; } //------------------------------get_con---------------------------------------- @@ -3829,7 +3829,7 @@ bool TypeOopPtr::eq( const Type *t ) const { _instance_id != a->_instance_id) return false; ciObject* one = const_oop(); ciObject* two = a->const_oop(); - if (one == NULL || two == NULL) { + if (one == nullptr || two == nullptr) { return (one == two) && TypePtr::eq(t); } else { return one->equals(two) && TypePtr::eq(t); @@ -3888,11 +3888,11 @@ const TypeOopPtr* TypeOopPtr::with_offset(intptr_t offset) const { * Return same type without a speculative part */ const TypeOopPtr* TypeOopPtr::remove_speculative() const { - if (_speculative == NULL) { + if (_speculative == nullptr) { return this; } assert(_inline_depth == InlineDepthTop || _inline_depth == InlineDepthBottom, "non speculative type shouldn't have inline depth"); - return make(_ptr, _offset, _instance_id, NULL, _inline_depth); + return make(_ptr, _offset, _instance_id, nullptr, _inline_depth); } /** @@ -3990,7 +3990,7 @@ ciKlass* TypeInstPtr::exact_klass_helper() const { if (_interfaces.eq(interfaces)) { return _klass; } - return NULL; + return nullptr; } return _interfaces.exact_klass(); } @@ -3999,9 +3999,9 @@ ciKlass* TypeInstPtr::exact_klass_helper() const { TypeInstPtr::TypeInstPtr(PTR ptr, ciKlass* k, const InterfaceSet& interfaces, bool xk, ciObject* o, int off, int instance_id, const TypePtr* speculative, int inline_depth) : TypeOopPtr(InstPtr, ptr, k, interfaces, xk, o, off, instance_id, speculative, inline_depth) { - assert(k == NULL || !k->is_loaded() || !k->is_interface(), "no interface here"); - assert(k != NULL && - (k->is_loaded() || o == NULL), + assert(k == nullptr || !k->is_loaded() || !k->is_interface(), "no interface 
here"); + assert(k != nullptr && + (k->is_loaded() || o == nullptr), "cannot have constants with non-loaded klass"); }; @@ -4016,11 +4016,11 @@ const TypeInstPtr *TypeInstPtr::make(PTR ptr, const TypePtr* speculative, int inline_depth) { assert( !k->is_loaded() || k->is_instance_klass(), "Must be for instance"); - // Either const_oop() is NULL or else ptr is Constant + // Either const_oop() is null or else ptr is Constant assert( (!o && ptr != Constant) || (o && ptr == Constant), "constant pointers must have a value supplied" ); // Ptr is never Null - assert( ptr != Null, "NULL pointers are not typed" ); + assert( ptr != Null, "null pointers are not typed" ); assert(instance_id <= 0 || xk, "instances are always exactly typed"); if (ptr == Constant) { @@ -4078,7 +4078,7 @@ TypePtr::InterfaceSet TypePtr::interfaces(ciKlass*& k, bool klass, bool interfac */ const Type* TypeInstPtr::get_const_boxed_value() const { assert(is_ptr_to_boxed_value(), "should be called only for boxed value"); - assert((const_oop() != NULL), "should be called only for constant object"); + assert((const_oop() != nullptr), "should be called only for constant object"); ciConstant constant = const_oop()->as_instance()->field_value_by_offset(offset()); BasicType bt = constant.basic_type(); switch (bt) { @@ -4093,7 +4093,7 @@ const Type* TypeInstPtr::get_const_boxed_value() const { default: break; } fatal("Invalid boxed value type '%s'", type2name(bt)); - return NULL; + return nullptr; } //------------------------------cast_to_ptr_type------------------------------- @@ -4101,7 +4101,7 @@ const TypeInstPtr* TypeInstPtr::cast_to_ptr_type(PTR ptr) const { if( ptr == _ptr ) return this; // Reconstruct _sig info here since not a problem with later lazy // construction, _sig will show up on demand. - return make(ptr, klass(), _interfaces, klass_is_exact(), ptr == Constant ? 
const_oop() : NULL, _offset, _instance_id, _speculative, _inline_depth); + return make(ptr, klass(), _interfaces, klass_is_exact(), ptr == Constant ? const_oop() : nullptr, _offset, _instance_id, _speculative, _inline_depth); } @@ -4150,7 +4150,7 @@ const TypeInstPtr *TypeInstPtr::xmeet_unloaded(const TypeInstPtr *tinst, const I assert(loaded->ptr() != TypePtr::Null, "insanity check"); // if (loaded->ptr() == TypePtr::TopPTR) { return unloaded; } - else if (loaded->ptr() == TypePtr::AnyNull) { return make(ptr, unloaded->klass(), interfaces, false, NULL, off, instance_id, speculative, depth); } + else if (loaded->ptr() == TypePtr::AnyNull) { return make(ptr, unloaded->klass(), interfaces, false, nullptr, off, instance_id, speculative, depth); } else if (loaded->ptr() == TypePtr::BotPTR) { return TypeInstPtr::BOTTOM; } else if (loaded->ptr() == TypePtr::Constant || loaded->ptr() == TypePtr::NotNull) { if (unloaded->ptr() == TypePtr::BotPTR) { return TypeInstPtr::BOTTOM; } @@ -4220,7 +4220,7 @@ const Type *TypeInstPtr::xmeet_helper(const Type *t) const { const TypePtr* speculative = xmeet_speculative(tp); int depth = meet_inline_depth(tp->inline_depth()); return make(ptr, klass(), _interfaces, klass_is_exact(), - (ptr == Constant ? const_oop() : NULL), offset, instance_id, speculative, depth); + (ptr == Constant ? const_oop() : nullptr), offset, instance_id, speculative, depth); } case NotNull: case BotPTR: { @@ -4248,7 +4248,7 @@ const Type *TypeInstPtr::xmeet_helper(const Type *t) const { case TopPTR: case AnyNull: { return make(ptr, klass(), _interfaces, klass_is_exact(), - (ptr == Constant ? const_oop() : NULL), offset, instance_id, speculative, depth); + (ptr == Constant ? 
const_oop() : nullptr), offset, instance_id, speculative, depth); } case NotNull: case BotPTR: @@ -4286,7 +4286,7 @@ const Type *TypeInstPtr::xmeet_helper(const Type *t) const { ciKlass* tinst_klass = tinst->klass(); ciKlass* this_klass = klass(); - ciKlass* res_klass = NULL; + ciKlass* res_klass = nullptr; bool res_xk = false; const Type* res; MeetResult kind = meet_instptr(ptr, interfaces, this, tinst, res_klass, res_xk); @@ -4314,11 +4314,11 @@ const Type *TypeInstPtr::xmeet_helper(const Type *t) const { } else if (kind == LCA) { instance_id = InstanceBot; } - ciObject* o = NULL; // Assume not constant when done + ciObject* o = nullptr; // Assume not constant when done ciObject* this_oop = const_oop(); ciObject* tinst_oop = tinst->const_oop(); if (ptr == Constant) { - if (this_oop != NULL && tinst_oop != NULL && + if (this_oop != nullptr && tinst_oop != nullptr && this_oop->equals(tinst_oop)) o = this_oop; else if (above_centerline(_ptr)) { @@ -4391,7 +4391,7 @@ template TypePtr::MeetResult TypePtr::meet_instptr(PTR& ptr, InterfaceS // centerline and or-ed above it. (N.B. Constants are always exact.) // Check for subtyping: - const T* subtype = NULL; + const T* subtype = nullptr; bool subtype_exact = false; InterfaceSet subtype_interfaces; @@ -4424,7 +4424,7 @@ template TypePtr::MeetResult TypePtr::meet_instptr(PTR& ptr, InterfaceS // Check for classes now being equal if (this_type->is_same_java_type_as(other_type)) { // If the klasses are equal, the constants may still differ. Fall to - // NotNull if they do (neither constant is NULL; that is a special case + // NotNull if they do (neither constant is null; that is a special case // handled elsewhere). 
res_klass = this_type->klass(); res_xk = this_xk; @@ -4451,10 +4451,10 @@ template TypePtr::MeetResult TypePtr::meet_instptr(PTR& ptr, InterfaceS //------------------------java_mirror_type-------------------------------------- ciType* TypeInstPtr::java_mirror_type() const { // must be a singleton type - if( const_oop() == NULL ) return NULL; + if( const_oop() == nullptr ) return nullptr; // must be of type java.lang.Class - if( klass() != ciEnv::current()->Class_klass() ) return NULL; + if( klass() != ciEnv::current()->Class_klass() ) return nullptr; return const_oop()->as_instance()->java_mirror_type(); } @@ -4564,12 +4564,12 @@ const TypeInstPtr* TypeInstPtr::with_offset(intptr_t offset) const { } const TypeInstPtr* TypeInstPtr::remove_speculative() const { - if (_speculative == NULL) { + if (_speculative == nullptr) { return this; } assert(_inline_depth == InlineDepthTop || _inline_depth == InlineDepthBottom, "non speculative type shouldn't have inline depth"); return make(_ptr, klass(), _interfaces, klass_is_exact(), const_oop(), _offset, - _instance_id, NULL, _inline_depth); + _instance_id, nullptr, _inline_depth); } const TypePtr* TypeInstPtr::with_inline_depth(int depth) const { @@ -4640,11 +4640,11 @@ template bool TypePtr::is_meet_subtype_of_helper_for_array const T1* other_ary = this_one->is_array_type(other); const TypePtr* other_elem = other_ary->elem()->make_ptr(); const TypePtr* this_elem = this_one->elem()->make_ptr(); - if (other_elem != NULL && this_elem != NULL) { + if (other_elem != nullptr && this_elem != nullptr) { return this_one->is_reference_type(this_elem)->is_meet_subtype_of_helper(this_one->is_reference_type(other_elem), this_xk, other_xk); } - if (other_elem == NULL && this_elem == NULL) { + if (other_elem == nullptr && this_elem == nullptr) { return this_one->_klass->is_subtype_of(other->_klass); } @@ -4679,29 +4679,29 @@ const TypeAryPtr *TypeAryPtr::DOUBLES; 
//------------------------------make------------------------------------------- const TypeAryPtr *TypeAryPtr::make(PTR ptr, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id, const TypePtr* speculative, int inline_depth) { - assert(!(k == NULL && ary->_elem->isa_int()), + assert(!(k == nullptr && ary->_elem->isa_int()), "integral arrays must be pre-equipped with a class"); if (!xk) xk = ary->ary_must_be_exact(); assert(instance_id <= 0 || xk, "instances are always exactly typed"); - if (k != NULL && k->is_loaded() && k->is_obj_array_klass() && + if (k != nullptr && k->is_loaded() && k->is_obj_array_klass() && k->as_obj_array_klass()->base_element_klass()->is_interface()) { - k = NULL; + k = nullptr; } - return (TypeAryPtr*)(new TypeAryPtr(ptr, NULL, ary, k, xk, offset, instance_id, false, speculative, inline_depth))->hashcons(); + return (TypeAryPtr*)(new TypeAryPtr(ptr, nullptr, ary, k, xk, offset, instance_id, false, speculative, inline_depth))->hashcons(); } //------------------------------make------------------------------------------- const TypeAryPtr *TypeAryPtr::make(PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id, const TypePtr* speculative, int inline_depth, bool is_autobox_cache) { - assert(!(k == NULL && ary->_elem->isa_int()), + assert(!(k == nullptr && ary->_elem->isa_int()), "integral arrays must be pre-equipped with a class"); assert( (ptr==Constant && o) || (ptr!=Constant && !o), "" ); - if (!xk) xk = (o != NULL) || ary->ary_must_be_exact(); + if (!xk) xk = (o != nullptr) || ary->ary_must_be_exact(); assert(instance_id <= 0 || xk, "instances are always exactly typed"); - if (k != NULL && k->is_loaded() && k->is_obj_array_klass() && + if (k != nullptr && k->is_loaded() && k->is_obj_array_klass() && k->as_obj_array_klass()->base_element_klass()->is_interface()) { - k = NULL; + k = nullptr; } return (TypeAryPtr*)(new TypeAryPtr(ptr, o, ary, k, xk, offset, instance_id, is_autobox_cache, 
speculative, inline_depth))->hashcons(); } @@ -4709,7 +4709,7 @@ const TypeAryPtr *TypeAryPtr::make(PTR ptr, ciObject* o, const TypeAry *ary, ciK //------------------------------cast_to_ptr_type------------------------------- const TypeAryPtr* TypeAryPtr::cast_to_ptr_type(PTR ptr) const { if( ptr == _ptr ) return this; - return make(ptr, ptr == Constant ? const_oop() : NULL, _ary, klass(), klass_is_exact(), _offset, _instance_id, _speculative, _inline_depth); + return make(ptr, ptr == Constant ? const_oop() : nullptr, _ary, klass(), klass_is_exact(), _offset, _instance_id, _speculative, _inline_depth); } @@ -4744,7 +4744,7 @@ jint TypeAryPtr::max_array_length(BasicType etype) { //-----------------------------narrow_size_type------------------------------- // Narrow the given size type to the index range for the given array base type. -// Return NULL if the resulting int type becomes empty. +// Return null if the resulting int type becomes empty. const TypeInt* TypeAryPtr::narrow_size_type(const TypeInt* size) const { jint hi = size->_hi; jint lo = size->_lo; @@ -4776,7 +4776,7 @@ const TypeInt* TypeAryPtr::narrow_size_type(const TypeInt* size) const { //-------------------------------cast_to_size---------------------------------- const TypeAryPtr* TypeAryPtr::cast_to_size(const TypeInt* new_size) const { - assert(new_size != NULL, ""); + assert(new_size != nullptr, ""); new_size = narrow_size_type(new_size); if (new_size == size()) return this; const TypeAry* new_ary = TypeAry::make(elem(), new_size, is_stable()); @@ -4791,7 +4791,7 @@ const TypeAryPtr* TypeAryPtr::cast_to_stable(bool stable, int stable_dimension) const Type* elem = this->elem(); const TypePtr* elem_ptr = elem->make_ptr(); - if (stable_dimension > 1 && elem_ptr != NULL && elem_ptr->isa_aryptr()) { + if (stable_dimension > 1 && elem_ptr != nullptr && elem_ptr->isa_aryptr()) { // If this is widened from a narrow oop, TypeAry::make will re-narrow it. 
elem = elem_ptr = elem_ptr->is_aryptr()->cast_to_stable(stable, stable_dimension - 1); } @@ -4806,7 +4806,7 @@ int TypeAryPtr::stable_dimension() const { if (!is_stable()) return 0; int dim = 1; const TypePtr* elem_ptr = elem()->make_ptr(); - if (elem_ptr != NULL && elem_ptr->isa_aryptr()) + if (elem_ptr != nullptr && elem_ptr->isa_aryptr()) dim += elem_ptr->is_aryptr()->stable_dimension(); return dim; } @@ -4815,7 +4815,7 @@ int TypeAryPtr::stable_dimension() const { const TypeAryPtr* TypeAryPtr::cast_to_autobox_cache() const { if (is_autobox_cache()) return this; const TypeOopPtr* etype = elem()->make_oopptr(); - if (etype == NULL) return this; + if (etype == nullptr) return this; // The pointers in the autobox arrays are always non-null. etype = etype->cast_to_ptr_type(TypePtr::NotNull)->is_oopptr(); const TypeAry* new_ary = TypeAry::make(etype, size(), is_stable()); @@ -4886,7 +4886,7 @@ const Type *TypeAryPtr::xmeet_helper(const Type *t) const { case TopPTR: case AnyNull: { int instance_id = meet_instance_id(InstanceTop); - return make(ptr, (ptr == Constant ? const_oop() : NULL), + return make(ptr, (ptr == Constant ? const_oop() : nullptr), _ary, _klass, _klass_is_exact, offset, instance_id, speculative, depth); } case BotPTR: @@ -4916,7 +4916,7 @@ const Type *TypeAryPtr::xmeet_helper(const Type *t) const { // else fall through to AnyNull case AnyNull: { int instance_id = meet_instance_id(InstanceTop); - return make(ptr, (ptr == Constant ? const_oop() : NULL), + return make(ptr, (ptr == Constant ? 
const_oop() : nullptr), _ary, _klass, _klass_is_exact, offset, instance_id, speculative, depth); } default: ShouldNotReachHere(); @@ -4938,18 +4938,18 @@ const Type *TypeAryPtr::xmeet_helper(const Type *t) const { const TypePtr* speculative = xmeet_speculative(tap); int depth = meet_inline_depth(tap->inline_depth()); - ciKlass* res_klass = NULL; + ciKlass* res_klass = nullptr; bool res_xk = false; const Type* elem = tary->_elem; if (meet_aryptr(ptr, elem, this, tap, res_klass, res_xk) == NOT_SUBTYPE) { instance_id = InstanceBot; } - ciObject* o = NULL; // Assume not constant when done + ciObject* o = nullptr; // Assume not constant when done ciObject* this_oop = const_oop(); ciObject* tap_oop = tap->const_oop(); if (ptr == Constant) { - if (this_oop != NULL && tap_oop != NULL && + if (this_oop != nullptr && tap_oop != nullptr && this_oop->equals(tap_oop)) { o = tap_oop; } else if (above_centerline(_ptr)) { @@ -4988,7 +4988,7 @@ const Type *TypeAryPtr::xmeet_helper(const Type *t) const { ptr = NotNull; instance_id = InstanceBot; interfaces = this_interfaces.intersection_with(tp_interfaces); - return TypeInstPtr::make(ptr, ciEnv::current()->Object_klass(), interfaces, false, NULL,offset, instance_id, speculative, depth); + return TypeInstPtr::make(ptr, ciEnv::current()->Object_klass(), interfaces, false, nullptr,offset, instance_id, speculative, depth); } case Constant: case NotNull: @@ -5002,7 +5002,7 @@ const Type *TypeAryPtr::xmeet_helper(const Type *t) const { // to do the same here. if (tp->klass()->equals(ciEnv::current()->Object_klass()) && this_interfaces.contains(tp_interfaces) && !tp->klass_is_exact()) { // that is, my array type is a subtype of 'tp' klass - return make(ptr, (ptr == Constant ? const_oop() : NULL), + return make(ptr, (ptr == Constant ? 
const_oop() : nullptr), _ary, _klass, _klass_is_exact, offset, instance_id, speculative, depth); } } @@ -5015,7 +5015,7 @@ const Type *TypeAryPtr::xmeet_helper(const Type *t) const { instance_id = InstanceBot; } interfaces = this_interfaces.intersection_with(tp_interfaces); - return TypeInstPtr::make(ptr, ciEnv::current()->Object_klass(), interfaces, false, NULL, offset, instance_id, speculative, depth); + return TypeInstPtr::make(ptr, ciEnv::current()->Object_klass(), interfaces, false, nullptr, offset, instance_id, speculative, depth); default: typerr(t); } } @@ -5035,7 +5035,7 @@ template TypePtr::MeetResult TypePtr::meet_aryptr(PTR& ptr, const Type* bool other_xk = other_ary->klass_is_exact(); PTR this_ptr = this_ary->ptr(); PTR other_ptr = other_ary->ptr(); - res_klass = NULL; + res_klass = nullptr; MeetResult result = SUBTYPE; if (elem->isa_int()) { // Integral array element types have irrelevant lattice relations. @@ -5195,11 +5195,11 @@ const TypeAryPtr* TypeAryPtr::with_ary(const TypeAry* ary) const { } const TypeAryPtr* TypeAryPtr::remove_speculative() const { - if (_speculative == NULL) { + if (_speculative == nullptr) { return this; } assert(_inline_depth == InlineDepthTop || _inline_depth == InlineDepthBottom, "non speculative type shouldn't have inline depth"); - return make(_ptr, _const_oop, _ary->remove_speculative()->is_ary(), _klass, _klass_is_exact, _offset, _instance_id, NULL, _inline_depth); + return make(_ptr, _const_oop, _ary->remove_speculative()->is_ary(), _klass, _klass_is_exact, _offset, _instance_id, nullptr, _inline_depth); } const TypePtr* TypeAryPtr::with_inline_depth(int depth) const { @@ -5236,7 +5236,7 @@ intptr_t TypeNarrowPtr::get_con() const { bool TypeNarrowPtr::eq( const Type *t ) const { const TypeNarrowPtr* tc = isa_same_narrowptr(t); - if (tc != NULL) { + if (tc != nullptr) { if (_ptrtype->base() != tc->_ptrtype->base()) { return false; } @@ -5369,7 +5369,7 @@ bool TypeMetadataPtr::eq( const Type *t ) const { const 
TypeMetadataPtr *a = (const TypeMetadataPtr*)t; ciMetadata* one = metadata(); ciMetadata* two = a->metadata(); - if (one == NULL || two == NULL) { + if (one == nullptr || two == nullptr) { return (one == two) && TypePtr::eq(t); } else { return one->equals(two) && TypePtr::eq(t); @@ -5402,7 +5402,7 @@ const TypePtr* TypeMetadataPtr::add_offset( intptr_t offset ) const { // Do not allow interface-vs.-noninterface joins to collapse to top. const Type *TypeMetadataPtr::filter_helper(const Type *kills, bool include_speculative) const { const TypeMetadataPtr* ft = join_helper(kills, include_speculative)->isa_metadataptr(); - if (ft == NULL || ft->empty()) + if (ft == nullptr || ft->empty()) return Type::TOP; // Canonical empty value return ft; } @@ -5505,7 +5505,7 @@ const Type *TypeMetadataPtr::xmeet( const Type *t ) const { if( _ptr == Constant && tptr != Constant) return this; ptr = NotNull; // Fall down in lattice } - return make(ptr, NULL, offset); + return make(ptr, nullptr, offset); break; } } // End of switch @@ -5552,7 +5552,7 @@ const TypeMetadataPtr* TypeMetadataPtr::make(ciMethodData* m) { //------------------------------make------------------------------------------- // Create a meta data constant const TypeMetadataPtr *TypeMetadataPtr::make(PTR ptr, ciMetadata* m, int offset) { - assert(m == NULL || !m->is_klass(), "wrong type"); + assert(m == nullptr || !m->is_klass(), "wrong type"); return (TypeMetadataPtr*)(new TypeMetadataPtr(ptr, m, offset))->hashcons(); } @@ -5560,7 +5560,7 @@ const TypeMetadataPtr *TypeMetadataPtr::make(PTR ptr, ciMetadata* m, int offset) const TypeKlassPtr* TypeAryPtr::as_klass_type(bool try_for_exact) const { const Type* elem = _ary->_elem; bool xk = klass_is_exact(); - if (elem->make_oopptr() != NULL) { + if (elem->make_oopptr() != nullptr) { elem = elem->make_oopptr()->as_klass_type(try_for_exact); if (elem->is_klassptr()->klass_is_exact()) { xk = true; @@ -5588,7 +5588,7 @@ const TypeKlassPtr* TypeKlassPtr::make(PTR ptr, 
ciKlass* klass, int offset, Inte //------------------------------TypeKlassPtr----------------------------------- TypeKlassPtr::TypeKlassPtr(TYPES t, PTR ptr, ciKlass* klass, const InterfaceSet& interfaces, int offset) : TypePtr(t, ptr, offset), _klass(klass), _interfaces(interfaces) { - assert(klass == NULL || !klass->is_loaded() || (klass->is_instance_klass() && !klass->is_interface()) || + assert(klass == nullptr || !klass->is_loaded() || (klass->is_instance_klass() && !klass->is_interface()) || klass->is_type_array_klass() || !klass->as_obj_array_klass()->base_element_klass()->is_interface(), "no interface here"); } @@ -5603,7 +5603,7 @@ ciKlass* TypeKlassPtr::exact_klass_helper() const { if (_interfaces.eq(TypePtr::interfaces(k, true, false, true, ignore_interfaces))) { return _klass; } - return NULL; + return nullptr; } return _interfaces.exact_klass(); } @@ -5785,7 +5785,7 @@ const TypeOopPtr* TypeInstKlassPtr::as_instance_type(bool klass_change) const { bool xk = klass_is_exact(); Compile* C = Compile::current(); Dependencies* deps = C->dependencies(); - assert((deps != NULL) == (C->method() != NULL && C->method()->code_size() > 0), "sanity"); + assert((deps != nullptr) == (C->method() != nullptr && C->method()->code_size() > 0), "sanity"); // Element is an instance bool klass_is_exact = false; TypePtr::InterfaceSet interfaces = _interfaces; @@ -5794,9 +5794,9 @@ const TypeOopPtr* TypeInstKlassPtr::as_instance_type(bool klass_change) const { ciInstanceKlass* ik = k->as_instance_klass(); klass_is_exact = ik->is_final(); if (!klass_is_exact && klass_change - && deps != NULL && UseUniqueSubclasses) { + && deps != nullptr && UseUniqueSubclasses) { ciInstanceKlass* sub = ik->unique_concrete_subklass(); - if (sub != NULL) { + if (sub != nullptr) { ciKlass* sub_k = sub; TypePtr::InterfaceSet sub_interfaces = TypePtr::interfaces(sub_k, true, false, false, ignore_interfaces); assert(sub_k == sub, ""); @@ -5808,7 +5808,7 @@ const TypeOopPtr* 
TypeInstKlassPtr::as_instance_type(bool klass_change) const { } } } - return TypeInstPtr::make(TypePtr::BotPTR, k, interfaces, xk, NULL, 0); + return TypeInstPtr::make(TypePtr::BotPTR, k, interfaces, xk, nullptr, 0); } //------------------------------xmeet------------------------------------------ @@ -5886,7 +5886,7 @@ const Type *TypeInstKlassPtr::xmeet( const Type *t ) const { PTR ptr = meet_ptr(tkls->ptr()); InterfaceSet interfaces = meet_interfaces(tkls); - ciKlass* res_klass = NULL; + ciKlass* res_klass = nullptr; bool res_xk = false; switch(meet_instptr(ptr, interfaces, this, tkls, res_klass, res_xk)) { case UNLOADED: @@ -6038,15 +6038,15 @@ const TypeKlassPtr* TypeInstKlassPtr::try_improve() const { ciKlass* k = klass(); Compile* C = Compile::current(); Dependencies* deps = C->dependencies(); - assert((deps != NULL) == (C->method() != NULL && C->method()->code_size() > 0), "sanity"); + assert((deps != nullptr) == (C->method() != nullptr && C->method()->code_size() > 0), "sanity"); TypePtr::InterfaceSet interfaces = _interfaces; if (k->is_loaded()) { ciInstanceKlass* ik = k->as_instance_klass(); bool klass_is_exact = ik->is_final(); if (!klass_is_exact && - deps != NULL) { + deps != nullptr) { ciInstanceKlass* sub = ik->unique_concrete_subklass(); - if (sub != NULL) { + if (sub != nullptr) { ciKlass *sub_k = sub; TypePtr::InterfaceSet sub_interfaces = TypePtr::interfaces(sub_k, true, false, false, ignore_interfaces); assert(sub_k == sub, ""); @@ -6072,14 +6072,14 @@ const TypeAryKlassPtr *TypeAryKlassPtr::make(PTR ptr, ciKlass* k, int offset, In // Element is an object array. Recursively call ourself. 
ciKlass* eklass = k->as_obj_array_klass()->element_klass(); const TypeKlassPtr *etype = TypeKlassPtr::make(eklass, interface_handling)->cast_to_exactness(false); - return TypeAryKlassPtr::make(ptr, etype, NULL, offset); + return TypeAryKlassPtr::make(ptr, etype, nullptr, offset); } else if (k->is_type_array_klass()) { // Element is an typeArray const Type* etype = get_const_basic_type(k->as_type_array_klass()->element_type()); return TypeAryKlassPtr::make(ptr, etype, k, offset); } else { ShouldNotReachHere(); - return NULL; + return nullptr; } } @@ -6106,7 +6106,7 @@ int TypeAryKlassPtr::hash(void) const { // Compute the defining klass for this class ciKlass* TypeAryPtr::compute_klass(DEBUG_ONLY(bool verify)) const { // Compute _klass based on element type. - ciKlass* k_ary = NULL; + ciKlass* k_ary = nullptr; const TypeInstPtr *tinst; const TypeAryPtr *tary; const Type* el = elem(); @@ -6115,15 +6115,15 @@ ciKlass* TypeAryPtr::compute_klass(DEBUG_ONLY(bool verify)) const { } // Get element klass - if ((tinst = el->isa_instptr()) != NULL) { - // Leave k_ary at NULL. - } else if ((tary = el->isa_aryptr()) != NULL) { - // Leave k_ary at NULL. + if ((tinst = el->isa_instptr()) != nullptr) { + // Leave k_ary at null. + } else if ((tary = el->isa_aryptr()) != nullptr) { + // Leave k_ary at null. } else if ((el->base() == Type::Top) || (el->base() == Type::Bottom)) { // element type of Bottom occurs from meet of basic type // and object; Top occurs when doing join on Bottom. - // Leave k_ary at NULL. + // Leave k_ary at null. } else { // Cannot compute array klass directly from basic type, // since subtypes of TypeInt all have basic type T_INT. 
@@ -6184,8 +6184,8 @@ ciKlass* TypeAryPtr::klass() const { ciKlass* TypeAryPtr::exact_klass_helper() const { if (_ary->_elem->make_ptr() && _ary->_elem->make_ptr()->isa_oopptr()) { ciKlass* k = _ary->_elem->make_ptr()->is_oopptr()->exact_klass_helper(); - if (k == NULL) { - return NULL; + if (k == nullptr) { + return nullptr; } k = ciObjArrayKlass::make(k); return k; @@ -6248,10 +6248,10 @@ const TypeKlassPtr *TypeAryKlassPtr::cast_to_exactness(bool klass_is_exact) cons const TypeOopPtr* TypeAryKlassPtr::as_instance_type(bool klass_change) const { ciKlass* k = klass(); bool xk = klass_is_exact(); - const Type* el = NULL; + const Type* el = nullptr; if (elem()->isa_klassptr()) { el = elem()->is_klassptr()->as_instance_type(false)->cast_to_exactness(false); - k = NULL; + k = nullptr; } else { el = elem(); } @@ -6334,7 +6334,7 @@ const Type *TypeAryKlassPtr::xmeet( const Type *t ) const { const Type* elem = _elem->meet(tap->_elem); PTR ptr = meet_ptr(tap->ptr()); - ciKlass* res_klass = NULL; + ciKlass* res_klass = nullptr; bool res_xk = false; meet_aryptr(ptr, elem, this, tap, res_klass, res_xk); assert(res_xk == (ptr == Constant), ""); @@ -6418,10 +6418,10 @@ template bool TypePtr::is_java_subtype_of_helper_for_array( const TypePtr* other_elem = other_ary->elem()->make_ptr(); const TypePtr* this_elem = this_one->elem()->make_ptr(); - if (this_elem != NULL && other_elem != NULL) { + if (this_elem != nullptr && other_elem != nullptr) { return this_one->is_reference_type(this_elem)->is_java_subtype_of_helper(this_one->is_reference_type(other_elem), this_exact, other_exact); } - if (this_elem == NULL && other_elem == NULL) { + if (this_elem == nullptr && other_elem == nullptr) { return this_one->_klass->is_subtype_of(other->_klass); } return false; @@ -6450,11 +6450,11 @@ template bool TypePtr::is_same_java_type_as_helper_for_arra const TypePtr* other_elem = other_ary->elem()->make_ptr(); const TypePtr* this_elem = this_one->elem()->make_ptr(); - if (other_elem != NULL 
&& this_elem != NULL) { + if (other_elem != nullptr && this_elem != nullptr) { return this_one->is_reference_type(this_elem)->is_same_java_type_as(this_one->is_reference_type(other_elem)); } - if (other_elem == NULL && this_elem == NULL) { - assert(this_one->_klass != NULL && other->_klass != NULL, ""); + if (other_elem == nullptr && this_elem == nullptr) { + assert(this_one->_klass != nullptr && other->_klass != nullptr, ""); return this_one->_klass->equals(other->_klass); } return false; @@ -6490,10 +6490,10 @@ template bool TypePtr::maybe_java_subtype_of_helper_for_arr const TypePtr* this_elem = this_one->elem()->make_ptr(); const TypePtr* other_elem = other_ary->elem()->make_ptr(); - if (other_elem != NULL && this_elem != NULL) { + if (other_elem != nullptr && this_elem != nullptr) { return this_one->is_reference_type(this_elem)->maybe_java_subtype_of_helper(this_one->is_reference_type(other_elem), this_exact, other_exact); } - if (other_elem == NULL && this_elem == NULL) { + if (other_elem == nullptr && this_elem == nullptr) { return this_one->_klass->is_subtype_of(other->_klass); } return false; @@ -6513,8 +6513,8 @@ const Type *TypeAryKlassPtr::xdual() const { ciKlass* TypeAryKlassPtr::exact_klass_helper() const { if (elem()->isa_klassptr()) { ciKlass* k = elem()->is_klassptr()->exact_klass_helper(); - if (k == NULL) { - return NULL; + if (k == nullptr) { + return nullptr; } k = ciObjArrayKlass::make(k); return k; @@ -6524,12 +6524,12 @@ ciKlass* TypeAryKlassPtr::exact_klass_helper() const { } ciKlass* TypeAryKlassPtr::klass() const { - if (_klass != NULL) { + if (_klass != nullptr) { return _klass; } - ciKlass* k = NULL; + ciKlass* k = nullptr; if (elem()->isa_klassptr()) { - // leave NULL + // leave null } else if ((elem()->base() == Type::Top) || (elem()->base() == Type::Bottom)) { } else { @@ -6596,10 +6596,10 @@ const TypeFunc *TypeFunc::make( const TypeTuple *domain, const TypeTuple *range const TypeFunc *TypeFunc::make(ciMethod* method) { Compile* C = 
Compile::current(); const TypeFunc* tf = C->last_tf(method); // check cache - if (tf != NULL) return tf; // The hit rate here is almost 50%. + if (tf != nullptr) return tf; // The hit rate here is almost 50%. const TypeTuple *domain; if (method->is_static()) { - domain = TypeTuple::make_domain(NULL, method->signature(), ignore_interfaces); + domain = TypeTuple::make_domain(nullptr, method->signature(), ignore_interfaces); } else { domain = TypeTuple::make_domain(method->holder(), method->signature(), ignore_interfaces); } diff --git a/src/hotspot/share/opto/type.hpp b/src/hotspot/share/opto/type.hpp index 2528e26b161..51d6d27f971 100644 --- a/src/hotspot/share/opto/type.hpp +++ b/src/hotspot/share/opto/type.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -177,7 +177,7 @@ protected: // Each class of type is also identified by its base. 
const TYPES _base; // Enum of Types type - Type( TYPES t ) : _dual(NULL), _base(t) {} // Simple types + Type( TYPES t ) : _dual(nullptr), _base(t) {} // Simple types // ~Type(); // Use fast deallocation const Type *hashcons(); // Hash-cons the type virtual const Type *filter_helper(const Type *kills, bool include_speculative) const; @@ -279,46 +279,46 @@ public: double getd() const; const TypeInt *is_int() const; - const TypeInt *isa_int() const; // Returns NULL if not an Int + const TypeInt *isa_int() const; // Returns null if not an Int const TypeInteger* is_integer(BasicType bt) const; const TypeInteger* isa_integer(BasicType bt) const; const TypeLong *is_long() const; - const TypeLong *isa_long() const; // Returns NULL if not a Long - const TypeD *isa_double() const; // Returns NULL if not a Double{Top,Con,Bot} + const TypeLong *isa_long() const; // Returns null if not a Long + const TypeD *isa_double() const; // Returns null if not a Double{Top,Con,Bot} const TypeD *is_double_constant() const; // Asserts it is a DoubleCon - const TypeD *isa_double_constant() const; // Returns NULL if not a DoubleCon - const TypeF *isa_float() const; // Returns NULL if not a Float{Top,Con,Bot} + const TypeD *isa_double_constant() const; // Returns null if not a DoubleCon + const TypeF *isa_float() const; // Returns null if not a Float{Top,Con,Bot} const TypeF *is_float_constant() const; // Asserts it is a FloatCon - const TypeF *isa_float_constant() const; // Returns NULL if not a FloatCon + const TypeF *isa_float_constant() const; // Returns null if not a FloatCon const TypeTuple *is_tuple() const; // Collection of fields, NOT a pointer const TypeAry *is_ary() const; // Array, NOT array pointer - const TypeAry *isa_ary() const; // Returns NULL of not ary + const TypeAry *isa_ary() const; // Returns null of not ary const TypeVect *is_vect() const; // Vector - const TypeVect *isa_vect() const; // Returns NULL if not a Vector + const TypeVect *isa_vect() const; // Returns null if 
not a Vector const TypeVectMask *is_vectmask() const; // Predicate/Mask Vector - const TypeVectMask *isa_vectmask() const; // Returns NULL if not a Vector Predicate/Mask + const TypeVectMask *isa_vectmask() const; // Returns null if not a Vector Predicate/Mask const TypePtr *is_ptr() const; // Asserts it is a ptr type - const TypePtr *isa_ptr() const; // Returns NULL if not ptr type + const TypePtr *isa_ptr() const; // Returns null if not ptr type const TypeRawPtr *isa_rawptr() const; // NOT Java oop const TypeRawPtr *is_rawptr() const; // Asserts is rawptr const TypeNarrowOop *is_narrowoop() const; // Java-style GC'd pointer - const TypeNarrowOop *isa_narrowoop() const; // Returns NULL if not oop ptr type + const TypeNarrowOop *isa_narrowoop() const; // Returns null if not oop ptr type const TypeNarrowKlass *is_narrowklass() const; // compressed klass pointer - const TypeNarrowKlass *isa_narrowklass() const;// Returns NULL if not oop ptr type - const TypeOopPtr *isa_oopptr() const; // Returns NULL if not oop ptr type + const TypeNarrowKlass *isa_narrowklass() const;// Returns null if not oop ptr type + const TypeOopPtr *isa_oopptr() const; // Returns null if not oop ptr type const TypeOopPtr *is_oopptr() const; // Java-style GC'd pointer - const TypeInstPtr *isa_instptr() const; // Returns NULL if not InstPtr + const TypeInstPtr *isa_instptr() const; // Returns null if not InstPtr const TypeInstPtr *is_instptr() const; // Instance - const TypeAryPtr *isa_aryptr() const; // Returns NULL if not AryPtr + const TypeAryPtr *isa_aryptr() const; // Returns null if not AryPtr const TypeAryPtr *is_aryptr() const; // Array oop - const TypeMetadataPtr *isa_metadataptr() const; // Returns NULL if not oop ptr type + const TypeMetadataPtr *isa_metadataptr() const; // Returns null if not oop ptr type const TypeMetadataPtr *is_metadataptr() const; // Java-style GC'd pointer - const TypeKlassPtr *isa_klassptr() const; // Returns NULL if not KlassPtr + const TypeKlassPtr 
*isa_klassptr() const; // Returns null if not KlassPtr const TypeKlassPtr *is_klassptr() const; // assert if not KlassPtr - const TypeInstKlassPtr *isa_instklassptr() const; // Returns NULL if not IntKlassPtr + const TypeInstKlassPtr *isa_instklassptr() const; // Returns null if not IntKlassPtr const TypeInstKlassPtr *is_instklassptr() const; // assert if not IntKlassPtr - const TypeAryKlassPtr *isa_aryklassptr() const; // Returns NULL if not AryKlassPtr + const TypeAryKlassPtr *isa_aryklassptr() const; // Returns null if not AryKlassPtr const TypeAryKlassPtr *is_aryklassptr() const; // assert if not AryKlassPtr virtual bool is_finite() const; // Has a finite value @@ -386,12 +386,12 @@ public: // Create basic type static const Type* get_const_basic_type(BasicType type) { - assert((uint)type <= T_CONFLICT && _const_basic_type[type] != NULL, "bad type"); + assert((uint)type <= T_CONFLICT && _const_basic_type[type] != nullptr, "bad type"); return _const_basic_type[type]; } // For two instance arrays of same dimension, return the base element types. - // Otherwise or if the arrays have different dimensions, return NULL. + // Otherwise or if the arrays have different dimensions, return null. static void get_arrays_base_elements(const Type *a1, const Type *a2, const TypeInstPtr **e1, const TypeInstPtr **e2); @@ -407,7 +407,7 @@ public: // Create standard zero value: static const Type* get_zero_type(BasicType type) { - assert((uint)type <= T_CONFLICT && _zero_type[type] != NULL, "bad type"); + assert((uint)type <= T_CONFLICT && _zero_type[type] != nullptr, "bad type"); return _zero_type[type]; } @@ -465,14 +465,14 @@ public: bool is_unsigned_load); // Speculative type helper methods. See TypePtr. 
- virtual const TypePtr* speculative() const { return NULL; } - virtual ciKlass* speculative_type() const { return NULL; } - virtual ciKlass* speculative_type_not_null() const { return NULL; } + virtual const TypePtr* speculative() const { return nullptr; } + virtual ciKlass* speculative_type() const { return nullptr; } + virtual ciKlass* speculative_type_not_null() const { return nullptr; } virtual bool speculative_maybe_null() const { return true; } virtual bool speculative_always_null() const { return true; } virtual const Type* remove_speculative() const { return this; } virtual const Type* cleanup_speculative() const { return this; } - virtual bool would_improve_type(ciKlass* exact_kls, int inline_depth) const { return exact_kls != NULL; } + virtual bool would_improve_type(ciKlass* exact_kls, int inline_depth) const { return exact_kls != nullptr; } virtual bool would_improve_ptr(ProfilePtrKind ptr_kind) const { return ptr_kind == ProfileAlwaysNull || ptr_kind == ProfileNeverNull; } const Type* maybe_remove_speculative(bool include_speculative) const; @@ -929,7 +929,7 @@ public: enum PTR { TopPTR, AnyNull, Constant, Null, NotNull, BotPTR, lastPTR }; protected: TypePtr(TYPES t, PTR ptr, int offset, - const TypePtr* speculative = NULL, + const TypePtr* speculative = nullptr, int inline_depth = InlineDepthBottom) : Type(t), _speculative(speculative), _inline_depth(inline_depth), _offset(offset), _ptr(ptr) {} @@ -1005,7 +1005,7 @@ public: const PTR ptr() const { return _ptr; } static const TypePtr *make(TYPES t, PTR ptr, int offset, - const TypePtr* speculative = NULL, + const TypePtr* speculative = nullptr, int inline_depth = InlineDepthBottom); // Return a 'ptr' version of this type @@ -1085,7 +1085,7 @@ public: virtual intptr_t get_con() const; virtual const TypePtr* add_offset(intptr_t offset) const; - virtual const TypeRawPtr* with_offset(intptr_t offset) const { ShouldNotReachHere(); return NULL;} + virtual const TypeRawPtr* with_offset(intptr_t offset) const 
{ ShouldNotReachHere(); return nullptr;} virtual const Type *xmeet( const Type *t ) const; virtual const Type *xdual() const; // Compute dual right now. @@ -1117,9 +1117,9 @@ public: }; protected: - // Oop is NULL, unless this is a constant oop. + // Oop is null, unless this is a constant oop. ciObject* _const_oop; // Constant oop - // If _klass is NULL, then so is _sig. This is an unloaded klass. + // If _klass is null, then so is _sig. This is an unloaded klass. ciKlass* _klass; // Klass object const InterfaceSet _interfaces; @@ -1145,7 +1145,7 @@ protected: // Do not allow interface-vs.-noninterface joins to collapse to top. virtual const Type *filter_helper(const Type *kills, bool include_speculative) const; - virtual ciKlass* exact_klass_helper() const { return NULL; } + virtual ciKlass* exact_klass_helper() const { return nullptr; } virtual ciKlass* klass() const { return _klass; } public: @@ -1188,18 +1188,18 @@ public: // Creates a singleton type given an object. // If the object cannot be rendered as a constant, // may return a non-singleton type. - // If require_constant, produce a NULL if a singleton is not possible. + // If require_constant, produce a null if a singleton is not possible. static const TypeOopPtr* make_from_constant(ciObject* o, bool require_constant = false); // Make a generic (unclassed) pointer to an oop. 
static const TypeOopPtr* make(PTR ptr, int offset, int instance_id, - const TypePtr* speculative = NULL, + const TypePtr* speculative = nullptr, int inline_depth = InlineDepthBottom); ciObject* const_oop() const { return _const_oop; } // Exact klass, possibly an interface or an array of interface - ciKlass* exact_klass(bool maybe_null = false) const { assert(klass_is_exact(), ""); ciKlass* k = exact_klass_helper(); assert(k != NULL || maybe_null, ""); return k; } + ciKlass* exact_klass(bool maybe_null = false) const { assert(klass_is_exact(), ""); ciKlass* k = exact_klass_helper(); assert(k != nullptr || maybe_null, ""); return k; } ciKlass* unloaded_klass() const { assert(!is_loaded(), "only for unloaded types"); return klass(); } virtual bool is_loaded() const { return klass()->is_loaded() && _interfaces.is_loaded(); } @@ -1310,24 +1310,24 @@ public: // Make a pointer to some value of type klass. static const TypeInstPtr *make(PTR ptr, ciKlass* klass, InterfaceHandling interface_handling = ignore_interfaces) { const TypePtr::InterfaceSet interfaces = TypePtr::interfaces(klass, true, true, false, interface_handling); - return make(ptr, klass, interfaces, false, NULL, 0, InstanceBot); + return make(ptr, klass, interfaces, false, nullptr, 0, InstanceBot); } // Make a pointer to some non-polymorphic value of exactly type klass. static const TypeInstPtr *make_exact(PTR ptr, ciKlass* klass) { const TypePtr::InterfaceSet interfaces = TypePtr::interfaces(klass, true, false, false, ignore_interfaces); - return make(ptr, klass, interfaces, true, NULL, 0, InstanceBot); + return make(ptr, klass, interfaces, true, nullptr, 0, InstanceBot); } // Make a pointer to some value of type klass with offset. 
static const TypeInstPtr *make(PTR ptr, ciKlass* klass, int offset) { const TypePtr::InterfaceSet interfaces = TypePtr::interfaces(klass, true, false, false, ignore_interfaces); - return make(ptr, klass, interfaces, false, NULL, offset, InstanceBot); + return make(ptr, klass, interfaces, false, nullptr, offset, InstanceBot); } static const TypeInstPtr *make(PTR ptr, ciKlass* k, const InterfaceSet& interfaces, bool xk, ciObject* o, int offset, int instance_id = InstanceBot, - const TypePtr* speculative = NULL, + const TypePtr* speculative = nullptr, int inline_depth = InlineDepthBottom); static const TypeInstPtr *make(PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id = InstanceBot) { @@ -1338,7 +1338,7 @@ public: /** Create constant type for a constant boxed value */ const Type* get_const_boxed_value() const; - // If this is a java.lang.Class constant, return the type for it or NULL. + // If this is a java.lang.Class constant, return the type for it or null. // Pass to Type::get_const_type to turn it to a type, which will usually // be a TypeInstPtr, but may also be a TypeInt::INT for int.class, etc. 
ciType* java_mirror_type() const; @@ -1399,7 +1399,7 @@ class TypeAryPtr : public TypeOopPtr { int dummy; bool top_or_bottom = (base_element_type(dummy) == Type::TOP || base_element_type(dummy) == Type::BOTTOM); - if (UseCompressedOops && (elem()->make_oopptr() != NULL && !top_or_bottom) && + if (UseCompressedOops && (elem()->make_oopptr() != nullptr && !top_or_bottom) && _offset != 0 && _offset != arrayOopDesc::length_offset_in_bytes() && _offset != arrayOopDesc::klass_offset_in_bytes()) { _is_ptr_to_narrowoop = true; @@ -1441,12 +1441,12 @@ public: static const TypeAryPtr *make(PTR ptr, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot, - const TypePtr* speculative = NULL, + const TypePtr* speculative = nullptr, int inline_depth = InlineDepthBottom); // Constant pointer to array static const TypeAryPtr *make(PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot, - const TypePtr* speculative = NULL, + const TypePtr* speculative = nullptr, int inline_depth = InlineDepthBottom, bool is_autobox_cache = false); // Return a 'ptr' version of this type @@ -1494,7 +1494,7 @@ public: static const TypeAryPtr *DOUBLES; // selects one of the above: static const TypeAryPtr *get_array_body_type(BasicType elem) { - assert((uint)elem <= T_CONFLICT && _array_body_type[elem] != NULL, "bad elem type"); + assert((uint)elem <= T_CONFLICT && _array_body_type[elem] != nullptr, "bad elem type"); return _array_body_type[elem]; } static const TypeAryPtr *_array_body_type[T_CONFLICT+1]; @@ -1588,7 +1588,7 @@ public: virtual bool maybe_java_subtype_of_helper(const TypeKlassPtr* other, bool this_exact, bool other_exact) const { ShouldNotReachHere(); return false; } // Exact klass, possibly an interface or an array of interface - ciKlass* exact_klass(bool maybe_null = false) const { assert(klass_is_exact(), ""); ciKlass* k = exact_klass_helper(); assert(k != NULL || maybe_null, ""); return k; } + ciKlass* 
exact_klass(bool maybe_null = false) const { assert(klass_is_exact(), ""); ciKlass* k = exact_klass_helper(); assert(k != nullptr || maybe_null, ""); return k; } virtual bool klass_is_exact() const { return _ptr == Constant; } static const TypeKlassPtr* make(ciKlass* klass, InterfaceHandling interface_handling = ignore_interfaces); @@ -1596,20 +1596,20 @@ public: virtual bool is_loaded() const { return _klass->is_loaded(); } - virtual const TypeKlassPtr* cast_to_ptr_type(PTR ptr) const { ShouldNotReachHere(); return NULL; } + virtual const TypeKlassPtr* cast_to_ptr_type(PTR ptr) const { ShouldNotReachHere(); return nullptr; } - virtual const TypeKlassPtr *cast_to_exactness(bool klass_is_exact) const { ShouldNotReachHere(); return NULL; } + virtual const TypeKlassPtr *cast_to_exactness(bool klass_is_exact) const { ShouldNotReachHere(); return nullptr; } // corresponding pointer to instance, for a given class - virtual const TypeOopPtr* as_instance_type(bool klass_change = true) const { ShouldNotReachHere(); return NULL; } + virtual const TypeOopPtr* as_instance_type(bool klass_change = true) const { ShouldNotReachHere(); return nullptr; } - virtual const TypePtr *add_offset( intptr_t offset ) const { ShouldNotReachHere(); return NULL; } - virtual const Type *xmeet( const Type *t ) const { ShouldNotReachHere(); return NULL; } - virtual const Type *xdual() const { ShouldNotReachHere(); return NULL; } + virtual const TypePtr *add_offset( intptr_t offset ) const { ShouldNotReachHere(); return nullptr; } + virtual const Type *xmeet( const Type *t ) const { ShouldNotReachHere(); return nullptr; } + virtual const Type *xdual() const { ShouldNotReachHere(); return nullptr; } virtual intptr_t get_con() const; - virtual const TypeKlassPtr* with_offset(intptr_t offset) const { ShouldNotReachHere(); return NULL; } + virtual const TypeKlassPtr* with_offset(intptr_t offset) const { ShouldNotReachHere(); return nullptr; } virtual const TypeKlassPtr* try_improve() const { return 
this; } @@ -1708,7 +1708,7 @@ class TypeAryKlassPtr : public TypeKlassPtr { static const InterfaceSet* _array_interfaces; TypeAryKlassPtr(PTR ptr, const Type *elem, ciKlass* klass, int offset) : TypeKlassPtr(AryKlassPtr, ptr, klass, *_array_interfaces, offset), _elem(elem) { - assert(klass == NULL || klass->is_type_array_klass() || !klass->as_obj_array_klass()->base_element_klass()->is_interface(), ""); + assert(klass == nullptr || klass->is_type_array_klass() || !klass->as_obj_array_klass()->base_element_klass()->is_interface(), ""); } virtual ciKlass* exact_klass_helper() const; @@ -1932,7 +1932,7 @@ public: //------------------------------accessors-------------------------------------- inline bool Type::is_ptr_to_narrowoop() const { #ifdef _LP64 - return (isa_oopptr() != NULL && is_oopptr()->is_ptr_to_narrowoop_nv()); + return (isa_oopptr() != nullptr && is_oopptr()->is_ptr_to_narrowoop_nv()); #else return false; #endif @@ -1940,7 +1940,7 @@ inline bool Type::is_ptr_to_narrowoop() const { inline bool Type::is_ptr_to_narrowklass() const { #ifdef _LP64 - return (isa_oopptr() != NULL && is_oopptr()->is_ptr_to_narrowklass_nv()); + return (isa_oopptr() != nullptr && is_oopptr()->is_ptr_to_narrowklass_nv()); #else return false; #endif @@ -1962,7 +1962,7 @@ inline const TypeInteger *Type::is_integer(BasicType bt) const { } inline const TypeInteger *Type::isa_integer(BasicType bt) const { - return (((bt == T_INT && _base == Int) || (bt == T_LONG && _base == Long)) ? (TypeInteger*)this : NULL); + return (((bt == T_INT && _base == Int) || (bt == T_LONG && _base == Long)) ? (TypeInteger*)this : nullptr); } inline const TypeInt *Type::is_int() const { @@ -1971,7 +1971,7 @@ inline const TypeInt *Type::is_int() const { } inline const TypeInt *Type::isa_int() const { - return ( _base == Int ? (TypeInt*)this : NULL); + return ( _base == Int ? 
(TypeInt*)this : nullptr); } inline const TypeLong *Type::is_long() const { @@ -1980,13 +1980,13 @@ inline const TypeLong *Type::is_long() const { } inline const TypeLong *Type::isa_long() const { - return ( _base == Long ? (TypeLong*)this : NULL); + return ( _base == Long ? (TypeLong*)this : nullptr); } inline const TypeF *Type::isa_float() const { return ((_base == FloatTop || _base == FloatCon || - _base == FloatBot) ? (TypeF*)this : NULL); + _base == FloatBot) ? (TypeF*)this : nullptr); } inline const TypeF *Type::is_float_constant() const { @@ -1995,13 +1995,13 @@ inline const TypeF *Type::is_float_constant() const { } inline const TypeF *Type::isa_float_constant() const { - return ( _base == FloatCon ? (TypeF*)this : NULL); + return ( _base == FloatCon ? (TypeF*)this : nullptr); } inline const TypeD *Type::isa_double() const { return ((_base == DoubleTop || _base == DoubleCon || - _base == DoubleBot) ? (TypeD*)this : NULL); + _base == DoubleBot) ? (TypeD*)this : nullptr); } inline const TypeD *Type::is_double_constant() const { @@ -2010,7 +2010,7 @@ inline const TypeD *Type::is_double_constant() const { } inline const TypeD *Type::isa_double_constant() const { - return ( _base == DoubleCon ? (TypeD*)this : NULL); + return ( _base == DoubleCon ? (TypeD*)this : nullptr); } inline const TypeTuple *Type::is_tuple() const { @@ -2024,7 +2024,7 @@ inline const TypeAry *Type::is_ary() const { } inline const TypeAry *Type::isa_ary() const { - return ((_base == Array) ? (TypeAry*)this : NULL); + return ((_base == Array) ? (TypeAry*)this : nullptr); } inline const TypeVectMask *Type::is_vectmask() const { @@ -2033,7 +2033,7 @@ inline const TypeVectMask *Type::is_vectmask() const { } inline const TypeVectMask *Type::isa_vectmask() const { - return (_base == VectorMask) ? (TypeVectMask*)this : NULL; + return (_base == VectorMask) ? 
(TypeVectMask*)this : nullptr; } inline const TypeVect *Type::is_vect() const { @@ -2042,7 +2042,7 @@ inline const TypeVect *Type::is_vect() const { } inline const TypeVect *Type::isa_vect() const { - return (_base >= VectorMask && _base <= VectorZ) ? (TypeVect*)this : NULL; + return (_base >= VectorMask && _base <= VectorZ) ? (TypeVect*)this : nullptr; } inline const TypePtr *Type::is_ptr() const { @@ -2053,7 +2053,7 @@ inline const TypePtr *Type::is_ptr() const { inline const TypePtr *Type::isa_ptr() const { // AnyPtr is the first Ptr and KlassPtr the last, with no non-ptrs between. - return (_base >= AnyPtr && _base <= AryKlassPtr) ? (TypePtr*)this : NULL; + return (_base >= AnyPtr && _base <= AryKlassPtr) ? (TypePtr*)this : nullptr; } inline const TypeOopPtr *Type::is_oopptr() const { @@ -2064,11 +2064,11 @@ inline const TypeOopPtr *Type::is_oopptr() const { inline const TypeOopPtr *Type::isa_oopptr() const { // OopPtr is the first and KlassPtr the last, with no non-oops between. - return (_base >= OopPtr && _base <= AryPtr) ? (TypeOopPtr*)this : NULL; + return (_base >= OopPtr && _base <= AryPtr) ? (TypeOopPtr*)this : nullptr; } inline const TypeRawPtr *Type::isa_rawptr() const { - return (_base == RawPtr) ? (TypeRawPtr*)this : NULL; + return (_base == RawPtr) ? (TypeRawPtr*)this : nullptr; } inline const TypeRawPtr *Type::is_rawptr() const { @@ -2077,7 +2077,7 @@ inline const TypeRawPtr *Type::is_rawptr() const { } inline const TypeInstPtr *Type::isa_instptr() const { - return (_base == InstPtr) ? (TypeInstPtr*)this : NULL; + return (_base == InstPtr) ? (TypeInstPtr*)this : nullptr; } inline const TypeInstPtr *Type::is_instptr() const { @@ -2086,7 +2086,7 @@ inline const TypeInstPtr *Type::is_instptr() const { } inline const TypeAryPtr *Type::isa_aryptr() const { - return (_base == AryPtr) ? (TypeAryPtr*)this : NULL; + return (_base == AryPtr) ? 
(TypeAryPtr*)this : nullptr; } inline const TypeAryPtr *Type::is_aryptr() const { @@ -2102,7 +2102,7 @@ inline const TypeNarrowOop *Type::is_narrowoop() const { inline const TypeNarrowOop *Type::isa_narrowoop() const { // OopPtr is the first and KlassPtr the last, with no non-oops between. - return (_base == NarrowOop) ? (TypeNarrowOop*)this : NULL; + return (_base == NarrowOop) ? (TypeNarrowOop*)this : nullptr; } inline const TypeNarrowKlass *Type::is_narrowklass() const { @@ -2111,7 +2111,7 @@ inline const TypeNarrowKlass *Type::is_narrowklass() const { } inline const TypeNarrowKlass *Type::isa_narrowklass() const { - return (_base == NarrowKlass) ? (TypeNarrowKlass*)this : NULL; + return (_base == NarrowKlass) ? (TypeNarrowKlass*)this : nullptr; } inline const TypeMetadataPtr *Type::is_metadataptr() const { @@ -2121,11 +2121,11 @@ inline const TypeMetadataPtr *Type::is_metadataptr() const { } inline const TypeMetadataPtr *Type::isa_metadataptr() const { - return (_base == MetadataPtr) ? (TypeMetadataPtr*)this : NULL; + return (_base == MetadataPtr) ? (TypeMetadataPtr*)this : nullptr; } inline const TypeKlassPtr *Type::isa_klassptr() const { - return (_base >= KlassPtr && _base <= AryKlassPtr ) ? (TypeKlassPtr*)this : NULL; + return (_base >= KlassPtr && _base <= AryKlassPtr ) ? (TypeKlassPtr*)this : nullptr; } inline const TypeKlassPtr *Type::is_klassptr() const { @@ -2134,7 +2134,7 @@ inline const TypeKlassPtr *Type::is_klassptr() const { } inline const TypeInstKlassPtr *Type::isa_instklassptr() const { - return (_base == InstKlassPtr) ? (TypeInstKlassPtr*)this : NULL; + return (_base == InstKlassPtr) ? (TypeInstKlassPtr*)this : nullptr; } inline const TypeInstKlassPtr *Type::is_instklassptr() const { @@ -2143,7 +2143,7 @@ inline const TypeInstKlassPtr *Type::is_instklassptr() const { } inline const TypeAryKlassPtr *Type::isa_aryklassptr() const { - return (_base == AryKlassPtr) ? (TypeAryKlassPtr*)this : NULL; + return (_base == AryKlassPtr) ? 
(TypeAryKlassPtr*)this : nullptr; } inline const TypeAryKlassPtr *Type::is_aryklassptr() const { @@ -2163,12 +2163,12 @@ inline const TypeOopPtr* Type::make_oopptr() const { inline const TypeNarrowOop* Type::make_narrowoop() const { return (_base == NarrowOop) ? is_narrowoop() : - (isa_ptr() ? TypeNarrowOop::make(is_ptr()) : NULL); + (isa_ptr() ? TypeNarrowOop::make(is_ptr()) : nullptr); } inline const TypeNarrowKlass* Type::make_narrowklass() const { return (_base == NarrowKlass) ? is_narrowklass() : - (isa_ptr() ? TypeNarrowKlass::make(is_ptr()) : NULL); + (isa_ptr() ? TypeNarrowKlass::make(is_ptr()) : nullptr); } inline bool Type::is_floatingpoint() const { diff --git a/src/hotspot/share/opto/vector.cpp b/src/hotspot/share/opto/vector.cpp index 12b5060b60c..55fce80af07 100644 --- a/src/hotspot/share/opto/vector.cpp +++ b/src/hotspot/share/opto/vector.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -212,7 +212,7 @@ void PhaseVector::scalarize_vbox_node(VectorBoxNode* vec_box) { } jvms = kit.sync_jvms(); - Node* new_vbox = NULL; + Node* new_vbox = nullptr; { Node* vect = vec_box->in(VectorBoxNode::Value); const TypeInstPtr* vbox_type = vec_box->box_type(); @@ -294,7 +294,7 @@ void PhaseVector::scalarize_vbox_node(VectorBoxNode* vec_box) { // to the allocated object with vector value. 
for (uint i = jvms->debug_start(); i < jvms->debug_end(); i++) { Node* debug = sfpt->in(i); - if (debug != NULL && debug->uncast(/*keep_deps*/false) == vec_box) { + if (debug != nullptr && debug->uncast(/*keep_deps*/false) == vec_box) { sfpt->set_req(i, sobj); } } @@ -404,7 +404,7 @@ Node* PhaseVector::expand_vbox_alloc_node(VectorBoxAllocateNode* vbox_alloc, ciField* field = ciEnv::current()->vector_VectorPayload_klass()->get_field_by_name(ciSymbols::payload_name(), ciSymbols::object_signature(), false); - assert(field != NULL, ""); + assert(field != nullptr, ""); Node* vec_field = kit.basic_plus_adr(vec_obj, field->offset_in_bytes()); const TypePtr* vec_adr_type = vec_field->bottom_type()->is_ptr(); @@ -445,7 +445,7 @@ void PhaseVector::expand_vunbox_node(VectorUnboxNode* vec_unbox) { ciField* field = ciEnv::current()->vector_VectorPayload_klass()->get_field_by_name(ciSymbols::payload_name(), ciSymbols::object_signature(), false); - assert(field != NULL, ""); + assert(field != nullptr, ""); int offset = field->offset_in_bytes(); Node* vec_adr = kit.basic_plus_adr(obj, offset); diff --git a/src/hotspot/share/opto/vectorIntrinsics.cpp b/src/hotspot/share/opto/vectorIntrinsics.cpp index 717b4264020..ae0315c61c7 100644 --- a/src/hotspot/share/opto/vectorIntrinsics.cpp +++ b/src/hotspot/share/opto/vectorIntrinsics.cpp @@ -43,14 +43,14 @@ static bool check_vbox(const TypeInstPtr* vbox_type) { assert(is_vector(ik), "not a vector"); ciField* fd1 = ik->get_field_by_name(ciSymbols::ETYPE_name(), ciSymbols::class_signature(), /* is_static */ true); - assert(fd1 != NULL, "element type info is missing"); + assert(fd1 != nullptr, "element type info is missing"); ciConstant val1 = fd1->constant_value(); BasicType elem_bt = val1.as_object()->as_instance()->java_mirror_type()->basic_type(); assert(is_java_primitive(elem_bt), "element type info is missing"); ciField* fd2 = ik->get_field_by_name(ciSymbols::VLENGTH_name(), ciSymbols::int_signature(), /* is_static */ true); - 
assert(fd2 != NULL, "vector length info is missing"); + assert(fd2 != nullptr, "vector length info is missing"); ciConstant val2 = fd2->constant_value(); assert(val2.as_int() > 0, "vector length info is missing"); @@ -164,10 +164,10 @@ Node* GraphKit::unbox_vector(Node* v, const TypeInstPtr* vbox_type, BasicType el assert(EnableVectorSupport, ""); const TypeInstPtr* vbox_type_v = gvn().type(v)->is_instptr(); if (vbox_type->instance_klass() != vbox_type_v->instance_klass()) { - return NULL; // arguments don't agree on vector shapes + return nullptr; // arguments don't agree on vector shapes } if (vbox_type_v->maybe_null()) { - return NULL; // no nulls are allowed + return nullptr; // no nulls are allowed } assert(check_vbox(vbox_type), ""); const TypeVect* vt = TypeVect::make(elem_bt, num_elem, is_vector_mask(vbox_type->instance_klass())); @@ -319,10 +319,10 @@ bool LibraryCallKit::arch_supports_vector(int sopc, int num_elem, BasicType type } static bool is_klass_initialized(const TypeInstPtr* vec_klass) { - if (vec_klass->const_oop() == NULL) { + if (vec_klass->const_oop() == nullptr) { return false; // uninitialized or some kind of unsafe access } - assert(vec_klass->const_oop()->as_instance()->java_lang_Class_klass() != NULL, "klass instance expected"); + assert(vec_klass->const_oop()->as_instance()->java_lang_Class_klass() != nullptr, "klass instance expected"); ciInstanceKlass* klass = vec_klass->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass(); return klass->is_initialized(); } @@ -358,8 +358,8 @@ bool LibraryCallKit::inline_vector_nary_operation(int n) { const TypeInstPtr* elem_klass = gvn().type(argument(3))->isa_instptr(); const TypeInt* vlen = gvn().type(argument(4))->isa_int(); - if (opr == NULL || vector_klass == NULL || elem_klass == NULL || vlen == NULL || - !opr->is_con() || vector_klass->const_oop() == NULL || elem_klass->const_oop() == NULL || !vlen->is_con()) { + if (opr == nullptr || vector_klass == nullptr || elem_klass == 
nullptr || vlen == nullptr || + !opr->is_con() || vector_klass->const_oop() == nullptr || elem_klass->const_oop() == nullptr || !vlen->is_con()) { if (C->print_intrinsics()) { tty->print_cr(" ** missing constant: opr=%s vclass=%s etype=%s vlen=%s", NodeClassNames[argument(0)->Opcode()], @@ -389,7 +389,7 @@ bool LibraryCallKit::inline_vector_nary_operation(int n) { const Type* vmask_type = gvn().type(argument(n + 5)); bool is_masked_op = vmask_type != TypePtr::NULL_PTR; if (is_masked_op) { - if (mask_klass == NULL || mask_klass->const_oop() == NULL) { + if (mask_klass == nullptr || mask_klass->const_oop() == nullptr) { if (C->print_intrinsics()) { tty->print_cr(" ** missing constant: maskclass=%s", NodeClassNames[argument(2)->Opcode()]); } @@ -481,11 +481,11 @@ bool LibraryCallKit::inline_vector_nary_operation(int n) { return false; } - Node* opd1 = NULL; Node* opd2 = NULL; Node* opd3 = NULL; + Node* opd1 = nullptr; Node* opd2 = nullptr; Node* opd3 = nullptr; switch (n) { case 3: { opd3 = unbox_vector(argument(7), vbox_type, elem_bt, num_elem); - if (opd3 == NULL) { + if (opd3 == nullptr) { if (C->print_intrinsics()) { tty->print_cr(" ** unbox failed v3=%s", NodeClassNames[argument(7)->Opcode()]); @@ -496,7 +496,7 @@ bool LibraryCallKit::inline_vector_nary_operation(int n) { } case 2: { opd2 = unbox_vector(argument(6), vbox_type, elem_bt, num_elem); - if (opd2 == NULL) { + if (opd2 == nullptr) { if (C->print_intrinsics()) { tty->print_cr(" ** unbox failed v2=%s", NodeClassNames[argument(6)->Opcode()]); @@ -507,7 +507,7 @@ bool LibraryCallKit::inline_vector_nary_operation(int n) { } case 1: { opd1 = unbox_vector(argument(5), vbox_type, elem_bt, num_elem); - if (opd1 == NULL) { + if (opd1 == nullptr) { if (C->print_intrinsics()) { tty->print_cr(" ** unbox failed v1=%s", NodeClassNames[argument(5)->Opcode()]); @@ -519,13 +519,13 @@ bool LibraryCallKit::inline_vector_nary_operation(int n) { default: fatal("unsupported arity: %d", n); } - Node* mask = NULL; + Node* mask 
= nullptr; if (is_masked_op) { ciKlass* mbox_klass = mask_klass->const_oop()->as_instance()->java_lang_Class_klass(); assert(is_vector_mask(mbox_klass), "argument(2) should be a mask class"); const TypeInstPtr* mbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, mbox_klass); mask = unbox_vector(argument(n + 5), mbox_type, elem_bt, num_elem); - if (mask == NULL) { + if (mask == nullptr) { if (C->print_intrinsics()) { tty->print_cr(" ** unbox failed mask=%s", NodeClassNames[argument(n + 5)->Opcode()]); @@ -534,11 +534,11 @@ bool LibraryCallKit::inline_vector_nary_operation(int n) { } } - Node* operation = NULL; + Node* operation = nullptr; if (opc == Op_CallLeafVector) { assert(UseVectorStubs, "sanity"); operation = gen_call_to_svml(opr->get_con(), elem_bt, num_elem, opd1, opd2); - if (operation == NULL) { + if (operation == nullptr) { if (C->print_intrinsics()) { tty->print_cr(" ** svml call failed for %s_%s_%d", (elem_bt == T_FLOAT)?"float":"double", @@ -563,7 +563,7 @@ bool LibraryCallKit::inline_vector_nary_operation(int n) { } } - if (is_masked_op && mask != NULL) { + if (is_masked_op && mask != nullptr) { if (use_predicate) { operation->add_req(mask); operation->add_flag(Node::Flag_is_predicated_vector); @@ -595,11 +595,11 @@ bool LibraryCallKit::inline_vector_shuffle_iota() { Node* start = argument(4); Node* step = argument(5); - if (shuffle_klass == NULL || vlen == NULL || start_val == NULL || step_val == NULL || wrap == NULL) { + if (shuffle_klass == nullptr || vlen == nullptr || start_val == nullptr || step_val == nullptr || wrap == nullptr) { return false; // dead code } if (!vlen->is_con() || !is_power_of_2(vlen->get_con()) || - shuffle_klass->const_oop() == NULL || !wrap->is_con()) { + shuffle_klass->const_oop() == nullptr || !wrap->is_con()) { return false; // not enough info for intrinsification } if (!is_klass_initialized(shuffle_klass)) { @@ -689,7 +689,7 @@ bool LibraryCallKit::inline_vector_mask_operation() { const TypeInt* vlen = 
gvn().type(argument(3))->isa_int(); Node* mask = argument(4); - if (mask_klass == NULL || elem_klass == NULL || mask->is_top() || vlen == NULL) { + if (mask_klass == nullptr || elem_klass == nullptr || mask->is_top() || vlen == nullptr) { return false; // dead code } @@ -717,7 +717,7 @@ bool LibraryCallKit::inline_vector_mask_operation() { ciKlass* mbox_klass = mask_klass->const_oop()->as_instance()->java_lang_Class_klass(); const TypeInstPtr* mask_box_type = TypeInstPtr::make_exact(TypePtr::NotNull, mbox_klass); Node* mask_vec = unbox_vector(mask, mask_box_type, elem_bt, num_elem, true); - if (mask_vec == NULL) { + if (mask_vec == nullptr) { if (C->print_intrinsics()) { tty->print_cr(" ** unbox failed mask=%s", NodeClassNames[argument(4)->Opcode()]); @@ -725,7 +725,7 @@ bool LibraryCallKit::inline_vector_mask_operation() { return false; } - if (mask_vec->bottom_type()->isa_vectmask() == NULL) { + if (mask_vec->bottom_type()->isa_vectmask() == nullptr) { mask_vec = gvn().transform(VectorStoreMaskNode::make(gvn(), mask_vec, elem_bt, num_elem)); } const Type* maskoper_ty = mopc == Op_VectorMaskToLong ? 
(const Type*)TypeLong::LONG : (const Type*)TypeInt::INT; @@ -753,10 +753,10 @@ bool LibraryCallKit::inline_vector_shuffle_to_vector() { Node* shuffle = argument(3); const TypeInt* vlen = gvn().type(argument(4))->isa_int(); - if (vector_klass == NULL || elem_klass == NULL || shuffle_klass == NULL || shuffle->is_top() || vlen == NULL) { + if (vector_klass == nullptr || elem_klass == nullptr || shuffle_klass == nullptr || shuffle->is_top() || vlen == nullptr) { return false; // dead code } - if (!vlen->is_con() || vector_klass->const_oop() == NULL || shuffle_klass->const_oop() == NULL) { + if (!vlen->is_con() || vector_klass->const_oop() == nullptr || shuffle_klass->const_oop() == nullptr) { return false; // not enough info for intrinsification } if (!is_klass_initialized(shuffle_klass) || !is_klass_initialized(vector_klass) ) { @@ -821,8 +821,8 @@ bool LibraryCallKit::inline_vector_frombits_coerced() { // MODE_BITS_COERCED_LONG_TO_MASK for VectorMask.fromLong operation. const TypeInt* mode = gvn().type(argument(5))->isa_int(); - if (vector_klass == NULL || elem_klass == NULL || vlen == NULL || mode == NULL || - bits_type == NULL || vector_klass->const_oop() == NULL || elem_klass->const_oop() == NULL || + if (vector_klass == nullptr || elem_klass == nullptr || vlen == nullptr || mode == nullptr || + bits_type == nullptr || vector_klass->const_oop() == nullptr || elem_klass->const_oop() == nullptr || !vlen->is_con() || !mode->is_con()) { if (C->print_intrinsics()) { tty->print_cr(" ** missing constant: vclass=%s etype=%s vlen=%s bitwise=%s", @@ -867,7 +867,7 @@ bool LibraryCallKit::inline_vector_frombits_coerced() { return false; // not supported } - Node* broadcast = NULL; + Node* broadcast = nullptr; Node* bits = argument(3); Node* elem = bits; @@ -916,7 +916,7 @@ bool LibraryCallKit::inline_vector_frombits_coerced() { } static bool elem_consistent_with_arr(BasicType elem_bt, const TypeAryPtr* arr_type) { - assert(arr_type != NULL, "unexpected"); + assert(arr_type != 
nullptr, "unexpected"); BasicType arr_elem_bt = arr_type->elem()->array_element_basic_type(); if (elem_bt == arr_elem_bt) { return true; @@ -955,8 +955,8 @@ bool LibraryCallKit::inline_vector_mem_operation(bool is_store) { const TypeInstPtr* elem_klass = gvn().type(argument(1))->isa_instptr(); const TypeInt* vlen = gvn().type(argument(2))->isa_int(); - if (vector_klass == NULL || elem_klass == NULL || vlen == NULL || - vector_klass->const_oop() == NULL || elem_klass->const_oop() == NULL || !vlen->is_con()) { + if (vector_klass == nullptr || elem_klass == nullptr || vlen == nullptr || + vector_klass->const_oop() == nullptr || elem_klass->const_oop() == nullptr || !vlen->is_con()) { if (C->print_intrinsics()) { tty->print_cr(" ** missing constant: vclass=%s etype=%s vlen=%s", NodeClassNames[argument(0)->Opcode()], @@ -1016,15 +1016,15 @@ bool LibraryCallKit::inline_vector_mem_operation(bool is_store) { const bool is_mixed_access = !in_heap && !in_native; - const bool is_mismatched_access = in_heap && (addr_type->isa_aryptr() == NULL); + const bool is_mismatched_access = in_heap && (addr_type->isa_aryptr() == nullptr); const bool needs_cpu_membar = is_mixed_access || is_mismatched_access; // Now handle special case where load/store happens from/to byte array but element type is not byte. - bool using_byte_array = arr_type != NULL && arr_type->elem()->array_element_basic_type() == T_BYTE && elem_bt != T_BYTE; + bool using_byte_array = arr_type != nullptr && arr_type->elem()->array_element_basic_type() == T_BYTE && elem_bt != T_BYTE; // Handle loading masks. 
// If there is no consistency between array and vector element types, it must be special byte array case or loading masks - if (arr_type != NULL && !using_byte_array && !is_mask && !elem_consistent_with_arr(elem_bt, arr_type)) { + if (arr_type != nullptr && !using_byte_array && !is_mask && !elem_consistent_with_arr(elem_bt, arr_type)) { if (C->print_intrinsics()) { tty->print_cr(" ** not supported: arity=%d op=%s vlen=%d etype=%s atype=%s ismask=no", is_store, is_store ? "store" : "load", @@ -1073,7 +1073,7 @@ bool LibraryCallKit::inline_vector_mem_operation(bool is_store) { if (is_store) { Node* val = unbox_vector(argument(6), vbox_type, elem_bt, num_elem); - if (val == NULL) { + if (val == nullptr) { set_map(old_map); set_sp(old_sp); return false; // operand unboxing failed @@ -1094,7 +1094,7 @@ bool LibraryCallKit::inline_vector_mem_operation(bool is_store) { set_memory(vstore, addr_type); } else { // When using byte array, we need to load as byte then reinterpret the value. Otherwise, do a simple vector load. 
- Node* vload = NULL; + Node* vload = nullptr; if (using_byte_array) { int load_num_elem = num_elem * type2aelembytes(elem_bt); vload = gvn().transform(LoadVectorNode::make(0, control(), memory(addr), addr, addr_type, load_num_elem, T_BYTE)); @@ -1151,9 +1151,9 @@ bool LibraryCallKit::inline_vector_mem_masked_operation(bool is_store) { const TypeInstPtr* elem_klass = gvn().type(argument(2))->isa_instptr(); const TypeInt* vlen = gvn().type(argument(3))->isa_int(); - if (vector_klass == NULL || mask_klass == NULL || elem_klass == NULL || vlen == NULL || - vector_klass->const_oop() == NULL || mask_klass->const_oop() == NULL || - elem_klass->const_oop() == NULL || !vlen->is_con()) { + if (vector_klass == nullptr || mask_klass == nullptr || elem_klass == nullptr || vlen == nullptr || + vector_klass->const_oop() == nullptr || mask_klass->const_oop() == nullptr || + elem_klass->const_oop() == nullptr || !vlen->is_con()) { if (C->print_intrinsics()) { tty->print_cr(" ** missing constant: vclass=%s mclass=%s etype=%s vlen=%s", NodeClassNames[argument(0)->Opcode()], @@ -1200,9 +1200,9 @@ bool LibraryCallKit::inline_vector_mem_masked_operation(bool is_store) { const TypeAryPtr* arr_type = addr_type->isa_aryptr(); // Now handle special case where load/store happens from/to byte array but element type is not byte. - bool using_byte_array = arr_type != NULL && arr_type->elem()->array_element_basic_type() == T_BYTE && elem_bt != T_BYTE; + bool using_byte_array = arr_type != nullptr && arr_type->elem()->array_element_basic_type() == T_BYTE && elem_bt != T_BYTE; // If there is no consistency between array and vector element types, it must be special byte array case - if (arr_type != NULL && !using_byte_array && !elem_consistent_with_arr(elem_bt, arr_type)) { + if (arr_type != nullptr && !using_byte_array && !elem_consistent_with_arr(elem_bt, arr_type)) { if (C->print_intrinsics()) { tty->print_cr(" ** not supported: arity=%d op=%s vlen=%d etype=%s atype=%s", is_store, is_store ? 
"storeMasked" : "loadMasked", @@ -1293,7 +1293,7 @@ bool LibraryCallKit::inline_vector_mem_masked_operation(bool is_store) { return false; } - // Can base be NULL? Otherwise, always on-heap access. + // Can base be null? Otherwise, always on-heap access. bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(gvn().type(base)); if (can_access_non_heap) { insert_mem_bar(Op_MemBarCPUOrder); @@ -1306,7 +1306,7 @@ bool LibraryCallKit::inline_vector_mem_masked_operation(bool is_store) { const TypeInstPtr* mbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, mbox_klass); Node* mask = unbox_vector(is_store ? argument(8) : argument(7), mbox_type, elem_bt, num_elem); - if (mask == NULL) { + if (mask == nullptr) { if (C->print_intrinsics()) { tty->print_cr(" ** unbox failed mask=%s", is_store ? NodeClassNames[argument(8)->Opcode()] @@ -1319,7 +1319,7 @@ bool LibraryCallKit::inline_vector_mem_masked_operation(bool is_store) { if (is_store) { Node* val = unbox_vector(argument(7), vbox_type, elem_bt, num_elem); - if (val == NULL) { + if (val == nullptr) { if (C->print_intrinsics()) { tty->print_cr(" ** unbox failed vector=%s", NodeClassNames[argument(7)->Opcode()]); @@ -1342,7 +1342,7 @@ bool LibraryCallKit::inline_vector_mem_masked_operation(bool is_store) { Node* vstore = gvn().transform(new StoreVectorMaskedNode(control(), memory(addr), addr, val, addr_type, mask)); set_memory(vstore, addr_type); } else { - Node* vload = NULL; + Node* vload = nullptr; if (using_byte_array) { // Reinterpret the vector mask to byte type. 
@@ -1413,8 +1413,8 @@ bool LibraryCallKit::inline_vector_gather_scatter(bool is_scatter) { const TypeInt* vlen = gvn().type(argument(3))->isa_int(); const TypeInstPtr* vector_idx_klass = gvn().type(argument(4))->isa_instptr(); - if (vector_klass == NULL || elem_klass == NULL || vector_idx_klass == NULL || vlen == NULL || - vector_klass->const_oop() == NULL || elem_klass->const_oop() == NULL || vector_idx_klass->const_oop() == NULL || !vlen->is_con()) { + if (vector_klass == nullptr || elem_klass == nullptr || vector_idx_klass == nullptr || vlen == nullptr || + vector_klass->const_oop() == nullptr || elem_klass->const_oop() == nullptr || vector_idx_klass->const_oop() == nullptr || !vlen->is_con()) { if (C->print_intrinsics()) { tty->print_cr(" ** missing constant: vclass=%s etype=%s vlen=%s viclass=%s", NodeClassNames[argument(0)->Opcode()], @@ -1446,7 +1446,7 @@ bool LibraryCallKit::inline_vector_gather_scatter(bool is_scatter) { const Type* vmask_type = gvn().type(is_scatter ? argument(10) : argument(9)); bool is_masked_op = vmask_type != TypePtr::NULL_PTR; if (is_masked_op) { - if (mask_klass == NULL || mask_klass->const_oop() == NULL) { + if (mask_klass == nullptr || mask_klass->const_oop() == nullptr) { if (C->print_intrinsics()) { tty->print_cr(" ** missing constant: maskclass=%s", NodeClassNames[argument(1)->Opcode()]); } @@ -1512,7 +1512,7 @@ bool LibraryCallKit::inline_vector_gather_scatter(bool is_scatter) { const TypeAryPtr* arr_type = addr_type->isa_aryptr(); // The array must be consistent with vector type - if (arr_type == NULL || (arr_type != NULL && !elem_consistent_with_arr(elem_bt, arr_type))) { + if (arr_type == nullptr || (arr_type != nullptr && !elem_consistent_with_arr(elem_bt, arr_type))) { if (C->print_intrinsics()) { tty->print_cr(" ** not supported: arity=%d op=%s vlen=%d etype=%s atype=%s ismask=no", is_scatter, is_scatter ? 
"scatter" : "gather", @@ -1526,7 +1526,7 @@ bool LibraryCallKit::inline_vector_gather_scatter(bool is_scatter) { ciKlass* vbox_klass = vector_klass->const_oop()->as_instance()->java_lang_Class_klass(); const TypeInstPtr* vbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass); ciKlass* vbox_idx_klass = vector_idx_klass->const_oop()->as_instance()->java_lang_Class_klass(); - if (vbox_idx_klass == NULL) { + if (vbox_idx_klass == nullptr) { set_map(old_map); set_sp(old_sp); return false; @@ -1534,18 +1534,18 @@ bool LibraryCallKit::inline_vector_gather_scatter(bool is_scatter) { const TypeInstPtr* vbox_idx_type = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_idx_klass); Node* index_vect = unbox_vector(argument(8), vbox_idx_type, T_INT, num_elem); - if (index_vect == NULL) { + if (index_vect == nullptr) { set_map(old_map); set_sp(old_sp); return false; } - Node* mask = NULL; + Node* mask = nullptr; if (is_masked_op) { ciKlass* mbox_klass = mask_klass->const_oop()->as_instance()->java_lang_Class_klass(); const TypeInstPtr* mbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, mbox_klass); mask = unbox_vector(is_scatter ? argument(10) : argument(9), mbox_type, elem_bt, num_elem); - if (mask == NULL) { + if (mask == nullptr) { if (C->print_intrinsics()) { tty->print_cr(" ** unbox failed mask=%s", is_scatter ? 
NodeClassNames[argument(10)->Opcode()] @@ -1560,23 +1560,23 @@ bool LibraryCallKit::inline_vector_gather_scatter(bool is_scatter) { const TypeVect* vector_type = TypeVect::make(elem_bt, num_elem); if (is_scatter) { Node* val = unbox_vector(argument(9), vbox_type, elem_bt, num_elem); - if (val == NULL) { + if (val == nullptr) { set_map(old_map); set_sp(old_sp); return false; // operand unboxing failed } set_all_memory(reset_memory()); - Node* vstore = NULL; - if (mask != NULL) { + Node* vstore = nullptr; + if (mask != nullptr) { vstore = gvn().transform(new StoreVectorScatterMaskedNode(control(), memory(addr), addr, addr_type, val, index_vect, mask)); } else { vstore = gvn().transform(new StoreVectorScatterNode(control(), memory(addr), addr, addr_type, val, index_vect)); } set_memory(vstore, addr_type); } else { - Node* vload = NULL; - if (mask != NULL) { + Node* vload = nullptr; + if (mask != nullptr) { vload = gvn().transform(new LoadVectorGatherMaskedNode(control(), memory(addr), addr, addr_type, vector_type, index_vect, mask)); } else { vload = gvn().transform(new LoadVectorGatherNode(control(), memory(addr), addr, addr_type, vector_type, index_vect)); @@ -1605,8 +1605,8 @@ bool LibraryCallKit::inline_vector_reduction() { const TypeInstPtr* elem_klass = gvn().type(argument(3))->isa_instptr(); const TypeInt* vlen = gvn().type(argument(4))->isa_int(); - if (opr == NULL || vector_klass == NULL || elem_klass == NULL || vlen == NULL || - !opr->is_con() || vector_klass->const_oop() == NULL || elem_klass->const_oop() == NULL || !vlen->is_con()) { + if (opr == nullptr || vector_klass == nullptr || elem_klass == nullptr || vlen == nullptr || + !opr->is_con() || vector_klass->const_oop() == nullptr || elem_klass->const_oop() == nullptr || !vlen->is_con()) { if (C->print_intrinsics()) { tty->print_cr(" ** missing constant: opr=%s vclass=%s etype=%s vlen=%s", NodeClassNames[argument(0)->Opcode()], @@ -1633,7 +1633,7 @@ bool LibraryCallKit::inline_vector_reduction() { const 
Type* vmask_type = gvn().type(argument(6)); bool is_masked_op = vmask_type != TypePtr::NULL_PTR; if (is_masked_op) { - if (mask_klass == NULL || mask_klass->const_oop() == NULL) { + if (mask_klass == nullptr || mask_klass->const_oop() == nullptr) { if (C->print_intrinsics()) { tty->print_cr(" ** missing constant: maskclass=%s", NodeClassNames[argument(2)->Opcode()]); } @@ -1683,17 +1683,17 @@ bool LibraryCallKit::inline_vector_reduction() { const TypeInstPtr* vbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass); Node* opd = unbox_vector(argument(5), vbox_type, elem_bt, num_elem); - if (opd == NULL) { + if (opd == nullptr) { return false; // operand unboxing failed } - Node* mask = NULL; + Node* mask = nullptr; if (is_masked_op) { ciKlass* mbox_klass = mask_klass->const_oop()->as_instance()->java_lang_Class_klass(); assert(is_vector_mask(mbox_klass), "argument(2) should be a mask class"); const TypeInstPtr* mbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, mbox_klass); mask = unbox_vector(argument(6), mbox_type, elem_bt, num_elem); - if (mask == NULL) { + if (mask == nullptr) { if (C->print_intrinsics()) { tty->print_cr(" ** unbox failed mask=%s", NodeClassNames[argument(6)->Opcode()]); @@ -1703,24 +1703,24 @@ bool LibraryCallKit::inline_vector_reduction() { } Node* init = ReductionNode::make_reduction_input(gvn(), opc, elem_bt); - Node* value = NULL; - if (mask == NULL) { + Node* value = nullptr; + if (mask == nullptr) { assert(!is_masked_op, "Masked op needs the mask value never null"); - value = ReductionNode::make(opc, NULL, init, opd, elem_bt); + value = ReductionNode::make(opc, nullptr, init, opd, elem_bt); } else { if (use_predicate) { - value = ReductionNode::make(opc, NULL, init, opd, elem_bt); + value = ReductionNode::make(opc, nullptr, init, opd, elem_bt); value->add_req(mask); value->add_flag(Node::Flag_is_predicated_vector); } else { Node* reduce_identity = gvn().transform(VectorNode::scalar2vector(init, num_elem, 
Type::get_const_basic_type(elem_bt))); value = gvn().transform(new VectorBlendNode(reduce_identity, opd, mask)); - value = ReductionNode::make(opc, NULL, init, value, elem_bt); + value = ReductionNode::make(opc, nullptr, init, value, elem_bt); } } value = gvn().transform(value); - Node* bits = NULL; + Node* bits = nullptr; switch (elem_bt) { case T_BYTE: case T_SHORT: @@ -1758,8 +1758,8 @@ bool LibraryCallKit::inline_vector_test() { const TypeInstPtr* elem_klass = gvn().type(argument(2))->isa_instptr(); const TypeInt* vlen = gvn().type(argument(3))->isa_int(); - if (cond == NULL || vector_klass == NULL || elem_klass == NULL || vlen == NULL || - !cond->is_con() || vector_klass->const_oop() == NULL || elem_klass->const_oop() == NULL || !vlen->is_con()) { + if (cond == nullptr || vector_klass == nullptr || elem_klass == nullptr || vlen == nullptr || + !cond->is_con() || vector_klass->const_oop() == nullptr || elem_klass->const_oop() == nullptr || !vlen->is_con()) { if (C->print_intrinsics()) { tty->print_cr(" ** missing constant: cond=%s vclass=%s etype=%s vlen=%s", NodeClassNames[argument(0)->Opcode()], @@ -1805,7 +1805,7 @@ bool LibraryCallKit::inline_vector_test() { } else { opd2 = opd1; } - if (opd1 == NULL || opd2 == NULL) { + if (opd1 == nullptr || opd2 == nullptr) { return false; // operand unboxing failed } @@ -1833,11 +1833,11 @@ bool LibraryCallKit::inline_vector_blend() { const TypeInstPtr* elem_klass = gvn().type(argument(2))->isa_instptr(); const TypeInt* vlen = gvn().type(argument(3))->isa_int(); - if (mask_klass == NULL || vector_klass == NULL || elem_klass == NULL || vlen == NULL) { + if (mask_klass == nullptr || vector_klass == nullptr || elem_klass == nullptr || vlen == nullptr) { return false; // dead code } - if (mask_klass->const_oop() == NULL || vector_klass->const_oop() == NULL || - elem_klass->const_oop() == NULL || !vlen->is_con()) { + if (mask_klass->const_oop() == nullptr || vector_klass->const_oop() == nullptr || + elem_klass->const_oop() 
== nullptr || !vlen->is_con()) { if (C->print_intrinsics()) { tty->print_cr(" ** missing constant: vclass=%s mclass=%s etype=%s vlen=%s", NodeClassNames[argument(0)->Opcode()], @@ -1881,7 +1881,7 @@ bool LibraryCallKit::inline_vector_blend() { Node* v2 = unbox_vector(argument(5), vbox_type, elem_bt, num_elem); Node* mask = unbox_vector(argument(6), mbox_type, mask_bt, num_elem); - if (v1 == NULL || v2 == NULL || mask == NULL) { + if (v1 == nullptr || v2 == nullptr || mask == nullptr) { return false; // operand unboxing failed } @@ -1907,11 +1907,11 @@ bool LibraryCallKit::inline_vector_compare() { const TypeInstPtr* elem_klass = gvn().type(argument(3))->isa_instptr(); const TypeInt* vlen = gvn().type(argument(4))->isa_int(); - if (cond == NULL || vector_klass == NULL || mask_klass == NULL || elem_klass == NULL || vlen == NULL) { + if (cond == nullptr || vector_klass == nullptr || mask_klass == nullptr || elem_klass == nullptr || vlen == nullptr) { return false; // dead code } - if (!cond->is_con() || vector_klass->const_oop() == NULL || mask_klass->const_oop() == NULL || - elem_klass->const_oop() == NULL || !vlen->is_con()) { + if (!cond->is_con() || vector_klass->const_oop() == nullptr || mask_klass->const_oop() == nullptr || + elem_klass->const_oop() == nullptr || !vlen->is_con()) { if (C->print_intrinsics()) { tty->print_cr(" ** missing constant: cond=%s vclass=%s mclass=%s etype=%s vlen=%s", NodeClassNames[argument(0)->Opcode()], @@ -1968,8 +1968,8 @@ bool LibraryCallKit::inline_vector_compare() { Node* v2 = unbox_vector(argument(6), vbox_type, elem_bt, num_elem); bool is_masked_op = argument(7)->bottom_type() != TypePtr::NULL_PTR; - Node* mask = is_masked_op ? unbox_vector(argument(7), mbox_type, elem_bt, num_elem) : NULL; - if (is_masked_op && mask == NULL) { + Node* mask = is_masked_op ? 
unbox_vector(argument(7), mbox_type, elem_bt, num_elem) : nullptr; + if (is_masked_op && mask == nullptr) { if (C->print_intrinsics()) { tty->print_cr(" ** not supported: mask = null arity=2 op=comp/%d vlen=%d etype=%s ismask=usestore is_masked_op=1", cond->get_con(), num_elem, type2name(elem_bt)); @@ -1986,7 +1986,7 @@ bool LibraryCallKit::inline_vector_compare() { return false; } - if (v1 == NULL || v2 == NULL) { + if (v1 == nullptr || v2 == nullptr) { return false; // operand unboxing failed } BoolTest::mask pred = (BoolTest::mask)cond->get_con(); @@ -2028,12 +2028,12 @@ bool LibraryCallKit::inline_vector_rearrange() { const TypeInstPtr* elem_klass = gvn().type(argument(3))->isa_instptr(); const TypeInt* vlen = gvn().type(argument(4))->isa_int(); - if (vector_klass == NULL || shuffle_klass == NULL || elem_klass == NULL || vlen == NULL) { + if (vector_klass == nullptr || shuffle_klass == nullptr || elem_klass == nullptr || vlen == nullptr) { return false; // dead code } - if (shuffle_klass->const_oop() == NULL || - vector_klass->const_oop() == NULL || - elem_klass->const_oop() == NULL || + if (shuffle_klass->const_oop() == nullptr || + vector_klass->const_oop() == nullptr || + elem_klass->const_oop() == nullptr || !vlen->is_con()) { if (C->print_intrinsics()) { tty->print_cr(" ** missing constant: vclass=%s sclass=%s etype=%s vlen=%s", @@ -2073,8 +2073,8 @@ bool LibraryCallKit::inline_vector_rearrange() { bool is_masked_op = argument(7)->bottom_type() != TypePtr::NULL_PTR; bool use_predicate = is_masked_op; if (is_masked_op && - (mask_klass == NULL || - mask_klass->const_oop() == NULL || + (mask_klass == nullptr || + mask_klass->const_oop() == nullptr || !is_klass_initialized(mask_klass))) { if (C->print_intrinsics()) { tty->print_cr(" ** mask_klass argument not initialized"); @@ -2103,16 +2103,16 @@ bool LibraryCallKit::inline_vector_rearrange() { Node* v1 = unbox_vector(argument(5), vbox_type, elem_bt, num_elem); Node* shuffle = unbox_vector(argument(6), 
shbox_type, shuffle_bt, num_elem); - if (v1 == NULL || shuffle == NULL) { + if (v1 == nullptr || shuffle == nullptr) { return false; // operand unboxing failed } - Node* mask = NULL; + Node* mask = nullptr; if (is_masked_op) { ciKlass* mbox_klass = mask_klass->const_oop()->as_instance()->java_lang_Class_klass(); const TypeInstPtr* mbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, mbox_klass); mask = unbox_vector(argument(7), mbox_type, elem_bt, num_elem); - if (mask == NULL) { + if (mask == nullptr) { if (C->print_intrinsics()) { tty->print_cr(" ** not supported: arity=3 op=shuffle/rearrange vlen=%d etype=%s ismask=useload is_masked_op=1", num_elem, type2name(elem_bt)); @@ -2143,9 +2143,9 @@ bool LibraryCallKit::inline_vector_rearrange() { } static address get_svml_address(int vop, int bits, BasicType bt, char* name_ptr, int name_len) { - address addr = NULL; + address addr = nullptr; assert(UseVectorStubs, "sanity"); - assert(name_ptr != NULL, "unexpected"); + assert(name_ptr != nullptr, "unexpected"); assert((vop >= VectorSupport::VECTOR_OP_SVML_START) && (vop <= VectorSupport::VECTOR_OP_SVML_END), "unexpected"); int op = vop - VectorSupport::VECTOR_OP_SVML_START; @@ -2165,7 +2165,7 @@ static address get_svml_address(int vop, int bits, BasicType bt, char* name_ptr, break; default: snprintf(name_ptr, name_len, "invalid"); - addr = NULL; + addr = nullptr; Unimplemented(); break; } @@ -2176,19 +2176,19 @@ static address get_svml_address(int vop, int bits, BasicType bt, char* name_ptr, Node* LibraryCallKit::gen_call_to_svml(int vector_api_op_id, BasicType bt, int num_elem, Node* opd1, Node* opd2) { assert(UseVectorStubs, "sanity"); assert(vector_api_op_id >= VectorSupport::VECTOR_OP_SVML_START && vector_api_op_id <= VectorSupport::VECTOR_OP_SVML_END, "need valid op id"); - assert(opd1 != NULL, "must not be null"); + assert(opd1 != nullptr, "must not be null"); const TypeVect* vt = TypeVect::make(bt, num_elem); - const TypeFunc* call_type = 
OptoRuntime::Math_Vector_Vector_Type(opd2 != NULL ? 2 : 1, vt, vt); + const TypeFunc* call_type = OptoRuntime::Math_Vector_Vector_Type(opd2 != nullptr ? 2 : 1, vt, vt); char name[100] = ""; // Get address for svml method. address addr = get_svml_address(vector_api_op_id, vt->length_in_bytes() * BitsPerByte, bt, name, 100); - if (addr == NULL) { - return NULL; + if (addr == nullptr) { + return nullptr; } - assert(name != NULL, "name must not be null"); + assert(name[0] != '\0', "name must not be empty"); Node* operation = make_runtime_call(RC_VECTOR, call_type, addr,
nullptr; if (is_shift) { opd2 = vector_shift_count(cnt, opc, elem_bt, num_elem); } else { @@ -2332,16 +2332,16 @@ bool LibraryCallKit::inline_vector_broadcast_int() { } } - if (opd1 == NULL || opd2 == NULL) { + if (opd1 == nullptr || opd2 == nullptr) { return false; } - Node* mask = NULL; + Node* mask = nullptr; if (is_masked_op) { ciKlass* mbox_klass = mask_klass->const_oop()->as_instance()->java_lang_Class_klass(); const TypeInstPtr* mbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, mbox_klass); mask = unbox_vector(argument(7), mbox_type, elem_bt, num_elem); - if (mask == NULL) { + if (mask == nullptr) { if (C->print_intrinsics()) { tty->print_cr(" ** unbox failed mask=%s", NodeClassNames[argument(7)->Opcode()]); } @@ -2350,7 +2350,7 @@ bool LibraryCallKit::inline_vector_broadcast_int() { } Node* operation = VectorNode::make(opc, opd1, opd2, num_elem, elem_bt); - if (is_masked_op && mask != NULL) { + if (is_masked_op && mask != nullptr) { if (use_predicate) { operation->add_req(mask); operation->add_flag(Node::Flag_is_predicated_vector); @@ -2386,14 +2386,14 @@ bool LibraryCallKit::inline_vector_convert() { const TypeInstPtr* elem_klass_to = gvn().type(argument(5))->isa_instptr(); const TypeInt* vlen_to = gvn().type(argument(6))->isa_int(); - if (opr == NULL || - vector_klass_from == NULL || elem_klass_from == NULL || vlen_from == NULL || - vector_klass_to == NULL || elem_klass_to == NULL || vlen_to == NULL) { + if (opr == nullptr || + vector_klass_from == nullptr || elem_klass_from == nullptr || vlen_from == nullptr || + vector_klass_to == nullptr || elem_klass_to == nullptr || vlen_to == nullptr) { return false; // dead code } if (!opr->is_con() || - vector_klass_from->const_oop() == NULL || elem_klass_from->const_oop() == NULL || !vlen_from->is_con() || - vector_klass_to->const_oop() == NULL || elem_klass_to->const_oop() == NULL || !vlen_to->is_con()) { + vector_klass_from->const_oop() == nullptr || elem_klass_from->const_oop() == nullptr || 
!vlen_from->is_con() || + vector_klass_to->const_oop() == nullptr || elem_klass_to->const_oop() == nullptr || !vlen_to->is_con()) { if (C->print_intrinsics()) { tty->print_cr(" ** missing constant: opr=%s vclass_from=%s etype_from=%s vlen_from=%s vclass_to=%s etype_to=%s vlen_to=%s", NodeClassNames[argument(0)->Opcode()], @@ -2477,7 +2477,7 @@ bool LibraryCallKit::inline_vector_convert() { const TypeInstPtr* vbox_type_from = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass_from); Node* opd1 = unbox_vector(argument(7), vbox_type_from, elem_bt_from, num_elem_from); - if (opd1 == NULL) { + if (opd1 == nullptr) { return false; } @@ -2490,8 +2490,8 @@ bool LibraryCallKit::inline_vector_convert() { // where certain masks (depending on the species) are either propagated // through a vector or predicate register. if (is_mask && - ((src_type->isa_vectmask() == NULL && dst_type->isa_vectmask()) || - (dst_type->isa_vectmask() == NULL && src_type->isa_vectmask()))) { + ((src_type->isa_vectmask() == nullptr && dst_type->isa_vectmask()) || + (dst_type->isa_vectmask() == nullptr && src_type->isa_vectmask()))) { return false; } @@ -2591,10 +2591,10 @@ bool LibraryCallKit::inline_vector_insert() { const TypeInt* vlen = gvn().type(argument(2))->isa_int(); const TypeInt* idx = gvn().type(argument(4))->isa_int(); - if (vector_klass == NULL || elem_klass == NULL || vlen == NULL || idx == NULL) { + if (vector_klass == nullptr || elem_klass == nullptr || vlen == nullptr || idx == nullptr) { return false; // dead code } - if (vector_klass->const_oop() == NULL || elem_klass->const_oop() == NULL || !vlen->is_con() || !idx->is_con()) { + if (vector_klass->const_oop() == nullptr || elem_klass->const_oop() == nullptr || !vlen->is_con() || !idx->is_con()) { if (C->print_intrinsics()) { tty->print_cr(" ** missing constant: vclass=%s etype=%s vlen=%s idx=%s", NodeClassNames[argument(0)->Opcode()], @@ -2631,12 +2631,12 @@ bool LibraryCallKit::inline_vector_insert() { const TypeInstPtr* 
vbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass); Node* opd = unbox_vector(argument(3), vbox_type, elem_bt, num_elem); - if (opd == NULL) { + if (opd == nullptr) { return false; } Node* insert_val = argument(5); - assert(gvn().type(insert_val)->isa_long() != NULL, "expected to be long"); + assert(gvn().type(insert_val)->isa_long() != nullptr, "expected to be long"); // Convert insert value back to its appropriate type. switch (elem_bt) { @@ -2684,10 +2684,10 @@ bool LibraryCallKit::inline_vector_extract() { const TypeInt* vlen = gvn().type(argument(2))->isa_int(); const TypeInt* idx = gvn().type(argument(4))->isa_int(); - if (vector_klass == NULL || elem_klass == NULL || vlen == NULL || idx == NULL) { + if (vector_klass == nullptr || elem_klass == nullptr || vlen == nullptr || idx == nullptr) { return false; // dead code } - if (vector_klass->const_oop() == NULL || elem_klass->const_oop() == NULL || !vlen->is_con() || !idx->is_con()) { + if (vector_klass->const_oop() == nullptr || elem_klass->const_oop() == nullptr || !vlen->is_con() || !idx->is_con()) { if (C->print_intrinsics()) { tty->print_cr(" ** missing constant: vclass=%s etype=%s vlen=%s idx=%s", NodeClassNames[argument(0)->Opcode()], @@ -2725,14 +2725,14 @@ bool LibraryCallKit::inline_vector_extract() { const TypeInstPtr* vbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass); Node* opd = unbox_vector(argument(3), vbox_type, elem_bt, num_elem); - if (opd == NULL) { + if (opd == nullptr) { return false; } ConINode* idx_con = gvn().intcon(idx->get_con())->as_ConI(); Node* operation = gvn().transform(ExtractNode::make(opd, idx_con, elem_bt)); - Node* bits = NULL; + Node* bits = nullptr; switch (elem_bt) { case T_BYTE: case T_SHORT: @@ -2775,9 +2775,9 @@ bool LibraryCallKit::inline_vector_compress_expand() { const TypeInstPtr* elem_klass = gvn().type(argument(3))->isa_instptr(); const TypeInt* vlen = gvn().type(argument(4))->isa_int(); - if (vector_klass == NULL || elem_klass == 
NULL || mask_klass == NULL || vlen == NULL || - vector_klass->const_oop() == NULL || mask_klass->const_oop() == NULL || - elem_klass->const_oop() == NULL || !vlen->is_con() || !opr->is_con()) { + if (vector_klass == nullptr || elem_klass == nullptr || mask_klass == nullptr || vlen == nullptr || + vector_klass->const_oop() == nullptr || mask_klass->const_oop() == nullptr || + elem_klass->const_oop() == nullptr || !vlen->is_con() || !opr->is_con()) { if (C->print_intrinsics()) { tty->print_cr(" ** missing constant: opr=%s vclass=%s mclass=%s etype=%s vlen=%s", NodeClassNames[argument(0)->Opcode()], @@ -2816,13 +2816,13 @@ bool LibraryCallKit::inline_vector_compress_expand() { return false; // not supported } - Node* opd1 = NULL; - const TypeInstPtr* vbox_type = NULL; + Node* opd1 = nullptr; + const TypeInstPtr* vbox_type = nullptr; if (opc != Op_CompressM) { ciKlass* vbox_klass = vector_klass->const_oop()->as_instance()->java_lang_Class_klass(); vbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass); opd1 = unbox_vector(argument(5), vbox_type, elem_bt, num_elem); - if (opd1 == NULL) { + if (opd1 == nullptr) { if (C->print_intrinsics()) { tty->print_cr(" ** unbox failed vector=%s", NodeClassNames[argument(5)->Opcode()]); @@ -2836,7 +2836,7 @@ bool LibraryCallKit::inline_vector_compress_expand() { const TypeInstPtr* mbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, mbox_klass); Node* mask = unbox_vector(argument(6), mbox_type, elem_bt, num_elem); - if (mask == NULL) { + if (mask == nullptr) { if (C->print_intrinsics()) { tty->print_cr(" ** unbox failed mask=%s", NodeClassNames[argument(6)->Opcode()]); @@ -2868,9 +2868,9 @@ bool LibraryCallKit::inline_index_vector() { const TypeInstPtr* elem_klass = gvn().type(argument(1))->isa_instptr(); const TypeInt* vlen = gvn().type(argument(2))->isa_int(); - if (vector_klass == NULL || elem_klass == NULL || vlen == NULL || - vector_klass->const_oop() == NULL || !vlen->is_con() || - elem_klass->const_oop() == NULL) { 
+ if (vector_klass == nullptr || elem_klass == nullptr || vlen == nullptr || + vector_klass->const_oop() == nullptr || !vlen->is_con() || + elem_klass->const_oop() == nullptr) { if (C->print_intrinsics()) { tty->print_cr(" ** missing constant: vclass=%s etype=%s vlen=%s", NodeClassNames[argument(0)->Opcode()], @@ -2940,7 +2940,7 @@ bool LibraryCallKit::inline_index_vector() { ciKlass* vbox_klass = vector_klass->const_oop()->as_instance()->java_lang_Class_klass(); const TypeInstPtr* vbox_type = TypeInstPtr::make_exact(TypePtr::NotNull, vbox_klass); Node* opd = unbox_vector(argument(3), vbox_type, elem_bt, num_elem); - if (opd == NULL) { + if (opd == nullptr) { if (C->print_intrinsics()) { tty->print_cr(" ** unbox failed vector=%s", NodeClassNames[argument(3)->Opcode()]); @@ -3018,8 +3018,8 @@ bool LibraryCallKit::inline_index_partially_in_upper_range() { const TypeInstPtr* elem_klass = gvn().type(argument(1))->isa_instptr(); const TypeInt* vlen = gvn().type(argument(2))->isa_int(); - if (mask_klass == NULL || elem_klass == NULL || vlen == NULL || - mask_klass->const_oop() == NULL || elem_klass->const_oop() == NULL || !vlen->is_con()) { + if (mask_klass == nullptr || elem_klass == nullptr || vlen == nullptr || + mask_klass->const_oop() == nullptr || elem_klass->const_oop() == nullptr || !vlen->is_con()) { if (C->print_intrinsics()) { tty->print_cr(" ** missing constant: mclass=%s etype=%s vlen=%s", NodeClassNames[argument(0)->Opcode()], @@ -3075,9 +3075,9 @@ bool LibraryCallKit::inline_index_partially_in_upper_range() { Node* offset = argument(3); Node* limit = argument(5); - if (offset == NULL || limit == NULL) { + if (offset == nullptr || limit == nullptr) { if (C->print_intrinsics()) { - tty->print_cr(" ** offset or limit argument is NULL"); + tty->print_cr(" ** offset or limit argument is null"); } return false; // not supported } @@ -3089,7 +3089,7 @@ bool LibraryCallKit::inline_index_partially_in_upper_range() { // We assume "offset > 0 && limit >= offset && 
limit - offset < num_elem". // So directly get indexLimit with "indexLimit = limit - offset". Node* indexLimit = gvn().transform(new SubLNode(limit, offset)); - Node* mask = NULL; + Node* mask = nullptr; if (supports_mask_gen) { mask = gvn().transform(VectorMaskGenNode::make(indexLimit, elem_bt, num_elem)); } else { diff --git a/src/hotspot/share/opto/vectornode.cpp b/src/hotspot/share/opto/vectornode.cpp index 81b20c63ab6..49a5bee3207 100644 --- a/src/hotspot/share/opto/vectornode.cpp +++ b/src/hotspot/share/opto/vectornode.cpp @@ -609,7 +609,7 @@ VectorNode* VectorNode::make_mask_node(int vopc, Node* n1, Node* n2, uint vlen, return new XorVNode(n1, n2, vmask_type); default: fatal("Unsupported mask vector creation for '%s'", NodeClassNames[vopc]); - return NULL; + return nullptr; } } @@ -701,12 +701,12 @@ VectorNode* VectorNode::make(int vopc, Node* n1, Node* n2, const TypeVect* vt, b case Op_ExpandV: return new ExpandVNode(n1, n2, vt); case Op_CompressV: return new CompressVNode(n1, n2, vt); - case Op_CompressM: assert(n1 == NULL, ""); return new CompressMNode(n2, vt); + case Op_CompressM: assert(n1 == nullptr, ""); return new CompressMNode(n2, vt); case Op_CountLeadingZerosV: return new CountLeadingZerosVNode(n1, vt); case Op_CountTrailingZerosV: return new CountTrailingZerosVNode(n1, vt); default: fatal("Missed vector creation for '%s'", NodeClassNames[vopc]); - return NULL; + return nullptr; } } @@ -730,7 +730,7 @@ VectorNode* VectorNode::make(int vopc, Node* n1, Node* n2, Node* n3, const TypeV case Op_SignumVF: return new SignumVFNode(n1, n2, n3, vt); default: fatal("Missed vector creation for '%s'", NodeClassNames[vopc]); - return NULL; + return nullptr; } } @@ -770,7 +770,7 @@ VectorNode* VectorNode::scalar2vector(Node* s, uint vlen, const Type* opd_t, boo return new ReplicateDNode(s, vt); default: fatal("Type '%s' is not supported for vectors", type2name(bt)); - return NULL; + return nullptr; } } @@ -790,7 +790,7 @@ VectorNode* VectorNode::shift_count(int 
opc, Node* cnt, uint vlen, BasicType bt) return new RShiftCntVNode(cnt, vt); default: fatal("Missed vector creation for '%s'", NodeClassNames[opc]); - return NULL; + return nullptr; } } @@ -898,17 +898,17 @@ Node* VectorNode::try_to_gen_masked_vector(PhaseGVN* gvn, Node* node, const Type if (node->is_predicated_vector() || !Matcher::has_predicated_vectors() || !Matcher::match_rule_supported_vector_masked(vopc, vlen, bt) || !Matcher::match_rule_supported_vector(Op_VectorMaskGen, vlen, bt)) { - return NULL; + return nullptr; } - Node* mask = NULL; + Node* mask = nullptr; // Generate a vector mask for vector operation whose vector length is lower than the // hardware supported max vector length. if (vt->length_in_bytes() < (uint)MaxVectorSize) { Node* length = gvn->transform(new ConvI2LNode(gvn->makecon(TypeInt::make(vlen)))); mask = gvn->transform(VectorMaskGenNode::make(length, bt, vlen)); } else { - return NULL; + return nullptr; } // Generate the related masked op for vector load/store/load_gather/store_scatter. @@ -942,7 +942,7 @@ Node* VectorNode::Ideal(PhaseGVN* phase, bool can_reshape) { if (Matcher::vector_needs_partial_operations(this, vect_type())) { return try_to_gen_masked_vector(phase, this, vect_type()); } - return NULL; + return nullptr; } // Return initial Pack node. Additional operands added with add_opd() calls. 
@@ -965,7 +965,7 @@ PackNode* PackNode::make(Node* s, uint vlen, BasicType bt) { return new PackDNode(s, vt); default: fatal("Type '%s' is not supported for vectors", type2name(bt)); - return NULL; + return nullptr; } } @@ -1001,7 +1001,7 @@ PackNode* PackNode::binary_tree_pack(int lo, int hi) { return new Pack2DNode(n1, n2, TypeVect::make(T_DOUBLE, 2)); default: fatal("Type '%s' is not supported for vectors", type2name(bt)); - return NULL; + return nullptr; } } } @@ -1106,7 +1106,7 @@ Node* ExtractNode::make(Node* v, ConINode* pos, BasicType bt) { case T_DOUBLE: return new ExtractDNode(v, pos); default: assert(false, "wrong type: %s", type2name(bt)); - return NULL; + return nullptr; } } @@ -1287,7 +1287,7 @@ ReductionNode* ReductionNode::make(int opc, Node *ctrl, Node* n1, Node* n2, Basi case Op_XorReductionV: return new XorReductionVNode(ctrl, n1, n2); default: assert(false, "unknown node: %s", NodeClassNames[vopc]); - return NULL; + return nullptr; } } @@ -1296,7 +1296,7 @@ Node* ReductionNode::Ideal(PhaseGVN* phase, bool can_reshape) { if (Matcher::vector_needs_partial_operations(this, vt)) { return VectorNode::try_to_gen_masked_vector(phase, this, vt); } - return NULL; + return nullptr; } Node* VectorLoadMaskNode::Identity(PhaseGVN* phase) { @@ -1341,7 +1341,7 @@ VectorCastNode* VectorCastNode::make(int vopc, Node* n1, BasicType bt, uint vlen case Op_VectorCastF2HF: return new VectorCastF2HFNode(n1, vt); default: assert(false, "unknown node: %s", NodeClassNames[vopc]); - return NULL; + return nullptr; } } @@ -1412,7 +1412,7 @@ Node* ReductionNode::make_reduction_input(PhaseGVN& gvn, int opc, BasicType bt) return gvn.makecon(TypeLong::MINUS_1); default: fatal("Missed vector creation for '%s' as the basic type is not correct.", NodeClassNames[vopc]); - return NULL; + return nullptr; } break; case Op_AddReductionVI: // fallthrough @@ -1444,7 +1444,7 @@ Node* ReductionNode::make_reduction_input(PhaseGVN& gvn, int opc, BasicType bt) return 
gvn.makecon(TypeF::POS_INF); case T_DOUBLE: return gvn.makecon(TypeD::POS_INF); - default: Unimplemented(); return NULL; + default: Unimplemented(); return nullptr; } break; case Op_MaxReductionV: @@ -1461,12 +1461,12 @@ Node* ReductionNode::make_reduction_input(PhaseGVN& gvn, int opc, BasicType bt) return gvn.makecon(TypeF::NEG_INF); case T_DOUBLE: return gvn.makecon(TypeD::NEG_INF); - default: Unimplemented(); return NULL; + default: Unimplemented(); return nullptr; } break; default: fatal("Missed vector creation for '%s'", NodeClassNames[vopc]); - return NULL; + return nullptr; } } @@ -1511,8 +1511,8 @@ Node* VectorNode::degenerate_vector_rotate(Node* src, Node* cnt, bool is_rotate_ // Compute shift values for right rotation and // later swap them in case of left rotation. - Node* shiftRCnt = NULL; - Node* shiftLCnt = NULL; + Node* shiftRCnt = nullptr; + Node* shiftLCnt = nullptr; const TypeInt* cnt_type = cnt->bottom_type()->isa_int(); bool is_binary_vector_op = false; if (cnt_type && cnt_type->is_con()) { @@ -1541,8 +1541,8 @@ Node* VectorNode::degenerate_vector_rotate(Node* src, Node* cnt, bool is_rotate_ int subVopc = 0; int addVopc = 0; - Node* shift_mask_node = NULL; - Node* const_one_node = NULL; + Node* shift_mask_node = nullptr; + Node* const_one_node = nullptr; assert(cnt->bottom_type()->isa_vect(), "Unexpected shift"); const Type* elem_ty = Type::get_const_basic_type(bt); @@ -1590,7 +1590,7 @@ Node* RotateLeftVNode::Ideal(PhaseGVN* phase, bool can_reshape) { !Matcher::match_rule_supported_vector(Op_RotateLeftV, vlen, bt)) { return VectorNode::degenerate_vector_rotate(in(1), in(2), true, vlen, bt, phase); } - return NULL; + return nullptr; } Node* RotateRightVNode::Ideal(PhaseGVN* phase, bool can_reshape) { @@ -1600,7 +1600,7 @@ Node* RotateRightVNode::Ideal(PhaseGVN* phase, bool can_reshape) { !Matcher::match_rule_supported_vector(Op_RotateRightV, vlen, bt)) { return VectorNode::degenerate_vector_rotate(in(1), in(2), false, vlen, bt, phase); } - 
return NULL; + return nullptr; } #ifndef PRODUCT @@ -1663,7 +1663,7 @@ Node* VectorUnboxNode::Ideal(PhaseGVN* phase, bool can_reshape) { } } } - return NULL; + return nullptr; } Node* VectorUnboxNode::Identity(PhaseGVN* phase) { @@ -1721,7 +1721,7 @@ Node* VectorMaskOpNode::make(Node* mask, const Type* ty, int mopc) { default: assert(false, "Unhandled operation"); } - return NULL; + return nullptr; } Node* VectorMaskOpNode::Ideal(PhaseGVN* phase, bool can_reshape) { @@ -1729,7 +1729,7 @@ Node* VectorMaskOpNode::Ideal(PhaseGVN* phase, bool can_reshape) { if (Matcher::vector_needs_partial_operations(this, vt)) { return VectorNode::try_to_gen_masked_vector(phase, this, vt); } - return NULL; + return nullptr; } Node* VectorMaskToLongNode::Identity(PhaseGVN* phase) { @@ -1749,20 +1749,20 @@ Node* VectorLongToMaskNode::Ideal(PhaseGVN* phase, bool can_reshape) { // Different src/dst mask length represents a re-interpretation operation, // we can however generate a mask casting operation if length matches. Node* src = in(1)->in(1)->in(1); - if (dst_type->isa_vectmask() == NULL) { + if (dst_type->isa_vectmask() == nullptr) { if (src->Opcode() != Op_VectorStoreMask) { - return NULL; + return nullptr; } src = src->in(1); } const TypeVect* src_type = src->bottom_type()->is_vect(); if (src_type->length() == dst_type->length() && - ((src_type->isa_vectmask() == NULL && dst_type->isa_vectmask() == NULL) || + ((src_type->isa_vectmask() == nullptr && dst_type->isa_vectmask() == nullptr) || (src_type->isa_vectmask() && dst_type->isa_vectmask()))) { return new VectorMaskCastNode(src, dst_type); } } - return NULL; + return nullptr; } // Generate other vector nodes to implement the masked/non-masked vector negation. 
@@ -1774,8 +1774,8 @@ Node* NegVNode::degenerate_integral_negate(PhaseGVN* phase, bool is_predicated) // Transformation for predicated NegVI/L if (is_predicated) { // (NegVI/L src m) ==> (AddVI/L (XorV src (ReplicateI/L -1) m) (ReplicateI/L 1) m) - Node* const_minus_one = NULL; - Node* const_one = NULL; + Node* const_minus_one = nullptr; + Node* const_one = nullptr; int add_opc; if (bt == T_LONG) { const_minus_one = phase->longcon(-1L); @@ -1799,7 +1799,7 @@ Node* NegVNode::degenerate_integral_negate(PhaseGVN* phase, bool is_predicated) } // NegVI/L ==> (SubVI/L (ReplicateI/L 0) src) - Node* const_zero = NULL; + Node* const_zero = nullptr; int sub_opc; if (bt == T_LONG) { const_zero = phase->longcon(0L); @@ -1825,7 +1825,7 @@ Node* NegVNode::Ideal(PhaseGVN* phase, bool can_reshape) { return degenerate_integral_negate(phase, false); } } - return NULL; + return nullptr; } static Node* reverse_operations_identity(Node* n, Node* in1) { @@ -1974,9 +1974,9 @@ Node* XorVNode::Ideal(PhaseGVN* phase, bool can_reshape) { BasicType bt = vect_type()->element_basic_type(); Node* zero = phase->transform(phase->zerocon(bt)); return VectorNode::scalar2vector(zero, length(), Type::get_const_basic_type(bt), - bottom_type()->isa_vectmask() != NULL); + bottom_type()->isa_vectmask() != nullptr); } - return NULL; + return nullptr; } Node* VectorBlendNode::Identity(PhaseGVN* phase) { diff --git a/src/hotspot/share/opto/vectornode.hpp b/src/hotspot/share/opto/vectornode.hpp index d92b0c9c331..da6d500f4d4 100644 --- a/src/hotspot/share/opto/vectornode.hpp +++ b/src/hotspot/share/opto/vectornode.hpp @@ -1270,7 +1270,7 @@ class VectorLoadConstNode : public VectorNode { // Extract a scalar from a vector at position "pos" class ExtractNode : public Node { public: - ExtractNode(Node* src, ConINode* pos) : Node(NULL, src, (Node*)pos) { + ExtractNode(Node* src, ConINode* pos) : Node(nullptr, src, (Node*)pos) { assert(in(2)->get_int() >= 0, "positive constants"); } virtual int Opcode() const; @@ 
-1676,13 +1676,13 @@ class VectorBoxNode : public Node { }; VectorBoxNode(Compile* C, Node* box, Node* val, const TypeInstPtr* box_type, const TypeVect* vt) - : Node(NULL, box, val), _box_type(box_type), _vec_type(vt) { + : Node(nullptr, box, val), _box_type(box_type), _vec_type(vt) { init_flags(Flag_is_macro); C->add_macro_node(this); } - const TypeInstPtr* box_type() const { assert(_box_type != NULL, ""); return _box_type; }; - const TypeVect* vec_type() const { assert(_vec_type != NULL, ""); return _vec_type; }; + const TypeInstPtr* box_type() const { assert(_box_type != nullptr, ""); return _box_type; }; + const TypeVect* vec_type() const { assert(_vec_type != nullptr, ""); return _vec_type; }; virtual int Opcode() const; virtual const Type* bottom_type() const { return _box_type; } @@ -1695,7 +1695,7 @@ class VectorBoxNode : public Node { class VectorBoxAllocateNode : public CallStaticJavaNode { public: VectorBoxAllocateNode(Compile* C, const TypeInstPtr* vbox_type) - : CallStaticJavaNode(C, VectorBoxNode::vec_box_type(vbox_type), NULL, NULL) { + : CallStaticJavaNode(C, VectorBoxNode::vec_box_type(vbox_type), nullptr, nullptr) { init_flags(Flag_is_macro); C->add_macro_node(this); }