8301225: Replace NULL with nullptr in share/gc/shenandoah/

Reviewed-by: wkemper, kdnilsen, rkennke
Johan Sjölen 2023-02-15 13:40:34 +00:00
parent 26b111d714
commit 0c9658446d
60 changed files with 533 additions and 533 deletions
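
For context on the mechanical change: NULL in C++ is an integer constant (typically 0 or 0L), while nullptr has its own type, std::nullptr_t, which converts only to pointer types, so misuse in overload resolution or integer contexts stops compiling. A minimal standalone sketch of the difference; the describe() overloads are illustrative, not JDK code:

#include <cstdio>

// Hypothetical overloads, not from the JDK sources; they illustrate the
// type-safety gap this commit closes.
static void describe(int)         { std::puts("chose the int overload"); }
static void describe(const char*) { std::puts("chose the pointer overload"); }

int main() {
  describe(0);        // a literal 0 (what NULL commonly expands to) binds to int
  describe(nullptr);  // nullptr converts only to pointer types
  return 0;
}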

src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp

@ -50,11 +50,11 @@ void ShenandoahLoadReferenceBarrierStub::emit_code(LIR_Assembler* ce) {
}
ShenandoahBarrierSetC1::ShenandoahBarrierSetC1() :
_pre_barrier_c1_runtime_code_blob(NULL),
_load_reference_barrier_strong_rt_code_blob(NULL),
_load_reference_barrier_strong_native_rt_code_blob(NULL),
_load_reference_barrier_weak_rt_code_blob(NULL),
_load_reference_barrier_phantom_rt_code_blob(NULL) {}
_pre_barrier_c1_runtime_code_blob(nullptr),
_load_reference_barrier_strong_rt_code_blob(nullptr),
_load_reference_barrier_strong_native_rt_code_blob(nullptr),
_load_reference_barrier_weak_rt_code_blob(nullptr),
_load_reference_barrier_phantom_rt_code_blob(nullptr) {}
void ShenandoahBarrierSetC1::pre_barrier(LIRGenerator* gen, CodeEmitInfo* info, DecoratorSet decorators, LIR_Opr addr_opr, LIR_Opr pre_val) {
// First we test whether marking is in progress.
@ -97,7 +97,7 @@ void ShenandoahBarrierSetC1::pre_barrier(LIRGenerator* gen, CodeEmitInfo* info,
assert(addr_opr->is_register(), "must be");
addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
}
slow = new ShenandoahPreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info ? new CodeEmitInfo(info) : NULL);
slow = new ShenandoahPreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info ? new CodeEmitInfo(info) : nullptr);
} else {
assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
assert(pre_val->is_register(), "must be");
@ -246,7 +246,7 @@ class C1ShenandoahPreBarrierCodeGenClosure : public StubAssemblerCodeGenClosure
virtual OopMapSet* generate_code(StubAssembler* sasm) {
ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
bs->generate_c1_pre_barrier_runtime_stub(sasm);
return NULL;
return nullptr;
}
};
@ -260,7 +260,7 @@ public:
virtual OopMapSet* generate_code(StubAssembler* sasm) {
ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
bs->generate_c1_load_reference_barrier_runtime_stub(sasm, _decorators);
return NULL;
return nullptr;
}
};

src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.hpp

@ -53,7 +53,7 @@ public:
// previous value is assumed to have already been loaded into pre_val.
ShenandoahPreBarrierStub(LIR_Opr pre_val) :
_do_load(false), _addr(LIR_OprFact::illegalOpr), _pre_val(pre_val),
_patch_code(lir_patch_none), _info(NULL)
_patch_code(lir_patch_none), _info(nullptr)
{
assert(_pre_val->is_register(), "should be a register");
}
@ -69,7 +69,7 @@ public:
if (_do_load) {
// don't pass in the code emit info since it's processed in the fast
// path
if (_info != NULL)
if (_info != nullptr)
visitor->do_slow_case(_info);
else
visitor->do_slow_case();
@ -142,7 +142,7 @@ private:
public:
LIR_OpShenandoahCompareAndSwap(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
LIR_Opr t1, LIR_Opr t2, LIR_Opr result)
: LIR_Op(lir_none, result, NULL) // no info
: LIR_Op(lir_none, result, nullptr) // no info
, _addr(addr)
, _cmp_value(cmp_value)
, _new_value(new_value)
@ -205,27 +205,27 @@ public:
ShenandoahBarrierSetC1();
CodeBlob* pre_barrier_c1_runtime_code_blob() {
assert(_pre_barrier_c1_runtime_code_blob != NULL, "");
assert(_pre_barrier_c1_runtime_code_blob != nullptr, "");
return _pre_barrier_c1_runtime_code_blob;
}
CodeBlob* load_reference_barrier_strong_rt_code_blob() {
assert(_load_reference_barrier_strong_rt_code_blob != NULL, "");
assert(_load_reference_barrier_strong_rt_code_blob != nullptr, "");
return _load_reference_barrier_strong_rt_code_blob;
}
CodeBlob* load_reference_barrier_strong_native_rt_code_blob() {
assert(_load_reference_barrier_strong_native_rt_code_blob != NULL, "");
assert(_load_reference_barrier_strong_native_rt_code_blob != nullptr, "");
return _load_reference_barrier_strong_native_rt_code_blob;
}
CodeBlob* load_reference_barrier_weak_rt_code_blob() {
assert(_load_reference_barrier_weak_rt_code_blob != NULL, "");
assert(_load_reference_barrier_weak_rt_code_blob != nullptr, "");
return _load_reference_barrier_weak_rt_code_blob;
}
CodeBlob* load_reference_barrier_phantom_rt_code_blob() {
assert(_load_reference_barrier_phantom_rt_code_blob != NULL, "");
assert(_load_reference_barrier_phantom_rt_code_blob != nullptr, "");
return _load_reference_barrier_phantom_rt_code_blob;
}

src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp

@ -48,8 +48,8 @@ ShenandoahBarrierSetC2* ShenandoahBarrierSetC2::bsc2() {
}
ShenandoahBarrierSetC2State::ShenandoahBarrierSetC2State(Arena* comp_arena)
: _iu_barriers(new (comp_arena) GrowableArray<ShenandoahIUBarrierNode*>(comp_arena, 8, 0, NULL)),
_load_reference_barriers(new (comp_arena) GrowableArray<ShenandoahLoadReferenceBarrierNode*>(comp_arena, 8, 0, NULL)) {
: _iu_barriers(new (comp_arena) GrowableArray<ShenandoahIUBarrierNode*>(comp_arena, 8, 0, nullptr)),
_load_reference_barriers(new (comp_arena) GrowableArray<ShenandoahLoadReferenceBarrierNode*>(comp_arena, 8, 0, nullptr)) {
}
int ShenandoahBarrierSetC2State::iu_barriers_count() const {
@ -107,7 +107,7 @@ bool ShenandoahBarrierSetC2::satb_can_remove_pre_barrier(GraphKit* kit, PhaseTra
return false; // cannot unalias unless there are precise offsets
}
if (alloc == NULL) {
if (alloc == nullptr) {
return false; // No allocation found
}
@ -123,7 +123,7 @@ bool ShenandoahBarrierSetC2::satb_can_remove_pre_barrier(GraphKit* kit, PhaseTra
intptr_t st_offset = 0;
Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);
if (st_base == NULL) {
if (st_base == nullptr) {
break; // inscrutable pointer
}
@ -163,12 +163,12 @@ bool ShenandoahBarrierSetC2::satb_can_remove_pre_barrier(GraphKit* kit, PhaseTra
// Make sure that we are looking at the same allocation site.
// The alloc variable is guaranteed to not be null here from earlier check.
if (alloc == st_alloc) {
// Check that the initialization is storing NULL so that no previous store
// Check that the initialization is storing null so that no previous store
// has been moved up and directly write a reference
Node* captured_store = st_init->find_captured_store(offset,
type2aelembytes(T_OBJECT),
phase);
if (captured_store == NULL || captured_store == st_init->zero_memory()) {
if (captured_store == nullptr || captured_store == st_init->zero_memory()) {
return true;
}
}
@ -199,9 +199,9 @@ void ShenandoahBarrierSetC2::satb_write_barrier_pre(GraphKit* kit,
if (do_load) {
// We need to generate the load of the previous value
assert(adr != NULL, "where are loading from?");
assert(pre_val == NULL, "loaded already?");
assert(val_type != NULL, "need a type");
assert(adr != nullptr, "where are loading from?");
assert(pre_val == nullptr, "loaded already?");
assert(val_type != nullptr, "need a type");
if (ReduceInitialCardMarks
&& satb_can_remove_pre_barrier(kit, &kit->gvn(), adr, bt, alias_idx)) {
@ -210,7 +210,7 @@ void ShenandoahBarrierSetC2::satb_write_barrier_pre(GraphKit* kit,
} else {
// In this case both val_type and alias_idx are unused.
assert(pre_val != NULL, "must be loaded already");
assert(pre_val != nullptr, "must be loaded already");
// Nothing to be done if pre_val is null.
if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
@ -255,7 +255,7 @@ void ShenandoahBarrierSetC2::satb_write_barrier_pre(GraphKit* kit,
pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
}
// if (pre_val != NULL)
// if (pre_val != nullptr)
__ if_then(pre_val, BoolTest::ne, kit->null()); {
Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
@ -277,13 +277,13 @@ void ShenandoahBarrierSetC2::satb_write_barrier_pre(GraphKit* kit,
const TypeFunc *tf = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type();
__ make_leaf_call(tf, CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), "shenandoah_wb_pre", pre_val, tls);
} __ end_if(); // (!index)
} __ end_if(); // (pre_val != NULL)
} __ end_if(); // (pre_val != nullptr)
} __ end_if(); // (!marking)
// Final sync IdealKit and GraphKit.
kit->final_sync(ideal);
if (ShenandoahSATBBarrier && adr != NULL) {
if (ShenandoahSATBBarrier && adr != nullptr) {
Node* c = kit->control();
Node* call = c->in(1)->in(1)->in(1)->in(0);
assert(is_shenandoah_wb_pre_call(call), "shenandoah_wb_pre call expected");
@ -370,7 +370,7 @@ void ShenandoahBarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, N
// If offset is a constant, is it java_lang_ref_Reference::_reference_offset?
const TypeX* otype = offset->find_intptr_t_type();
if (otype != NULL && otype->is_con() &&
if (otype != nullptr && otype->is_con() &&
otype->get_con() != java_lang_ref_Reference::referent_offset()) {
// Constant offset but not the reference_offset so just return
return;
@ -378,14 +378,14 @@ void ShenandoahBarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, N
// We only need to generate the runtime guards for instances.
const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
if (btype != NULL) {
if (btype != nullptr) {
if (btype->isa_aryptr()) {
// Array type so nothing to do
return;
}
const TypeInstPtr* itype = btype->isa_instptr();
if (itype != NULL) {
if (itype != nullptr) {
// Can the klass of base_oop be statically determined to be
// _not_ a sub-class of Reference and _not_ Object?
ciKlass* klass = itype->instance_klass();
@ -424,7 +424,7 @@ void ShenandoahBarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, N
__ sync_kit(kit);
Node* one = __ ConI(1);
// is_instof == 0 if base_oop == NULL
// is_instof == 0 if base_oop == nullptr
__ if_then(is_instof, BoolTest::eq, one, unlikely); {
// Update graphKit from IdeakKit.
@ -432,7 +432,7 @@ void ShenandoahBarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, N
// Use the pre-barrier to record the value in the referent field
satb_write_barrier_pre(kit, false /* do_load */,
NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
nullptr /* obj */, nullptr /* adr */, max_juint /* alias_idx */, nullptr /* val */, nullptr /* val_type */,
pre_val /* pre_val */,
T_OBJECT);
if (need_mem_bar) {
@ -512,7 +512,7 @@ Node* ShenandoahBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue&
value = shenandoah_iu_barrier(kit, value);
val.set_node(value);
shenandoah_write_barrier_pre(kit, true /* do_load */, /*kit->control(),*/ access.base(), adr, adr_idx, val.node(),
static_cast<const TypeOopPtr*>(val.type()), NULL /* pre_val */, access.type());
static_cast<const TypeOopPtr*>(val.type()), nullptr /* pre_val */, access.type());
} else {
assert(access.is_opt_access(), "only for optimization passes");
assert(((decorators & C2_TIGHTLY_COUPLED_ALLOC) != 0 || !ShenandoahSATBBarrier) && (decorators & C2_ARRAY_COPY) != 0, "unexpected caller of this code");
@ -539,7 +539,7 @@ Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val
// 2: apply LRB if needed
if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
load = new ShenandoahLoadReferenceBarrierNode(NULL, load, decorators);
load = new ShenandoahLoadReferenceBarrierNode(nullptr, load, decorators);
if (access.is_parse_access()) {
load = static_cast<C2ParseAccess &>(access).kit()->gvn().transform(load);
} else {
@ -579,7 +579,7 @@ Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val
if (on_weak_ref) {
// Use the pre-barrier to record the value in the referent field
satb_write_barrier_pre(kit, false /* do_load */,
NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
nullptr /* obj */, nullptr /* adr */, max_juint /* alias_idx */, nullptr /* val */, nullptr /* val_type */,
load /* pre_val */, T_OBJECT);
// Add memory barrier to prevent commoning reads from this field
// across safepoint since GC can change its value.
@ -600,14 +600,14 @@ Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess
if (access.is_oop()) {
new_val = shenandoah_iu_barrier(kit, new_val);
shenandoah_write_barrier_pre(kit, false /* do_load */,
NULL, NULL, max_juint, NULL, NULL,
nullptr, nullptr, max_juint, nullptr, nullptr,
expected_val /* pre_val */, T_OBJECT);
MemNode::MemOrd mo = access.mem_node_mo();
Node* mem = access.memory();
Node* adr = access.addr().node();
const TypePtr* adr_type = access.addr().type();
Node* load_store = NULL;
Node* load_store = nullptr;
#ifdef _LP64
if (adr->bottom_type()->is_ptr_to_narrowoop()) {
@ -636,7 +636,7 @@ Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess
load_store = kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
}
#endif
load_store = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(NULL, load_store, access.decorators()));
load_store = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(nullptr, load_store, access.decorators()));
return load_store;
}
return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
@ -648,13 +648,13 @@ Node* ShenandoahBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAcces
if (access.is_oop()) {
new_val = shenandoah_iu_barrier(kit, new_val);
shenandoah_write_barrier_pre(kit, false /* do_load */,
NULL, NULL, max_juint, NULL, NULL,
nullptr, nullptr, max_juint, nullptr, nullptr,
expected_val /* pre_val */, T_OBJECT);
DecoratorSet decorators = access.decorators();
MemNode::MemOrd mo = access.mem_node_mo();
Node* mem = access.memory();
bool is_weak_cas = (decorators & C2_WEAK_CMPXCHG) != 0;
Node* load_store = NULL;
Node* load_store = nullptr;
Node* adr = access.addr().node();
#ifdef _LP64
if (adr->bottom_type()->is_ptr_to_narrowoop()) {
@ -704,9 +704,9 @@ Node* ShenandoahBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& acces
}
Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, val, value_type);
if (access.is_oop()) {
result = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(NULL, result, access.decorators()));
result = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(nullptr, result, access.decorators()));
shenandoah_write_barrier_pre(kit, false /* do_load */,
NULL, NULL, max_juint, NULL, NULL,
nullptr, nullptr, max_juint, nullptr, nullptr,
result /* pre_val */, T_OBJECT);
}
return result;
@ -724,7 +724,7 @@ bool ShenandoahBarrierSetC2::is_gc_barrier_node(Node* node) const {
return false;
}
CallLeafNode *call = node->as_CallLeaf();
if (call->_name == NULL) {
if (call->_name == nullptr) {
return false;
}
@ -734,7 +734,7 @@ bool ShenandoahBarrierSetC2::is_gc_barrier_node(Node* node) const {
}
Node* ShenandoahBarrierSetC2::step_over_gc_barrier(Node* c) const {
if (c == NULL) {
if (c == nullptr) {
return c;
}
if (c->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
@ -783,7 +783,7 @@ bool ShenandoahBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_couple
bool ShenandoahBarrierSetC2::clone_needs_barrier(Node* src, PhaseGVN& gvn) {
const TypeOopPtr* src_type = gvn.type(src)->is_oopptr();
if (src_type->isa_instptr() != NULL) {
if (src_type->isa_instptr() != nullptr) {
ciInstanceKlass* ik = src_type->is_instptr()->instance_klass();
if ((src_type->klass_is_exact() || !ik->has_subklass()) && !ik->has_injected_fields()) {
if (ik->has_object_fields()) {
@ -832,7 +832,7 @@ void ShenandoahBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCo
Node* gc_state_addr = phase->transform_later(new AddPNode(phase->C->top(), thread, offset));
uint gc_state_idx = Compile::AliasIdxRaw;
const TypePtr* gc_state_adr_type = NULL; // debug-mode-only argument
const TypePtr* gc_state_adr_type = nullptr; // debug-mode-only argument
debug_only(gc_state_adr_type = phase->C->get_adr_type(gc_state_idx));
Node* gc_state = phase->transform_later(new LoadBNode(ctrl, mem, gc_state_addr, gc_state_adr_type, TypeInt::BYTE, MemNode::unordered));
@ -873,7 +873,7 @@ void ShenandoahBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCo
const char* name = "arraycopy";
call = phase->make_leaf_call(ctrl, mem,
OptoRuntime::fast_arraycopy_Type(),
phase->basictype2arraycopy(T_LONG, NULL, NULL, true, name, true),
phase->basictype2arraycopy(T_LONG, nullptr, nullptr, true, name, true),
name, TypeRawPtr::BOTTOM,
src, dest, length
LP64_ONLY(COMMA phase->top()));
@ -988,7 +988,7 @@ void ShenandoahBarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase p
worklist.push(compile->root());
while (worklist.size() > 0) {
Node *x = worklist.pop();
if (x == NULL || x == compile->top()) continue;
if (x == nullptr || x == compile->top()) continue;
if (visited.member(x)) {
continue;
} else {
@ -1028,7 +1028,7 @@ void ShenandoahBarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase p
if_ctrl = if_ctrl->in(0)->in(0);
}
}
assert(load_ctrl != NULL && if_ctrl == load_ctrl, "controls must match");
assert(load_ctrl != nullptr && if_ctrl == load_ctrl, "controls must match");
}
}
}
@ -1057,7 +1057,7 @@ Node* ShenandoahBarrierSetC2::ideal_node(PhaseGVN* phase, Node* n, bool can_resh
Node* in1 = n->in(1);
Node* in2 = n->in(2);
// If one input is NULL, then step over the strong LRB barriers on the other input
// If one input is null, then step over the strong LRB barriers on the other input
if (in1->bottom_type() == TypePtr::NULL_PTR &&
!((in2->Opcode() == Op_ShenandoahLoadReferenceBarrier) &&
!ShenandoahBarrierSet::is_strong_access(((ShenandoahLoadReferenceBarrierNode*)in2)->decorators()))) {
@ -1081,7 +1081,7 @@ Node* ShenandoahBarrierSetC2::ideal_node(PhaseGVN* phase, Node* n, bool can_resh
} else if (can_reshape &&
n->Opcode() == Op_If &&
ShenandoahBarrierC2Support::is_heap_stable_test(n) &&
n->in(0) != NULL &&
n->in(0) != nullptr &&
n->outcnt() == 2) {
Node* dom = n->in(0);
Node* prev_dom = n;
@ -1091,23 +1091,23 @@ Node* ShenandoahBarrierSetC2::ideal_node(PhaseGVN* phase, Node* n, bool can_resh
while (dom->Opcode() != op || // Not same opcode?
!ShenandoahBarrierC2Support::is_heap_stable_test(dom) || // Not same input 1?
prev_dom->in(0) != dom) { // One path of test does not dominate?
if (dist < 0) return NULL;
if (dist < 0) return nullptr;
dist--;
prev_dom = dom;
dom = IfNode::up_one_dom(dom);
if (!dom) return NULL;
if (!dom) return nullptr;
}
// Check that we did not follow a loop back to ourselves
if (n == dom) {
return NULL;
return nullptr;
}
return n->as_If()->dominated_by(prev_dom, phase->is_IterGVN());
}
return NULL;
return nullptr;
}
bool ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(Node* n) {
@ -1205,7 +1205,7 @@ bool ShenandoahBarrierSetC2::escape_add_final_edges(ConnectionGraph* conn_graph,
case Op_ShenandoahCompareAndExchangeP:
case Op_ShenandoahCompareAndExchangeN: {
Node *adr = n->in(MemNode::Address);
conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, adr, nullptr);
// fallthrough
}
case Op_ShenandoahCompareAndSwapP:
@ -1214,10 +1214,10 @@ bool ShenandoahBarrierSetC2::escape_add_final_edges(ConnectionGraph* conn_graph,
case Op_ShenandoahWeakCompareAndSwapN:
return conn_graph->add_final_edges_unsafe_access(n, opcode);
case Op_ShenandoahIUBarrier:
conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), NULL);
conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr);
return true;
case Op_ShenandoahLoadReferenceBarrier:
conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahLoadReferenceBarrierNode::ValueIn), NULL);
conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahLoadReferenceBarrierNode::ValueIn), nullptr);
return true;
default:
// Nothing

src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp

@ -158,7 +158,7 @@ bool ShenandoahBarrierC2Support::verify_helper(Node* in, Node_Stack& phis, Vecto
while (true) {
if (in->bottom_type() == TypePtr::NULL_PTR) {
if (trace) {tty->print_cr("NULL");}
if (trace) {tty->print_cr("null");}
} else if (!in->bottom_type()->make_ptr()->make_oopptr()) {
if (trace) {tty->print_cr("Non oop");}
} else {
@ -265,10 +265,10 @@ bool ShenandoahBarrierC2Support::verify_helper(Node* in, Node_Stack& phis, Vecto
}
void ShenandoahBarrierC2Support::report_verify_failure(const char* msg, Node* n1, Node* n2) {
if (n1 != NULL) {
if (n1 != nullptr) {
n1->dump(+10);
}
if (n2 != NULL) {
if (n2 != nullptr) {
n2->dump(+10);
}
fatal("%s", msg);
@ -462,7 +462,7 @@ void ShenandoahBarrierC2Support::verify(RootNode* root) {
};
if (call->is_call_to_arraycopystub()) {
Node* dest = NULL;
Node* dest = nullptr;
const TypeTuple* args = n->as_Call()->_tf->domain();
for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
if (args->field_at(i)->isa_ptr()) {
@ -597,7 +597,7 @@ void ShenandoahBarrierC2Support::verify(RootNode* root) {
}
}
for (uint j = 1; j < stop; j++) {
if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() &&
if (n->in(j) != nullptr && n->in(j)->bottom_type()->make_ptr() &&
n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
uint k = 0;
for (; k < inputs_len && others[i].inputs[k].pos != (int)j; k++);
@ -608,7 +608,7 @@ void ShenandoahBarrierC2Support::verify(RootNode* root) {
}
} else {
for (uint j = 1; j < stop; j++) {
if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() &&
if (n->in(j) != nullptr && n->in(j)->bottom_type()->make_ptr() &&
n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
fatal("%s not covered", n->Name());
}
@ -618,7 +618,7 @@ void ShenandoahBarrierC2Support::verify(RootNode* root) {
if (n->is_SafePoint()) {
SafePointNode* sfpt = n->as_SafePoint();
if (verify_no_useless_barrier && sfpt->jvms() != NULL) {
if (verify_no_useless_barrier && sfpt->jvms() != nullptr) {
for (uint i = sfpt->jvms()->scloff(); i < sfpt->jvms()->endoff(); i++) {
if (!verify_helper(sfpt->in(i), phis, visited, ShenandoahLoad, trace, barriers_used)) {
phis.clear();
@ -667,7 +667,7 @@ bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node*
}
}
for (uint i = 0; i < m->req(); i++) {
if (m->in(i) != NULL && phase->ctrl_or_self(m->in(i)) == c) {
if (m->in(i) != nullptr && phase->ctrl_or_self(m->in(i)) == c) {
wq.push(m->in(i));
}
}
@ -684,7 +684,7 @@ bool ShenandoahBarrierC2Support::is_dominator(Node* d_c, Node* n_c, Node* d, Nod
}
Node* next_mem(Node* mem, int alias) {
Node* res = NULL;
Node* res = nullptr;
if (mem->is_Proj()) {
res = mem->in(0);
} else if (mem->is_SafePoint() || mem->is_MemBar()) {
@ -706,7 +706,7 @@ Node* next_mem(Node* mem, int alias) {
}
Node* ShenandoahBarrierC2Support::no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase) {
Node* iffproj = NULL;
Node* iffproj = nullptr;
while (c != dom) {
Node* next = phase->idom(c);
assert(next->unique_ctrl_out_or_null() == c || c->is_Proj() || c->is_Region(), "multiple control flow out but no proj or region?");
@ -743,13 +743,13 @@ Node* ShenandoahBarrierC2Support::no_branches(Node* c, Node* dom, bool allow_one
}
} else if (c->is_Proj()) {
if (c->is_IfProj()) {
if (c->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) != NULL) {
if (c->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) != nullptr) {
// continue;
} else {
if (!allow_one_proj) {
return NodeSentinel;
}
if (iffproj == NULL) {
if (iffproj == nullptr) {
iffproj = c;
} else {
return NodeSentinel;
@ -778,7 +778,7 @@ Node* ShenandoahBarrierC2Support::dom_mem(Node* mem, Node* ctrl, int alias, Node
while (!phase->is_dominator(mem_ctrl, ctrl) || mem_ctrl == ctrl) {
mem = next_mem(mem, alias);
if (wq.test_set(mem->_idx)) {
return NULL;
return nullptr;
}
mem_ctrl = phase->ctrl_or_self(mem);
}
@ -790,11 +790,11 @@ Node* ShenandoahBarrierC2Support::dom_mem(Node* mem, Node* ctrl, int alias, Node
}
Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase) {
Node* mem = NULL;
Node* mem = nullptr;
Node* c = ctrl;
do {
if (c->is_Region()) {
for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax && mem == NULL; i++) {
for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax && mem == nullptr; i++) {
Node* u = c->fast_out(i);
if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
if (u->adr_type() == TypePtr::BOTTOM) {
@ -803,12 +803,12 @@ Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* ph
}
}
} else {
if (c->is_Call() && c->as_Call()->adr_type() != NULL) {
if (c->is_Call() && c->as_Call()->adr_type() != nullptr) {
CallProjections projs;
c->as_Call()->extract_projections(&projs, true, false);
if (projs.fallthrough_memproj != NULL) {
if (projs.fallthrough_memproj != nullptr) {
if (projs.fallthrough_memproj->adr_type() == TypePtr::BOTTOM) {
if (projs.catchall_memproj == NULL) {
if (projs.catchall_memproj == nullptr) {
mem = projs.fallthrough_memproj;
} else {
if (phase->is_dominator(projs.fallthrough_catchproj, ctrl)) {
@ -821,7 +821,7 @@ Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* ph
}
} else {
Node* proj = c->as_Call()->proj_out(TypeFunc::Memory);
if (proj != NULL &&
if (proj != nullptr &&
proj->adr_type() == TypePtr::BOTTOM) {
mem = proj;
}
@ -833,15 +833,15 @@ Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* ph
u->bottom_type() == Type::MEMORY &&
u->adr_type() == TypePtr::BOTTOM) {
assert(c->is_SafePoint() || c->is_MemBar() || c->is_Start(), "");
assert(mem == NULL, "only one proj");
assert(mem == nullptr, "only one proj");
mem = u;
}
}
assert(!c->is_Call() || c->as_Call()->adr_type() != NULL || mem == NULL, "no mem projection expected");
assert(!c->is_Call() || c->as_Call()->adr_type() != nullptr || mem == nullptr, "no mem projection expected");
}
}
c = phase->idom(c);
} while (mem == NULL);
} while (mem == nullptr);
return mem;
}
@ -874,7 +874,7 @@ void ShenandoahBarrierC2Support::test_gc_state(Node*& ctrl, Node* raw_mem, Node*
Node* gc_state_offset = igvn.MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
Node* gc_state_addr = new AddPNode(phase->C->top(), thread, gc_state_offset);
Node* gc_state = new LoadBNode(old_ctrl, raw_mem, gc_state_addr,
DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(NULL),
DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(nullptr),
TypeInt::BYTE, MemNode::unordered);
Node* gc_state_and = new AndINode(gc_state, igvn.intcon(flags));
Node* gc_state_cmp = new CmpINode(gc_state_and, igvn.zerocon(T_INT));
@ -940,7 +940,7 @@ void ShenandoahBarrierC2Support::test_in_cset(Node*& ctrl, Node*& not_cset_ctrl,
Node* cset_load_ptr = new CastX2PNode(cset_load_addr);
Node* cset_load = new LoadBNode(old_ctrl, raw_mem, cset_load_ptr,
DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(NULL),
DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(nullptr),
TypeInt::BYTE, MemNode::unordered);
Node* cset_cmp = new CmpINode(cset_load, igvn.zerocon(T_INT));
Node* cset_bool = new BoolNode(cset_cmp, BoolTest::ne);
@ -971,8 +971,8 @@ void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* lo
IdealLoopTree* loop = phase->get_loop(ctrl);
const TypePtr* obj_type = phase->igvn().type(val)->is_oopptr();
address calladdr = NULL;
const char* name = NULL;
address calladdr = nullptr;
const char* name = nullptr;
bool is_strong = ShenandoahBarrierSet::is_strong_access(decorators);
bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators);
bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
@ -1041,7 +1041,7 @@ void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const Mem
Node *n = uses_to_ignore.at(next);
for (uint i = 0; i < n->req(); i++) {
Node* in = n->in(i);
if (in != NULL && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
if (in != nullptr && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
uses_to_ignore.push(in);
}
}
@ -1076,14 +1076,14 @@ void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const Mem
}
static Node* create_phis_on_call_return(Node* ctrl, Node* c, Node* n, Node* n_clone, const CallProjections& projs, PhaseIdealLoop* phase) {
Node* region = NULL;
Node* region = nullptr;
while (c != ctrl) {
if (c->is_Region()) {
region = c;
}
c = phase->idom(c);
}
assert(region != NULL, "");
assert(region != nullptr, "");
Node* phi = new PhiNode(region, n->bottom_type());
for (uint j = 1; j < region->req(); j++) {
Node* in = region->in(j);
@ -1125,14 +1125,14 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
Node* ctrl = phase->get_ctrl(lrb);
Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
CallStaticJavaNode* unc = NULL;
Node* unc_ctrl = NULL;
CallStaticJavaNode* unc = nullptr;
Node* unc_ctrl = nullptr;
Node* uncasted_val = val;
for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
Node* u = lrb->fast_out(i);
if (u->Opcode() == Op_CastPP &&
u->in(0) != NULL &&
u->in(0) != nullptr &&
phase->is_dominator(u->in(0), ctrl)) {
const Type* u_t = phase->igvn().type(u);
@ -1153,7 +1153,7 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
}
Node* branch = no_branches(ctrl, u->in(0), false, phase);
assert(branch == NULL || branch == NodeSentinel, "was not looking for a branch");
assert(branch == nullptr || branch == NodeSentinel, "was not looking for a branch");
if (branch == NodeSentinel) {
continue;
}
@ -1184,7 +1184,7 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
if (idx < n->req()) {
Node* in = n->in(idx);
stack.set_index(idx+1);
if (in != NULL) {
if (in != nullptr) {
if (phase->has_ctrl(in)) {
if (phase->is_dominator(call, phase->get_ctrl(in))) {
#ifdef ASSERT
@ -1337,15 +1337,15 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
Node* orig_ctrl = ctrl;
Node* raw_mem = fixer.find_mem(ctrl, lrb);
Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, nullptr);
IdealLoopTree *loop = phase->get_loop(ctrl);
Node* heap_stable_ctrl = NULL;
Node* null_ctrl = NULL;
Node* heap_stable_ctrl = nullptr;
Node* null_ctrl = nullptr;
assert(val->bottom_type()->make_oopptr(), "need oop");
assert(val->bottom_type()->make_oopptr()->const_oop() == NULL, "expect non-constant");
assert(val->bottom_type()->make_oopptr()->const_oop() == nullptr, "expect non-constant");
enum { _heap_stable = 1, _evac_path, _not_cset, PATH_LIMIT };
Node* region = new RegionNode(PATH_LIMIT);
@ -1363,14 +1363,14 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
region->init_req(_heap_stable, heap_stable_ctrl);
val_phi->init_req(_heap_stable, val);
// Test for in-cset, unless it's a native-LRB. Native LRBs need to return NULL
// Test for in-cset, unless it's a native-LRB. Native LRBs need to return null
// even for non-cset objects to prevent resurrection of such objects.
// Wires !in_cset(obj) to slot 2 of region and phis
Node* not_cset_ctrl = NULL;
Node* not_cset_ctrl = nullptr;
if (ShenandoahBarrierSet::is_strong_access(lrb->decorators())) {
test_in_cset(ctrl, not_cset_ctrl, val, raw_mem, phase);
}
if (not_cset_ctrl != NULL) {
if (not_cset_ctrl != nullptr) {
region->init_req(_not_cset, not_cset_ctrl);
val_phi->init_req(_not_cset, val);
} else {
@ -1382,7 +1382,7 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
// Make the unconditional resolve for fwdptr.
// Call lrb-stub and wire up that path in slots 4
Node* result_mem = NULL;
Node* result_mem = nullptr;
Node* addr;
if (ShenandoahSelfFixing) {
@ -1469,9 +1469,9 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
IdealLoopTree* loop = phase->get_loop(ctrl);
Node* raw_mem = fixer.find_mem(ctrl, barrier);
Node* init_raw_mem = raw_mem;
Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
Node* heap_stable_ctrl = NULL;
Node* null_ctrl = NULL;
Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, nullptr);
Node* heap_stable_ctrl = nullptr;
Node* null_ctrl = nullptr;
uint last = phase->C->unique();
enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT };
@ -1488,9 +1488,9 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
phi->init_req(_heap_stable, raw_mem);
// Null path
Node* reg2_ctrl = NULL;
Node* reg2_ctrl = nullptr;
test_null(ctrl, pre_val, null_ctrl, phase);
if (null_ctrl != NULL) {
if (null_ctrl != nullptr) {
reg2_ctrl = null_ctrl->in(0);
region2->init_req(_null_path, null_ctrl);
phi2->init_req(_null_path, raw_mem);
@ -1518,7 +1518,7 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
Node* index_test = new BoolNode(index_cmp, BoolTest::ne);
phase->register_new_node(index_test, ctrl);
IfNode* queue_full_iff = new IfNode(ctrl, index_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
if (reg2_ctrl == NULL) reg2_ctrl = queue_full_iff;
if (reg2_ctrl == nullptr) reg2_ctrl = queue_full_iff;
phase->register_control(queue_full_iff, loop, ctrl);
Node* not_full = new IfTrueNode(queue_full_iff);
phase->register_control(not_full, loop, queue_full_iff);
@ -1598,7 +1598,7 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
Node* ShenandoahBarrierC2Support::get_load_addr(PhaseIdealLoop* phase, VectorSet& visited, Node* in) {
if (visited.test_set(in->_idx)) {
return NULL;
return nullptr;
}
switch (in->Opcode()) {
case Op_Proj:
@ -1625,17 +1625,17 @@ Node* ShenandoahBarrierC2Support::get_load_addr(PhaseIdealLoop* phase, VectorSet
Node* t = get_load_addr(phase, visited, in->in(CMoveNode::IfTrue));
Node* f = get_load_addr(phase, visited, in->in(CMoveNode::IfFalse));
// Handle unambiguous cases: single address reported on both branches.
if (t != NULL && f == NULL) return t;
if (t == NULL && f != NULL) return f;
if (t != NULL && t == f) return t;
if (t != nullptr && f == nullptr) return t;
if (t == nullptr && f != nullptr) return f;
if (t != nullptr && t == f) return t;
// Ambiguity.
return phase->igvn().zerocon(T_OBJECT);
}
case Op_Phi: {
Node* addr = NULL;
Node* addr = nullptr;
for (uint i = 1; i < in->req(); i++) {
Node* addr1 = get_load_addr(phase, visited, in->in(i));
if (addr == NULL) {
if (addr == nullptr) {
addr = addr1;
}
if (addr != addr1) {
@ -1677,7 +1677,7 @@ void ShenandoahBarrierC2Support::move_gc_state_test_out_of_loop(IfNode* iff, Pha
assert(is_gc_state_load(load), "broken");
if (!phase->is_dominator(load->in(0), entry_c)) {
Node* mem_ctrl = NULL;
Node* mem_ctrl = nullptr;
Node* mem = dom_mem(load->in(MemNode::Memory), loop_head, Compile::AliasIdxRaw, mem_ctrl, phase);
load = load->clone();
load->set_req(MemNode::Memory, mem);
@ -1771,7 +1771,7 @@ void ShenandoahBarrierC2Support::merge_back_to_back_tests(Node* n, PhaseIdealLoo
IfNode* ShenandoahBarrierC2Support::find_unswitching_candidate(const IdealLoopTree* loop, PhaseIdealLoop* phase) {
// Find first invariant test that doesn't exit the loop
LoopNode *head = loop->_head->as_Loop();
IfNode* unswitch_iff = NULL;
IfNode* unswitch_iff = nullptr;
Node* n = head->in(LoopNode::LoopBackControl);
int loop_has_sfpts = -1;
while (n != head) {
@ -1846,14 +1846,14 @@ void ShenandoahBarrierC2Support::optimize_after_expansion(VectorSet &visited, No
Node* n = heap_stable_tests.at(i);
IdealLoopTree* loop = phase->get_loop(n);
if (loop != phase->ltree_root() &&
loop->_child == NULL &&
loop->_child == nullptr &&
!loop->_irreducible) {
Node* head = loop->_head;
if (head->is_Loop() &&
(!head->is_CountedLoop() || head->as_CountedLoop()->is_main_loop() || head->as_CountedLoop()->is_normal_loop()) &&
!seen.test_set(head->_idx)) {
IfNode* iff = find_unswitching_candidate(loop, phase);
if (iff != NULL) {
if (iff != nullptr) {
Node* bol = iff->in(1);
if (head->as_Loop()->is_strip_mined()) {
head->as_Loop()->verify_strip_mined(0);
@ -1880,12 +1880,12 @@ void ShenandoahBarrierC2Support::optimize_after_expansion(VectorSet &visited, No
}
}
ShenandoahIUBarrierNode::ShenandoahIUBarrierNode(Node* val) : Node(NULL, val) {
ShenandoahIUBarrierNode::ShenandoahIUBarrierNode(Node* val) : Node(nullptr, val) {
ShenandoahBarrierSetC2::bsc2()->state()->add_iu_barrier(this);
}
const Type* ShenandoahIUBarrierNode::bottom_type() const {
if (in(1) == NULL || in(1)->is_top()) {
if (in(1) == nullptr || in(1)->is_top()) {
return Type::TOP;
}
const Type* t = in(1)->bottom_type();
@ -1896,7 +1896,7 @@ const Type* ShenandoahIUBarrierNode::bottom_type() const {
}
const Type* ShenandoahIUBarrierNode::Value(PhaseGVN* phase) const {
if (in(1) == NULL) {
if (in(1) == nullptr) {
return Type::TOP;
}
const Type* t = phase->type(in(1));
@ -1910,11 +1910,11 @@ const Type* ShenandoahIUBarrierNode::Value(PhaseGVN* phase) const {
}
int ShenandoahIUBarrierNode::needed(Node* n) {
if (n == NULL ||
if (n == nullptr ||
n->is_Allocate() ||
n->Opcode() == Op_ShenandoahIUBarrier ||
n->bottom_type() == TypePtr::NULL_PTR ||
(n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL)) {
(n->bottom_type()->make_oopptr() != nullptr && n->bottom_type()->make_oopptr()->const_oop() != nullptr)) {
return NotNeeded;
}
if (n->is_Phi() ||
@ -1926,11 +1926,11 @@ int ShenandoahIUBarrierNode::needed(Node* n) {
Node* ShenandoahIUBarrierNode::next(Node* n) {
for (;;) {
if (n == NULL) {
if (n == nullptr) {
return n;
} else if (n->bottom_type() == TypePtr::NULL_PTR) {
return n;
} else if (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL) {
} else if (n->bottom_type()->make_oopptr() != nullptr && n->bottom_type()->make_oopptr()->const_oop() != nullptr) {
return n;
} else if (n->is_ConstraintCast() ||
n->Opcode() == Op_DecodeN ||
@ -1943,7 +1943,7 @@ Node* ShenandoahIUBarrierNode::next(Node* n) {
}
}
ShouldNotReachHere();
return NULL;
return nullptr;
}
Node* ShenandoahIUBarrierNode::Identity(PhaseGVN* phase) {
@ -1956,7 +1956,7 @@ Node* ShenandoahIUBarrierNode::Identity(PhaseGVN* phase) {
if (cont == NotNeeded) {
return in(1);
} else if (cont == MaybeNeeded) {
if (igvn == NULL) {
if (igvn == nullptr) {
phase->record_for_igvn(this);
return this;
} else {
@ -1968,7 +1968,7 @@ Node* ShenandoahIUBarrierNode::Identity(PhaseGVN* phase) {
if (n->is_Phi()) {
for (uint i = 1; i < n->req(); i++) {
Node* m = n->in(i);
if (m != NULL) {
if (m != nullptr) {
wq.push(m);
}
}
@ -1979,7 +1979,7 @@ Node* ShenandoahIUBarrierNode::Identity(PhaseGVN* phase) {
m = n->in(CMoveNode::IfTrue);
wq.push(m);
}
Node* orig_n = NULL;
Node* orig_n = nullptr;
do {
if (wq_i >= wq.size()) {
return in(1);
@ -2004,7 +2004,7 @@ Node* ShenandoahIUBarrierNode::Identity(PhaseGVN* phase) {
static bool has_never_branch(Node* root) {
for (uint i = 1; i < root->req(); i++) {
Node* in = root->in(i);
if (in != NULL && in->Opcode() == Op_Halt && in->in(0)->is_Proj() && in->in(0)->in(0)->is_NeverBranch()) {
if (in != nullptr && in->Opcode() == Op_Halt && in->in(0)->is_Proj() && in->in(0)->in(0)->is_NeverBranch()) {
return true;
}
}
@ -2025,7 +2025,7 @@ void MemoryGraphFixer::collect_memory_nodes() {
int opc = n->Opcode();
uint i = stack.index();
if (i < n->req()) {
Node* mem = NULL;
Node* mem = nullptr;
if (opc == Op_Root) {
Node* in = n->in(i);
int in_opc = in->Opcode();
@ -2066,7 +2066,7 @@ void MemoryGraphFixer::collect_memory_nodes() {
}
i++;
stack.set_index(i);
if (mem == NULL) {
if (mem == nullptr) {
continue;
}
for (;;) {
@ -2119,7 +2119,7 @@ void MemoryGraphFixer::collect_memory_nodes() {
if (trace) {
for (int i = rpo_list.size() - 1; i >= 0; i--) {
Node* c = rpo_list.at(i);
if (_memory_nodes[c->_idx] != NULL) {
if (_memory_nodes[c->_idx] != nullptr) {
tty->print("X %d", c->_idx); _memory_nodes[c->_idx]->dump();
}
}
@ -2150,15 +2150,15 @@ void MemoryGraphFixer::collect_memory_nodes() {
Node* prev_mem = _memory_nodes[c->_idx];
if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
Node* prev_region = regions[c->_idx];
Node* unique = NULL;
Node* unique = nullptr;
for (uint j = 1; j < c->req() && unique != NodeSentinel; j++) {
Node* m = _memory_nodes[c->in(j)->_idx];
assert(m != NULL || (c->is_Loop() && j == LoopNode::LoopBackControl && iteration == 1) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "expect memory state");
if (m != NULL) {
assert(m != nullptr || (c->is_Loop() && j == LoopNode::LoopBackControl && iteration == 1) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "expect memory state");
if (m != nullptr) {
if (m == prev_region && ((c->is_Loop() && j == LoopNode::LoopBackControl) || (prev_region->is_Phi() && prev_region->in(0) == c))) {
assert(c->is_Loop() && j == LoopNode::LoopBackControl || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
// continue
} else if (unique == NULL) {
} else if (unique == nullptr) {
unique = m;
} else if (m == unique) {
// continue
@ -2167,30 +2167,30 @@ void MemoryGraphFixer::collect_memory_nodes() {
}
}
}
assert(unique != NULL, "empty phi???");
assert(unique != nullptr, "empty phi???");
if (unique != NodeSentinel) {
if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c) {
if (prev_region != nullptr && prev_region->is_Phi() && prev_region->in(0) == c) {
dead_phis.push(prev_region);
}
regions.map(c->_idx, unique);
} else {
Node* phi = NULL;
if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c && prev_region->_idx >= last) {
Node* phi = nullptr;
if (prev_region != nullptr && prev_region->is_Phi() && prev_region->in(0) == c && prev_region->_idx >= last) {
phi = prev_region;
for (uint k = 1; k < c->req(); k++) {
Node* m = _memory_nodes[c->in(k)->_idx];
assert(m != NULL, "expect memory state");
assert(m != nullptr, "expect memory state");
phi->set_req(k, m);
}
} else {
for (DUIterator_Fast jmax, j = c->fast_outs(jmax); j < jmax && phi == NULL; j++) {
for (DUIterator_Fast jmax, j = c->fast_outs(jmax); j < jmax && phi == nullptr; j++) {
Node* u = c->fast_out(j);
if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
(u->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(u->adr_type()) == _alias)) {
phi = u;
for (uint k = 1; k < c->req() && phi != NULL; k++) {
for (uint k = 1; k < c->req() && phi != nullptr; k++) {
Node* m = _memory_nodes[c->in(k)->_idx];
assert(m != NULL, "expect memory state");
assert(m != nullptr, "expect memory state");
if (u->in(k) != m) {
phi = NodeSentinel;
}
@ -2201,12 +2201,12 @@ void MemoryGraphFixer::collect_memory_nodes() {
phi = new PhiNode(c, Type::MEMORY, _phase->C->get_adr_type(_alias));
for (uint k = 1; k < c->req(); k++) {
Node* m = _memory_nodes[c->in(k)->_idx];
assert(m != NULL, "expect memory state");
assert(m != nullptr, "expect memory state");
phi->init_req(k, m);
}
}
}
if (phi != NULL) {
if (phi != nullptr) {
regions.map(c->_idx, phi);
} else {
assert(c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
@ -2219,9 +2219,9 @@ void MemoryGraphFixer::collect_memory_nodes() {
_memory_nodes.map(c->_idx, current_region);
}
}
} else if (prev_mem == NULL || prev_mem->is_Phi() || ctrl_or_self(prev_mem) != c) {
} else if (prev_mem == nullptr || prev_mem->is_Phi() || ctrl_or_self(prev_mem) != c) {
Node* m = _memory_nodes[_phase->idom(c)->_idx];
assert(m != NULL || c->Opcode() == Op_Halt, "expect memory state");
assert(m != nullptr || c->Opcode() == Op_Halt, "expect memory state");
if (m != prev_mem) {
_memory_nodes.map(c->_idx, m);
progress = true;
@ -2245,8 +2245,8 @@ void MemoryGraphFixer::collect_memory_nodes() {
Node* c = rpo_list.at(i);
if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
Node* n = regions[c->_idx];
assert(n != NULL || c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
if (n != NULL && n->is_Phi() && n->_idx >= last && n->in(0) == c) {
assert(n != nullptr || c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
if (n != nullptr && n->is_Phi() && n->_idx >= last && n->in(0) == c) {
_phase->register_new_node(n, c);
}
}
@ -2255,7 +2255,7 @@ void MemoryGraphFixer::collect_memory_nodes() {
Node* c = rpo_list.at(i);
if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
Node* n = regions[c->_idx];
assert(n != NULL || c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
assert(n != nullptr || c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
Node* u = c->fast_out(i);
if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
@ -2274,26 +2274,26 @@ void MemoryGraphFixer::collect_memory_nodes() {
}
Node* MemoryGraphFixer::collect_memory_for_infinite_loop(const Node* in) {
Node* mem = NULL;
Node* mem = nullptr;
Node* head = in->in(0);
assert(head->is_Region(), "unexpected infinite loop graph shape");
Node* phi_mem = NULL;
Node* phi_mem = nullptr;
for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
Node* u = head->fast_out(j);
if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
assert(phi_mem == NULL || phi_mem->adr_type() == TypePtr::BOTTOM, "");
assert(phi_mem == nullptr || phi_mem->adr_type() == TypePtr::BOTTOM, "");
phi_mem = u;
} else if (u->adr_type() == TypePtr::BOTTOM) {
assert(phi_mem == NULL || _phase->C->get_alias_index(phi_mem->adr_type()) == _alias, "");
if (phi_mem == NULL) {
assert(phi_mem == nullptr || _phase->C->get_alias_index(phi_mem->adr_type()) == _alias, "");
if (phi_mem == nullptr) {
phi_mem = u;
}
}
}
}
if (phi_mem == NULL) {
if (phi_mem == nullptr) {
ResourceMark rm;
Node_Stack stack(0);
stack.push(head, 1);
@ -2316,7 +2316,7 @@ Node* MemoryGraphFixer::collect_memory_for_infinite_loop(const Node* in) {
if (m->is_MergeMem()) {
m = m->as_MergeMem()->memory_at(_alias);
}
assert(mem == NULL || mem == m, "several memory states");
assert(mem == nullptr || mem == m, "several memory states");
mem = m;
break;
} else {
@ -2327,7 +2327,7 @@ Node* MemoryGraphFixer::collect_memory_for_infinite_loop(const Node* in) {
}
}
} while (stack.size() > 0);
assert(mem != NULL, "should have found safepoint");
assert(mem != nullptr, "should have found safepoint");
} else {
mem = phi_mem;
}
@ -2336,12 +2336,12 @@ Node* MemoryGraphFixer::collect_memory_for_infinite_loop(const Node* in) {
Node* MemoryGraphFixer::get_ctrl(Node* n) const {
Node* c = _phase->get_ctrl(n);
if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Call()) {
if (n->is_Proj() && n->in(0) != nullptr && n->in(0)->is_Call()) {
assert(c == n->in(0), "");
CallNode* call = c->as_Call();
CallProjections projs;
call->extract_projections(&projs, true, false);
if (projs.catchall_memproj != NULL) {
if (projs.catchall_memproj != nullptr) {
if (projs.fallthrough_memproj == n) {
c = projs.fallthrough_catchproj;
} else {
@ -2363,11 +2363,11 @@ Node* MemoryGraphFixer::ctrl_or_self(Node* n) const {
}
bool MemoryGraphFixer::mem_is_valid(Node* m, Node* c) const {
return m != NULL && get_ctrl(m) == c;
return m != nullptr && get_ctrl(m) == c;
}
Node* MemoryGraphFixer::find_mem(Node* ctrl, Node* n) const {
assert(n == NULL || _phase->ctrl_or_self(n) == ctrl, "");
assert(n == nullptr || _phase->ctrl_or_self(n) == ctrl, "");
assert(!ctrl->is_Call() || ctrl == n, "projection expected");
#ifdef ASSERT
if ((ctrl->is_Proj() && ctrl->in(0)->is_Call()) ||
@ -2386,11 +2386,11 @@ Node* MemoryGraphFixer::find_mem(Node* ctrl, Node* n) const {
Node* mem = _memory_nodes[ctrl->_idx];
Node* c = ctrl;
while (!mem_is_valid(mem, c) &&
(!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem))) {
(!c->is_CatchProj() || mem == nullptr || c->in(0)->in(0)->in(0) != get_ctrl(mem))) {
c = _phase->idom(c);
mem = _memory_nodes[c->_idx];
}
if (n != NULL && mem_is_valid(mem, c)) {
if (n != nullptr && mem_is_valid(mem, c)) {
while (!ShenandoahBarrierC2Support::is_dominator_same_ctrl(c, mem, n, _phase) && _phase->ctrl_or_self(mem) == ctrl) {
mem = next_mem(mem, _alias);
}
@ -2402,7 +2402,7 @@ Node* MemoryGraphFixer::find_mem(Node* ctrl, Node* n) const {
c = _phase->idom(c);
mem = _memory_nodes[c->_idx];
} while (!mem_is_valid(mem, c) &&
(!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem)));
(!c->is_CatchProj() || mem == nullptr || c->in(0)->in(0)->in(0) != get_ctrl(mem)));
}
}
assert(mem->bottom_type() == Type::MEMORY, "");
@ -2428,7 +2428,7 @@ void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_
GrowableArray<Node*> phis;
if (mem_for_ctrl != mem) {
Node* old = mem_for_ctrl;
Node* prev = NULL;
Node* prev = nullptr;
while (old != mem) {
prev = old;
if (old->is_Store() || old->is_ClearArray() || old->is_LoadStore()) {
@ -2441,7 +2441,7 @@ void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_
ShouldNotReachHere();
}
}
assert(prev != NULL, "");
assert(prev != nullptr, "");
if (new_ctrl != ctrl) {
_memory_nodes.map(ctrl->_idx, mem);
_memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
@ -2464,7 +2464,7 @@ void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_
!has_mem_phi(u) &&
u->unique_ctrl_out()->Opcode() != Op_Halt) {
DEBUG_ONLY(if (trace) { tty->print("ZZZ region"); u->dump(); });
DEBUG_ONLY(if (trace && m != NULL) { tty->print("ZZZ mem"); m->dump(); });
DEBUG_ONLY(if (trace && m != nullptr) { tty->print("ZZZ mem"); m->dump(); });
if (!mem_is_valid(m, u) || !m->is_Phi()) {
bool push = true;
@ -2484,7 +2484,7 @@ void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_
DEBUG_ONLY(if (trace) { tty->print("ZZZ NOT setting mem"); m->dump(); });
for (;;) {
assert(m->is_Mem() || m->is_LoadStore() || m->is_Proj(), "");
Node* next = NULL;
Node* next = nullptr;
if (m->is_Proj()) {
next = m->in(0);
} else {
@ -2531,14 +2531,14 @@ void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_
Node* r = n->in(0);
DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi"); n->dump(); });
for (uint j = 1; j < n->req(); j++) {
Node* m = find_mem(r->in(j), NULL);
Node* m = find_mem(r->in(j), nullptr);
_phase->igvn().replace_input_of(n, j, m);
DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi: %d", j); m->dump(); });
}
}
}
uint last = _phase->C->unique();
MergeMemNode* mm = NULL;
MergeMemNode* mm = nullptr;
int alias = _alias;
DEBUG_ONLY(if (trace) { tty->print("ZZZ raw mem is"); mem->dump(); });
// Process loads first to not miss an anti-dependency: if the memory
@ -2570,7 +2570,7 @@ void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_
} else if (u->is_MergeMem()) {
MergeMemNode* u_mm = u->as_MergeMem();
if (u_mm->memory_at(alias) == mem) {
MergeMemNode* newmm = NULL;
MergeMemNode* newmm = nullptr;
for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
Node* uu = u->fast_out(j);
assert(!uu->is_MergeMem(), "chain of MergeMems?");
@ -2580,7 +2580,7 @@ void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_
int nb = 0;
for (uint k = 1; k < uu->req(); k++) {
if (uu->in(k) == u) {
Node* m = find_mem(region->in(k), NULL);
Node* m = find_mem(region->in(k), nullptr);
if (m != mem) {
DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", k); uu->dump(); });
newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
@ -2615,7 +2615,7 @@ void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_
bool replaced = false;
for (uint j = 1; j < u->req(); j++) {
if (u->in(j) == mem) {
Node* m = find_mem(region->in(j), NULL);
Node* m = find_mem(region->in(j), nullptr);
Node* nnew = m;
if (m != mem) {
if (u->adr_type() == TypePtr::BOTTOM) {
@ -2633,8 +2633,8 @@ void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_
}
}
} else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
u->adr_type() == NULL) {
assert(u->adr_type() != NULL ||
u->adr_type() == nullptr) {
assert(u->adr_type() != nullptr ||
u->Opcode() == Op_Rethrow ||
u->Opcode() == Op_Return ||
u->Opcode() == Op_SafePoint ||
@ -2690,7 +2690,7 @@ MergeMemNode* MemoryGraphFixer::allocate_merge_mem(Node* mem, Node* rep_proj, No
}
MergeMemNode* MemoryGraphFixer::clone_merge_mem(Node* u, Node* mem, Node* rep_proj, Node* rep_ctrl, DUIterator& i) const {
MergeMemNode* newmm = NULL;
MergeMemNode* newmm = nullptr;
MergeMemNode* u_mm = u->as_MergeMem();
Node* c = _phase->get_ctrl(u);
if (_phase->is_dominator(c, rep_ctrl)) {
@ -2750,7 +2750,7 @@ bool MemoryGraphFixer::should_process_phi(Node* phi) const {
void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_proj, Node* rep_ctrl) const {
uint last = _phase->C->unique();
MergeMemNode* mm = NULL;
MergeMemNode* mm = nullptr;
assert(mem->bottom_type() == Type::MEMORY, "");
for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
Node* u = mem->out(i);
@ -2758,7 +2758,7 @@ void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_p
if (u->is_MergeMem()) {
MergeMemNode* u_mm = u->as_MergeMem();
if (u_mm->memory_at(_alias) == mem) {
MergeMemNode* newmm = NULL;
MergeMemNode* newmm = nullptr;
for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
Node* uu = u->fast_out(j);
assert(!uu->is_MergeMem(), "chain of MergeMems?");
@ -2768,7 +2768,7 @@ void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_p
int nb = 0;
for (uint k = 1; k < uu->req(); k++) {
if (uu->in(k) == u && _phase->is_dominator(rep_ctrl, region->in(k))) {
if (newmm == NULL) {
if (newmm == nullptr) {
newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
}
if (newmm != u) {
@ -2784,7 +2784,7 @@ void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_p
}
} else {
if (rep_ctrl != uu && ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(uu), replacement, uu, _phase)) {
if (newmm == NULL) {
if (newmm == nullptr) {
newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
}
if (newmm != u) {
@ -2804,7 +2804,7 @@ void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_p
if (u->in(j) == mem && _phase->is_dominator(rep_ctrl, region->in(j))) {
Node* nnew = rep_proj;
if (u->adr_type() == TypePtr::BOTTOM) {
if (mm == NULL) {
if (mm == nullptr) {
mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
}
nnew = mm;
@ -2819,8 +2819,8 @@ void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_p
}
} else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
u->adr_type() == NULL) {
assert(u->adr_type() != NULL ||
u->adr_type() == nullptr) {
assert(u->adr_type() != nullptr ||
u->Opcode() == Op_Rethrow ||
u->Opcode() == Op_Return ||
u->Opcode() == Op_SafePoint ||
@ -2828,7 +2828,7 @@ void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_p
(u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
u->Opcode() == Op_CallLeaf, "%s", u->Name());
if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
if (mm == NULL) {
if (mm == nullptr) {
mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
}
_phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
@ -2873,7 +2873,7 @@ bool ShenandoahLoadReferenceBarrierNode::cmp( const Node &n ) const {
}
const Type* ShenandoahLoadReferenceBarrierNode::bottom_type() const {
if (in(ValueIn) == NULL || in(ValueIn)->is_top()) {
if (in(ValueIn) == nullptr || in(ValueIn)->is_top()) {
return Type::TOP;
}
const Type* t = in(ValueIn)->bottom_type();
@ -2918,7 +2918,7 @@ bool ShenandoahLoadReferenceBarrierNode::needs_barrier(PhaseGVN* phase, Node* n)
}
bool ShenandoahLoadReferenceBarrierNode::needs_barrier_impl(PhaseGVN* phase, Node* n, Unique_Node_List &visited) {
if (n == NULL) return false;
if (n == nullptr) return false;
if (visited.member(n)) {
return false; // Been there.
}
@ -2941,7 +2941,7 @@ bool ShenandoahLoadReferenceBarrierNode::needs_barrier_impl(PhaseGVN* phase, Nod
// tty->print_cr("optimize barrier on null");
return false;
}
if (type->make_oopptr() && type->make_oopptr()->const_oop() != NULL) {
if (type->make_oopptr() && type->make_oopptr()->const_oop() != nullptr) {
// tty->print_cr("optimize barrier on constant");
return false;
}

src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.hpp

@ -49,7 +49,7 @@ private:
};
static bool verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used);
static void report_verify_failure(const char* msg, Node* n1 = NULL, Node* n2 = NULL);
static void report_verify_failure(const char* msg, Node* n1 = nullptr, Node* n2 = nullptr);
#endif
static Node* dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase);
static Node* no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase);
@ -142,10 +142,10 @@ public:
: CompareAndSwapPNode(c, mem, adr, val, ex, mem_ord) { }
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) {
if (in(ExpectedIn) != NULL && phase->type(in(ExpectedIn)) == TypePtr::NULL_PTR) {
if (in(ExpectedIn) != nullptr && phase->type(in(ExpectedIn)) == TypePtr::NULL_PTR) {
return new CompareAndSwapPNode(in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address), in(MemNode::ValueIn), in(ExpectedIn), order());
}
return NULL;
return nullptr;
}
virtual int Opcode() const;
@ -157,10 +157,10 @@ public:
: CompareAndSwapNNode(c, mem, adr, val, ex, mem_ord) { }
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) {
if (in(ExpectedIn) != NULL && phase->type(in(ExpectedIn)) == TypeNarrowOop::NULL_PTR) {
if (in(ExpectedIn) != nullptr && phase->type(in(ExpectedIn)) == TypeNarrowOop::NULL_PTR) {
return new CompareAndSwapNNode(in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address), in(MemNode::ValueIn), in(ExpectedIn), order());
}
return NULL;
return nullptr;
}
virtual int Opcode() const;
@ -172,10 +172,10 @@ public:
: WeakCompareAndSwapPNode(c, mem, adr, val, ex, mem_ord) { }
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) {
if (in(ExpectedIn) != NULL && phase->type(in(ExpectedIn)) == TypePtr::NULL_PTR) {
if (in(ExpectedIn) != nullptr && phase->type(in(ExpectedIn)) == TypePtr::NULL_PTR) {
return new WeakCompareAndSwapPNode(in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address), in(MemNode::ValueIn), in(ExpectedIn), order());
}
return NULL;
return nullptr;
}
virtual int Opcode() const;
@ -187,10 +187,10 @@ public:
: WeakCompareAndSwapNNode(c, mem, adr, val, ex, mem_ord) { }
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) {
if (in(ExpectedIn) != NULL && phase->type(in(ExpectedIn)) == TypeNarrowOop::NULL_PTR) {
if (in(ExpectedIn) != nullptr && phase->type(in(ExpectedIn)) == TypeNarrowOop::NULL_PTR) {
return new WeakCompareAndSwapNNode(in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address), in(MemNode::ValueIn), in(ExpectedIn), order());
}
return NULL;
return nullptr;
}
virtual int Opcode() const;
@ -202,10 +202,10 @@ public:
: CompareAndExchangePNode(c, mem, adr, val, ex, at, t, mem_ord) { }
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) {
if (in(ExpectedIn) != NULL && phase->type(in(ExpectedIn)) == TypePtr::NULL_PTR) {
if (in(ExpectedIn) != nullptr && phase->type(in(ExpectedIn)) == TypePtr::NULL_PTR) {
return new CompareAndExchangePNode(in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address), in(MemNode::ValueIn), in(ExpectedIn), adr_type(), bottom_type(), order());
}
return NULL;
return nullptr;
}
virtual int Opcode() const;
@ -217,10 +217,10 @@ public:
: CompareAndExchangeNNode(c, mem, adr, val, ex, at, t, mem_ord) { }
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) {
if (in(ExpectedIn) != NULL && phase->type(in(ExpectedIn)) == TypeNarrowOop::NULL_PTR) {
if (in(ExpectedIn) != nullptr && phase->type(in(ExpectedIn)) == TypeNarrowOop::NULL_PTR) {
return new CompareAndExchangeNNode(in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address), in(MemNode::ValueIn), in(ExpectedIn), adr_type(), bottom_type(), order());
}
return NULL;
return nullptr;
}
virtual int Opcode() const;

View File

@ -43,7 +43,7 @@ int ShenandoahHeuristics::compare_by_garbage(RegionData a, RegionData b) {
}
ShenandoahHeuristics::ShenandoahHeuristics() :
_region_data(NULL),
_region_data(nullptr),
_degenerated_cycles_in_a_row(0),
_successful_cycles_in_a_row(0),
_cycle_start(os::elapsedTime()),

View File

@ -65,7 +65,7 @@ void ShenandoahIUMode::initialize_flags() const {
}
ShenandoahHeuristics* ShenandoahIUMode::initialize_heuristics() const {
if (ShenandoahGCHeuristics == NULL) {
if (ShenandoahGCHeuristics == nullptr) {
vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option (null)");
}
if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) {
@ -78,5 +78,5 @@ ShenandoahHeuristics* ShenandoahIUMode::initialize_heuristics() const {
return new ShenandoahCompactHeuristics();
}
vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option");
return NULL;
return nullptr;
}

View File

@ -56,7 +56,7 @@ void ShenandoahPassiveMode::initialize_flags() const {
// No barriers are required to run.
}
ShenandoahHeuristics* ShenandoahPassiveMode::initialize_heuristics() const {
if (ShenandoahGCHeuristics == NULL) {
if (ShenandoahGCHeuristics == nullptr) {
vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option (null)");
}
return new ShenandoahPassiveHeuristics();

View File

@ -53,7 +53,7 @@ void ShenandoahSATBMode::initialize_flags() const {
}
ShenandoahHeuristics* ShenandoahSATBMode::initialize_heuristics() const {
if (ShenandoahGCHeuristics == NULL) {
if (ShenandoahGCHeuristics == nullptr) {
vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option (null)");
}
if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) {
@ -66,5 +66,5 @@ ShenandoahHeuristics* ShenandoahSATBMode::initialize_heuristics() const {
return new ShenandoahCompactHeuristics();
}
vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option");
return NULL;
return nullptr;
}

View File

@ -40,7 +40,7 @@ void print_raw_memory(ShenandoahMessageBuffer &msg, void* loc) {
if (!heap->is_in(loc)) return;
ShenandoahHeapRegion* r = heap->heap_region_containing(loc);
if (r != NULL && r->is_committed()) {
if (r != nullptr && r->is_committed()) {
address start = MAX2((address) r->bottom(), (address) loc - 32);
address end = MIN2((address) r->end(), (address) loc + 128);
if (start >= end) return;
@ -98,7 +98,7 @@ void ShenandoahAsserts::print_obj_safe(ShenandoahMessageBuffer& msg, void* loc)
msg.append(" " PTR_FORMAT " - safe print, no details\n", p2i(loc));
if (heap->is_in(loc)) {
ShenandoahHeapRegion* r = heap->heap_region_containing(loc);
if (r != NULL) {
if (r != nullptr) {
stringStream ss;
r->print_on(&ss);
msg.append(" region: %s", ss.freeze());
@ -113,12 +113,12 @@ void ShenandoahAsserts::print_failure(SafeLevel level, oop obj, void* interior_l
ShenandoahHeap* heap = ShenandoahHeap::heap();
ResourceMark rm;
bool loc_in_heap = (loc != NULL && heap->is_in(loc));
bool loc_in_heap = (loc != nullptr && heap->is_in(loc));
ShenandoahMessageBuffer msg("%s; %s\n\n", phase, label);
msg.append("Referenced from:\n");
if (interior_loc != NULL) {
if (interior_loc != nullptr) {
msg.append(" interior location: " PTR_FORMAT "\n", p2i(interior_loc));
if (loc_in_heap) {
print_obj(msg, loc);
@ -170,7 +170,7 @@ void ShenandoahAsserts::assert_in_heap(void* interior_loc, oop obj, const char *
ShenandoahHeap* heap = ShenandoahHeap::heap();
if (!heap->is_in(obj)) {
print_failure(_safe_unknown, obj, interior_loc, NULL, "Shenandoah assert_in_heap failed",
print_failure(_safe_unknown, obj, interior_loc, nullptr, "Shenandoah assert_in_heap failed",
"oop must point to a heap address",
file, line);
}
@ -179,8 +179,8 @@ void ShenandoahAsserts::assert_in_heap(void* interior_loc, oop obj, const char *
void ShenandoahAsserts::assert_in_heap_or_null(void* interior_loc, oop obj, const char *file, int line) {
ShenandoahHeap* heap = ShenandoahHeap::heap();
if (obj != NULL && !heap->is_in(obj)) {
print_failure(_safe_unknown, obj, interior_loc, NULL, "Shenandoah assert_in_heap_or_null failed",
if (obj != nullptr && !heap->is_in(obj)) {
print_failure(_safe_unknown, obj, interior_loc, nullptr, "Shenandoah assert_in_heap_or_null failed",
"oop must point to a heap address",
file, line);
}
@ -192,20 +192,20 @@ void ShenandoahAsserts::assert_correct(void* interior_loc, oop obj, const char*
// Step 1. Check that obj is correct.
// After this step, it is safe to call heap_region_containing().
if (!heap->is_in(obj)) {
print_failure(_safe_unknown, obj, interior_loc, NULL, "Shenandoah assert_correct failed",
print_failure(_safe_unknown, obj, interior_loc, nullptr, "Shenandoah assert_correct failed",
"oop must point to a heap address",
file, line);
}
Klass* obj_klass = obj->klass_or_null();
if (obj_klass == NULL) {
print_failure(_safe_unknown, obj, interior_loc, NULL, "Shenandoah assert_correct failed",
"Object klass pointer should not be NULL",
if (obj_klass == nullptr) {
print_failure(_safe_unknown, obj, interior_loc, nullptr, "Shenandoah assert_correct failed",
"Object klass pointer should not be null",
file,line);
}
if (!Metaspace::contains(obj_klass)) {
print_failure(_safe_unknown, obj, interior_loc, NULL, "Shenandoah assert_correct failed",
print_failure(_safe_unknown, obj, interior_loc, nullptr, "Shenandoah assert_correct failed",
"Object klass pointer must go to metaspace",
file,line);
}
@ -217,27 +217,27 @@ void ShenandoahAsserts::assert_correct(void* interior_loc, oop obj, const char*
// tries fwdptr manipulation when Full GC is running. The only exception is using the fwdptr
// that still points to the object itself.
if (heap->is_full_gc_move_in_progress()) {
print_failure(_safe_oop, obj, interior_loc, NULL, "Shenandoah assert_correct failed",
print_failure(_safe_oop, obj, interior_loc, nullptr, "Shenandoah assert_correct failed",
"Non-trivial forwarding pointer during Full GC moves, probable bug.",
file, line);
}
// Step 2. Check that forwardee is correct
if (!heap->is_in(fwd)) {
print_failure(_safe_oop, obj, interior_loc, NULL, "Shenandoah assert_correct failed",
print_failure(_safe_oop, obj, interior_loc, nullptr, "Shenandoah assert_correct failed",
"Forwardee must point to a heap address",
file, line);
}
if (obj_klass != fwd->klass()) {
print_failure(_safe_oop, obj, interior_loc, NULL, "Shenandoah assert_correct failed",
print_failure(_safe_oop, obj, interior_loc, nullptr, "Shenandoah assert_correct failed",
"Forwardee klass disagrees with object class",
file, line);
}
// Step 3. Check that forwardee points to correct region
if (heap->heap_region_index_containing(fwd) == heap->heap_region_index_containing(obj)) {
print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_correct failed",
print_failure(_safe_all, obj, interior_loc, nullptr, "Shenandoah assert_correct failed",
"Non-trivial forwardee should in another region",
file, line);
}
@ -245,7 +245,7 @@ void ShenandoahAsserts::assert_correct(void* interior_loc, oop obj, const char*
// Step 4. Check for multiple forwardings
oop fwd2 = ShenandoahForwarding::get_forwardee_raw_unchecked(fwd);
if (fwd != fwd2) {
print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_correct failed",
print_failure(_safe_all, obj, interior_loc, nullptr, "Shenandoah assert_correct failed",
"Multiple forwardings",
file, line);
}
@ -258,7 +258,7 @@ void ShenandoahAsserts::assert_in_correct_region(void* interior_loc, oop obj, co
ShenandoahHeap* heap = ShenandoahHeap::heap();
ShenandoahHeapRegion* r = heap->heap_region_containing(obj);
if (!r->is_active()) {
print_failure(_safe_unknown, obj, interior_loc, NULL, "Shenandoah assert_in_correct_region failed",
print_failure(_safe_unknown, obj, interior_loc, nullptr, "Shenandoah assert_in_correct_region failed",
"Object must reside in active region",
file, line);
}
@ -270,12 +270,12 @@ void ShenandoahAsserts::assert_in_correct_region(void* interior_loc, oop obj, co
for (size_t i = idx; i < idx + num_regions; i++) {
ShenandoahHeapRegion* chain_reg = heap->get_region(i);
if (i == idx && !chain_reg->is_humongous_start()) {
print_failure(_safe_unknown, obj, interior_loc, NULL, "Shenandoah assert_in_correct_region failed",
print_failure(_safe_unknown, obj, interior_loc, nullptr, "Shenandoah assert_in_correct_region failed",
"Object must reside in humongous start",
file, line);
}
if (i != idx && !chain_reg->is_humongous_continuation()) {
print_failure(_safe_oop, obj, interior_loc, NULL, "Shenandoah assert_in_correct_region failed",
print_failure(_safe_oop, obj, interior_loc, nullptr, "Shenandoah assert_in_correct_region failed",
"Humongous continuation should be of proper size",
file, line);
}
@ -288,7 +288,7 @@ void ShenandoahAsserts::assert_forwarded(void* interior_loc, oop obj, const char
oop fwd = ShenandoahForwarding::get_forwardee_raw_unchecked(obj);
if (obj == fwd) {
print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_forwarded failed",
print_failure(_safe_all, obj, interior_loc, nullptr, "Shenandoah assert_forwarded failed",
"Object should be forwarded",
file, line);
}
@ -299,7 +299,7 @@ void ShenandoahAsserts::assert_not_forwarded(void* interior_loc, oop obj, const
oop fwd = ShenandoahForwarding::get_forwardee_raw_unchecked(obj);
if (obj != fwd) {
print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_not_forwarded failed",
print_failure(_safe_all, obj, interior_loc, nullptr, "Shenandoah assert_not_forwarded failed",
"Object should not be forwarded",
file, line);
}
@ -310,7 +310,7 @@ void ShenandoahAsserts::assert_marked(void *interior_loc, oop obj, const char *f
ShenandoahHeap* heap = ShenandoahHeap::heap();
if (!heap->marking_context()->is_marked(obj)) {
print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_marked failed",
print_failure(_safe_all, obj, interior_loc, nullptr, "Shenandoah assert_marked failed",
"Object should be marked",
file, line);
}
@ -321,7 +321,7 @@ void ShenandoahAsserts::assert_marked_weak(void *interior_loc, oop obj, const ch
ShenandoahHeap* heap = ShenandoahHeap::heap();
if (!heap->marking_context()->is_marked_weak(obj)) {
print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_marked_weak failed",
print_failure(_safe_all, obj, interior_loc, nullptr, "Shenandoah assert_marked_weak failed",
"Object should be marked weakly",
file, line);
}
@ -332,7 +332,7 @@ void ShenandoahAsserts::assert_marked_strong(void *interior_loc, oop obj, const
ShenandoahHeap* heap = ShenandoahHeap::heap();
if (!heap->marking_context()->is_marked_strong(obj)) {
print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_marked_strong failed",
print_failure(_safe_all, obj, interior_loc, nullptr, "Shenandoah assert_marked_strong failed",
"Object should be marked strongly",
file, line);
}
@ -343,7 +343,7 @@ void ShenandoahAsserts::assert_in_cset(void* interior_loc, oop obj, const char*
ShenandoahHeap* heap = ShenandoahHeap::heap();
if (!heap->in_collection_set(obj)) {
print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_in_cset failed",
print_failure(_safe_all, obj, interior_loc, nullptr, "Shenandoah assert_in_cset failed",
"Object should be in collection set",
file, line);
}
@ -354,7 +354,7 @@ void ShenandoahAsserts::assert_not_in_cset(void* interior_loc, oop obj, const ch
ShenandoahHeap* heap = ShenandoahHeap::heap();
if (heap->in_collection_set(obj)) {
print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_not_in_cset failed",
print_failure(_safe_all, obj, interior_loc, nullptr, "Shenandoah assert_not_in_cset failed",
"Object should not be in collection set",
file, line);
}
@ -363,7 +363,7 @@ void ShenandoahAsserts::assert_not_in_cset(void* interior_loc, oop obj, const ch
void ShenandoahAsserts::assert_not_in_cset_loc(void* interior_loc, const char* file, int line) {
ShenandoahHeap* heap = ShenandoahHeap::heap();
if (heap->in_collection_set_loc(interior_loc)) {
print_failure(_safe_unknown, NULL, interior_loc, NULL, "Shenandoah assert_not_in_cset_loc failed",
print_failure(_safe_unknown, nullptr, interior_loc, nullptr, "Shenandoah assert_not_in_cset_loc failed",
"Interior location should not be in collection set",
file, line);
}

View File

@ -45,7 +45,7 @@ ShenandoahBarrierSet::ShenandoahBarrierSet(ShenandoahHeap* heap) :
BarrierSet(make_barrier_set_assembler<ShenandoahBarrierSetAssembler>(),
make_barrier_set_c1<ShenandoahBarrierSetC1>(),
make_barrier_set_c2<ShenandoahBarrierSetC2>(),
ShenandoahNMethodBarrier ? new ShenandoahBarrierSetNMethod(heap) : NULL,
ShenandoahNMethodBarrier ? new ShenandoahBarrierSetNMethod(heap) : nullptr,
new ShenandoahBarrierSetStackChunk(),
BarrierSet::FakeRtti(BarrierSet::ShenandoahBarrierSet)),
_heap(heap),
@ -103,7 +103,7 @@ void ShenandoahBarrierSet::on_thread_attach(Thread *thread) {
ShenandoahThreadLocalData::initialize_gclab(thread);
BarrierSetNMethod* bs_nm = barrier_set_nmethod();
if (bs_nm != NULL) {
if (bs_nm != nullptr) {
thread->set_nmethod_disarmed_guard_value(bs_nm->disarmed_guard_value());
}
@ -120,7 +120,7 @@ void ShenandoahBarrierSet::on_thread_detach(Thread *thread) {
_satb_mark_queue_set.flush_queue(queue);
if (thread->is_Java_thread()) {
PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
if (gclab != NULL) {
if (gclab != nullptr) {
gclab->retire();
}

View File

@ -43,7 +43,7 @@ inline oop ShenandoahBarrierSet::resolve_forwarded_not_null(oop p) {
}
inline oop ShenandoahBarrierSet::resolve_forwarded(oop p) {
if (p != NULL) {
if (p != nullptr) {
return resolve_forwarded_not_null(p);
} else {
return p;
@ -68,7 +68,7 @@ inline oop ShenandoahBarrierSet::load_reference_barrier_mutator(oop obj, T* load
fwd = _heap->evacuate_object(obj, t);
}
if (load_addr != NULL && fwd != obj) {
if (load_addr != nullptr && fwd != obj) {
// Since we are here and we know the load address, update the reference.
ShenandoahHeap::atomic_update_oop(fwd, load_addr, obj);
}
@ -81,8 +81,8 @@ inline oop ShenandoahBarrierSet::load_reference_barrier(oop obj) {
return obj;
}
if (_heap->has_forwarded_objects() &&
_heap->in_collection_set(obj)) { // Subsumes NULL-check
assert(obj != NULL, "cset check must have subsumed NULL-check");
_heap->in_collection_set(obj)) { // Subsumes null-check
assert(obj != nullptr, "cset check must have subsumed null-check");
oop fwd = resolve_forwarded_not_null(obj);
if (obj == fwd && _heap->is_evacuation_in_progress()) {
Thread* t = Thread::current();
@ -96,22 +96,22 @@ inline oop ShenandoahBarrierSet::load_reference_barrier(oop obj) {
template <class T>
inline oop ShenandoahBarrierSet::load_reference_barrier(DecoratorSet decorators, oop obj, T* load_addr) {
if (obj == NULL) {
return NULL;
if (obj == nullptr) {
return nullptr;
}
// Prevent resurrection of unreachable phantom (i.e. weak-native) references.
if ((decorators & ON_PHANTOM_OOP_REF) != 0 &&
_heap->is_concurrent_weak_root_in_progress() &&
!_heap->marking_context()->is_marked(obj)) {
return NULL;
return nullptr;
}
// Prevent resurrection of unreachable weak references.
if ((decorators & ON_WEAK_OOP_REF) != 0 &&
_heap->is_concurrent_weak_root_in_progress() &&
!_heap->marking_context()->is_marked_strong(obj)) {
return NULL;
return nullptr;
}
// Prevent resurrection of unreachable objects that are visited during
@ -123,7 +123,7 @@ inline oop ShenandoahBarrierSet::load_reference_barrier(DecoratorSet decorators,
}
oop fwd = load_reference_barrier(obj);
if (ShenandoahSelfFixing && load_addr != NULL && fwd != obj) {
if (ShenandoahSelfFixing && load_addr != nullptr && fwd != obj) {
// Since we are here and we know the load address, update the reference.
ShenandoahHeap::atomic_update_oop(fwd, load_addr, obj);
}
@ -132,7 +132,7 @@ inline oop ShenandoahBarrierSet::load_reference_barrier(DecoratorSet decorators,
}
inline void ShenandoahBarrierSet::enqueue(oop obj) {
assert(obj != NULL, "checked by caller");
assert(obj != nullptr, "checked by caller");
assert(_satb_mark_queue_set.is_active(), "only get here when SATB active");
// Filter marked objects before hitting the SATB queues. The same predicate would
@ -159,13 +159,13 @@ inline void ShenandoahBarrierSet::satb_barrier(T *field) {
}
inline void ShenandoahBarrierSet::satb_enqueue(oop value) {
if (value != NULL && ShenandoahSATBBarrier && _heap->is_concurrent_mark_in_progress()) {
if (value != nullptr && ShenandoahSATBBarrier && _heap->is_concurrent_mark_in_progress()) {
enqueue(value);
}
}
inline void ShenandoahBarrierSet::iu_barrier(oop obj) {
if (ShenandoahIUBarrier && obj != NULL && _heap->is_concurrent_mark_in_progress()) {
if (ShenandoahIUBarrier && obj != nullptr && _heap->is_concurrent_mark_in_progress()) {
enqueue(obj);
}
}
@ -242,8 +242,8 @@ inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_loa
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline void ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_store_common(T* addr, oop value) {
shenandoah_assert_marked_if(NULL, value, !CompressedOops::is_null(value) && ShenandoahHeap::heap()->is_evacuation_in_progress());
shenandoah_assert_not_in_cset_if(addr, value, value != NULL && !ShenandoahHeap::heap()->cancelled_gc());
shenandoah_assert_marked_if(nullptr, value, !CompressedOops::is_null(value) && ShenandoahHeap::heap()->is_evacuation_in_progress());
shenandoah_assert_not_in_cset_if(addr, value, value != nullptr && !ShenandoahHeap::heap()->cancelled_gc());
ShenandoahBarrierSet* const bs = ShenandoahBarrierSet::barrier_set();
bs->iu_barrier(value);
bs->satb_barrier<decorators>(addr);
@ -260,7 +260,7 @@ template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline void ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_store_in_heap(T* addr, oop value) {
shenandoah_assert_not_in_cset_loc_except(addr, ShenandoahHeap::heap()->cancelled_gc());
shenandoah_assert_not_forwarded_except (addr, value, value == NULL || ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahHeap::heap()->is_concurrent_mark_in_progress());
shenandoah_assert_not_forwarded_except (addr, value, value == nullptr || ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahHeap::heap()->is_concurrent_mark_in_progress());
oop_store_common(addr, value);
}
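
The ordering in oop_store_common() above also shows why the two queue-based barriers take different arguments: satb_barrier() receives the address so it can enqueue the value about to be overwritten (snapshot-at-the-beginning keeps old references alive), while iu_barrier() receives the value being installed (incremental update marks through new references). A sketch of that shape, with assumed decorators and access plumbing rather than the VM's exact code:

template <typename T>
void store_with_barriers(T* addr, oop new_value) {
  ShenandoahBarrierSet* bs = ShenandoahBarrierSet::barrier_set();
  bs->iu_barrier(new_value);           // IU: enqueue the new value
  bs->satb_barrier<IN_HEAP>(addr);     // SATB: load *addr and enqueue the old value
  RawAccess<>::oop_store(addr, new_value);
}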

View File

@ -102,7 +102,7 @@ void ShenandoahBarrierSet::clone_update(oop obj) {
void ShenandoahBarrierSet::clone_barrier(oop obj) {
assert(ShenandoahCloneBarrier, "only get here with clone barriers enabled");
shenandoah_assert_correct(NULL, obj);
shenandoah_assert_correct(nullptr, obj);
int gc_state = _heap->gc_state();
if ((gc_state & ShenandoahHeap::MARKING) != 0) {

View File

@ -37,7 +37,7 @@
bool ShenandoahBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) {
ShenandoahReentrantLock* lock = ShenandoahNMethod::lock_for_nmethod(nm);
assert(lock != NULL, "Must be");
assert(lock != nullptr, "Must be");
ShenandoahReentrantLocker locker(lock);
if (!is_armed(nm)) {

View File

@ -47,7 +47,7 @@ bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
return false;
}
obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
shenandoah_assert_not_forwarded_if(NULL, obj, ShenandoahHeap::heap()->is_concurrent_mark_in_progress());
shenandoah_assert_not_forwarded_if(nullptr, obj, ShenandoahHeap::heap()->is_concurrent_mark_in_progress());
return _mark_context->is_marked(obj);
}
@ -59,7 +59,7 @@ bool ShenandoahIsAliveClosure::do_object_b(oop obj) {
if (CompressedOops::is_null(obj)) {
return false;
}
shenandoah_assert_not_forwarded(NULL, obj);
shenandoah_assert_not_forwarded(nullptr, obj);
return _mark_context->is_marked(obj);
}
@ -111,7 +111,7 @@ void ShenandoahUpdateRefsClosure::do_oop(narrowOop* p) { do_oop_work(p); }
template <bool concurrent, bool stable_thread>
ShenandoahEvacuateUpdateRootClosureBase<concurrent, stable_thread>::ShenandoahEvacuateUpdateRootClosureBase() :
_heap(ShenandoahHeap::heap()), _thread(stable_thread ? Thread::current() : NULL) {
_heap(ShenandoahHeap::heap()), _thread(stable_thread ? Thread::current() : nullptr) {
}
template <bool concurrent, bool stable_thread>
@ -199,7 +199,7 @@ ShenandoahCodeBlobAndDisarmClosure::ShenandoahCodeBlobAndDisarmClosure(OopClosur
void ShenandoahCodeBlobAndDisarmClosure::do_code_blob(CodeBlob* cb) {
nmethod* const nm = cb->as_nmethod_or_null();
if (nm != NULL) {
if (nm != nullptr) {
assert(!ShenandoahNMethod::gc_data(nm)->is_unregistered(), "Should not be here");
CodeBlobToOopClosure::do_code_blob(cb);
_bs->disarm(nm);

View File

@ -83,7 +83,7 @@ void ShenandoahParallelCodeHeapIterator::parallel_blobs_do(CodeBlobClosure* f) {
int count = 0;
bool process_block = true;
for (CodeBlob *cb = CodeCache::first_blob(_heap); cb != NULL; cb = CodeCache::next_blob(_heap, cb)) {
for (CodeBlob *cb = CodeCache::first_blob(_heap); cb != nullptr; cb = CodeCache::next_blob(_heap, cb)) {
int current = count++;
if ((current & stride_mask) == 0) {
process_block = (current >= _claimed_idx) &&
@ -119,7 +119,7 @@ void ShenandoahCodeRoots::unregister_nmethod(nmethod* nm) {
}
void ShenandoahCodeRoots::arm_nmethods() {
assert(BarrierSet::barrier_set()->barrier_set_nmethod() != NULL, "Sanity");
assert(BarrierSet::barrier_set()->barrier_set_nmethod() != nullptr, "Sanity");
BarrierSet::barrier_set()->barrier_set_nmethod()->arm_all_nmethods();
}
@ -285,7 +285,7 @@ void ShenandoahCodeRoots::purge() {
ShenandoahCodeRootsIterator::ShenandoahCodeRootsIterator() :
_par_iterator(CodeCache::heaps()),
_table_snapshot(NULL) {
_table_snapshot(nullptr) {
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
MutexLocker locker(CodeCache_lock, Mutex::_no_safepoint_check_flag);
_table_snapshot = ShenandoahCodeRoots::table()->snapshot_for_iteration();
@ -294,12 +294,12 @@ ShenandoahCodeRootsIterator::ShenandoahCodeRootsIterator() :
ShenandoahCodeRootsIterator::~ShenandoahCodeRootsIterator() {
MonitorLocker locker(CodeCache_lock, Mutex::_no_safepoint_check_flag);
ShenandoahCodeRoots::table()->finish_iteration(_table_snapshot);
_table_snapshot = NULL;
_table_snapshot = nullptr;
locker.notify_all();
}
void ShenandoahCodeRootsIterator::possibly_parallel_blobs_do(CodeBlobClosure *f) {
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
assert(_table_snapshot != NULL, "Sanity");
assert(_table_snapshot != nullptr, "Sanity");
_table_snapshot->parallel_blobs_do(f);
}
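
The snapshot protocol above is easiest to read as a lifecycle. A hypothetical usage sketch (the closure type is made up; the iterator API is as shown in the hunks above, and the asserts require this to run at a safepoint):

{
  ShenandoahCodeRootsIterator it;          // ctor: snapshot_for_iteration() under CodeCache_lock
  MyCodeBlobClosure cl;                    // assumed CodeBlobClosure subclass
  it.possibly_parallel_blobs_do(&cl);      // each worker claims blobs from the shared snapshot
}                                          // dtor: finish_iteration(), then notify_all()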

View File

@ -46,8 +46,8 @@ ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, ReservedS
_current_index(0) {
// The collection set map is reserved to cover the entire heap *and* zero addresses.
// This is needed to accept in-cset checks for both heap oops and NULLs, freeing
// high-performance code from checking for NULL first.
// This is needed to accept in-cset checks for both heap oops and nulls, freeing
// high-performance code from checking for null first.
//
// Since heap_base can be far away, committing the entire map would waste memory.
// Therefore, we only commit the parts that are needed to operate: the heap view,
@ -131,7 +131,7 @@ ShenandoahHeapRegion* ShenandoahCollectionSet::claim_next() {
}
}
}
return NULL;
return nullptr;
}
ShenandoahHeapRegion* ShenandoahCollectionSet::next() {
@ -146,7 +146,7 @@ ShenandoahHeapRegion* ShenandoahCollectionSet::next() {
}
}
return NULL;
return nullptr;
}
void ShenandoahCollectionSet::print_on(outputStream* out) const {

View File

@ -41,12 +41,12 @@ bool ShenandoahCollectionSet::is_in(ShenandoahHeapRegion* r) const {
}
bool ShenandoahCollectionSet::is_in(oop p) const {
shenandoah_assert_in_heap_or_null(NULL, p);
shenandoah_assert_in_heap_or_null(nullptr, p);
return is_in_loc(cast_from_oop<void*>(p));
}
bool ShenandoahCollectionSet::is_in_loc(void* p) const {
assert(p == NULL || _heap->is_in(p), "Must be in the heap");
assert(p == nullptr || _heap->is_in(p), "Must be in the heap");
uintx index = ((uintx) p) >> _region_size_bytes_shift;
// no need to subtract the bottom of the heap from p,
// _biased_cset_map is biased
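
The biasing above is worth seeing in isolation. Below is a self-contained toy model of the same idea, assuming a one-byte-per-region map; the field names mirror the snippet above, but this is an illustrative sketch, not the VM's code. Pre-subtracting the biased base lets is_in_loc() index with a raw address, and the committed zero page below the heap view makes a null pointer resolve to "not in cset" with no explicit check.

#include <cstdint>
#include <cstddef>

struct BiasedCSetMap {
  char*  _cset_map;               // one entry per region; 1 = in collection set
  char*  _biased_cset_map;        // == _cset_map - (heap_base >> shift)
  size_t _region_size_bytes_shift;

  BiasedCSetMap(char* map, uintptr_t heap_base, size_t shift)
    : _cset_map(map),
      _biased_cset_map(map - (heap_base >> shift)),
      _region_size_bytes_shift(shift) {}

  bool is_in_loc(void* p) const {
    uintptr_t index = ((uintptr_t) p) >> _region_size_bytes_shift;
    // No null check needed: p == nullptr yields index 0, which lands in
    // the committed, always-zero low part of the map.
    return _biased_cset_map[index] == 1;
  }
};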

View File

@ -734,7 +734,7 @@ public:
}
};
// This task not only evacuates/updates marked weak roots, but also "NULL"
// This task not only evacuates/updates marked weak roots, but also "null"
// dead weak roots.
class ShenandoahConcurrentWeakRootsEvacUpdateTask : public WorkerTask {
private:
@ -782,7 +782,7 @@ public:
// cleanup the weak oops in CLD and determine nmethod's unloading state, so that we
// can cleanup immediate garbage sooner.
if (ShenandoahHeap::heap()->unload_classes()) {
// Applies ShenandoahIsCLDAlive closure to CLDs, native barrier will either NULL the
// Applies ShenandoahIsCLDAlive closure to CLDs, native barrier will either null the
// CLD's holder or evacuate it.
{
ShenandoahIsCLDAliveClosure is_cld_alive;
@ -953,7 +953,7 @@ void ShenandoahUpdateThreadClosure::do_thread(Thread* thread) {
if (thread->is_Java_thread()) {
JavaThread* jt = JavaThread::cast(thread);
ResourceMark rm;
jt->oops_do(&_cl, NULL);
jt->oops_do(&_cl, nullptr);
}
}

View File

@ -60,7 +60,7 @@ public:
ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
ShenandoahReferenceProcessor* rp = heap->ref_processor();
assert(rp != NULL, "need reference processor");
assert(rp != nullptr, "need reference processor");
StringDedup::Requests requests;
_cm->mark_loop(worker_id, _terminator, rp,
true /*cancellable*/,
@ -84,9 +84,9 @@ public:
// Transfer any partial buffer to the qset for completed buffer processing.
_satb_qset.flush_queue(ShenandoahThreadLocalData::satb_mark_queue(thread));
if (thread->is_Java_thread()) {
if (_cl != NULL) {
if (_cl != nullptr) {
ResourceMark rm;
thread->oops_do(_cl, NULL);
thread->oops_do(_cl, nullptr);
}
}
}
@ -121,7 +121,7 @@ public:
ShenandoahMarkRefsClosure mark_cl(q, rp);
ShenandoahSATBAndRemarkThreadsClosure tc(satb_mq_set,
ShenandoahIUBarrier ? &mark_cl : NULL);
ShenandoahIUBarrier ? &mark_cl : nullptr);
Threads::possibly_parallel_threads_do(true /* is_par */, &tc);
}
_cm->mark_loop(worker_id, _terminator, rp,

View File

@ -150,7 +150,7 @@ void ShenandoahDegenGC::op_degenerated() {
heap->collection_set()->clear_current_index();
ShenandoahHeapRegion* r;
while ((r = heap->collection_set()->next()) != NULL) {
while ((r = heap->collection_set()->next()) != nullptr) {
if (r->is_pinned()) {
heap->cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
op_degenerated_fail();

View File

@ -32,19 +32,19 @@
#include "runtime/javaThread.hpp"
inline oop ShenandoahForwarding::get_forwardee_raw(oop obj) {
shenandoah_assert_in_heap(NULL, obj);
shenandoah_assert_in_heap(nullptr, obj);
return get_forwardee_raw_unchecked(obj);
}
inline oop ShenandoahForwarding::get_forwardee_raw_unchecked(oop obj) {
// JVMTI and JFR code use mark words for marking objects for their needs.
// On this path, we can encounter the "marked" object, but with NULL
// On this path, we can encounter the "marked" object, but with null
// fwdptr. That object is still not forwarded, and we need to return
// the object itself.
markWord mark = obj->mark();
if (mark.is_marked()) {
HeapWord* fwdptr = (HeapWord*) mark.clear_lock_bits().to_pointer();
if (fwdptr != NULL) {
if (fwdptr != nullptr) {
return cast_to_oop(fwdptr);
}
}
@ -52,14 +52,14 @@ inline oop ShenandoahForwarding::get_forwardee_raw_unchecked(oop obj) {
}
inline oop ShenandoahForwarding::get_forwardee_mutator(oop obj) {
// Same as above, but mutator thread cannot ever see NULL forwardee.
shenandoah_assert_correct(NULL, obj);
// Same as above, but mutator thread cannot ever see null forwardee.
shenandoah_assert_correct(nullptr, obj);
assert(Thread::current()->is_Java_thread(), "Must be a mutator thread");
markWord mark = obj->mark();
if (mark.is_marked()) {
HeapWord* fwdptr = (HeapWord*) mark.clear_lock_bits().to_pointer();
assert(fwdptr != NULL, "Forwarding pointer is never null here");
assert(fwdptr != nullptr, "Forwarding pointer is never null here");
return cast_to_oop(fwdptr);
} else {
return obj;
@ -67,7 +67,7 @@ inline oop ShenandoahForwarding::get_forwardee_mutator(oop obj) {
}
inline oop ShenandoahForwarding::get_forwardee(oop obj) {
shenandoah_assert_correct(NULL, obj);
shenandoah_assert_correct(nullptr, obj);
return get_forwardee_raw_unchecked(obj);
}

View File

@ -82,7 +82,7 @@ HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool&
for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
if (is_mutator_free(idx)) {
HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
if (result != NULL) {
if (result != nullptr) {
return result;
}
}
@ -100,7 +100,7 @@ HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool&
size_t idx = c - 1;
if (is_collector_free(idx)) {
HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
if (result != NULL) {
if (result != nullptr) {
return result;
}
}
@ -108,7 +108,7 @@ HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool&
// No dice. Can we borrow space from mutator view?
if (!ShenandoahEvacReserveOverflow) {
return NULL;
return nullptr;
}
// Try to steal the empty region from the mutator view
@ -119,7 +119,7 @@ HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool&
if (can_allocate_from(r)) {
flip_to_gc(r);
HeapWord *result = try_allocate_in(r, req, in_new_region);
if (result != NULL) {
if (result != nullptr) {
return result;
}
}
@ -136,7 +136,7 @@ HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool&
ShouldNotReachHere();
}
return NULL;
return nullptr;
}
HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
@ -144,14 +144,14 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah
if (_heap->is_concurrent_weak_root_in_progress() &&
r->is_trash()) {
return NULL;
return nullptr;
}
try_recycle_trashed(r);
in_new_region = r->is_empty();
HeapWord* result = NULL;
HeapWord* result = nullptr;
size_t size = req.size();
if (ShenandoahElasticTLAB && req.is_lab_alloc()) {
@ -161,13 +161,13 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah
}
if (size >= req.min_size()) {
result = r->allocate(size, req.type());
assert (result != NULL, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, size);
assert (result != nullptr, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, size);
}
} else {
result = r->allocate(size, req.type());
}
if (result != NULL) {
if (result != nullptr) {
// Allocation successful, bump stats:
if (req.is_mutator_alloc()) {
increase_used(size * HeapWordSize);
@ -181,7 +181,7 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah
}
}
if (result == NULL || has_no_alloc_capacity(r)) {
if (result == nullptr || has_no_alloc_capacity(r)) {
// Region cannot afford this or future allocations. Retire it.
//
// While this seems a bit harsh, especially in the case when this large allocation does not
@ -250,7 +250,7 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
// No regions left to satisfy allocation, bye.
if (num > mutator_count()) {
return NULL;
return nullptr;
}
// Find the continuous interval of $num regions, starting from $beg and ending in $end,
@ -262,7 +262,7 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
while (true) {
if (end >= _max) {
// Hit the end, goodbye
return NULL;
return nullptr;
}
// If regions are not adjacent, then current [beg; end] is useless, and we may fast-forward.
@ -549,10 +549,10 @@ HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_
in_new_region = false;
assert(false, "Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT,
req.size(), ShenandoahHeapRegion::humongous_threshold_words());
return NULL;
return nullptr;
default:
ShouldNotReachHere();
return NULL;
return nullptr;
}
} else {
return allocate_single(req, in_new_region);
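
To make the search order in allocate_single() above concrete, here is a toy model in plain C++ (an illustration, not VM code): mutator allocations scan free regions left to right, collector allocations scan right to left, and the final branch stands in for the flip_to_gc() steal guarded by ShenandoahEvacReserveOverflow.

#include <cstddef>
#include <vector>

struct ToyFreeSet {
  std::vector<bool> mutator_free;    // region index -> usable by mutators
  std::vector<bool> collector_free;  // region index -> reserved for GC

  long alloc_mutator() {
    for (size_t i = 0; i < mutator_free.size(); i++) {
      if (mutator_free[i]) { mutator_free[i] = false; return (long) i; }
    }
    return -1;  // analogue of returning nullptr
  }

  long alloc_collector(bool allow_steal) {
    for (size_t i = collector_free.size(); i-- > 0; ) {
      if (collector_free[i]) { collector_free[i] = false; return (long) i; }
    }
    if (!allow_steal) return -1;  // mirrors !ShenandoahEvacReserveOverflow
    for (size_t i = mutator_free.size(); i-- > 0; ) {  // the "flip_to_gc" steal
      if (mutator_free[i]) { mutator_free[i] = false; return (long) i; }
    }
    return -1;
  }
};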

View File

@ -314,7 +314,7 @@ public:
_empty_regions(empty_regions),
_empty_regions_pos(0),
_to_region(to_region),
_from_region(NULL),
_from_region(nullptr),
_compact_point(to_region->bottom()) {}
void set_from_region(ShenandoahHeapRegion* from_region) {
@ -322,7 +322,7 @@ public:
}
void finish_region() {
assert(_to_region != NULL, "should not happen");
assert(_to_region != nullptr, "should not happen");
_to_region->set_new_top(_compact_point);
}
@ -335,7 +335,7 @@ public:
}
void do_object(oop p) {
assert(_from_region != NULL, "must set before work");
assert(_from_region != nullptr, "must set before work");
assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");
@ -354,14 +354,14 @@ public:
}
assert(new_to_region != _to_region, "must not reuse same to-region");
assert(new_to_region != NULL, "must not be NULL");
assert(new_to_region != nullptr, "must not be null");
_to_region = new_to_region;
_compact_point = _to_region->bottom();
}
// Object fits into current region, record new location:
assert(_compact_point + obj_size <= _to_region->end(), "must fit");
shenandoah_assert_not_forwarded(NULL, p);
shenandoah_assert_not_forwarded(nullptr, p);
_preserved_marks->push_if_necessary(p, p->mark());
p->forward_to(cast_to_oop(_compact_point));
_compact_point += obj_size;
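
The forwarding bookkeeping above is the heart of sliding compaction: objects are visited in address order and each is forwarded to the current compact point, which then advances by the object's size. A toy model (illustration only; names and types are made up):

#include <cstddef>
#include <vector>

struct ToyCompactor {
  size_t compact_point = 0;                  // next free word in the to-space
  std::vector<size_t> new_location;          // recorded forwarding, per object

  void do_object(size_t obj_size) {
    new_location.push_back(compact_point);   // p->forward_to(compact_point)
    compact_point += obj_size;               // _compact_point += obj_size
  }
};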
@ -399,7 +399,7 @@ public:
ShenandoahHeapRegionSetIterator it(slice);
ShenandoahHeapRegion* from_region = it.next();
// No work?
if (from_region == NULL) {
if (from_region == nullptr) {
return;
}
@ -411,7 +411,7 @@ public:
ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);
while (from_region != NULL) {
while (from_region != nullptr) {
assert(is_candidate_region(from_region), "Sanity");
cl.set_from_region(from_region);
@ -665,7 +665,7 @@ void ShenandoahFullGC::distribute_slices(ShenandoahHeapRegionSet** worker_slices
for (size_t wid = 0; wid < n_workers; wid++) {
ShenandoahHeapRegionSetIterator it(worker_slices[wid]);
ShenandoahHeapRegion* r = it.next();
while (r != NULL) {
while (r != nullptr) {
size_t idx = r->index();
assert(ShenandoahPrepareForCompactionTask::is_candidate_region(r), "Sanity: " SIZE_FORMAT, idx);
assert(!map.at(idx), "No region distributed twice: " SIZE_FORMAT, idx);
@ -779,7 +779,7 @@ public:
ShenandoahParallelWorkerSession worker_session(worker_id);
ShenandoahAdjustPointersObjectClosure obj_cl;
ShenandoahHeapRegion* r = _regions.next();
while (r != NULL) {
while (r != nullptr) {
if (!r->is_humongous_continuation() && r->has_live()) {
_heap->marked_object_iterate(r, &obj_cl);
}
@ -872,7 +872,7 @@ public:
ShenandoahCompactObjectsClosure cl(worker_id);
ShenandoahHeapRegion* r = slice.next();
while (r != NULL) {
while (r != nullptr) {
assert(!r->is_humongous(), "must not get humongous regions here");
if (r->has_live()) {
_heap->marked_object_iterate(r, &cl);
@ -1017,7 +1017,7 @@ public:
ShenandoahHeapRegion* region = _regions.next();
ShenandoahHeap* heap = ShenandoahHeap::heap();
ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
while (region != NULL) {
while (region != nullptr) {
if (heap->is_bitmap_slice_committed(region) && !region->is_pinned() && region->has_live()) {
ctx->clear_bitmap(region);
}

View File

@ -102,7 +102,7 @@ public:
virtual void work(uint worker_id) {
ShenandoahHeapRegion* r = _regions.next();
while (r != NULL) {
while (r != nullptr) {
if (r->is_committed()) {
os::pretouch_memory(r->bottom(), r->end(), _page_size);
}
@ -126,7 +126,7 @@ public:
virtual void work(uint worker_id) {
ShenandoahHeapRegion* r = _regions.next();
while (r != NULL) {
while (r != nullptr) {
size_t start = r->index() * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
size_t end = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " < " SIZE_FORMAT, end, _bitmap_size);
@ -311,7 +311,7 @@ jint ShenandoahHeap::initialize() {
}
}
if (_collection_set == NULL) {
if (_collection_set == nullptr) {
ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size());
_collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
}
@ -397,7 +397,7 @@ jint ShenandoahHeap::initialize() {
_pacer = new ShenandoahPacer(this);
_pacer->setup_for_idle();
} else {
_pacer = NULL;
_pacer = nullptr;
}
_control_thread = new ShenandoahControlThread();
@ -408,7 +408,7 @@ jint ShenandoahHeap::initialize() {
}
void ShenandoahHeap::initialize_mode() {
if (ShenandoahGCMode != NULL) {
if (ShenandoahGCMode != nullptr) {
if (strcmp(ShenandoahGCMode, "satb") == 0) {
_gc_mode = new ShenandoahSATBMode();
} else if (strcmp(ShenandoahGCMode, "iu") == 0) {
@ -435,7 +435,7 @@ void ShenandoahHeap::initialize_mode() {
}
void ShenandoahHeap::initialize_heuristics() {
assert(_gc_mode != NULL, "Must be initialized");
assert(_gc_mode != nullptr, "Must be initialized");
_heuristics = _gc_mode->initialize_heuristics();
if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
@ -462,36 +462,36 @@ ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
_committed(0),
_bytes_allocated_since_gc_start(0),
_max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
_workers(NULL),
_safepoint_workers(NULL),
_workers(nullptr),
_safepoint_workers(nullptr),
_heap_region_special(false),
_num_regions(0),
_regions(NULL),
_regions(nullptr),
_update_refs_iterator(this),
_control_thread(NULL),
_control_thread(nullptr),
_shenandoah_policy(policy),
_gc_mode(NULL),
_heuristics(NULL),
_free_set(NULL),
_pacer(NULL),
_verifier(NULL),
_phase_timings(NULL),
_monitoring_support(NULL),
_memory_pool(NULL),
_gc_mode(nullptr),
_heuristics(nullptr),
_free_set(nullptr),
_pacer(nullptr),
_verifier(nullptr),
_phase_timings(nullptr),
_monitoring_support(nullptr),
_memory_pool(nullptr),
_stw_memory_manager("Shenandoah Pauses", "end of GC pause"),
_cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"),
_gc_timer(new ConcurrentGCTimer()),
_soft_ref_policy(),
_log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
_ref_processor(new ShenandoahReferenceProcessor(MAX2(_max_workers, 1U))),
_marking_context(NULL),
_marking_context(nullptr),
_bitmap_size(0),
_bitmap_regions_per_slice(0),
_bitmap_bytes_per_slice(0),
_bitmap_region_special(false),
_aux_bitmap_region_special(false),
_liveness_cache(NULL),
_collection_set(NULL)
_liveness_cache(nullptr),
_collection_set(nullptr)
{
// Initialize GC mode early, so we can adjust barrier support
initialize_mode();
@ -499,7 +499,7 @@ ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
_max_workers = MAX2(_max_workers, 1U);
_workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
if (_workers == NULL) {
if (_workers == nullptr) {
vm_exit_during_initialization("Failed necessary allocation.");
} else {
_workers->initialize_workers();
@ -528,7 +528,7 @@ public:
ShenandoahHeapRegion* region = _regions.next();
ShenandoahHeap* heap = ShenandoahHeap::heap();
ShenandoahMarkingContext* const ctx = heap->marking_context();
while (region != NULL) {
while (region != nullptr) {
if (heap->is_bitmap_slice_committed(region)) {
ctx->clear_bitmap(region);
}
@ -583,11 +583,11 @@ void ShenandoahHeap::print_on(outputStream* st) const {
ShenandoahCollectionSet* cset = collection_set();
st->print_cr("Collection set:");
if (cset != NULL) {
if (cset != nullptr) {
st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
st->print_cr(" - map (biased): " PTR_FORMAT, p2i(cset->biased_map_address()));
} else {
st->print_cr(" (NULL)");
st->print_cr(" (null)");
}
st->cr();
@ -601,7 +601,7 @@ void ShenandoahHeap::print_on(outputStream* st) const {
class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
public:
void do_thread(Thread* thread) {
assert(thread != NULL, "Sanity");
assert(thread != nullptr, "Sanity");
assert(thread->is_Worker_thread(), "Only worker thread expected");
ShenandoahThreadLocalData::initialize_gclab(thread);
}
@ -617,7 +617,7 @@ void ShenandoahHeap::post_initialize() {
// gclab cannot be initialized early during VM startup, as it cannot determine its max_size.
// Now, we will let WorkerThreads initialize gclab when a new worker is created.
_workers->set_initialize_gclab();
if (_safepoint_workers != NULL) {
if (_safepoint_workers != nullptr) {
_safepoint_workers->threads_do(&init_gclabs);
_safepoint_workers->set_initialize_gclab();
}
@ -760,7 +760,7 @@ HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size)
if (new_size < size) {
// New size still does not fit the object. Fall back to shared allocation.
// This avoids retiring perfectly good GCLABs, when we encounter a large object.
return NULL;
return nullptr;
}
// Retire current GCLAB, and allocate a new one.
@ -769,8 +769,8 @@ HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size)
size_t actual_size = 0;
HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
if (gclab_buf == NULL) {
return NULL;
if (gclab_buf == nullptr) {
return nullptr;
}
assert (size <= actual_size, "allocation should fit");
@ -797,7 +797,7 @@ HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
size_t* actual_size) {
ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
HeapWord* res = allocate_memory(req);
if (res != NULL) {
if (res != nullptr) {
*actual_size = req.actual_size();
} else {
*actual_size = 0;
@ -810,7 +810,7 @@ HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
size_t* actual_size) {
ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
HeapWord* res = allocate_memory(req);
if (res != NULL) {
if (res != nullptr) {
*actual_size = req.actual_size();
} else {
*actual_size = 0;
@ -821,7 +821,7 @@ HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
intptr_t pacer_epoch = 0;
bool in_new_region = false;
HeapWord* result = NULL;
HeapWord* result = nullptr;
if (req.is_mutator_alloc()) {
if (ShenandoahPacing) {
@ -845,13 +845,13 @@ HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
size_t tries = 0;
while (result == NULL && _progress_last_gc.is_set()) {
while (result == nullptr && _progress_last_gc.is_set()) {
tries++;
control_thread()->handle_alloc_failure(req);
result = allocate_memory_under_lock(req, in_new_region);
}
while (result == NULL && tries <= ShenandoahFullGCThreshold) {
while (result == nullptr && tries <= ShenandoahFullGCThreshold) {
tries++;
control_thread()->handle_alloc_failure(req);
result = allocate_memory_under_lock(req, in_new_region);
@ -868,7 +868,7 @@ HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
control_thread()->notify_heap_changed();
}
if (result != NULL) {
if (result != nullptr) {
size_t requested = req.size();
size_t actual = req.actual_size();
@ -917,7 +917,7 @@ MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* lo
// Expand and retry allocation
result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
if (result != NULL) {
if (result != nullptr) {
return result;
}
@ -926,18 +926,18 @@ MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* lo
// Retry allocation
result = loader_data->metaspace_non_null()->allocate(size, mdtype);
if (result != NULL) {
if (result != nullptr) {
return result;
}
// Expand and retry allocation
result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
if (result != NULL) {
if (result != nullptr) {
return result;
}
// Out of memory
return NULL;
return nullptr;
}
class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
@ -949,7 +949,7 @@ public:
_heap(heap), _thread(Thread::current()) {}
void do_object(oop p) {
shenandoah_assert_marked(NULL, p);
shenandoah_assert_marked(nullptr, p);
if (!p->is_forwarded()) {
_heap->evacuate_object(p, _thread);
}
@ -988,7 +988,7 @@ private:
void do_work() {
ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
ShenandoahHeapRegion* r;
while ((r = _cs->claim_next()) != NULL) {
while ((r = _cs->claim_next()) != nullptr) {
assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
_sh->marked_object_iterate(r, &cl);
@ -1014,7 +1014,7 @@ void ShenandoahHeap::trash_cset_regions() {
ShenandoahCollectionSet* set = collection_set();
ShenandoahHeapRegion* r;
set->clear_current_index();
while ((r = set->next()) != NULL) {
while ((r = set->next()) != nullptr) {
r->make_trash();
}
collection_set()->clear();
@ -1059,7 +1059,7 @@ public:
ShenandoahCheckCleanGCLABClosure() {}
void do_thread(Thread* thread) {
PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name());
assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
}
};
@ -1071,7 +1071,7 @@ public:
ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
void do_thread(Thread* thread) {
PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name());
assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
gclab->retire();
if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
ShenandoahThreadLocalData::set_gclab_size(thread, 0);
@ -1128,7 +1128,7 @@ void ShenandoahHeap::gclabs_retire(bool resize) {
}
workers()->threads_do(&cl);
if (safepoint_workers() != NULL) {
if (safepoint_workers() != nullptr) {
safepoint_workers()->threads_do(&cl);
}
}
@ -1159,10 +1159,10 @@ void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
HeapWord* ShenandoahHeap::block_start(const void* addr) const {
ShenandoahHeapRegion* r = heap_region_containing(addr);
if (r != NULL) {
if (r != nullptr) {
return r->block_start(addr);
}
return NULL;
return nullptr;
}
bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
@ -1183,7 +1183,7 @@ void ShenandoahHeap::prepare_for_verify() {
void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
tcl->do_thread(_control_thread);
workers()->threads_do(tcl);
if (_safepoint_workers != NULL) {
if (_safepoint_workers != nullptr) {
_safepoint_workers->threads_do(tcl);
}
if (ShenandoahStringDedup::is_enabled()) {
@ -1321,7 +1321,7 @@ void ShenandoahHeap::scan_roots_for_iteration(ShenandoahScanObjectStack* oop_sta
// This populates the work stack with initial objects
// It is important to relinquish the associated locks before diving
// into heap dumper
uint n_workers = safepoint_workers() != NULL ? safepoint_workers()->active_workers() : 1;
uint n_workers = safepoint_workers() != nullptr ? safepoint_workers()->active_workers() : 1;
ShenandoahHeapIterationRootScanner rp(n_workers);
rp.roots_do(oops);
}
@ -1400,16 +1400,16 @@ public:
// Reclaim bitmap
_heap->reclaim_aux_bitmap_for_iteration();
// Reclaim queue for workers
if (_task_queues != NULL) {
if (_task_queues != nullptr) {
for (uint i = 0; i < _num_workers; ++i) {
ShenandoahObjToScanQueue* q = _task_queues->queue(i);
if (q != NULL) {
if (q != nullptr) {
delete q;
_task_queues->register_queue(i, NULL);
_task_queues->register_queue(i, nullptr);
}
}
delete _task_queues;
_task_queues = NULL;
_task_queues = nullptr;
}
}
@ -1449,10 +1449,10 @@ private:
uint worker_id,
ShenandoahObjToScanQueueSet* queue_set) {
assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
assert(queue_set != NULL, "task queue must not be NULL");
assert(queue_set != nullptr, "task queue must not be null");
ShenandoahObjToScanQueue* q = queue_set->queue(worker_id);
assert(q != NULL, "object iterate queue must not be NULL");
assert(q != nullptr, "object iterate queue must not be null");
ShenandoahMarkTask t;
ShenandoahObjectIterateParScanClosure oops(_aux_bit_map, q);
@ -1475,7 +1475,7 @@ ParallelObjectIteratorImpl* ShenandoahHeap::parallel_object_iterator(uint worker
// Keep alive an object that was loaded with AS_NO_KEEPALIVE.
void ShenandoahHeap::keep_alive(oop obj) {
if (is_concurrent_mark_in_progress() && (obj != NULL)) {
if (is_concurrent_mark_in_progress() && (obj != nullptr)) {
ShenandoahBarrierSet::barrier_set()->enqueue(obj);
}
}
@ -1872,7 +1872,7 @@ bool ShenandoahHeap::unload_classes() const {
address ShenandoahHeap::in_cset_fast_test_addr() {
ShenandoahHeap* heap = ShenandoahHeap::heap();
assert(heap->collection_set() != NULL, "Sanity");
assert(heap->collection_set() != nullptr, "Sanity");
return (address) heap->collection_set()->biased_map_address();
}
@ -1923,7 +1923,7 @@ void ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
ShenandoahHeapRegion* r = heap_region_containing(o);
assert(r != NULL, "Sanity");
assert(r != nullptr, "Sanity");
assert(r->pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", r->index());
r->record_unpin();
}
@ -2005,7 +2005,7 @@ void ShenandoahHeap::assert_gc_workers(uint nworkers) {
ShenandoahVerifier* ShenandoahHeap::verifier() {
guarantee(ShenandoahVerify, "Should be enabled");
assert (_verifier != NULL, "sanity");
assert (_verifier != nullptr, "sanity");
return _verifier;
}
@ -2038,7 +2038,7 @@ private:
T cl;
ShenandoahHeapRegion* r = _regions->next();
ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
while (r != NULL) {
while (r != nullptr) {
HeapWord* update_watermark = r->get_update_watermark();
assert (update_watermark >= r->bottom(), "sanity");
if (r->is_active() && !r->is_cset()) {
@ -2283,7 +2283,7 @@ char ShenandoahHeap::gc_state() const {
ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
#ifdef ASSERT
assert(_liveness_cache != NULL, "sanity");
assert(_liveness_cache != nullptr, "sanity");
assert(worker_id < _max_workers, "sanity");
for (uint i = 0; i < num_regions(); i++) {
assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
@ -2294,7 +2294,7 @@ ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
assert(worker_id < _max_workers, "sanity");
assert(_liveness_cache != NULL, "sanity");
assert(_liveness_cache != nullptr, "sanity");
ShenandoahLiveData* ld = _liveness_cache[worker_id];
for (uint i = 0; i < num_regions(); i++) {
ShenandoahLiveData live = ld[i];

View File

@ -93,7 +93,7 @@ public:
// Reset iterator to default state
void reset();
// Returns next region, or NULL if there are no more regions.
// Returns next region, or null if there are no more regions.
// This is multi-thread-safe.
inline ShenandoahHeapRegion* next();

View File

@ -57,7 +57,7 @@ inline ShenandoahHeap* ShenandoahHeap::heap() {
inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
size_t new_index = Atomic::add(&_index, (size_t) 1, memory_order_relaxed);
// get_region() provides the bounds-check and returns NULL on OOB.
// get_region() provides the bounds-check and returns null on OOB.
return _heap->get_region(new_index - 1);
}
@ -219,8 +219,8 @@ inline bool ShenandoahHeap::atomic_update_oop_check(oop update, narrowOop* addr,
return CompressedOops::decode(Atomic::cmpxchg(addr, c, u, memory_order_release)) == compare;
}
// The memory ordering discussion above does not apply for methods that store NULLs:
// then, there are no transitive reads in the mutator (as we see NULLs), and we can do
// The memory ordering discussion above does not apply for methods that store nulls:
// then, there are no transitive reads in the mutator (as we see nulls), and we can do
// relaxed memory ordering there.
inline void ShenandoahHeap::atomic_clear_oop(oop* addr, oop compare) {
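
The hunk above shows only the signature of atomic_clear_oop(). A plausible body, following the null-store reasoning in the comment (an assumed sketch, not the patch's code):

inline void ShenandoahHeap::atomic_clear_oop(oop* addr, oop compare) {
  // Relaxed ordering suffices: a reader that observes the null gains no
  // reference to chase, so no release/acquire pairing is needed.
  Atomic::cmpxchg(addr, compare, oop(nullptr), memory_order_relaxed);
}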
@ -274,14 +274,14 @@ inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size
assert(UseTLAB, "TLABs should be enabled");
PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
if (gclab == NULL) {
if (gclab == nullptr) {
assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
"Performance: thread should have GCLAB: %s", thread->name());
// No GCLABs in this thread, fallback to shared allocation
return NULL;
return nullptr;
}
HeapWord* obj = gclab->allocate(size);
if (obj != NULL) {
if (obj != nullptr) {
return obj;
}
// Otherwise...
@ -302,18 +302,18 @@ inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");
bool alloc_from_gclab = true;
HeapWord* copy = NULL;
HeapWord* copy = nullptr;
#ifdef ASSERT
if (ShenandoahOOMDuringEvacALot &&
(os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
copy = NULL;
copy = nullptr;
} else {
#endif
if (UseTLAB) {
copy = allocate_from_gclab(thread, size);
}
if (copy == NULL) {
if (copy == nullptr) {
ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size);
copy = allocate_memory(req);
alloc_from_gclab = false;
@ -322,7 +322,7 @@ inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
}
#endif
if (copy == NULL) {
if (copy == nullptr) {
control_thread()->handle_alloc_failure_evac(size);
_oom_evac_handler.handle_out_of_memory_during_evacuation();
@ -340,7 +340,7 @@ inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
if (result == copy_val) {
// Successfully evacuated. Our copy is now the public one!
shenandoah_assert_correct(NULL, copy_val);
shenandoah_assert_correct(nullptr, copy_val);
return copy_val;
} else {
// Failed to evacuate. We need to deal with the object that is left behind. Since this
@ -358,9 +358,9 @@ inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
} else {
fill_with_object(copy, size);
shenandoah_assert_correct(NULL, copy_val);
shenandoah_assert_correct(nullptr, copy_val);
}
shenandoah_assert_correct(NULL, result);
shenandoah_assert_correct(nullptr, result);
return result;
}
}
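
The copy-then-CAS race above can be illustrated with a minimal standalone sketch (field shapes are assumptions, not the real object layout): every racing thread makes a private copy, only the first compare-exchange publishes a forwardee, and losers must retract their speculative copy and adopt the winner's.

#include <atomic>
#include <cstring>

struct Obj {
  std::atomic<Obj*> fwd{nullptr}; // forwarding slot, assumed to be one word
  char payload[24];
};

Obj* evacuate(Obj* from, Obj* scratch /* thread-private allocation */) {
  std::memcpy(scratch->payload, from->payload, sizeof from->payload);
  Obj* expected = nullptr;
  if (from->fwd.compare_exchange_strong(expected, scratch,
                                        std::memory_order_release,
                                        std::memory_order_acquire)) {
    return scratch;   // won the race: our copy is now the public one
  }
  // Lost the race: 'expected' holds the winner's copy. A real caller would now
  // undo or fill the scratch allocation, as undo_allocation()/fill_with_object() do.
  return expected;
}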
@ -371,12 +371,12 @@ inline bool ShenandoahHeap::requires_marking(const void* entry) const {
}
inline bool ShenandoahHeap::in_collection_set(oop p) const {
assert(collection_set() != NULL, "Sanity");
assert(collection_set() != nullptr, "Sanity");
return collection_set()->is_in(p);
}
inline bool ShenandoahHeap::in_collection_set_loc(void* p) const {
assert(collection_set() != NULL, "Sanity");
assert(collection_set() != nullptr, "Sanity");
return collection_set()->is_in_loc(p);
}
@ -565,7 +565,7 @@ inline ShenandoahHeapRegion* const ShenandoahHeap::get_region(size_t region_idx)
if (region_idx < _num_regions) {
return _regions[region_idx];
} else {
return NULL;
return nullptr;
}
}

View File

@ -60,7 +60,7 @@ ShenandoahHeapRegion::ShenandoahHeapRegion(HeapWord* start, size_t index, bool c
_index(index),
_bottom(start),
_end(start + RegionSizeWords),
_new_top(NULL),
_new_top(nullptr),
_empty_time(os::elapsedTime()),
_state(committed ? _empty_committed : _empty_uncommitted),
_top(start),
@ -455,7 +455,7 @@ HeapWord* ShenandoahHeapRegion::block_start(const void* p) const {
last = cur;
cur += cast_to_oop(cur)->size();
}
shenandoah_assert_correct(NULL, cast_to_oop(last));
shenandoah_assert_correct(nullptr, cast_to_oop(last));
return last;
}
}

View File

@ -334,7 +334,7 @@ public:
return _index;
}
// Allocation (return NULL if full)
// Allocation (return null if full)
inline HeapWord* allocate(size_t word_size, ShenandoahAllocRequest::Type type);
inline void clear_live_data();

View File

@ -48,7 +48,7 @@ HeapWord* ShenandoahHeapRegion::allocate(size_t size, ShenandoahAllocRequest::Ty
return obj;
} else {
return NULL;
return nullptr;
}
}

View File

@ -70,7 +70,7 @@ ShenandoahHeapRegionCounters::ShenandoahHeapRegionCounters() :
}
ShenandoahHeapRegionCounters::~ShenandoahHeapRegionCounters() {
if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
if (_name_space != nullptr) FREE_C_HEAP_ARRAY(char, _name_space);
}
void ShenandoahHeapRegionCounters::update() {

View File

@ -77,7 +77,7 @@ ShenandoahHeapRegion* ShenandoahHeapRegionSetIterator::next() {
return _heap->get_region(index);
}
}
return NULL;
return nullptr;
}
void ShenandoahHeapRegionSet::print_on(outputStream* out) const {

View File

@ -44,7 +44,7 @@ void ShenandoahSimpleLock::unlock() {
}
ShenandoahReentrantLock::ShenandoahReentrantLock() :
ShenandoahSimpleLock(), _owner(NULL), _count(0) {
ShenandoahSimpleLock(), _owner(nullptr), _count(0) {
assert(os::mutex_init_done(), "Too early!");
}
@ -71,7 +71,7 @@ void ShenandoahReentrantLock::unlock() {
_count--;
if (_count == 0) {
Atomic::store(&_owner, (Thread*)NULL);
Atomic::store(&_owner, (Thread*)nullptr);
ShenandoahSimpleLock::unlock();
}
}
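
A standalone sketch of the reentrancy scheme in this hunk (assumed shapes, not the HotSpot implementation): the owner is published with an atomic store, a nesting count lets the owning thread lock() repeatedly, and only the final unlock() clears the owner and releases the underlying lock.

#include <atomic>
#include <mutex>
#include <thread>

class ReentrantLock {
  std::mutex _mutex;
  std::atomic<std::thread::id> _owner{};
  int _count = 0; // only touched while owned, so a plain int suffices
public:
  void lock() {
    if (_owner.load(std::memory_order_relaxed) != std::this_thread::get_id()) {
      _mutex.lock();
      _owner.store(std::this_thread::get_id(), std::memory_order_relaxed);
    }
    _count++;   // outer lock or nested re-entry
  }
  void unlock() {
    if (--_count == 0) {
      _owner.store(std::thread::id{}, std::memory_order_relaxed);
      _mutex.unlock();
    }
  }
};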

View File

@ -41,7 +41,7 @@ private:
shenandoah_padding(2);
public:
ShenandoahLock() : _state(unlocked), _owner(NULL) {};
ShenandoahLock() : _state(unlocked), _owner(nullptr) {};
void lock() {
#ifdef ASSERT
@ -50,7 +50,7 @@ public:
Thread::SpinAcquire(&_state, "Shenandoah Heap Lock");
#ifdef ASSERT
assert(_state == locked, "must be locked");
assert(_owner == NULL, "must not be owned");
assert(_owner == nullptr, "must not be owned");
_owner = Thread::current();
#endif
}
@ -58,7 +58,7 @@ public:
void unlock() {
#ifdef ASSERT
assert (_owner == Thread::current(), "sanity");
_owner = NULL;
_owner = nullptr;
#endif
Thread::SpinRelease(&_state);
}
@ -78,13 +78,13 @@ private:
ShenandoahLock* const _lock;
public:
ShenandoahLocker(ShenandoahLock* lock) : _lock(lock) {
if (_lock != NULL) {
if (_lock != nullptr) {
_lock->lock();
}
}
~ShenandoahLocker() {
if (_lock != NULL) {
if (_lock != nullptr) {
_lock->unlock();
}
}
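
The guard above tolerates a null lock, which is the point of the pattern: callers can make locking conditional without branching at every use site. A minimal sketch of that idea (illustrative names):

template <typename LockT>
class MaybeLocker {
  LockT* const _lock;
public:
  explicit MaybeLocker(LockT* lock) : _lock(lock) {
    if (_lock != nullptr) _lock->lock();   // nullptr turns the guard into a no-op
  }
  ~MaybeLocker() {
    if (_lock != nullptr) _lock->unlock();
  }
  MaybeLocker(const MaybeLocker&) = delete;            // guards are not copyable
  MaybeLocker& operator=(const MaybeLocker&) = delete;
};

// Usage: { MaybeLocker<ReentrantLock> g(maybe_null_lock); /* critical section */ }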
@ -123,13 +123,13 @@ private:
public:
ShenandoahReentrantLocker(ShenandoahReentrantLock* lock) :
_lock(lock) {
if (_lock != NULL) {
if (_lock != nullptr) {
_lock->lock();
}
}
~ShenandoahReentrantLocker() {
if (_lock != NULL) {
if (_lock != nullptr) {
assert(_lock->owned_by_self(), "Must be owner");
_lock->unlock();
}

View File

@ -139,7 +139,7 @@ void ShenandoahMark::mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint w
"Need to reserve proper number of queues: reserved: %u, active: %u", queues->get_reserved(), heap->workers()->active_workers());
q = queues->claim_next();
while (q != NULL) {
while (q != nullptr) {
if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
return;
}

View File

@ -60,9 +60,9 @@ template <class T, StringDedupMode STRING_DEDUP>
void ShenandoahMark::do_task(ShenandoahObjToScanQueue* q, T* cl, ShenandoahLiveData* live_data, StringDedup::Requests* const req, ShenandoahMarkTask* task) {
oop obj = task->obj();
shenandoah_assert_not_forwarded(NULL, obj);
shenandoah_assert_marked(NULL, obj);
shenandoah_assert_not_in_cset_except(NULL, obj, ShenandoahHeap::heap()->cancelled_gc());
shenandoah_assert_not_forwarded(nullptr, obj);
shenandoah_assert_marked(nullptr, obj);
shenandoah_assert_not_in_cset_except(nullptr, obj, ShenandoahHeap::heap()->cancelled_gc());
// Are we in weak subgraph scan?
bool weak = task->is_weak();
@ -121,7 +121,7 @@ inline void ShenandoahMark::count_liveness(ShenandoahLiveData* live_data, oop ob
live_data[region_idx] = (ShenandoahLiveData) new_val;
}
} else {
shenandoah_assert_in_correct_region(NULL, obj);
shenandoah_assert_in_correct_region(nullptr, obj);
size_t num_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
for (size_t i = region_idx; i < region_idx + num_regions; i++) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, Red Hat, Inc. and/or its affiliates.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -45,7 +45,7 @@ size_t ShenandoahMarkBitMap::mark_distance() {
HeapWord* ShenandoahMarkBitMap::get_next_marked_addr(const HeapWord* addr,
const HeapWord* limit) const {
assert(limit != NULL, "limit must not be NULL");
assert(limit != nullptr, "limit must not be null");
// Round addr up to a possible object boundary to be safe.
size_t const addr_offset = address_to_index(align_up(addr, HeapWordSize << LogMinObjAlignment));
size_t const limit_offset = address_to_index(limit);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, Red Hat, Inc. and/or its affiliates.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -160,8 +160,8 @@ public:
inline bool is_marked_weak(HeapWord* addr) const;
// Return the address corresponding to the next marked bit at or after
// "addr", and before "limit", if "limit" is non-NULL. If there is no
// such bit, returns "limit" if that is non-NULL, or else "endWord()".
// "addr", and before "limit", if "limit" is non-null. If there is no
// such bit, returns "limit" if that is non-null, or else "endWord()".
HeapWord* get_next_marked_addr(const HeapWord* addr,
const HeapWord* limit) const;
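
The contract documented above can be shown with a standalone sketch (one bit per word and a mandatory limit, a simplification of the real bitmap): scan for the next set bit in [addr, limit) and fall back to limit when nothing is marked.

#include <cstddef>
#include <vector>

size_t next_marked(const std::vector<bool>& bits, size_t addr, size_t limit) {
  for (size_t i = addr; i < limit; i++) {
    if (bits[i]) return i;   // next marked index at or after addr
  }
  return limit;              // no such bit: return the limit
}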

View File

@ -57,8 +57,8 @@ public:
};
ShenandoahMonitoringSupport::ShenandoahMonitoringSupport(ShenandoahHeap* heap) :
_partial_counters(NULL),
_full_counters(NULL)
_partial_counters(nullptr),
_full_counters(nullptr)
{
// Collection counters do not fit Shenandoah very well.
// We record partial cycles as "young", and full cycles (including full STW GC) as "old".

View File

@ -32,7 +32,7 @@
#include "runtime/continuation.hpp"
ShenandoahNMethod::ShenandoahNMethod(nmethod* nm, GrowableArray<oop*>& oops, bool non_immediate_oops) :
_nm(nm), _oops(NULL), _oops_count(0), _unregistered(false) {
_nm(nm), _oops(nullptr), _oops_count(0), _unregistered(false) {
if (!oops.is_empty()) {
_oops_count = oops.length();
@ -47,7 +47,7 @@ ShenandoahNMethod::ShenandoahNMethod(nmethod* nm, GrowableArray<oop*>& oops, boo
}
ShenandoahNMethod::~ShenandoahNMethod() {
if (_oops != NULL) {
if (_oops != nullptr) {
FREE_C_HEAP_ARRAY(oop*, _oops);
}
}
@ -92,9 +92,9 @@ void ShenandoahNMethod::update() {
detect_reloc_oops(nm(), oops, non_immediate_oops);
if (oops.length() != _oops_count) {
if (_oops != NULL) {
if (_oops != nullptr) {
FREE_C_HEAP_ARRAY(oop*, _oops);
_oops = NULL;
_oops = nullptr;
}
_oops_count = oops.length();
@ -129,14 +129,14 @@ void ShenandoahNMethod::detect_reloc_oops(nmethod* nm, GrowableArray<oop*>& oops
}
oop value = r->oop_value();
if (value != NULL) {
if (value != nullptr) {
oop* addr = r->oop_addr();
shenandoah_assert_correct(addr, value);
shenandoah_assert_not_in_cset_except(addr, value, ShenandoahHeap::heap()->cancelled_gc());
shenandoah_assert_not_forwarded(addr, value);
// Non-NULL immediate oop found. NULL oops can safely be
// Non-null immediate oop found. null oops can safely be
// ignored since the method will be re-registered if they
// are later patched to be non-NULL.
// are later patched to be non-null.
oops.push(addr);
}
}
@ -153,7 +153,7 @@ ShenandoahNMethod* ShenandoahNMethod::for_nmethod(nmethod* nm) {
void ShenandoahNMethod::heal_nmethod(nmethod* nm) {
ShenandoahNMethod* data = gc_data(nm);
assert(data != NULL, "Sanity");
assert(data != nullptr, "Sanity");
assert(data->lock()->owned_by_self(), "Must hold the lock");
ShenandoahHeap* const heap = ShenandoahHeap::heap();
@ -178,7 +178,7 @@ void ShenandoahNMethod::assert_correct() {
oop *loc = _oops[c];
assert(_nm->code_contains((address) loc) || _nm->oops_contains(loc), "nmethod should contain the oop*");
oop o = RawAccess<>::oop_load(loc);
shenandoah_assert_correct_except(loc, o, o == NULL || heap->is_full_gc_move_in_progress());
shenandoah_assert_correct_except(loc, o, o == nullptr || heap->is_full_gc_move_in_progress());
}
oop* const begin = _nm->oops_begin();
@ -186,7 +186,7 @@ void ShenandoahNMethod::assert_correct() {
for (oop* p = begin; p < end; p++) {
if (*p != Universe::non_oop_word()) {
oop o = RawAccess<>::oop_load(p);
shenandoah_assert_correct_except(p, o, o == NULL || heap->is_full_gc_move_in_progress());
shenandoah_assert_correct_except(p, o, o == nullptr || heap->is_full_gc_move_in_progress());
}
}
}
@ -263,7 +263,7 @@ ShenandoahNMethodTable::ShenandoahNMethodTable() :
}
ShenandoahNMethodTable::~ShenandoahNMethodTable() {
assert(_list != NULL, "Sanity");
assert(_list != nullptr, "Sanity");
_list->release();
}
@ -273,7 +273,7 @@ void ShenandoahNMethodTable::register_nmethod(nmethod* nm) {
ShenandoahNMethod* data = ShenandoahNMethod::gc_data(nm);
if (data != NULL) {
if (data != nullptr) {
assert(contain(nm), "Must have been registered");
assert(nm == data->nm(), "Must be same nmethod");
// Prevent updating a nmethod while concurrent iteration is in progress.
@ -284,7 +284,7 @@ void ShenandoahNMethodTable::register_nmethod(nmethod* nm) {
// For a new nmethod, we can safely append it to the list, because
// concurrent iteration will not touch it.
data = ShenandoahNMethod::for_nmethod(nm);
assert(data != NULL, "Sanity");
assert(data != nullptr, "Sanity");
ShenandoahNMethod::attach_gc_data(nm, data);
ShenandoahLocker locker(&_lock);
log_register_nmethod(nm);
@ -298,14 +298,14 @@ void ShenandoahNMethodTable::unregister_nmethod(nmethod* nm) {
assert_locked_or_safepoint(CodeCache_lock);
ShenandoahNMethod* data = ShenandoahNMethod::gc_data(nm);
assert(data != NULL, "Sanity");
assert(data != nullptr, "Sanity");
log_unregister_nmethod(nm);
ShenandoahLocker locker(&_lock);
assert(contain(nm), "Must have been registered");
int idx = index_of(nm);
assert(idx >= 0 && idx < _index, "Invalid index");
ShenandoahNMethod::attach_gc_data(nm, NULL);
ShenandoahNMethod::attach_gc_data(nm, nullptr);
remove(idx);
}
@ -376,7 +376,7 @@ ShenandoahNMethodTableSnapshot* ShenandoahNMethodTable::snapshot_for_iteration()
void ShenandoahNMethodTable::finish_iteration(ShenandoahNMethodTableSnapshot* snapshot) {
assert(CodeCache_lock->owned_by_self(), "Must have CodeCache_lock held");
assert(iteration_in_progress(), "Why we here?");
assert(snapshot != NULL, "No snapshot");
assert(snapshot != nullptr, "No snapshot");
_itr_cnt--;
delete snapshot;
@ -429,7 +429,7 @@ ShenandoahNMethodList::ShenandoahNMethodList(int size) :
}
ShenandoahNMethodList::~ShenandoahNMethodList() {
assert(_list != NULL, "Sanity");
assert(_list != nullptr, "Sanity");
assert(_ref_count == 0, "Must be");
FREE_C_HEAP_ARRAY(ShenandoahNMethod*, _list);
}
@ -478,7 +478,7 @@ void ShenandoahNMethodTableSnapshot::parallel_blobs_do(CodeBlobClosure *f) {
for (size_t idx = start; idx < end; idx++) {
ShenandoahNMethod* nmr = list[idx];
assert(nmr != NULL, "Sanity");
assert(nmr != nullptr, "Sanity");
if (nmr->is_unregistered()) {
continue;
}
@ -502,7 +502,7 @@ void ShenandoahNMethodTableSnapshot::concurrent_nmethods_do(NMethodClosure* cl)
for (size_t idx = start; idx < end; idx++) {
ShenandoahNMethod* data = list[idx];
assert(data != NULL, "Should not be NULL");
assert(data != nullptr, "Should not be null");
if (!data->is_unregistered()) {
cl->do_nmethod(data->nm());
}
@ -511,7 +511,7 @@ void ShenandoahNMethodTableSnapshot::concurrent_nmethods_do(NMethodClosure* cl)
}
ShenandoahConcurrentNMethodIterator::ShenandoahConcurrentNMethodIterator(ShenandoahNMethodTable* table) :
_table(table), _table_snapshot(NULL) {
_table(table), _table_snapshot(nullptr) {
}
void ShenandoahConcurrentNMethodIterator::nmethods_do_begin() {
@ -520,7 +520,7 @@ void ShenandoahConcurrentNMethodIterator::nmethods_do_begin() {
}
void ShenandoahConcurrentNMethodIterator::nmethods_do(NMethodClosure* cl) {
assert(_table_snapshot != NULL, "Must first call nmethod_do_begin()");
assert(_table_snapshot != nullptr, "Must first call nmethod_do_begin()");
_table_snapshot->concurrent_nmethods_do(cl);
}

View File

@ -80,9 +80,9 @@ void ShenandoahNMethod::heal_nmethod_metadata(ShenandoahNMethod* nmethod_data) {
void ShenandoahNMethod::disarm_nmethod(nmethod* nm) {
BarrierSetNMethod* const bs = BarrierSet::barrier_set()->barrier_set_nmethod();
assert(bs != NULL || !ShenandoahNMethodBarrier,
assert(bs != nullptr || !ShenandoahNMethodBarrier,
"Must have nmethod barrier for concurrent GC");
if (bs != NULL && bs->is_armed(nm)) {
if (bs != nullptr && bs->is_armed(nm)) {
bs->disarm(nm);
}
}

View File

@ -30,14 +30,14 @@
HdrSeq::HdrSeq() {
_hdr = NEW_C_HEAP_ARRAY(int*, MagBuckets, mtInternal);
for (int c = 0; c < MagBuckets; c++) {
_hdr[c] = NULL;
_hdr[c] = nullptr;
}
}
HdrSeq::~HdrSeq() {
for (int c = 0; c < MagBuckets; c++) {
int* sub = _hdr[c];
if (sub != NULL) {
if (sub != nullptr) {
FREE_C_HEAP_ARRAY(int, sub);
}
}
@ -93,7 +93,7 @@ void HdrSeq::add(double val) {
}
int* b = _hdr[bucket];
if (b == NULL) {
if (b == nullptr) {
b = NEW_C_HEAP_ARRAY(int, ValBuckets, mtInternal);
for (int c = 0; c < ValBuckets; c++) {
b[c] = 0;
@ -108,7 +108,7 @@ double HdrSeq::percentile(double level) const {
int target = MAX2(1, (int) (level * num() / 100));
int cnt = 0;
for (int mag = 0; mag < MagBuckets; mag++) {
if (_hdr[mag] != NULL) {
if (_hdr[mag] != nullptr) {
for (int val = 0; val < ValBuckets; val++) {
cnt += _hdr[mag][val];
if (cnt >= target) {
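
A standalone sketch of the two-level histogram idea behind HdrSeq (bucket sizes and binning here are illustrative, not the real MagBuckets/ValBuckets constants): values are binned by decimal magnitude, counter rows are allocated lazily on first use, and percentile() walks the counters in ascending order until the target rank is reached.

#include <algorithm>
#include <cmath>
#include <memory>

class HdrSketch {
  static const int Mags = 8, Vals = 10;  // illustrative sizes
  std::unique_ptr<int[]> _rows[Mags];    // lazily allocated, zero-initialized rows
  int _num = 0;
public:
  void add(double v) {
    int mag = std::clamp(v <= 0 ? 0 : (int)std::log10(v), 0, Mags - 1);
    int val = std::clamp((int)(v / std::pow(10.0, mag)), 0, Vals - 1);
    if (!_rows[mag]) _rows[mag] = std::make_unique<int[]>(Vals);
    _rows[mag][val]++;
    _num++;
  }
  double percentile(double level) const {
    int target = std::max(1, (int)(level * _num / 100.0));
    int cnt = 0;
    for (int mag = 0; mag < Mags; mag++) {
      if (!_rows[mag]) continue;         // row never touched: skip it
      for (int val = 0; val < Vals; val++) {
        cnt += _rows[mag][val];
        if (cnt >= target) return val * std::pow(10.0, mag); // bucket lower bound
      }
    }
    return 0.0;
  }
};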

View File

@ -56,7 +56,7 @@ ShenandoahPhaseTimings::ShenandoahPhaseTimings(uint max_workers) :
// Initialize everything to sane defaults
for (uint i = 0; i < _num_phases; i++) {
#define SHENANDOAH_WORKER_DATA_NULL(type, title) \
_worker_data[i] = NULL;
_worker_data[i] = nullptr;
SHENANDOAH_PAR_PHASE_DO(,, SHENANDOAH_WORKER_DATA_NULL)
#undef SHENANDOAH_WORKER_DATA_NULL
_cycle_data[i] = uninitialized();
@ -69,14 +69,14 @@ ShenandoahPhaseTimings::ShenandoahPhaseTimings(uint max_workers) :
if (is_worker_phase(Phase(i))) {
int c = 0;
#define SHENANDOAH_WORKER_DATA_INIT(type, title) \
if (c++ != 0) _worker_data[i + c] = new ShenandoahWorkerData(NULL, title, _max_workers);
if (c++ != 0) _worker_data[i + c] = new ShenandoahWorkerData(nullptr, title, _max_workers);
SHENANDOAH_PAR_PHASE_DO(,, SHENANDOAH_WORKER_DATA_INIT)
#undef SHENANDOAH_WORKER_DATA_INIT
}
}
_policy = ShenandoahHeap::heap()->shenandoah_policy();
assert(_policy != NULL, "Can not be NULL");
assert(_policy != nullptr, "Can not be null");
}
ShenandoahPhaseTimings::Phase ShenandoahPhaseTimings::worker_par_phase(Phase phase, ParPhase par_phase) {
@ -89,7 +89,7 @@ ShenandoahPhaseTimings::Phase ShenandoahPhaseTimings::worker_par_phase(Phase pha
ShenandoahWorkerData* ShenandoahPhaseTimings::worker_data(Phase phase, ParPhase par_phase) {
Phase p = worker_par_phase(phase, par_phase);
ShenandoahWorkerData* wd = _worker_data[p];
assert(wd != NULL, "Counter initialized: %s", phase_name(p));
assert(wd != nullptr, "Counter initialized: %s", phase_name(p));
return wd;
}
@ -219,7 +219,7 @@ void ShenandoahPhaseTimings::flush_cycle_to_global() {
_global_data[i].add(_cycle_data[i]);
_cycle_data[i] = uninitialized();
}
if (_worker_data[i] != NULL) {
if (_worker_data[i] != nullptr) {
_worker_data[i]->reset();
}
}
@ -243,7 +243,7 @@ void ShenandoahPhaseTimings::print_cycle_on(outputStream* out) const {
}
}
if (_worker_data[i] != NULL) {
if (_worker_data[i] != nullptr) {
out->print(", workers (us): ");
for (uint c = 0; c < _max_workers; c++) {
double tv = _worker_data[i]->get(c);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2021, Red Hat, Inc. and/or its affiliates.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -53,7 +53,7 @@ static const char* reference_type_name(ReferenceType type) {
default:
ShouldNotReachHere();
return NULL;
return nullptr;
}
}
@ -71,7 +71,7 @@ void set_oop_field<narrowOop>(narrowOop* field, oop value) {
}
static oop lrb(oop obj) {
if (obj != NULL && ShenandoahHeap::heap()->marking_context()->is_marked(obj)) {
if (obj != nullptr && ShenandoahHeap::heap()->marking_context()->is_marked(obj)) {
return ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
} else {
return obj;
@ -120,7 +120,7 @@ void reference_set_discovered<narrowOop>(oop reference, oop discovered) {
template<typename T>
static bool reference_cas_discovered(oop reference, oop discovered) {
T* addr = reinterpret_cast<T *>(java_lang_ref_Reference::discovered_addr_raw(reference));
return ShenandoahHeap::atomic_update_oop_check(discovered, addr, NULL);
return ShenandoahHeap::atomic_update_oop_check(discovered, addr, nullptr);
}
template <typename T>
@ -144,15 +144,15 @@ static void soft_reference_update_clock() {
}
ShenandoahRefProcThreadLocal::ShenandoahRefProcThreadLocal() :
_discovered_list(NULL),
_discovered_list(nullptr),
_encountered_count(),
_discovered_count(),
_enqueued_count() {
}
void ShenandoahRefProcThreadLocal::reset() {
_discovered_list = NULL;
_mark_closure = NULL;
_discovered_list = nullptr;
_mark_closure = nullptr;
for (uint i = 0; i < reference_type_count; i++) {
_encountered_count[i] = 0;
_discovered_count[i] = 0;
@ -186,9 +186,9 @@ void ShenandoahRefProcThreadLocal::set_discovered_list_head<oop>(oop head) {
}
ShenandoahReferenceProcessor::ShenandoahReferenceProcessor(uint max_workers) :
_soft_reference_policy(NULL),
_soft_reference_policy(nullptr),
_ref_proc_thread_locals(NEW_C_HEAP_ARRAY(ShenandoahRefProcThreadLocal, max_workers, mtGC)),
_pending_list(NULL),
_pending_list(nullptr),
_pending_list_tail(&_pending_list),
_iterate_discovered_list_id(0U),
_stats() {
@ -227,11 +227,11 @@ bool ShenandoahReferenceProcessor::is_inactive(oop reference, oop referent, Refe
if (type == REF_FINAL) {
// A FinalReference is inactive if its next field is non-null. An application can't
// call enqueue() or clear() on a FinalReference.
return reference_next<T>(reference) != NULL;
return reference_next<T>(reference) != nullptr;
} else {
// A non-FinalReference is inactive if the referent is null. The referent can only
// be null if the application called Reference.enqueue() or Reference.clear().
return referent == NULL;
return referent == nullptr;
}
}
@ -248,7 +248,7 @@ bool ShenandoahReferenceProcessor::is_softly_live(oop reference, ReferenceType t
// Ask SoftReference policy
const jlong clock = java_lang_ref_SoftReference::clock();
assert(clock != 0, "Clock not initialized");
assert(_soft_reference_policy != NULL, "Policy not initialized");
assert(_soft_reference_policy != nullptr, "Policy not initialized");
return !_soft_reference_policy->should_clear_reference(reference, clock);
}
@ -279,7 +279,7 @@ bool ShenandoahReferenceProcessor::should_discover(oop reference, ReferenceType
template <typename T>
bool ShenandoahReferenceProcessor::should_drop(oop reference, ReferenceType type) const {
const oop referent = reference_referent<T>(reference);
if (referent == NULL) {
if (referent == nullptr) {
// Reference has been cleared, by a call to Reference.enqueue()
// or Reference.clear() from the application, which means we
// should drop the reference.
@ -302,7 +302,7 @@ void ShenandoahReferenceProcessor::make_inactive(oop reference, ReferenceType ty
// to finalize(). A FinalReference is instead made inactive by self-looping the
// next field. An application can't call FinalReference.enqueue(), so there is
// no race to worry about when setting the next field.
assert(reference_next<T>(reference) == NULL, "Already inactive");
assert(reference_next<T>(reference) == nullptr, "Already inactive");
assert(ShenandoahHeap::heap()->marking_context()->is_marked(reference_referent<T>(reference)), "only make inactive final refs with alive referents");
reference_set_next(reference, reference);
} else {
@ -318,7 +318,7 @@ bool ShenandoahReferenceProcessor::discover(oop reference, ReferenceType type, u
return false;
}
if (reference_discovered<T>(reference) != NULL) {
if (reference_discovered<T>(reference) != nullptr) {
// Already discovered. This can happen if the reference is marked finalizable first, and then strong,
// in which case it will be seen 2x by marking.
log_trace(gc,ref)("Reference already discovered: " PTR_FORMAT, p2i(reference));
@ -340,9 +340,9 @@ bool ShenandoahReferenceProcessor::discover(oop reference, ReferenceType type, u
// Add reference to discovered list
ShenandoahRefProcThreadLocal& refproc_data = _ref_proc_thread_locals[worker_id];
oop discovered_head = refproc_data.discovered_list_head<T>();
if (discovered_head == NULL) {
if (discovered_head == nullptr) {
// Self-loop tail of list. We distinguish discovered from not-discovered references by looking at their
// discovered field: if it is NULL, then it is not-yet discovered, otherwise it is discovered
// discovered field: if it is null, then it is not-yet discovered, otherwise it is discovered
discovered_head = reference;
}
if (reference_cas_discovered<T>(reference, discovered_head)) {
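
A standalone sketch of the self-looped discovered list described above (field shapes are assumptions): a reference with a null discovered field is undiscovered, the tail points at itself so even the last element reads as discovered, and the CAS on the reference's own field arbitrates workers racing to discover it.

#include <atomic>

struct Ref {
  std::atomic<Ref*> discovered{nullptr};   // null means not yet discovered
};

struct WorkerList {
  Ref* head = nullptr;                     // thread-local, so a plain pointer is fine
};

bool discover(Ref* ref, WorkerList& list) {
  Ref* new_next = (list.head == nullptr) ? ref : list.head;  // empty list: self-loop the tail
  Ref* expected = nullptr;
  if (!ref->discovered.compare_exchange_strong(expected, new_next,
                                               std::memory_order_relaxed)) {
    return false;   // another worker discovered this reference first
  }
  list.head = ref;  // we won: prepend to our thread-local list
  return true;
}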
@ -377,13 +377,13 @@ oop ShenandoahReferenceProcessor::drop(oop reference, ReferenceType type) {
#ifdef ASSERT
oop referent = reference_referent<T>(reference);
assert(referent == NULL || ShenandoahHeap::heap()->marking_context()->is_marked(referent),
assert(referent == nullptr || ShenandoahHeap::heap()->marking_context()->is_marked(referent),
"only drop references with alive referents");
#endif
// Unlink and return next in list
oop next = reference_discovered<T>(reference);
reference_set_discovered<T>(reference, NULL);
reference_set_discovered<T>(reference, nullptr);
return next;
}
@ -414,7 +414,7 @@ void ShenandoahReferenceProcessor::process_references(ShenandoahRefProcThreadLoc
T* p = list;
while (true) {
const oop reference = lrb(CompressedOops::decode(*p));
if (reference == NULL) {
if (reference == nullptr) {
break;
}
log_trace(gc, ref)("Processing reference: " PTR_FORMAT, p2i(reference));
@ -428,8 +428,8 @@ void ShenandoahReferenceProcessor::process_references(ShenandoahRefProcThreadLoc
const oop discovered = lrb(reference_discovered<T>(reference));
if (reference == discovered) {
// Reset terminating self-loop to NULL
reference_set_discovered<T>(reference, oop(NULL));
// Reset terminating self-loop to null
reference_set_discovered<T>(reference, oop(nullptr));
break;
}
}
@ -440,13 +440,13 @@ void ShenandoahReferenceProcessor::process_references(ShenandoahRefProcThreadLoc
shenandoah_assert_not_in_cset_except(&head, head, ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahLoadRefBarrier);
oop prev = Atomic::xchg(&_pending_list, head);
RawAccess<>::oop_store(p, prev);
if (prev == NULL) {
if (prev == nullptr) {
// First to prepend to list, record tail
_pending_list_tail = reinterpret_cast<void*>(p);
}
// Clear discovered list
set_oop_field(list, oop(NULL));
set_oop_field(list, oop(nullptr));
}
}
@ -519,7 +519,7 @@ void ShenandoahReferenceProcessor::enqueue_references_locked() {
}
void ShenandoahReferenceProcessor::enqueue_references(bool concurrent) {
if (_pending_list == NULL) {
if (_pending_list == nullptr) {
// Nothing to enqueue
return;
}
@ -538,7 +538,7 @@ void ShenandoahReferenceProcessor::enqueue_references(bool concurrent) {
}
// Reset internal pending list
_pending_list = NULL;
_pending_list = nullptr;
_pending_list_tail = &_pending_list;
}
@ -547,7 +547,7 @@ void ShenandoahReferenceProcessor::clean_discovered_list(T* list) {
T discovered = *list;
while (!CompressedOops::is_null(discovered)) {
oop discovered_ref = CompressedOops::decode_not_null(discovered);
set_oop_field<T>(list, oop(NULL));
set_oop_field<T>(list, oop(nullptr));
list = reference_discovered_addr<T>(discovered_ref);
discovered = *list;
}
@ -562,9 +562,9 @@ void ShenandoahReferenceProcessor::abandon_partial_discovery() {
clean_discovered_list<oop>(_ref_proc_thread_locals[index].discovered_list_addr<oop>());
}
}
if (_pending_list != NULL) {
if (_pending_list != nullptr) {
oop pending = _pending_list;
_pending_list = NULL;
_pending_list = nullptr;
if (UseCompressedOops) {
narrowOop* list = reference_discovered_addr<narrowOop>(pending);
clean_discovered_list<narrowOop>(list);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2021, Red Hat, Inc. and/or its affiliates.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -72,7 +72,7 @@ typedef size_t Counters[reference_type_count];
* be processed (e.g. enqueued in its ReferenceQueue) by the Java ReferenceHandler thread.
*
* In order to prevent resurrection by Java threads calling Reference.get() concurrently while we are clearing
* referents, we employ a special barrier, the native LRB, which returns NULL when the referent is unreachable.
* referents, we employ a special barrier, the native LRB, which returns nullptr when the referent is unreachable.
*/
class ShenandoahRefProcThreadLocal : public CHeapObj<mtGC> {

View File

@ -128,7 +128,7 @@ ShenandoahConcurrentRootScanner::ShenandoahConcurrentRootScanner(uint n_workers,
_java_threads(phase, n_workers),
_vm_roots(phase),
_cld_roots(phase, n_workers, false /*heap iteration*/),
_codecache_snapshot(NULL),
_codecache_snapshot(nullptr),
_phase(phase) {
if (!ShenandoahHeap::heap()->unload_classes()) {
MutexLocker locker(CodeCache_lock, Mutex::_no_safepoint_check_flag);
@ -218,7 +218,7 @@ void ShenandoahRootAdjuster::roots_do(uint worker_id, OopClosure* oops) {
// Process heavy-weight/fully parallel roots the last
_code_roots.code_blobs_do(adjust_code_closure, worker_id);
_thread_roots.oops_do(oops, NULL, worker_id);
_thread_roots.oops_do(oops, nullptr, worker_id);
}
ShenandoahHeapIterationRootScanner::ShenandoahHeapIterationRootScanner(uint n_workers) :
@ -258,7 +258,7 @@ void ShenandoahHeapIterationRootScanner::roots_do(OopClosure* oops) {
// Must use _claim_other to avoid interfering with concurrent CLDG iteration
CLDToOopClosure clds(oops, ClassLoaderData::_claim_other);
ShenandoahMarkCodeBlobClosure code(oops);
ShenandoahParallelOopsDoThreadClosure tc_cl(oops, &code, NULL);
ShenandoahParallelOopsDoThreadClosure tc_cl(oops, &code, nullptr);
ResourceMark rm;

View File

@ -134,7 +134,7 @@ public:
_f(f), _cf(cf), _thread_cl(thread_cl) {}
void do_thread(Thread* t) {
if (_thread_cl != NULL) {
if (_thread_cl != nullptr) {
_thread_cl->do_thread(t);
}
t->oops_do(_f, _cf);
@ -160,7 +160,7 @@ void ShenandoahSTWRootScanner::roots_do(T* oops, uint worker_id) {
_thread_roots.oops_do(oops, &blobs_cl, worker_id);
_cld_roots.always_strong_cld_do(&clds, worker_id);
} else {
_thread_roots.oops_do(oops, NULL, worker_id);
_thread_roots.oops_do(oops, nullptr, worker_id);
_code_roots.code_blobs_do(&blobs_cl, worker_id);
_cld_roots.cld_do(&clds, worker_id);
}
@ -185,7 +185,7 @@ void ShenandoahRootUpdater::roots_do(uint worker_id, IsAlive* is_alive, KeepAliv
// Process heavy-weight/fully parallel roots the last
_code_roots.code_blobs_do(codes_cl, worker_id);
_thread_roots.oops_do(keep_alive, NULL, worker_id);
_thread_roots.oops_do(keep_alive, nullptr, worker_id);
}
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHROOTPROCESSOR_INLINE_HPP

View File

@ -70,7 +70,7 @@ void ShenandoahRootVerifier::roots_do(OopClosure* oops) {
// Do thread roots the last. This allows verification code to find
// any broken objects from those special roots first, not the accidental
// dangling reference from the thread root.
Threads::possibly_parallel_oops_do(true, oops, NULL);
Threads::possibly_parallel_oops_do(true, oops, nullptr);
}
void ShenandoahRootVerifier::strong_roots_do(OopClosure* oops) {

View File

@ -44,8 +44,8 @@ void ShenandoahRuntime::arraycopy_barrier_narrow_oop_entry(narrowOop* src, narro
// Shenandoah pre write barrier slowpath
JRT_LEAF(void, ShenandoahRuntime::write_ref_field_pre_entry(oopDesc* orig, JavaThread *thread))
assert(thread == JavaThread::current(), "pre-condition");
assert(orig != NULL, "should be optimized out");
shenandoah_assert_correct(NULL, orig);
assert(orig != nullptr, "should be optimized out");
shenandoah_assert_correct(nullptr, orig);
// store the original value that was in the field reference
assert(ShenandoahThreadLocalData::satb_mark_queue(thread).is_active(), "Shouldn't be here otherwise");
SATBMarkQueue& queue = ShenandoahThreadLocalData::satb_mark_queue(thread);
@ -64,7 +64,7 @@ JRT_END
// in cloned objects.
JRT_LEAF(void, ShenandoahRuntime::shenandoah_clone_barrier(oopDesc* src))
oop s = oop(src);
shenandoah_assert_correct(NULL, s);
shenandoah_assert_correct(nullptr, s);
ShenandoahBarrierSet::barrier_set()->clone_barrier(s);
JRT_END

View File

@ -95,7 +95,7 @@ typedef struct ShenandoahSharedFlag {
private:
volatile ShenandoahSharedValue* operator&() {
fatal("Use addr_of() instead");
return NULL;
return nullptr;
}
bool operator==(ShenandoahSharedFlag& other) { fatal("Use is_set() instead"); return false; }
@ -188,7 +188,7 @@ typedef struct ShenandoahSharedBitmap {
private:
volatile ShenandoahSharedValue* operator&() {
fatal("Use addr_of() instead");
return NULL;
return nullptr;
}
bool operator==(ShenandoahSharedFlag& other) { fatal("Use is_set() instead"); return false; }
@ -233,7 +233,7 @@ struct ShenandoahSharedEnumFlag {
private:
volatile T* operator&() {
fatal("Use addr_of() instead");
return NULL;
return nullptr;
}
bool operator==(ShenandoahSharedEnumFlag& other) { fatal("Use get() instead"); return false; }
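
The operator& overloads above trap misuse at run time with fatal(); a standalone equivalent of the pattern (a sketch, not the HotSpot code) can instead delete the operator so that taking the flag's address fails at compile time, funneling all callers through addr_of():

struct SharedFlag {
  volatile int value = 0;
  volatile int* addr_of() { return &value; }  // the sanctioned accessor
  volatile int* operator&() = delete;         // &flag does not compile
};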

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2021, Red Hat, Inc. All rights reserved.
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,7 +40,7 @@ ShenandoahOnStackCodeBlobClosure::ShenandoahOnStackCodeBlobClosure() :
void ShenandoahOnStackCodeBlobClosure::do_code_blob(CodeBlob* cb) {
nmethod* const nm = cb->as_nmethod_or_null();
if (nm != NULL) {
if (nm != nullptr) {
const bool result = _bs_nm->nmethod_entry_barrier(nm);
assert(result, "NMethod on-stack must be alive");
}
@ -68,7 +68,7 @@ ShenandoahStackWatermark::ShenandoahStackWatermark(JavaThread* jt) :
_cb_cl() {}
OopClosure* ShenandoahStackWatermark::closure_from_context(void* context) {
if (context != NULL) {
if (context != nullptr) {
assert(_heap->is_concurrent_weak_root_in_progress() ||
_heap->is_concurrent_mark_in_progress(),
"Only these two phases");
@ -82,7 +82,7 @@ OopClosure* ShenandoahStackWatermark::closure_from_context(void* context) {
return &_evac_update_oop_cl;
} else {
ShouldNotReachHere();
return NULL;
return nullptr;
}
}
}
@ -130,7 +130,7 @@ void ShenandoahStackWatermark::retire_tlab() {
void ShenandoahStackWatermark::process(const frame& fr, RegisterMap& register_map, void* context) {
OopClosure* oops = closure_from_context(context);
assert(oops != NULL, "Should not get to here");
assert(oops != nullptr, "Should not get to here");
ShenandoahHeap* const heap = ShenandoahHeap::heap();
assert((heap->is_concurrent_weak_root_in_progress() && heap->is_evacuation_in_progress()) ||
heap->is_concurrent_mark_in_progress(),

View File

@ -34,7 +34,7 @@ void ShenandoahObjToScanQueueSet::clear() {
uint size = GenericTaskQueueSet<ShenandoahObjToScanQueue, mtGC>::size();
for (uint index = 0; index < size; index ++) {
ShenandoahObjToScanQueue* q = queue(index);
assert(q != NULL, "Sanity");
assert(q != nullptr, "Sanity");
q->clear();
}
}
@ -43,7 +43,7 @@ bool ShenandoahObjToScanQueueSet::is_empty() {
uint size = GenericTaskQueueSet<ShenandoahObjToScanQueue, mtGC>::size();
for (uint index = 0; index < size; index ++) {
ShenandoahObjToScanQueue* q = queue(index);
assert(q != NULL, "Sanity");
assert(q != nullptr, "Sanity");
if (!q->is_empty()) {
return false;
}

View File

@ -204,7 +204,7 @@ private:
}
public:
ShenandoahMarkTask(oop o = NULL, bool skip_live = false, bool weak = false) {
ShenandoahMarkTask(oop o = nullptr, bool skip_live = false, bool weak = false) {
uintptr_t enc = encode_oop(o, skip_live, weak);
assert(decode_oop(enc) == o, "oop encoding should work: " PTR_FORMAT, p2i(o));
assert(decode_cnt_live(enc) == !skip_live, "skip_live encoding should work");
@ -265,7 +265,7 @@ private:
int _pow;
public:
ShenandoahMarkTask(oop o = NULL, bool skip_live = false, bool weak = false, int chunk = 0, int pow = 0):
ShenandoahMarkTask(oop o = nullptr, bool skip_live = false, bool weak = false, int chunk = 0, int pow = 0):
_obj(o), _skip_live(skip_live), _weak(weak), _chunk(chunk), _pow(pow) {
assert(0 <= chunk && chunk <= chunk_max, "chunk is in range: %d", chunk);
assert(0 <= pow && pow <= pow_max, "pow is in range: %d", pow);
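
A standalone sketch of the flag-packing idea behind ShenandoahMarkTask (the layout here is an assumption, not the real encoding): with objects aligned to at least 4 bytes, the two low pointer bits are free to carry the skip_live and weak flags.

#include <cstdint>

struct Task {
  uintptr_t enc;

  static uintptr_t encode(void* p, bool skip_live, bool weak) {
    // Alignment guarantees the low bits of p are zero, so they can hold flags.
    return reinterpret_cast<uintptr_t>(p) | (skip_live ? 1u : 0u) | (weak ? 2u : 0u);
  }
  void* obj()      const { return reinterpret_cast<void*>(enc & ~uintptr_t(3)); }
  bool skip_live() const { return (enc & 1) != 0; }
  bool weak()      const { return (enc & 2) != 0; }
};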
@ -334,7 +334,7 @@ T* ParallelClaimableQueueSet<T, F>::claim_next() {
jint size = (jint)GenericTaskQueueSet<T, F>::size();
if (_claimed_index >= size) {
return NULL;
return nullptr;
}
jint index = Atomic::add(&_claimed_index, 1, memory_order_relaxed);
@ -342,7 +342,7 @@ T* ParallelClaimableQueueSet<T, F>::claim_next() {
if (index <= size) {
return GenericTaskQueueSet<T, F>::queue((uint)index - 1);
} else {
return NULL;
return nullptr;
}
}
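
This is the same claim-by-add pattern as the region iterator, but built on the post-add value: Atomic::add here returns the new index, so the claimed slot is index - 1 and any result beyond size means the queues are exhausted. A small sketch under those assumptions:

#include <atomic>

template <typename Q>
Q* claim_next(std::atomic<int>& claimed, Q** queues, int size) {
  if (claimed.load(std::memory_order_relaxed) >= size) {
    return nullptr;                                                // fast-path bailout
  }
  int index = claimed.fetch_add(1, std::memory_order_relaxed) + 1; // post-add value
  return index <= size ? queues[index - 1] : nullptr;              // overshoot yields nullptr
}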

View File

@ -51,13 +51,13 @@ private:
_oom_scope_nesting_level(0),
_oom_during_evac(false),
_satb_mark_queue(&ShenandoahBarrierSet::satb_mark_queue_set()),
_gclab(NULL),
_gclab(nullptr),
_gclab_size(0),
_paced_time(0) {
}
~ShenandoahThreadLocalData() {
if (_gclab != NULL) {
if (_gclab != nullptr) {
delete _gclab;
}
}
@ -94,7 +94,7 @@ public:
static void initialize_gclab(Thread* thread) {
assert (thread->is_Java_thread() || thread->is_Worker_thread(), "Only Java and GC worker threads are allowed to get GCLABs");
assert(data(thread)->_gclab == NULL, "Only initialize once");
assert(data(thread)->_gclab == nullptr, "Only initialize once");
data(thread)->_gclab = new PLAB(PLAB::min_size());
data(thread)->_gclab_size = 0;
}

View File

@ -92,7 +92,7 @@ public:
virtual bool lock(CompiledMethod* method) {
nmethod* const nm = method->as_nmethod();
ShenandoahReentrantLock* const lock = ShenandoahNMethod::lock_for_nmethod(nm);
assert(lock != NULL, "Not yet registered?");
assert(lock != nullptr, "Not yet registered?");
lock->lock();
return true;
}
@ -100,7 +100,7 @@ public:
virtual void unlock(CompiledMethod* method) {
nmethod* const nm = method->as_nmethod();
ShenandoahReentrantLock* const lock = ShenandoahNMethod::lock_for_nmethod(nm);
assert(lock != NULL, "Not yet registered?");
assert(lock != nullptr, "Not yet registered?");
lock->unlock();
}
@ -111,7 +111,7 @@ public:
nmethod* const nm = method->as_nmethod();
ShenandoahReentrantLock* const lock = ShenandoahNMethod::lock_for_nmethod(nm);
assert(lock != NULL, "Not yet registered?");
assert(lock != nullptr, "Not yet registered?");
return lock->owned_by_self();
}
};

View File

@ -89,7 +89,7 @@ ShenandoahGCPauseMark::ShenandoahGCPauseMark(uint gc_id, SvcGCMarker::reason_typ
ShenandoahPausePhase::ShenandoahPausePhase(const char* title, ShenandoahPhaseTimings::Phase phase, bool log_heap_usage) :
ShenandoahTimingsTracker(phase),
_tracer(title, NULL, GCCause::_no_gc, log_heap_usage),
_tracer(title, nullptr, GCCause::_no_gc, log_heap_usage),
_timer(ShenandoahHeap::heap()->gc_timer()) {
_timer->register_gc_pause_start(title);
}
@ -100,7 +100,7 @@ ShenandoahPausePhase::~ShenandoahPausePhase() {
ShenandoahConcurrentPhase::ShenandoahConcurrentPhase(const char* title, ShenandoahPhaseTimings::Phase phase, bool log_heap_usage) :
ShenandoahTimingsTracker(phase),
_tracer(title, NULL, GCCause::_no_gc, log_heap_usage),
_tracer(title, nullptr, GCCause::_no_gc, log_heap_usage),
_timer(ShenandoahHeap::heap()->gc_timer()) {
_timer->register_gc_concurrent_start(title);
}

View File

@ -159,7 +159,7 @@ public:
// Otherwise check we are at proper operation type
VM_Operation* vm_op = VMThread::vm_operation();
if (vm_op == NULL) return false;
if (vm_op == nullptr) return false;
VM_Operation::VMOp_Type type = vm_op->type();
return type == VM_Operation::VMOp_ShenandoahInitMark ||

View File

@ -78,8 +78,8 @@ public:
_heap(ShenandoahHeap::heap()),
_map(map),
_ld(ld),
_interior_loc(NULL),
_loc(NULL) {
_interior_loc(nullptr),
_loc(nullptr) {
if (options._verify_marked == ShenandoahVerifier::_verify_marked_complete_except_references ||
options._verify_marked == ShenandoahVerifier::_verify_marked_disable) {
set_ref_discoverer_internal(new ShenandoahIgnoreReferenceDiscoverer());
@ -130,8 +130,8 @@ private:
// Verify that obj is not in dead space:
{
// Do this before touching obj->size()
check(ShenandoahAsserts::_safe_unknown, obj, obj_klass != NULL,
"Object klass pointer should not be NULL");
check(ShenandoahAsserts::_safe_unknown, obj, obj_klass != nullptr,
"Object klass pointer should not be null");
check(ShenandoahAsserts::_safe_unknown, obj, Metaspace::contains(obj_klass),
"Object klass pointer must go to metaspace");
@ -174,7 +174,7 @@ private:
oop fwd = ShenandoahForwarding::get_forwardee_raw_unchecked(obj);
ShenandoahHeapRegion* fwd_reg = NULL;
ShenandoahHeapRegion* fwd_reg = nullptr;
if (obj != fwd) {
check(ShenandoahAsserts::_safe_oop, obj, _heap->is_in(fwd),
@ -186,8 +186,8 @@ private:
// Do this before touching fwd->size()
Klass* fwd_klass = fwd->klass_or_null();
check(ShenandoahAsserts::_safe_oop, obj, fwd_klass != NULL,
"Forwardee klass pointer should not be NULL");
check(ShenandoahAsserts::_safe_oop, obj, fwd_klass != nullptr,
"Forwardee klass pointer should not be null");
check(ShenandoahAsserts::_safe_oop, obj, Metaspace::contains(fwd_klass),
"Forwardee klass pointer must go to metaspace");
check(ShenandoahAsserts::_safe_oop, obj, obj_klass == fwd_klass,
@ -284,7 +284,7 @@ public:
void verify_oop_at(T* p, oop obj) {
_interior_loc = p;
verify_oop(obj);
_interior_loc = NULL;
_interior_loc = nullptr;
}
/**
@ -294,9 +294,9 @@ public:
* @param obj verified object
*/
void verify_oop_standalone(oop obj) {
_interior_loc = NULL;
_interior_loc = nullptr;
verify_oop(obj);
_interior_loc = NULL;
_interior_loc = nullptr;
}
/**
@ -306,7 +306,7 @@ public:
void verify_oops_from(oop obj) {
_loc = obj;
obj->oop_iterate(this);
_loc = NULL;
_loc = nullptr;
}
virtual void do_oop(oop* p) { do_oop_work(p); }
@ -920,7 +920,7 @@ private:
oop obj = CompressedOops::decode_not_null(o);
oop fwd = ShenandoahForwarding::get_forwardee_raw_unchecked(obj);
if (obj != fwd) {
ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, p, NULL,
ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, p, nullptr,
"Verify Roots", "Should not be forwarded", __FILE__, __LINE__);
}
}
@ -941,18 +941,18 @@ private:
ShenandoahHeap* heap = ShenandoahHeap::heap();
if (!heap->marking_context()->is_marked(obj)) {
ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, p, NULL,
ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, p, nullptr,
"Verify Roots In To-Space", "Should be marked", __FILE__, __LINE__);
}
if (heap->in_collection_set(obj)) {
ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, p, NULL,
ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, p, nullptr,
"Verify Roots In To-Space", "Should not be in collection set", __FILE__, __LINE__);
}
oop fwd = ShenandoahForwarding::get_forwardee_raw_unchecked(obj);
if (obj != fwd) {
ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, p, NULL,
ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, p, nullptr,
"Verify Roots In To-Space", "Should not be forwarded", __FILE__, __LINE__);
}
}

View File

@ -39,7 +39,7 @@ class ShenandoahHeap;
class ShenandoahVerifierTask {
public:
ShenandoahVerifierTask(oop o = NULL, int idx = 0): _obj(o) { }
ShenandoahVerifierTask(oop o = nullptr, int idx = 0): _obj(o) { }
ShenandoahVerifierTask(oop o, size_t idx): _obj(o) { }
// Trivially copyable.

View File

@ -34,7 +34,7 @@
ShenandoahWorkerScope::ShenandoahWorkerScope(WorkerThreads* workers, uint nworkers, const char* msg, bool check) :
_workers(workers) {
assert(msg != NULL, "Missing message");
assert(msg != nullptr, "Missing message");
_n_workers = _workers->set_active_workers(nworkers);
assert(_n_workers <= nworkers, "Must be");