8300651: Replace NULL with nullptr in share/runtime/

Reviewed-by: rehn, dholmes
Johan Sjölen 2023-01-25 10:30:02 +00:00
parent 3c61d5aa48
commit 71107f4648
112 changed files with 2058 additions and 2058 deletions
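Background for this mechanical change (an illustration, not part of the commit): in C++, NULL is an integer-typed macro, so it takes part in integer overload resolution and template deduction, while nullptr has its own type, std::nullptr_t, and converts only to pointer types. A minimal standalone example:

#include <cstddef>

static int take(int)         { return 1; }
static int take(const char*) { return 2; }

int main() {
  // take(NULL) resolves to take(int) when NULL is 0, or is ambiguous
  // when NULL is 0L -- it never selects the pointer overload.
  // take(nullptr) always selects take(const char*).
  return take(nullptr); // returns 2
}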

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -309,14 +309,14 @@ void Abstract_VM_Version::insert_features_names(char* buf, size_t buflen, const
bool Abstract_VM_Version::print_matching_lines_from_file(const char* filename, outputStream* st, const char* keywords_to_match[]) {
char line[500];
FILE* fp = os::fopen(filename, "r");
if (fp == NULL) {
if (fp == nullptr) {
return false;
}
st->print_cr("Virtualization information:");
while (fgets(line, sizeof(line), fp) != NULL) {
while (fgets(line, sizeof(line), fp) != nullptr) {
int i = 0;
while (keywords_to_match[i] != NULL) {
while (keywords_to_match[i] != nullptr) {
if (strncmp(line, keywords_to_match[i], strlen(keywords_to_match[i])) == 0) {
st->print("%s", line);
break;
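The loop above stops at a nullptr sentinel, so callers must terminate the keywords array explicitly. A hedged usage sketch (the filename and keywords here are illustrative, not from this commit):

// Prints every line of the file that starts with one of the keywords;
// the array must end with a nullptr sentinel.
const char* keywords[] = { "MemTotal:", "MemFree:", nullptr };
Abstract_VM_Version::print_matching_lines_from_file("/proc/meminfo", tty, keywords);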
@ -354,8 +354,8 @@ int Abstract_VM_Version::number_of_sockets(void) {
const char* Abstract_VM_Version::cpu_name(void) {
assert(_initialized, "should be initialized");
char* tmp = NEW_C_HEAP_ARRAY_RETURN_NULL(char, CPU_TYPE_DESC_BUF_SIZE, mtTracing);
if (NULL == tmp) {
return NULL;
if (nullptr == tmp) {
return nullptr;
}
strncpy(tmp, _cpu_name, CPU_TYPE_DESC_BUF_SIZE);
return tmp;
@ -364,8 +364,8 @@ const char* Abstract_VM_Version::cpu_name(void) {
const char* Abstract_VM_Version::cpu_description(void) {
assert(_initialized, "should be initialized");
char* tmp = NEW_C_HEAP_ARRAY_RETURN_NULL(char, CPU_DETAILED_DESC_BUF_SIZE, mtTracing);
if (NULL == tmp) {
return NULL;
if (nullptr == tmp) {
return nullptr;
}
strncpy(tmp, _cpu_desc, CPU_DETAILED_DESC_BUF_SIZE);
return tmp;

File diff suppressed because it is too large

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -55,7 +55,7 @@ struct SpecialFlag {
};
struct LegacyGCLogging {
const char* file; // NULL -> stdout
const char* file; // null -> stdout
int lastFlag; // 0 not set; 1 -> -verbose:gc; 2 -> -Xloggc
};
@ -113,7 +113,7 @@ class SystemProperty : public PathString {
bool readable() const {
return !_internal || (strcmp(_key, "jdk.boot.class.path.append") == 0 &&
value() != NULL);
value() != nullptr);
}
// A system property should only have its value set
@ -143,7 +143,7 @@ class SystemProperty : public PathString {
class AgentLibrary : public CHeapObj<mtArguments> {
friend class AgentLibraryList;
public:
// Is this library valid or not. Don't rely on os_lib == NULL as statically
// Is this library valid or not. Don't rely on os_lib == nullptr as statically
// linked lib could have handle of RTLD_DEFAULT which == 0 on some platforms
enum AgentState {
agent_invalid = 0,
@ -185,7 +185,7 @@ class AgentLibraryList {
AgentLibrary* _first;
AgentLibrary* _last;
public:
bool is_empty() const { return _first == NULL; }
bool is_empty() const { return _first == nullptr; }
AgentLibrary* first() const { return _first; }
// add to the end of the list
@ -196,23 +196,23 @@ class AgentLibraryList {
_last->_next = lib;
_last = lib;
}
lib->_next = NULL;
lib->_next = nullptr;
}
// search for and remove a library known to be in the list
void remove(AgentLibrary* lib) {
AgentLibrary* curr;
AgentLibrary* prev = NULL;
for (curr = first(); curr != NULL; prev = curr, curr = curr->next()) {
AgentLibrary* prev = nullptr;
for (curr = first(); curr != nullptr; prev = curr, curr = curr->next()) {
if (curr == lib) {
break;
}
}
assert(curr != NULL, "always should be found");
assert(curr != nullptr, "always should be found");
if (curr != NULL) {
if (curr != nullptr) {
// it was found, by-pass this library
if (prev == NULL) {
if (prev == nullptr) {
_first = curr->_next;
} else {
prev->_next = curr->_next;
@ -220,13 +220,13 @@ class AgentLibraryList {
if (curr == _last) {
_last = prev;
}
curr->_next = NULL;
curr->_next = nullptr;
}
}
AgentLibraryList() {
_first = NULL;
_last = NULL;
_first = nullptr;
_last = nullptr;
}
};
@ -436,7 +436,7 @@ class Arguments : AllStatic {
static bool is_bad_option(const JavaVMOption* option, jboolean ignore, const char* option_type);
static bool is_bad_option(const JavaVMOption* option, jboolean ignore) {
return is_bad_option(option, ignore, NULL);
return is_bad_option(option, ignore, nullptr);
}
static void describe_range_error(ArgsRange errcode);
@ -467,7 +467,7 @@ class Arguments : AllStatic {
static JVMFlag* find_jvm_flag(const char* name, size_t name_length);
// Return the "real" name for option arg if arg is an alias, and print a warning if arg is deprecated.
// Return NULL if the arg has expired.
// Return nullptr if the arg has expired.
static const char* handle_aliases_and_deprecation(const char* arg);
static char* SharedArchivePath;
@ -520,7 +520,7 @@ class Arguments : AllStatic {
// convenient methods to get and set jvm_flags_file
static const char* get_jvm_flags_file() { return _jvm_flags_file; }
static void set_jvm_flags_file(const char *value) {
if (_jvm_flags_file != NULL) {
if (_jvm_flags_file != nullptr) {
os::free(_jvm_flags_file);
}
_jvm_flags_file = os::strdup_check_oom(value);
@ -603,7 +603,7 @@ class Arguments : AllStatic {
static void add_patch_mod_prefix(const char *module_name, const char *path, bool* patch_mod_javabase);
static void set_boot_class_path(const char *value, bool has_jimage) {
// During start up, set by os::set_boot_path()
assert(get_boot_class_path() == NULL, "Boot class path previously set");
assert(get_boot_class_path() == nullptr, "Boot class path previously set");
_boot_class_path->set_value(value);
_has_jimage = has_jimage;
}
@ -621,7 +621,7 @@ class Arguments : AllStatic {
static char* get_appclasspath() { return _java_class_path->value(); }
static void fix_appclasspath();
static char* get_default_shared_archive_path() NOT_CDS_RETURN_(NULL);
static char* get_default_shared_archive_path() NOT_CDS_RETURN_(nullptr);
static void init_shared_archive_paths() NOT_CDS_RETURN;
// Operation modi
@ -676,15 +676,15 @@ do { \
} \
} while(0)
// similar to UNSUPPORTED_OPTION but sets flag to NULL
#define UNSUPPORTED_OPTION_NULL(opt) \
do { \
if (opt) { \
if (FLAG_IS_CMDLINE(opt)) { \
// similar to UNSUPPORTED_OPTION but sets flag to nullptr
#define UNSUPPORTED_OPTION_NULL(opt) \
do { \
if (opt) { \
if (FLAG_IS_CMDLINE(opt)) { \
warning("-XX flag " #opt " not supported in this VM"); \
} \
FLAG_SET_DEFAULT(opt, NULL); \
} \
} \
FLAG_SET_DEFAULT(opt, nullptr); \
} \
} while(0)
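A usage sketch (the flag name is hypothetical): for a pointer-valued (ccstr) option that a given VM build cannot honor, the macro warns only if the user set the flag on the command line, then resets it to nullptr:

// In platform- or build-specific argument checking:
UNSUPPORTED_OPTION_NULL(SomeUnsupportedPathFlag);
// warns "-XX flag SomeUnsupportedPathFlag not supported in this VM"
// when the flag was set on the command line, then does
// FLAG_SET_DEFAULT(SomeUnsupportedPathFlag, nullptr).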
// Initialize options not supported in this release, with a warning

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -151,7 +151,7 @@ public:
T exchange_value,
atomic_memory_order order = memory_order_conservative);
// Performs atomic compare of *dest and NULL, and replaces *dest
// Performs atomic compare of *dest and nullptr, and replaces *dest
// with exchange_value if the comparison succeeded. Returns true if
// the comparison succeeded and the exchange occurred. This is
// often used as part of lazy initialization, as a lock-free
@ -754,7 +754,7 @@ inline bool Atomic::replace_if_null(D* volatile* dest, T* value,
// Presently using a trivial implementation in terms of cmpxchg.
// Consider adding platform support, to permit the use of compiler
// intrinsics like gcc's __sync_bool_compare_and_swap.
D* expected_null = NULL;
D* expected_null = nullptr;
return expected_null == cmpxchg(dest, expected_null, value, order);
}
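A sketch of the intended usage (illustrative names; the BoxCache singletons later in this commit follow the same shape): lock-free lazy initialization in which a losing thread discards its speculative allocation.

struct Widget {};
static Widget* volatile _instance = nullptr;

Widget* instance() {
  if (Atomic::load(&_instance) == nullptr) {
    Widget* w = new Widget();
    if (!Atomic::replace_if_null(&_instance, w)) {
      delete w; // another thread installed its instance first
    }
  }
  return Atomic::load(&_instance);
}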

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,7 +32,7 @@ void BasicLock::print_on(outputStream* st, oop owner) const {
markWord mark_word = displaced_header();
if (mark_word.value() != 0) {
// Print monitor info if there's an owning oop and it refers to this BasicLock.
bool print_monitor_info = (owner != NULL) && (owner->mark() == markWord::from_pointer((void*)this));
bool print_monitor_info = (owner != nullptr) && (owner->mark() == markWord::from_pointer((void*)this));
mark_word.print_on(st, print_monitor_info);
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1906,7 +1906,7 @@ NOINLINE intptr_t* Thaw<ConfigT>::thaw_fast(stackChunkOop chunk) {
}
// Are we thawing the last frame(s) in the continuation
const bool is_last = empty && chunk->parent() == NULL;
const bool is_last = empty && chunk->parent() == nullptr;
assert(!is_last || argsize == 0, "");
log_develop_trace(continuations)("thaw_fast partial: %d is_last: %d empty: %d size: %d argsize: %d entrySP: " PTR_FORMAT,
@ -2606,7 +2606,7 @@ private:
template <bool use_compressed>
static void resolve_gc() {
BarrierSet* bs = BarrierSet::barrier_set();
assert(bs != NULL, "freeze/thaw invoked before BarrierSet is set");
assert(bs != nullptr, "freeze/thaw invoked before BarrierSet is set");
switch (bs->kind()) {
#define BARRIER_SET_RESOLVE_BARRIER_CLOSURE(bs_name) \
case BarrierSet::bs_name: { \

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -208,7 +208,7 @@ static bool rematerialize_objects(JavaThread* thread, int exec_mode, CompiledMet
frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
bool& deoptimized_objects) {
bool realloc_failures = false;
assert (chunk->at(0)->scope() != NULL,"expect only compiled java frames");
assert (chunk->at(0)->scope() != nullptr,"expect only compiled java frames");
JavaThread* deoptee_thread = chunk->at(0)->thread();
assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread),
@ -239,7 +239,7 @@ static bool rematerialize_objects(JavaThread* thread, int exec_mode, CompiledMet
tty->cr();
}
}
if (objects != NULL) {
if (objects != nullptr) {
if (exec_mode == Deoptimization::Unpack_none) {
assert(thread->thread_state() == _thread_in_vm, "assumption");
JavaThread* THREAD = thread; // For exception macros.
@ -252,7 +252,7 @@ static bool rematerialize_objects(JavaThread* thread, int exec_mode, CompiledMet
realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
JRT_END
}
bool skip_internal = (compiled_method != NULL) && !compiled_method->is_compiled_by_jvmci();
bool skip_internal = (compiled_method != nullptr) && !compiled_method->is_compiled_by_jvmci();
Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal);
if (TraceDeoptimization) {
print_objects(deoptee_thread, objects, realloc_failures);
@ -276,7 +276,7 @@ static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledV
#endif // !PRODUCT
for (int i = 0; i < chunk->length(); i++) {
compiledVFrame* cvf = chunk->at(i);
assert (cvf->scope() != NULL,"expect only compiled java frames");
assert (cvf->scope() != nullptr,"expect only compiled java frames");
GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
if (monitors->is_nonempty()) {
bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,
@ -295,7 +295,7 @@ static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledV
}
if (exec_mode == Deoptimization::Unpack_none) {
ObjectMonitor* monitor = deoptee_thread->current_waiting_monitor();
if (monitor != NULL && monitor->object() == mi->owner()) {
if (monitor != nullptr && monitor->object() == mi->owner()) {
st.print_cr(" object <" INTPTR_FORMAT "> DEFERRED relocking after wait", p2i(mi->owner()));
continue;
}
@ -358,7 +358,7 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread
// Allocate our special deoptimization ResourceMark
DeoptResourceMark* dmark = new DeoptResourceMark(current);
assert(current->deopt_mark() == NULL, "Pending deopt!");
assert(current->deopt_mark() == nullptr, "Pending deopt!");
current->set_deopt_mark(dmark);
frame stub_frame = current->last_frame(); // Makes stack walkable as side effect
@ -373,7 +373,7 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread
// Now get the deoptee with a valid map
frame deoptee = stub_frame.sender(&map);
// Set the deoptee nmethod
assert(current->deopt_compiled_method() == NULL, "Pending deopt!");
assert(current->deopt_compiled_method() == nullptr, "Pending deopt!");
CompiledMethod* cm = deoptee.cb()->as_compiled_method_or_null();
current->set_deopt_compiled_method(cm);
@ -432,10 +432,10 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread
#endif // !PRODUCT
GrowableArray<ScopeValue*>* expressions = trap_scope->expressions();
guarantee(expressions != NULL && expressions->length() > 0, "must have exception to throw");
guarantee(expressions != nullptr && expressions->length() > 0, "must have exception to throw");
ScopeValue* topOfStack = expressions->top();
exceptionObject = StackValue::create_stack_value(&deoptee, &map, topOfStack)->get_obj();
guarantee(exceptionObject() != NULL, "exception oop can not be null");
guarantee(exceptionObject() != nullptr, "exception oop can not be null");
}
vframeArray* array = create_vframeArray(current, deoptee, &map, chunk, realloc_failures);
@ -447,7 +447,7 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread
}
#endif
assert(current->vframe_array_head() == NULL, "Pending deopt!");
assert(current->vframe_array_head() == nullptr, "Pending deopt!");
current->set_vframe_array_head(array);
// Now that the vframeArray has been created if we have any deferred local writes
@ -465,7 +465,7 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread
// If the deopt call site is a MethodHandle invoke call site we have
// to adjust the unpack_sp.
nmethod* deoptee_nm = deoptee.cb()->as_nmethod_or_null();
if (deoptee_nm != NULL && deoptee_nm->is_method_handle_return(deoptee.pc()))
if (deoptee_nm != nullptr && deoptee_nm->is_method_handle_return(deoptee.pc()))
unpack_sp = deoptee.unextended_sp();
#ifdef ASSERT
@ -604,10 +604,10 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread
ContinuationEntry::from_frame(deopt_sender)->set_argsize(0);
}
assert(CodeCache::find_blob(frame_pcs[0]) != NULL, "bad pc");
assert(CodeCache::find_blob(frame_pcs[0]) != nullptr, "bad pc");
#if INCLUDE_JVMCI
if (exceptionObject() != NULL) {
if (exceptionObject() != nullptr) {
current->set_exception_oop(exceptionObject());
exec_mode = Unpack_exception;
}
@ -655,18 +655,18 @@ void Deoptimization::cleanup_deopt_info(JavaThread *thread,
vframeArray *array) {
// Get array if coming from exception
if (array == NULL) {
if (array == nullptr) {
array = thread->vframe_array_head();
}
thread->set_vframe_array_head(NULL);
thread->set_vframe_array_head(nullptr);
// Free the previous UnrollBlock
vframeArray* old_array = thread->vframe_array_last();
thread->set_vframe_array_last(array);
if (old_array != NULL) {
if (old_array != nullptr) {
UnrollBlock* old_info = old_array->unroll_block();
old_array->set_unroll_block(NULL);
old_array->set_unroll_block(nullptr);
delete old_info;
delete old_array;
}
@ -675,8 +675,8 @@ void Deoptimization::cleanup_deopt_info(JavaThread *thread,
// inside the vframeArray (StackValueCollections)
delete thread->deopt_mark();
thread->set_deopt_mark(NULL);
thread->set_deopt_compiled_method(NULL);
thread->set_deopt_mark(nullptr);
thread->set_deopt_compiled_method(nullptr);
if (JvmtiExport::can_pop_frame()) {
@ -755,7 +755,7 @@ JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_m
// clear it to make sure JFR understands not to try and walk stacks from events
// in here.
intptr_t* sp = thread->frame_anchor()->last_Java_sp();
thread->frame_anchor()->set_last_Java_sp(NULL);
thread->frame_anchor()->set_last_Java_sp(nullptr);
// Unpack the interpreter frames and any adapter frame (c2 only) we might create.
array->unpack_to_stack(stub_frame, exec_mode, info->caller_actual_parameters());
@ -919,7 +919,7 @@ void Deoptimization::deoptimize_all_marked(nmethod* nmethod_only) {
ResourceMark rm;
// Make the dependent methods not entrant
if (nmethod_only != NULL) {
if (nmethod_only != nullptr) {
nmethod_only->mark_for_deoptimization();
nmethod_only->make_not_entrant();
CodeCache::make_nmethod_deoptimized(nmethod_only);
@ -946,7 +946,7 @@ protected:
ResourceMark rm(thread);
char* klass_name_str = klass_name->as_C_string();
InstanceKlass* ik = SystemDictionary::find_instance_klass(thread, klass_name, Handle(), Handle());
guarantee(ik != NULL, "%s must be loaded", klass_name_str);
guarantee(ik != nullptr, "%s must be loaded", klass_name_str);
guarantee(ik->is_initialized(), "%s must be initialized", klass_name_str);
CacheType::compute_offsets(ik);
return ik;
@ -972,7 +972,7 @@ protected:
}
public:
static BoxCache<PrimitiveType, CacheType, BoxType>* singleton(Thread* thread) {
if (_singleton == NULL) {
if (_singleton == nullptr) {
BoxCache<PrimitiveType, CacheType, BoxType>* s = new BoxCache<PrimitiveType, CacheType, BoxType>(thread);
if (!Atomic::replace_if_null(&_singleton, s)) {
delete s;
@ -985,7 +985,7 @@ public:
int offset = value - _low;
return objArrayOop(JNIHandles::resolve_non_null(_cache))->obj_at(offset);
}
return NULL;
return nullptr;
}
oop lookup_raw(intptr_t raw_value) {
// Have to cast to avoid little/big-endian problems.
@ -1004,11 +1004,11 @@ typedef BoxCache<jchar, java_lang_Character_CharacterCache, java_lang_Character>
typedef BoxCache<jshort, java_lang_Short_ShortCache, java_lang_Short> ShortBoxCache;
typedef BoxCache<jbyte, java_lang_Byte_ByteCache, java_lang_Byte> ByteBoxCache;
template<> BoxCache<jint, java_lang_Integer_IntegerCache, java_lang_Integer>* BoxCache<jint, java_lang_Integer_IntegerCache, java_lang_Integer>::_singleton = NULL;
template<> BoxCache<jlong, java_lang_Long_LongCache, java_lang_Long>* BoxCache<jlong, java_lang_Long_LongCache, java_lang_Long>::_singleton = NULL;
template<> BoxCache<jchar, java_lang_Character_CharacterCache, java_lang_Character>* BoxCache<jchar, java_lang_Character_CharacterCache, java_lang_Character>::_singleton = NULL;
template<> BoxCache<jshort, java_lang_Short_ShortCache, java_lang_Short>* BoxCache<jshort, java_lang_Short_ShortCache, java_lang_Short>::_singleton = NULL;
template<> BoxCache<jbyte, java_lang_Byte_ByteCache, java_lang_Byte>* BoxCache<jbyte, java_lang_Byte_ByteCache, java_lang_Byte>::_singleton = NULL;
template<> BoxCache<jint, java_lang_Integer_IntegerCache, java_lang_Integer>* BoxCache<jint, java_lang_Integer_IntegerCache, java_lang_Integer>::_singleton = nullptr;
template<> BoxCache<jlong, java_lang_Long_LongCache, java_lang_Long>* BoxCache<jlong, java_lang_Long_LongCache, java_lang_Long>::_singleton = nullptr;
template<> BoxCache<jchar, java_lang_Character_CharacterCache, java_lang_Character>* BoxCache<jchar, java_lang_Character_CharacterCache, java_lang_Character>::_singleton = nullptr;
template<> BoxCache<jshort, java_lang_Short_ShortCache, java_lang_Short>* BoxCache<jshort, java_lang_Short_ShortCache, java_lang_Short>::_singleton = nullptr;
template<> BoxCache<jbyte, java_lang_Byte_ByteCache, java_lang_Byte>* BoxCache<jbyte, java_lang_Byte_ByteCache, java_lang_Byte>::_singleton = nullptr;
class BooleanBoxCache : public BoxCacheBase<java_lang_Boolean> {
jobject _true_cache;
@ -1026,7 +1026,7 @@ protected:
}
public:
static BooleanBoxCache* singleton(Thread* thread) {
if (_singleton == NULL) {
if (_singleton == nullptr) {
BooleanBoxCache* s = new BooleanBoxCache(thread);
if (!Atomic::replace_if_null(&_singleton, s)) {
delete s;
@ -1047,7 +1047,7 @@ public:
}
};
BooleanBoxCache* BooleanBoxCache::_singleton = NULL;
BooleanBoxCache* BooleanBoxCache::_singleton = nullptr;
oop Deoptimization::get_cached_box(AutoBoxObjectValue* bv, frame* fr, RegisterMap* reg_map, TRAPS) {
Klass* k = java_lang_Class::as_Klass(bv->klass()->as_ConstantOopReadValue()->value()());
@ -1064,7 +1064,7 @@ oop Deoptimization::get_cached_box(AutoBoxObjectValue* bv, frame* fr, RegisterMa
default:;
}
}
return NULL;
return nullptr;
}
#endif // INCLUDE_JVMCI
@ -1082,7 +1082,7 @@ bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap*
ObjectValue* sv = (ObjectValue*) objects->at(i);
Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
oop obj = NULL;
oop obj = nullptr;
if (k->is_instance_klass()) {
#if INCLUDE_JVMCI
@ -1090,7 +1090,7 @@ bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap*
if (cm->is_compiled_by_jvmci() && sv->is_auto_box()) {
AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
obj = get_cached_box(abv, fr, reg_map, THREAD);
if (obj != NULL) {
if (obj != nullptr) {
// Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
abv->set_cached(true);
}
@ -1098,7 +1098,7 @@ bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap*
#endif // INCLUDE_JVMCI
InstanceKlass* ik = InstanceKlass::cast(k);
if (obj == NULL) {
if (obj == nullptr) {
#ifdef COMPILER2
if (EnableVectorSupport && VectorSupport::is_vector(ik)) {
obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
@ -1119,12 +1119,12 @@ bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap*
obj = ak->allocate(sv->field_size(), THREAD);
}
if (obj == NULL) {
if (obj == nullptr) {
failures = true;
}
assert(sv->value().is_null(), "redundant reallocation");
assert(obj != NULL || HAS_PENDING_EXCEPTION, "allocation should succeed or we should get an exception");
assert(obj != nullptr || HAS_PENDING_EXCEPTION, "allocation should succeed or we should get an exception");
CLEAR_PENDING_EXCEPTION;
sv->set_value(obj);
}
@ -1326,7 +1326,7 @@ int compare(ReassignedField* left, ReassignedField* right) {
static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal) {
GrowableArray<ReassignedField>* fields = new GrowableArray<ReassignedField>();
InstanceKlass* ik = klass;
while (ik != NULL) {
while (ik != nullptr) {
for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
if (!fs.access_flags().is_static() && (!skip_internal || !fs.access_flags().is_internal())) {
ReassignedField field;
@ -1499,13 +1499,13 @@ bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInf
// With exec_mode == Unpack_none obj may be thread local and locked in
// a callee frame. Make the lock in the callee a recursive lock and restore the displaced header.
markWord dmw = mark.displaced_mark_helper();
mark.locker()->set_displaced_header(markWord::encode((BasicLock*) NULL));
mark.locker()->set_displaced_header(markWord::encode((BasicLock*) nullptr));
obj->set_mark(dmw);
}
if (mark.has_monitor()) {
// defer relocking if the deoptee thread is currently waiting for obj
ObjectMonitor* waiting_monitor = deoptee_thread->current_waiting_monitor();
if (waiting_monitor != NULL && waiting_monitor->object() == obj()) {
if (waiting_monitor != nullptr && waiting_monitor->object() == obj()) {
assert(fr.is_deoptimized_frame(), "frame must be scheduled for deoptimization");
mon_info->lock()->set_displaced_header(markWord::unused_mark());
JvmtiDeferredUpdates::inc_relock_count_after_wait(deoptee_thread);
@ -1585,10 +1585,10 @@ void Deoptimization::pop_frames_failed_reallocs(JavaThread* thread, vframeArray*
// reallocations of synchronized objects) and be confused.
for (int i = 0; i < array->frames(); i++) {
MonitorChunk* monitors = array->element(i)->monitors();
if (monitors != NULL) {
if (monitors != nullptr) {
for (int j = 0; j < monitors->number_of_monitors(); j++) {
BasicObjectLock* src = monitors->at(j);
if (src->obj() != NULL) {
if (src->obj() != nullptr) {
ObjectSynchronizer::exit(src->obj(), src->lock(), thread);
}
}
@ -1606,9 +1606,9 @@ void Deoptimization::deoptimize_single_frame(JavaThread* thread, frame fr, Deopt
gather_statistics(reason, Action_none, Bytecodes::_illegal);
if (LogCompilation && xtty != NULL) {
if (LogCompilation && xtty != nullptr) {
CompiledMethod* cm = fr.cb()->as_compiled_method_or_null();
assert(cm != NULL, "only compiled methods can deopt");
assert(cm != nullptr, "only compiled methods can deopt");
ttyLocker ttyl;
xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
@ -1662,9 +1662,9 @@ address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod*
compiledVFrame* cvf = compiledVFrame::cast(vf);
ScopeDesc* imm_scope = cvf->scope();
MethodData* imm_mdo = get_method_data(thread, methodHandle(thread, imm_scope->method()), true);
if (imm_mdo != NULL) {
ProfileData* pdata = imm_mdo->allocate_bci_to_data(imm_scope->bci(), NULL);
if (pdata != NULL && pdata->is_BitData()) {
if (imm_mdo != nullptr) {
ProfileData* pdata = imm_mdo->allocate_bci_to_data(imm_scope->bci(), nullptr);
if (pdata != nullptr && pdata->is_BitData()) {
BitData* bit_data = (BitData*) pdata;
bit_data->set_exception_seen();
}
@ -1673,7 +1673,7 @@ address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod*
Deoptimization::deoptimize(thread, caller_frame, Deoptimization::Reason_not_compiled_exception_handler);
MethodData* trap_mdo = get_method_data(thread, methodHandle(thread, cm->method()), true);
if (trap_mdo != NULL) {
if (trap_mdo != nullptr) {
trap_mdo->inc_trap_count(Deoptimization::Reason_not_compiled_exception_handler);
}
@ -1726,7 +1726,7 @@ Deoptimization::get_method_data(JavaThread* thread, const methodHandle& m,
bool create_if_missing) {
JavaThread* THREAD = thread; // For exception macros.
MethodData* mdo = m()->method_data();
if (mdo == NULL && create_if_missing && !HAS_PENDING_EXCEPTION) {
if (mdo == nullptr && create_if_missing && !HAS_PENDING_EXCEPTION) {
// Build an MDO. Ignore errors like OutOfMemory;
// that simply means we won't have an MDO to update.
Method::build_profiling_method_data(m, THREAD);
@ -1813,8 +1813,8 @@ static void post_deoptimization_event(CompiledMethod* nm,
int instruction,
Deoptimization::DeoptReason reason,
Deoptimization::DeoptAction action) {
assert(nm != NULL, "invariant");
assert(method != NULL, "invariant");
assert(nm != nullptr, "invariant");
assert(method != nullptr, "invariant");
if (EventDeoptimization::is_enabled()) {
static bool serializers_registered = false;
if (!serializers_registered) {
@ -1970,7 +1970,7 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint tr
ResourceMark rm;
ttyLocker ttyl;
char buf[100];
if (xtty != NULL) {
if (xtty != nullptr) {
xtty->begin_head("uncommon_trap thread='" UINTX_FORMAT "' %s",
os::current_thread_id(),
format_trap_request(buf, sizeof(buf), trap_request));
@ -1981,22 +1981,22 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint tr
#endif
nm->log_identity(xtty);
}
Symbol* class_name = NULL;
Symbol* class_name = nullptr;
bool unresolved = false;
if (unloaded_class_index >= 0) {
constantPoolHandle constants (current, trap_method->constants());
if (constants->tag_at(unloaded_class_index).is_unresolved_klass()) {
class_name = constants->klass_name_at(unloaded_class_index);
unresolved = true;
if (xtty != NULL)
if (xtty != nullptr)
xtty->print(" unresolved='1'");
} else if (constants->tag_at(unloaded_class_index).is_symbol()) {
class_name = constants->symbol_at(unloaded_class_index);
}
if (xtty != NULL)
if (xtty != nullptr)
xtty->name(class_name);
}
if (xtty != NULL && trap_mdo != NULL && (int)reason < (int)MethodData::_trap_hist_limit) {
if (xtty != nullptr && trap_mdo != nullptr && (int)reason < (int)MethodData::_trap_hist_limit) {
// Dump the relevant MDO state.
// This is the deopt count for the current reason, any previous
// reasons or recompiles seen at this point.
@ -2004,7 +2004,7 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint tr
if (dcnt != 0)
xtty->print(" count='%d'", dcnt);
ProfileData* pdata = trap_mdo->bci_to_data(trap_bci);
int dos = (pdata == NULL)? 0: pdata->trap_state();
int dos = (pdata == nullptr)? 0: pdata->trap_state();
if (dos != 0) {
xtty->print(" state='%s'", format_trap_state(buf, sizeof(buf), dos));
if (trap_state_is_recompiled(dos)) {
@ -2014,7 +2014,7 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint tr
}
}
}
if (xtty != NULL) {
if (xtty != nullptr) {
xtty->stamp();
xtty->end_head();
}
@ -2027,7 +2027,7 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint tr
#if INCLUDE_JVMCI
if (nm->is_nmethod()) {
const char* installed_code_name = nm->as_nmethod()->jvmci_name();
if (installed_code_name != NULL) {
if (installed_code_name != nullptr) {
st.print(" (JVMCI: installed code name=%s) ", installed_code_name);
}
}
@ -2042,14 +2042,14 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint tr
, debug_id
#endif
);
if (class_name != NULL) {
if (class_name != nullptr) {
st.print(unresolved ? " unresolved class: " : " symbol: ");
class_name->print_symbol_on(&st);
}
st.cr();
tty->print_raw(st.freeze());
}
if (xtty != NULL) {
if (xtty != nullptr) {
// Log the precise location of the trap.
for (ScopeDesc* sd = trap_scope; ; sd = sd->sender()) {
xtty->begin_elem("jvms bci='%d'", sd->bci());
@ -2178,8 +2178,8 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint tr
// to use the MDO to detect hot deoptimization points and control
// aggressive optimization.
bool inc_recompile_count = false;
ProfileData* pdata = NULL;
if (ProfileTraps && CompilerConfig::is_c2_or_jvmci_compiler_enabled() && update_trap_state && trap_mdo != NULL) {
ProfileData* pdata = nullptr;
if (ProfileTraps && CompilerConfig::is_c2_or_jvmci_compiler_enabled() && update_trap_state && trap_mdo != nullptr) {
assert(trap_mdo == get_method_data(current, profiled_method, false), "sanity");
uint this_trap_count = 0;
bool maybe_prior_trap = false;
@ -2260,7 +2260,7 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint tr
return; // the call did not change nmethod's state
}
if (pdata != NULL) {
if (pdata != nullptr) {
// Record the recompilation event, if any.
int tstate0 = pdata->trap_state();
int tstate1 = trap_state_set_recompiled(tstate0, true);
@ -2273,14 +2273,14 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint tr
// is recompiled for a reason other than RTM state change.
// Assume that in new recompiled code the statistic could be different,
// for example, due to different inlining.
if ((reason != Reason_rtm_state_change) && (trap_mdo != NULL) &&
if ((reason != Reason_rtm_state_change) && (trap_mdo != nullptr) &&
UseRTMDeopt && (nm->as_nmethod()->rtm_state() != ProfileRTM)) {
trap_mdo->atomic_set_rtm_state(ProfileRTM);
}
#endif
// For code aging we count traps separately here, using make_not_entrant()
// as a guard against simultaneous deopts in multiple threads.
if (reason == Reason_tenured && trap_mdo != NULL) {
if (reason == Reason_tenured && trap_mdo != nullptr) {
trap_mdo->inc_tenure_traps();
}
}
@ -2350,7 +2350,7 @@ Deoptimization::query_update_method_data(MethodData* trap_mdo,
maybe_prior_trap = (prior_trap_count != 0);
maybe_prior_recompile = (trap_mdo->decompile_count() != 0);
}
ProfileData* pdata = NULL;
ProfileData* pdata = nullptr;
// For reasons which are recorded per bytecode, we check per-BCI data.
@ -2360,11 +2360,11 @@ Deoptimization::query_update_method_data(MethodData* trap_mdo,
// Find the profile data for this BCI. If there isn't one,
// try to allocate one from the MDO's set of spares.
// This will let us detect a repeated trap at this point.
pdata = trap_mdo->allocate_bci_to_data(trap_bci, reason_is_speculate(reason) ? compiled_method : NULL);
pdata = trap_mdo->allocate_bci_to_data(trap_bci, reason_is_speculate(reason) ? compiled_method : nullptr);
if (pdata != NULL) {
if (pdata != nullptr) {
if (reason_is_speculate(reason) && !pdata->is_SpeculativeTrapData()) {
if (LogCompilation && xtty != NULL) {
if (LogCompilation && xtty != nullptr) {
ttyLocker ttyl;
// no more room for speculative traps in this MDO
xtty->elem("speculative_traps_oom");
@ -2385,7 +2385,7 @@ Deoptimization::query_update_method_data(MethodData* trap_mdo,
if (tstate1 != tstate0)
pdata->set_trap_state(tstate1);
} else {
if (LogCompilation && xtty != NULL) {
if (LogCompilation && xtty != nullptr) {
ttyLocker ttyl;
// Missing MDP? Leave a small complaint in the log.
xtty->elem("missing_mdp bci='%d'", trap_bci);
@ -2416,7 +2416,7 @@ Deoptimization::update_method_data_from_interpreter(MethodData* trap_mdo, int tr
#if INCLUDE_JVMCI
false,
#endif
NULL,
nullptr,
ignore_this_trap_count,
ignore_maybe_prior_trap,
ignore_maybe_prior_recompile);
@ -2640,14 +2640,14 @@ void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
_deoptimization_hist[Reason_none][0][0] += 1; // total
_deoptimization_hist[reason][0][0] += 1; // per-reason total
juint* cases = _deoptimization_hist[reason][1+action];
juint* bc_counter_addr = NULL;
juint* bc_counter_addr = nullptr;
juint bc_counter = 0;
// Look for an unused counter, or an exact match to this BC.
if (bc != Bytecodes::_illegal) {
for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
juint* counter_addr = &cases[bc_case];
juint counter = *counter_addr;
if ((counter == 0 && bc_counter_addr == NULL)
if ((counter == 0 && bc_counter_addr == nullptr)
|| (Bytecodes::Code)(counter & LSB_MASK) == bc) {
// this counter is either free or is already devoted to this BC
bc_counter_addr = counter_addr;
@ -2655,7 +2655,7 @@ void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
}
}
}
if (bc_counter_addr == NULL) {
if (bc_counter_addr == nullptr) {
// Overflow, or no given bytecode.
bc_counter_addr = &cases[BC_CASE_LIMIT-1];
bc_counter = (*bc_counter_addr & ~LSB_MASK); // clear LSB
@ -2672,14 +2672,14 @@ jint Deoptimization::total_deoptimization_count() {
// deoptimizations with the specific 'action' or 'reason' respectively.
// If both arguments are null, the method returns the total deopt count.
jint Deoptimization::deoptimization_count(const char *reason_str, const char *action_str) {
if (reason_str == NULL && action_str == NULL) {
if (reason_str == nullptr && action_str == nullptr) {
return total_deoptimization_count();
}
juint counter = 0;
for (int reason = 0; reason < Reason_LIMIT; reason++) {
if (reason_str == NULL || !strcmp(reason_str, trap_reason_name(reason))) {
if (reason_str == nullptr || !strcmp(reason_str, trap_reason_name(reason))) {
for (int action = 0; action < Action_LIMIT; action++) {
if (action_str == NULL || !strcmp(action_str, trap_action_name(action))) {
if (action_str == nullptr || !strcmp(action_str, trap_action_name(action))) {
juint* cases = _deoptimization_hist[reason][1+action];
for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
counter += cases[bc_case] >> LSB_BITS;
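Example calls (a sketch; the strings must match trap_reason_name()/trap_action_name() output, and "unstable_if" is assumed here to be such a reason name):

jint total    = Deoptimization::deoptimization_count(nullptr, nullptr);       // all deopts
jint unstable = Deoptimization::deoptimization_count("unstable_if", nullptr); // one reason, any action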
@ -2696,7 +2696,7 @@ void Deoptimization::print_statistics() {
juint account = total;
if (total != 0) {
ttyLocker ttyl;
if (xtty != NULL) xtty->head("statistics type='deoptimization'");
if (xtty != nullptr) xtty->head("statistics type='deoptimization'");
tty->print_cr("Deoptimization traps recorded:");
#define PRINT_STAT_LINE(name, r) \
tty->print_cr(" %4d (%4.1f%%) %s", (int)(r), ((r) * 100.0) / total, name);
@ -2728,7 +2728,7 @@ void Deoptimization::print_statistics() {
PRINT_STAT_LINE("unaccounted", account);
}
#undef PRINT_STAT_LINE
if (xtty != NULL) xtty->tail("statistics");
if (xtty != nullptr) xtty->tail("statistics");
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -152,7 +152,7 @@ class Deoptimization : AllStatic {
// activations using those nmethods. If an nmethod is passed as an argument then it is
// marked_for_deoptimization and made not_entrant. Otherwise a scan of the code cache is done to
// find all marked nmethods and they are made not_entrant.
static void deoptimize_all_marked(nmethod* nmethod_only = NULL);
static void deoptimize_all_marked(nmethod* nmethod_only = nullptr);
public:
// Deoptimizes a frame lazily. Deopt happens on return to the frame.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -54,7 +54,7 @@ bool EscapeBarrier::objs_are_deoptimized(JavaThread* thread, intptr_t* fr_id) {
// first/oldest update holds the flag
GrowableArrayView<jvmtiDeferredLocalVariableSet*>* list = JvmtiDeferredUpdates::deferred_locals(thread);
bool result = false;
if (list != NULL) {
if (list != nullptr) {
for (int i = 0; i < list->length(); i++) {
if (list->at(i)->matches(fr_id)) {
result = list->at(i)->objects_are_deoptimized();
@ -88,12 +88,12 @@ bool EscapeBarrier::deoptimize_objects(int d1, int d2) {
int cur_depth = 0;
// Skip frames at depth < d1
while (vf != NULL && cur_depth < d1) {
while (vf != nullptr && cur_depth < d1) {
cur_depth++;
vf = vf->sender();
}
while (vf != NULL && ((cur_depth <= d2) || !vf->is_entry_frame())) {
while (vf != nullptr && ((cur_depth <= d2) || !vf->is_entry_frame())) {
if (vf->is_compiled_frame()) {
compiledVFrame* cvf = compiledVFrame::cast(vf);
// Deoptimize frame and local objects if any exist.
@ -125,7 +125,7 @@ bool EscapeBarrier::deoptimize_objects_all_threads() {
for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
oop vt_oop = jt->jvmti_vthread();
// Skip virtual threads
if (vt_oop != NULL && java_lang_VirtualThread::is_instance(vt_oop)) {
if (vt_oop != nullptr && java_lang_VirtualThread::is_instance(vt_oop)) {
continue;
}
if (jt->frames_to_pop_failed_realloc() > 0) {
@ -143,7 +143,7 @@ bool EscapeBarrier::deoptimize_objects_all_threads() {
assert(jt->frame_anchor()->walkable(),
"The stack of JavaThread " PTR_FORMAT " is not walkable. Thread state is %d",
p2i(jt), jt->thread_state());
while (vf != NULL) {
while (vf != nullptr) {
if (vf->is_compiled_frame()) {
compiledVFrame* cvf = compiledVFrame::cast(vf);
if ((cvf->has_ea_local_in_scope() || cvf->arg_escape()) &&
@ -174,8 +174,8 @@ class EscapeBarrierSuspendHandshake : public HandshakeClosure {
};
void EscapeBarrier::sync_and_suspend_one() {
assert(_calling_thread != NULL, "calling thread must not be NULL");
assert(_deoptee_thread != NULL, "deoptee thread must not be NULL");
assert(_calling_thread != nullptr, "calling thread must not be null");
assert(_deoptee_thread != nullptr, "deoptee thread must not be null");
assert(barrier_active(), "should not call");
// Sync with other threads that might be doing deoptimizations
@ -205,7 +205,7 @@ void EscapeBarrier::sync_and_suspend_one() {
void EscapeBarrier::sync_and_suspend_all() {
assert(barrier_active(), "should not call");
assert(_calling_thread != NULL, "calling thread must not be NULL");
assert(_calling_thread != nullptr, "calling thread must not be null");
assert(all_threads(), "sanity");
// Sync with other threads that might be doing deoptimizations
@ -306,7 +306,7 @@ static void set_objs_are_deoptimized(JavaThread* thread, intptr_t* fr_id) {
GrowableArrayView<jvmtiDeferredLocalVariableSet*>* list =
JvmtiDeferredUpdates::deferred_locals(thread);
DEBUG_ONLY(bool found = false);
if (list != NULL) {
if (list != nullptr) {
for (int i = 0; i < list->length(); i++) {
if (list->at(i)->matches(fr_id)) {
DEBUG_ONLY(found = true);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -79,7 +79,7 @@ public:
// Revert ea based optimizations for all java threads
EscapeBarrier(bool barrier_active, JavaThread* calling_thread)
: _calling_thread(calling_thread), _deoptee_thread(NULL),
: _calling_thread(calling_thread), _deoptee_thread(nullptr),
_barrier_active(barrier_active && (JVMCI_ONLY(UseJVMCICompiler) NOT_JVMCI(false)
COMPILER2_PRESENT(|| DoEscapeAnalysis)))
{
@ -130,7 +130,7 @@ public:
}
// Should revert optimizations for all threads.
bool all_threads() const { return _deoptee_thread == NULL; }
bool all_threads() const { return _deoptee_thread == nullptr; }
// Current thread deoptimizes its own objects.
bool self_deopt() const { return _calling_thread == _deoptee_thread; }

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,7 +37,7 @@
Symbol* fieldDescriptor::generic_signature() const {
if (!has_generic_signature()) {
return NULL;
return nullptr;
}
int idx = 0;
@ -61,16 +61,16 @@ bool fieldDescriptor::is_trusted_final() const {
AnnotationArray* fieldDescriptor::annotations() const {
InstanceKlass* ik = field_holder();
Array<AnnotationArray*>* md = ik->fields_annotations();
if (md == NULL)
return NULL;
if (md == nullptr)
return nullptr;
return md->at(index());
}
AnnotationArray* fieldDescriptor::type_annotations() const {
InstanceKlass* ik = field_holder();
Array<AnnotationArray*>* type_annos = ik->fields_type_annotations();
if (type_annos == NULL)
return NULL;
if (type_annos == nullptr)
return nullptr;
return type_annos->at(index());
}
@ -185,17 +185,17 @@ void fieldDescriptor::print_on_for(outputStream* st, oop obj) {
st->print("%s", obj->bool_field(offset()) ? "true" : "false");
break;
case T_ARRAY:
if (obj->obj_field(offset()) != NULL) {
if (obj->obj_field(offset()) != nullptr) {
obj->obj_field(offset())->print_value_on(st);
} else {
st->print("NULL");
st->print("nullptr");
}
break;
case T_OBJECT:
if (obj->obj_field(offset()) != NULL) {
if (obj->obj_field(offset()) != nullptr) {
obj->obj_field(offset())->print_value_on(st);
} else {
st->print("NULL");
st->print("nullptr");
}
break;
default:

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -59,7 +59,7 @@
range, \
constraint) \
\
product(ccstr, DummyManageableStringFlag, NULL, MANAGEABLE, \
product(ccstr, DummyManageableStringFlag, nullptr, MANAGEABLE, \
"Dummy flag for testing string handling in WriteableFlags") \
\
product(bool, TestFlagFor_bool, false, \

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -98,7 +98,7 @@ void JVMFlag::set_product() {
assert(is_product(), "sanity");
}
// Get custom message for this locked flag, or NULL if
// Get custom message for this locked flag, or null if
// none is available. Returns message type produced.
JVMFlag::MsgType JVMFlag::get_locked_message(char* buf, int buflen) const {
buf[0] = '\0';
@ -225,9 +225,9 @@ void JVMFlag::print_on(outputStream* st, bool withComments, bool printRanges) co
} else if (is_ccstr()) {
// Honor <newline> characters in ccstr: print multiple lines.
const char* cp = get_ccstr();
if (cp != NULL) {
if (cp != nullptr) {
const char* eol;
while ((eol = strchr(cp, '\n')) != NULL) {
while ((eol = strchr(cp, '\n')) != nullptr) {
size_t llen = pointer_delta(eol, cp, sizeof(char));
st->print("%.*s", (int)llen, cp);
st->cr();
@ -429,7 +429,7 @@ void JVMFlag::print_as_flag(outputStream* st) const {
} else if (is_ccstr()) {
st->print("-XX:%s=", _name);
const char* cp = get_ccstr();
if (cp != NULL) {
if (cp != nullptr) {
// Need to turn embedded '\n's back into separate arguments
// Not so efficient to print one character at a time,
// but the choice is to do the transformation to a buffer
@ -460,7 +460,7 @@ const char* JVMFlag::flag_error_str(JVMFlag::Error error) {
case JVMFlag::INVALID_FLAG: return "INVALID_FLAG";
case JVMFlag::ERR_OTHER: return "ERR_OTHER";
case JVMFlag::SUCCESS: return "SUCCESS";
default: ShouldNotReachHere(); return "NULL";
default: ShouldNotReachHere(); return "nullptr";
}
}
@ -543,7 +543,7 @@ const int EXPERIMENTAL = JVMFlag::KIND_EXPERIMENTAL;
static JVMFlag flagTable[NUM_JVMFlagsEnum + 1] = {
MATERIALIZE_ALL_FLAGS
JVMFlag() // The iteration code wants a flag with a NULL name at the end of the table.
JVMFlag() // The iteration code wants a flag with a null name at the end of the table.
};
// We want flagTable[] to be completely initialized at C++ compilation time, which requires
@ -572,33 +572,33 @@ const int JVMFlag::type_signatures[] = {
// Search the flag table for a named flag
JVMFlag* JVMFlag::find_flag(const char* name, size_t length, bool allow_locked, bool return_flag) {
JVMFlag* flag = JVMFlagLookup::find(name, length);
if (flag != NULL) {
if (flag != nullptr) {
// Found a matching entry.
// Don't report notproduct and develop flags in product builds.
if (flag->is_constant_in_binary()) {
return (return_flag ? flag : NULL);
return (return_flag ? flag : nullptr);
}
// Report locked flags only if allowed.
if (!(flag->is_unlocked() || flag->is_unlocker())) {
if (!allow_locked) {
// disable use of locked flags, e.g. diagnostic, experimental,
// etc. until they are explicitly unlocked
return NULL;
return nullptr;
}
}
return flag;
}
// JVMFlag name is not in the flag table
return NULL;
return nullptr;
}
JVMFlag* JVMFlag::fuzzy_match(const char* name, size_t length, bool allow_locked) {
float VMOptionsFuzzyMatchSimilarity = 0.7f;
JVMFlag* match = NULL;
JVMFlag* match = nullptr;
float score;
float max_score = -1;
for (JVMFlag* current = &flagTable[0]; current->_name != NULL; current++) {
for (JVMFlag* current = &flagTable[0]; current->_name != nullptr; current++) {
score = StringUtils::similarity(current->_name, strlen(current->_name), name, length);
if (score > max_score) {
max_score = score;
@ -606,18 +606,18 @@ JVMFlag* JVMFlag::fuzzy_match(const char* name, size_t length, bool allow_locked
}
}
if (match == NULL) {
return NULL;
if (match == nullptr) {
return nullptr;
}
if (!(match->is_unlocked() || match->is_unlocker())) {
if (!allow_locked) {
return NULL;
return nullptr;
}
}
if (max_score < VMOptionsFuzzyMatchSimilarity) {
return NULL;
return nullptr;
}
return match;
@ -690,7 +690,7 @@ void JVMFlag::assert_valid_flag_enum(JVMFlagsEnum i) {
}
void JVMFlag::check_all_flag_declarations() {
for (JVMFlag* current = &flagTable[0]; current->_name != NULL; current++) {
for (JVMFlag* current = &flagTable[0]; current->_name != nullptr; current++) {
int flags = static_cast<int>(current->_flags);
// Backwards compatibility. This will be relaxed/removed in JDK-7123237.
int mask = JVMFlag::KIND_DIAGNOSTIC | JVMFlag::KIND_MANAGEABLE | JVMFlag::KIND_EXPERIMENTAL;
@ -728,7 +728,7 @@ void JVMFlag::printFlags(outputStream* out, bool withComments, bool printRanges,
// Sort
JVMFlag** array = NEW_C_HEAP_ARRAY_RETURN_NULL(JVMFlag*, length, mtArguments);
if (array != NULL) {
if (array != nullptr) {
for (size_t i = 0; i < length; i++) {
array[i] = &flagTable[i];
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -63,7 +63,7 @@ public:
JVMFlag::Error check_constraint_and_set(JVMFlag* flag, void* value_addr, JVMFlagOrigin origin, bool verbose) const {
T value = *((T*)value_addr);
const JVMTypedFlagLimit<T>* constraint = (const JVMTypedFlagLimit<T>*)JVMFlagLimit::get_constraint(flag);
if (constraint != NULL && constraint->phase() <= static_cast<int>(JVMFlagLimit::validating_phase())) {
if (constraint != nullptr && constraint->phase() <= static_cast<int>(JVMFlagLimit::validating_phase())) {
JVMFlag::Error err = typed_check_constraint(constraint->constraint_func(), value, verbose);
if (err != JVMFlag::SUCCESS) {
return err;
@ -107,7 +107,7 @@ public:
bool verbose = JVMFlagLimit::verbose_checks_needed();
const JVMTypedFlagLimit<T>* range = (const JVMTypedFlagLimit<T>*)JVMFlagLimit::get_range(flag);
if (range != NULL) {
if (range != nullptr) {
if ((value < range->min()) || (value > range->max())) {
range_error(flag->name(), value, range->min(), range->max(), verbose);
return JVMFlag::OUT_OF_BOUNDS;
@ -119,7 +119,7 @@ public:
virtual JVMFlag::Error check_range(const JVMFlag* flag, bool verbose) const {
const JVMTypedFlagLimit<T>* range = (const JVMTypedFlagLimit<T>*)JVMFlagLimit::get_range(flag);
if (range != NULL) {
if (range != nullptr) {
T value = flag->read<T>();
if ((value < range->min()) || (value > range->max())) {
range_error(flag->name(), value, range->min(), range->max(), verbose);
@ -301,16 +301,16 @@ JVMFlag::Error JVMFlagAccess::set_impl(JVMFlag* flag, void* value, JVMFlagOrigin
}
JVMFlag::Error JVMFlagAccess::set_ccstr(JVMFlag* flag, ccstr* value, JVMFlagOrigin origin) {
if (flag == NULL) return JVMFlag::INVALID_FLAG;
if (flag == nullptr) return JVMFlag::INVALID_FLAG;
if (!flag->is_ccstr()) return JVMFlag::WRONG_FORMAT;
ccstr old_value = flag->get_ccstr();
trace_flag_changed<ccstr, EventStringFlagChanged>(flag, old_value, *value, origin);
char* new_value = NULL;
if (*value != NULL) {
char* new_value = nullptr;
if (*value != nullptr) {
new_value = os::strdup_check_oom(*value);
}
flag->set_ccstr(new_value);
if (!flag->is_default() && old_value != NULL) {
if (!flag->is_default() && old_value != nullptr) {
// Old value is heap allocated so free it.
FREE_C_HEAP_ARRAY(char, old_value);
}
@ -318,7 +318,7 @@ JVMFlag::Error JVMFlagAccess::set_ccstr(JVMFlag* flag, ccstr* value, JVMFlagOrig
// The callers typically don't care what the old value is.
// If the caller really wants to know the old value, read it (and make a copy if necessary)
// before calling this API.
*value = NULL;
*value = nullptr;
flag->set_origin(origin);
return JVMFlag::SUCCESS;
}
@ -355,11 +355,11 @@ void JVMFlagAccess::print_range(outputStream* st, const JVMFlag* flag, const JVM
void JVMFlagAccess::print_range(outputStream* st, const JVMFlag* flag) {
const JVMFlagLimit* range = JVMFlagLimit::get_range(flag);
if (range != NULL) {
if (range != nullptr) {
print_range(st, flag, range);
} else {
const JVMFlagLimit* limit = JVMFlagLimit::get_constraint(flag);
if (limit != NULL) {
if (limit != nullptr) {
void* func = limit->constraint_func();
// Two special cases where the lower limit of the range is defined by an os:: function call

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -41,7 +41,7 @@ class outputStream;
// /* If you use a wrong type, a run-time assertion will happen */
// intx v = flag->read<intx>();
//
// /* If you use a wrong type, or a NULL flag, an error code is returned */
// /* If you use a wrong type, or a null flag, an error code is returned */
// JVMFlag::Error err = JVMFlagAccess::get<JVM_FLAG_TYPE(intx)>(flag, &v, origin);
#define JVM_FLAG_TYPE(t) \
@ -80,7 +80,7 @@ public:
// set<double, JVMFlag::TYPE_int>(flag, double_ptr);
assert(JVMFlag::is_compatible_type<T>(type_enum), "must be");
if (flag == NULL) {
if (flag == nullptr) {
return JVMFlag::INVALID_FLAG;
}
if (!is_correct_type(flag, type_enum)) {
@ -110,7 +110,7 @@ public:
// set<double, JVMFlag::TYPE_int>(flag, double_ptr);
assert(JVMFlag::is_compatible_type<T>(type_enum), "must be");
if (flag == NULL) {
if (flag == nullptr) {
return JVMFlag::INVALID_FLAG;
}
if (!is_correct_type(flag, type_enum)) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -66,14 +66,14 @@ struct DummyLimit {
template <typename T>
class LimitGetter {
public:
// These functions return NULL for develop flags in a PRODUCT build
// These functions return null for develop flags in a PRODUCT build
static constexpr const JVMFlagLimit* no_limit(...) {
return NULL;
return nullptr;
}
// This is for flags that have neither range nor constraint. We don't need the JVMFlagLimit struct.
static constexpr const JVMFlagLimit* get_limit(const JVMTypedFlagLimit<T>* p, int dummy) {
return NULL;
return nullptr;
}
static constexpr const JVMFlagLimit* get_limit(const JVMTypedFlagLimit<T>* p, int dummy, T min, T max) {
@ -123,7 +123,7 @@ constexpr JVMTypedFlagLimit<int> limit_dummy
static constexpr const JVMFlagLimit* const flagLimitTable[1 + NUM_JVMFlagsEnum] = {
// Because FLAG_LIMIT_PTR must start with an "),", we have to place a dummy element here.
LimitGetter<int>::get_limit(NULL, 0
LimitGetter<int>::get_limit(nullptr, 0
#ifdef PRODUCT
ALL_FLAGS(FLAG_LIMIT_PTR_NONE,
@ -154,7 +154,7 @@ const JVMFlag* JVMFlagLimit::last_checked_flag() {
if (_last_checked != INVALID_JVMFlagsEnum) {
return JVMFlag::flag_from_enum(_last_checked);
} else {
return NULL;
return nullptr;
}
}
@ -162,7 +162,7 @@ bool JVMFlagLimit::check_all_ranges() {
bool status = true;
for (int i = 0; i < NUM_JVMFlagsEnum; i++) {
JVMFlagsEnum flag_enum = static_cast<JVMFlagsEnum>(i);
if (get_range_at(flag_enum) != NULL &&
if (get_range_at(flag_enum) != nullptr &&
JVMFlagAccess::check_range(JVMFlag::flag_from_enum(flag_enum), true) != JVMFlag::SUCCESS) {
status = false;
}
@ -179,7 +179,7 @@ bool JVMFlagLimit::check_all_constraints(JVMFlagConstraintPhase phase) {
for (int i = 0; i < NUM_JVMFlagsEnum; i++) {
JVMFlagsEnum flag_enum = static_cast<JVMFlagsEnum>(i);
const JVMFlagLimit* constraint = get_constraint_at(flag_enum);
if (constraint != NULL && constraint->phase() == static_cast<int>(phase) &&
if (constraint != nullptr && constraint->phase() == static_cast<int>(phase) &&
JVMFlagAccess::check_constraint(JVMFlag::flag_from_enum(flag_enum),
constraint->constraint_func(), true) != JVMFlag::SUCCESS) {
status = false;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -58,12 +58,12 @@ template <typename T> class JVMTypedFlagLimit;
// To query the range information of a JVMFlag:
// JVMFlagLimit::get_range(JVMFlag*)
// JVMFlagLimit::get_range_at(int flag_enum)
// If the given flag doesn't have a range, NULL is returned.
// If the given flag doesn't have a range, null is returned.
//
// To query the constraint information of a JVMFlag:
// JVMFlagLimit::get_constraint(JVMFlag*)
// JVMFlagLimit::get_constraint_at(int flag_enum)
// If the given flag doesn't have a constraint, NULL is returned.
// If the given flag doesn't have a constraint, null is returned.
class JVMFlagLimit {
short _constraint_func;
@ -85,11 +85,11 @@ protected:
private:
static const JVMFlagLimit* get_kind_at(JVMFlagsEnum flag_enum, int required_kind) {
const JVMFlagLimit* limit = at(flag_enum);
if (limit != NULL && (limit->_kind & required_kind) != 0) {
if (limit != nullptr && (limit->_kind & required_kind) != 0) {
_last_checked = flag_enum;
return limit;
} else {
return NULL;
return nullptr;
}
}
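
To make the query contract above concrete -- null for "no range", null for "no constraint" -- here is a small self-contained sketch with a hypothetical table and names; a stored limit only counts when its kind bitmask matches the query:

enum Kind : int { kRange = 1, kConstraint = 2 };

struct Limit { int kind; };

static const Limit  g_both{kRange | kConstraint};
static const Limit* g_table[] = { nullptr, &g_both };  // indexed by flag enum

static const Limit* get_kind_at(int flag_enum, int required_kind) {
  const Limit* limit = g_table[flag_enum];
  if (limit != nullptr && (limit->kind & required_kind) != 0) {
    return limit;   // flag has a limit of the requested kind
  }
  return nullptr;   // absent: caller must null-check
}

int main() {
  return get_kind_at(0, kRange) == nullptr &&
         get_kind_at(1, kConstraint) != nullptr ? 0 : 1;
}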

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -70,7 +70,7 @@ JVMFlag* JVMFlagLookup::find_impl(const char* name, size_t length) const {
flag_enum = (int)_table[flag_enum];
}
return NULL;
return nullptr;
}
JVMFlag* JVMFlagLookup::find(const char* name, size_t length) {
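
The find_impl fragment above walks a hash chain and falls off the end with nullptr. A minimal sketch of that shape (hypothetical single-bucket layout, not the real table encoding):

#include <cstring>

struct Flag { const char* name; };

static Flag g_flags[] = { {"UseFoo"}, {"UseBar"} };
static int  g_bucket  = 0;          // head of the (only) chain
static int  g_next[]  = { 1, -1 };  // flag 0 -> flag 1 -> end of chain

static Flag* find_impl(const char* name) {
  for (int i = g_bucket; i >= 0; i = g_next[i]) {
    if (std::strcmp(g_flags[i].name, name) == 0) {
      return &g_flags[i];
    }
  }
  return nullptr;  // walked off the chain: not found
}

int main() {
  return find_impl("UseBar") != nullptr && find_impl("UseBaz") == nullptr ? 0 : 1;
}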

View File

@ -66,27 +66,27 @@ RegisterMap::RegisterMap(JavaThread *thread, UpdateMap update_map, ProcessFrames
_process_frames = process_frames == ProcessFrames::include;
_walk_cont = walk_cont == WalkContinuation::include;
clear();
DEBUG_ONLY (_update_for_id = NULL;)
DEBUG_ONLY (_update_for_id = nullptr;)
NOT_PRODUCT(_skip_missing = false;)
NOT_PRODUCT(_async = false;)
if (walk_cont == WalkContinuation::include && thread != NULL && thread->last_continuation() != NULL) {
if (walk_cont == WalkContinuation::include && thread != nullptr && thread->last_continuation() != nullptr) {
_chunk = stackChunkHandle(Thread::current()->handle_area()->allocate_null_handle(), true /* dummy */);
}
_chunk_index = -1;
#ifndef PRODUCT
for (int i = 0; i < reg_count ; i++ ) _location[i] = NULL;
for (int i = 0; i < reg_count ; i++ ) _location[i] = nullptr;
#endif /* PRODUCT */
}
RegisterMap::RegisterMap(oop continuation, UpdateMap update_map) {
_thread = NULL;
_thread = nullptr;
_update_map = update_map == UpdateMap::include;
_process_frames = false;
_walk_cont = true;
clear();
DEBUG_ONLY (_update_for_id = NULL;)
DEBUG_ONLY (_update_for_id = nullptr;)
NOT_PRODUCT(_skip_missing = false;)
NOT_PRODUCT(_async = false;)
@ -94,13 +94,13 @@ RegisterMap::RegisterMap(oop continuation, UpdateMap update_map) {
_chunk_index = -1;
#ifndef PRODUCT
for (int i = 0; i < reg_count ; i++ ) _location[i] = NULL;
for (int i = 0; i < reg_count ; i++ ) _location[i] = nullptr;
#endif /* PRODUCT */
}
RegisterMap::RegisterMap(const RegisterMap* map) {
assert(map != this, "bad initialization parameter");
assert(map != NULL, "RegisterMap must be present");
assert(map != nullptr, "RegisterMap must be present");
_thread = map->thread();
_update_map = map->update_map();
_process_frames = map->process_frames();
@ -134,16 +134,16 @@ RegisterMap::RegisterMap(const RegisterMap* map) {
}
oop RegisterMap::cont() const {
return _chunk() != NULL ? _chunk()->cont() : (oop)NULL;
return _chunk() != nullptr ? _chunk()->cont() : (oop)nullptr;
}
void RegisterMap::set_stack_chunk(stackChunkOop chunk) {
assert(chunk == NULL || _walk_cont, "");
assert(chunk == NULL || _chunk.not_null(), "");
assert(chunk == nullptr || _walk_cont, "");
assert(chunk == nullptr || _chunk.not_null(), "");
if (_chunk.is_null()) return;
log_trace(continuations)("set_stack_chunk: " INTPTR_FORMAT " this: " INTPTR_FORMAT, p2i((oopDesc*)chunk), p2i(this));
_chunk.replace(chunk); // reuse handle. see comment above in the constructor
if (chunk == NULL) {
if (chunk == nullptr) {
_chunk_index = -1;
} else {
_chunk_index++;
@ -169,7 +169,7 @@ VMReg RegisterMap::find_register_spilled_here(void* p, intptr_t* sp) {
VMReg r = VMRegImpl::as_VMReg(i);
if (p == location(r, sp)) return r;
}
return NULL;
return nullptr;
}
void RegisterMap::print_on(outputStream* st) const {
@ -178,7 +178,7 @@ void RegisterMap::print_on(outputStream* st) const {
VMReg r = VMRegImpl::as_VMReg(i);
intptr_t* src = (intptr_t*) location(r, nullptr);
if (src != NULL) {
if (src != nullptr) {
r->print_on(st);
st->print(" [" INTPTR_FORMAT "] = ", p2i(src));
@ -219,7 +219,7 @@ address frame::raw_pc() const {
//
void frame::set_pc(address newpc) {
#ifdef ASSERT
if (_cb != NULL && _cb->is_nmethod()) {
if (_cb != nullptr && _cb->is_nmethod()) {
assert(!((nmethod*)_cb)->is_deopt_pc(_pc), "invariant violation");
}
#endif // ASSERT
@ -237,7 +237,7 @@ bool frame::is_ignored_frame() const {
}
bool frame::is_native_frame() const {
return (_cb != NULL &&
return (_cb != nullptr &&
_cb->is_nmethod() &&
((nmethod*)_cb)->is_native_method());
}
@ -249,11 +249,11 @@ bool frame::is_java_frame() const {
}
bool frame::is_runtime_frame() const {
return (_cb != NULL && _cb->is_runtime_stub());
return (_cb != nullptr && _cb->is_runtime_stub());
}
bool frame::is_safepoint_blob_frame() const {
return (_cb != NULL && _cb->is_safepoint_stub());
return (_cb != nullptr && _cb->is_safepoint_stub());
}
// testers
@ -286,7 +286,7 @@ JavaCallWrapper* frame::entry_frame_call_wrapper_if_safe(JavaThread* thread) con
return *jcw;
}
return NULL;
return nullptr;
}
bool frame::is_entry_frame_valid(JavaThread* thread) const {
@ -304,7 +304,7 @@ bool frame::is_entry_frame_valid(JavaThread* thread) const {
bool frame::should_be_deoptimized() const {
if (_deopt_state == is_deoptimized ||
!is_compiled_frame() ) return false;
assert(_cb != NULL && _cb->is_compiled(), "must be an nmethod");
assert(_cb != nullptr && _cb->is_compiled(), "must be an nmethod");
CompiledMethod* nm = (CompiledMethod *)_cb;
if (TraceDependencies) {
tty->print("checking (%s) ", nm->is_marked_for_deoptimization() ? "true" : "false");
@ -331,11 +331,11 @@ bool frame::can_be_deoptimized() const {
}
void frame::deoptimize(JavaThread* thread) {
assert(thread == NULL
assert(thread == nullptr
|| (thread->frame_anchor()->has_last_Java_frame() &&
thread->frame_anchor()->walkable()), "must be");
// Schedule deoptimization of an nmethod activation with this frame.
assert(_cb != NULL && _cb->is_compiled(), "must be");
assert(_cb != nullptr && _cb->is_compiled(), "must be");
// If the call site is a MethodHandle call site use the MH deopt handler.
CompiledMethod* cm = (CompiledMethod*) _cb;
@ -351,7 +351,7 @@ void frame::deoptimize(JavaThread* thread) {
assert(is_deoptimized_frame(), "must be");
#ifdef ASSERT
if (thread != NULL) {
if (thread != nullptr) {
frame check = thread->last_frame();
if (is_older(check.id())) {
RegisterMap map(thread,
@ -495,16 +495,16 @@ const char* frame::print_name() const {
if (is_deoptimized_frame()) return "Deoptimized";
return "Compiled";
}
if (sp() == NULL) return "Empty";
if (sp() == nullptr) return "Empty";
return "C";
}
void frame::print_value_on(outputStream* st, JavaThread *thread) const {
NOT_PRODUCT(address begin = pc()-40;)
NOT_PRODUCT(address end = NULL;)
NOT_PRODUCT(address end = nullptr;)
st->print("%s frame (sp=" INTPTR_FORMAT " unextended sp=" INTPTR_FORMAT, print_name(), p2i(sp()), p2i(unextended_sp()));
if (sp() != NULL)
if (sp() != nullptr)
st->print(", fp=" INTPTR_FORMAT ", real_fp=" INTPTR_FORMAT ", pc=" INTPTR_FORMAT,
p2i(fp()), p2i(real_fp()), p2i(pc()));
st->print_cr(")");
@ -515,7 +515,7 @@ void frame::print_value_on(outputStream* st, JavaThread *thread) const {
NOT_PRODUCT(begin = desc->begin(); end = desc->end();)
} else if (Interpreter::contains(pc())) {
InterpreterCodelet* desc = Interpreter::codelet_containing(pc());
if (desc != NULL) {
if (desc != nullptr) {
st->print("~");
desc->print_on(st);
NOT_PRODUCT(begin = desc->code_begin(); end = desc->code_end();)
@ -525,10 +525,10 @@ void frame::print_value_on(outputStream* st, JavaThread *thread) const {
}
#ifndef PRODUCT
if (_cb != NULL) {
if (_cb != nullptr) {
st->print(" ");
_cb->print_value_on(st);
if (end == NULL) {
if (end == nullptr) {
begin = _cb->code_begin();
end = _cb->code_end();
}
@ -538,7 +538,7 @@ void frame::print_value_on(outputStream* st, JavaThread *thread) const {
}
void frame::print_on(outputStream* st) const {
print_value_on(st,NULL);
print_value_on(st,nullptr);
if (is_interpreted_frame()) {
interpreter_frame_print_on(st);
}
@ -599,7 +599,7 @@ void frame::print_C_frame(outputStream* st, char* buf, int buflen, address pc) {
int offset;
bool found;
if (buf == NULL || buflen < 1) return;
if (buf == nullptr || buflen < 1) return;
// libname
buf[0] = '\0';
found = os::dll_address_to_library_name(pc, buf, buflen, &offset);
@ -608,7 +608,7 @@ void frame::print_C_frame(outputStream* st, char* buf, int buflen, address pc) {
const char *p1, *p2;
p1 = buf;
int len = (int)strlen(os::file_separator());
while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
while ((p2 = strstr(p1, os::file_separator())) != nullptr) p1 = p2 + len;
st->print(" [%s+0x%x]", p1, offset);
} else {
st->print(" " PTR_FORMAT, p2i(pc));
@ -635,10 +635,10 @@ void frame::print_C_frame(outputStream* st, char* buf, int buflen, address pc) {
// suggests the problem is in user lib; everything else is likely a VM bug.
void frame::print_on_error(outputStream* st, char* buf, int buflen, bool verbose) const {
if (_cb != NULL) {
if (_cb != nullptr) {
if (Interpreter::contains(pc())) {
Method* m = this->interpreter_frame_method();
if (m != NULL) {
if (m != nullptr) {
m->name_and_sig_as_C_string(buf, buflen);
st->print("j %s", buf);
st->print("+%d", this->interpreter_frame_bci());
@ -646,7 +646,7 @@ void frame::print_on_error(outputStream* st, char* buf, int buflen, bool verbose
if (module->is_named()) {
module->name()->as_C_string(buf, buflen);
st->print(" %s", buf);
if (module->version() != NULL) {
if (module->version() != nullptr) {
module->version()->as_C_string(buf, buflen);
st->print("@%s", buf);
}
@ -656,7 +656,7 @@ void frame::print_on_error(outputStream* st, char* buf, int buflen, bool verbose
}
} else if (StubRoutines::contains(pc())) {
StubCodeDesc* desc = StubCodeDesc::desc_for(pc());
if (desc != NULL) {
if (desc != nullptr) {
st->print("v ~StubRoutines::%s " PTR_FORMAT, desc->name(), p2i(pc()));
} else {
st->print("v ~StubRoutines::" PTR_FORMAT, p2i(pc()));
@ -666,7 +666,7 @@ void frame::print_on_error(outputStream* st, char* buf, int buflen, bool verbose
} else if (_cb->is_compiled()) {
CompiledMethod* cm = (CompiledMethod*)_cb;
Method* m = cm->method();
if (m != NULL) {
if (m != nullptr) {
if (cm->is_nmethod()) {
nmethod* nm = cm->as_nmethod();
st->print("J %d%s", nm->compile_id(), (nm->is_osr_method() ? "%" : ""));
@ -678,7 +678,7 @@ void frame::print_on_error(outputStream* st, char* buf, int buflen, bool verbose
if (module->is_named()) {
module->name()->as_C_string(buf, buflen);
st->print(" %s", buf);
if (module->version() != NULL) {
if (module->version() != nullptr) {
module->version()->as_C_string(buf, buflen);
st->print("@%s", buf);
}
@ -689,7 +689,7 @@ void frame::print_on_error(outputStream* st, char* buf, int buflen, bool verbose
if (cm->is_nmethod()) {
nmethod* nm = cm->as_nmethod();
const char* jvmciName = nm->jvmci_name();
if (jvmciName != NULL) {
if (jvmciName != nullptr) {
st->print(" (%s)", jvmciName);
}
}
@ -847,7 +847,7 @@ class EntryFrameOopFinder: public SignatureIterator {
public:
EntryFrameOopFinder(const frame* frame, Symbol* signature, bool is_static) : SignatureIterator(signature) {
_f = NULL; // will be set later
_f = nullptr; // will be set later
_fr = frame;
_is_static = is_static;
_offset = ArgumentSizeComputer(signature).size(); // pre-decremented down to zero
@ -908,7 +908,7 @@ void frame::oops_interpreted_do(OopClosure* f, const RegisterMap* map, bool quer
int max_locals = m->is_native() ? m->size_of_parameters() : m->max_locals();
Symbol* signature = NULL;
Symbol* signature = nullptr;
bool has_receiver = false;
// Process a callee's arguments if we are at a call site
@ -957,10 +957,10 @@ void frame::oops_interpreted_arguments_do(Symbol* signature, bool has_receiver,
}
void frame::oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, DerivedOopClosure* df, DerivedPointerIterationMode derived_mode, const RegisterMap* reg_map) const {
assert(_cb != NULL, "sanity check");
assert((oop_map() == NULL) == (_cb->oop_maps() == NULL), "frame and _cb must agree that oopmap is set or not");
if (oop_map() != NULL) {
if (df != NULL) {
assert(_cb != nullptr, "sanity check");
assert((oop_map() == nullptr) == (_cb->oop_maps() == nullptr), "frame and _cb must agree that oopmap is set or not");
if (oop_map() != nullptr) {
if (df != nullptr) {
_oop_map->oops_do(this, reg_map, f, df);
} else {
_oop_map->oops_do(this, reg_map, f, derived_mode);
@ -977,7 +977,7 @@ void frame::oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, DerivedOopClos
// prevent them from being collected. However, this visit should be
// restricted to certain phases of the collection only. The
// closure decides how it wants nmethods to be traced.
if (cf != NULL)
if (cf != nullptr)
cf->do_code_blob(_cb);
}
@ -1004,13 +1004,13 @@ class CompiledArgumentOopFinder: public SignatureIterator {
VMReg reg = _regs[_offset].first();
oop *loc = _fr.oopmapreg_to_oop_location(reg, _reg_map);
#ifdef ASSERT
if (loc == NULL) {
if (loc == nullptr) {
if (_reg_map->should_skip_missing()) {
return;
}
tty->print_cr("Error walking frame oops:");
_fr.print_on(tty);
assert(loc != NULL, "missing register map entry reg: " INTPTR_FORMAT " %s loc: " INTPTR_FORMAT, reg->value(), reg->name(), p2i(loc));
assert(loc != nullptr, "missing register map entry reg: " INTPTR_FORMAT " %s loc: " INTPTR_FORMAT, reg->value(), reg->name(), p2i(loc));
}
#endif
_f->do_oop(loc);
@ -1066,9 +1066,9 @@ oop frame::retrieve_receiver(RegisterMap* reg_map) {
// First consult the ADLC on where it puts parameter 0 for this signature.
VMReg reg = SharedRuntime::name_for_receiver();
oop* oop_adr = caller.oopmapreg_to_oop_location(reg, reg_map);
if (oop_adr == NULL) {
guarantee(oop_adr != NULL, "bad register save location");
return NULL;
if (oop_adr == nullptr) {
guarantee(oop_adr != nullptr, "bad register save location");
return nullptr;
}
oop r = *oop_adr;
assert(Universe::heap()->is_in_or_null(r), "bad receiver: " INTPTR_FORMAT " (" INTX_FORMAT ")", p2i(r), p2i(r));
@ -1078,7 +1078,7 @@ oop frame::retrieve_receiver(RegisterMap* reg_map) {
BasicLock* frame::get_native_monitor() {
nmethod* nm = (nmethod*)_cb;
assert(_cb != NULL && _cb->is_nmethod() && nm->method()->is_native(),
assert(_cb != nullptr && _cb->is_nmethod() && nm->method()->is_native(),
"Should not call this unless it's a native nmethod");
int byte_offset = in_bytes(nm->native_basic_lock_sp_offset());
assert(byte_offset >= 0, "should not see invalid offset");
@ -1087,7 +1087,7 @@ BasicLock* frame::get_native_monitor() {
oop frame::get_native_receiver() {
nmethod* nm = (nmethod*)_cb;
assert(_cb != NULL && _cb->is_nmethod() && nm->method()->is_native(),
assert(_cb != nullptr && _cb->is_nmethod() && nm->method()->is_native(),
"Should not call this unless it's a native nmethod");
int byte_offset = in_bytes(nm->native_receiver_sp_offset());
assert(byte_offset >= 0, "should not see invalid offset");
@ -1097,7 +1097,7 @@ oop frame::get_native_receiver() {
}
void frame::oops_entry_do(OopClosure* f, const RegisterMap* map) const {
assert(map != NULL, "map must be set");
assert(map != nullptr, "map must be set");
if (map->include_argument_oops()) {
// must collect argument oops, as nobody else is doing it
Thread *thread = Thread::current();
@ -1119,7 +1119,7 @@ bool frame::is_deoptimized_frame() const {
* as in return address being patched.
* It doesn't care if the OP that we return to is a
* deopt instruction */
/*if (_cb != NULL && _cb->is_nmethod()) {
/*if (_cb != nullptr && _cb->is_nmethod()) {
return NativeDeoptInstruction::is_deopt_at(_pc);
}*/
return false;
@ -1131,7 +1131,7 @@ void frame::oops_do_internal(OopClosure* f, CodeBlobClosure* cf,
#ifndef PRODUCT
// simulate GC crash here to dump java thread in error report
if (CrashGCForDumpingJavaThread) {
char *t = NULL;
char *t = nullptr;
*t = 'c';
}
#endif
@ -1149,7 +1149,7 @@ void frame::oops_do_internal(OopClosure* f, CodeBlobClosure* cf,
}
void frame::nmethods_do(CodeBlobClosure* cf) const {
if (_cb != NULL && _cb->is_nmethod()) {
if (_cb != nullptr && _cb->is_nmethod()) {
cf->do_code_blob(_cb);
}
}
@ -1160,7 +1160,7 @@ void frame::metadata_do(MetadataClosure* f) const {
ResourceMark rm;
if (is_interpreted_frame()) {
Method* m = this->interpreter_frame_method();
assert(m != NULL, "expecting a method in this frame");
assert(m != nullptr, "expecting a method in this frame");
f->do_metadata(m);
}
}
@ -1188,7 +1188,7 @@ void frame::verify(const RegisterMap* map) const {
#endif
if (map->update_map()) { // The map has to be up-to-date for the current frame
oops_do_internal(&VerifyOopClosure::verify_oop, NULL, NULL, DerivedPointerIterationMode::_ignore, map, false);
oops_do_internal(&VerifyOopClosure::verify_oop, nullptr, nullptr, DerivedPointerIterationMode::_ignore, map, false);
}
}
@ -1299,8 +1299,8 @@ public:
virtual void do_value(VMReg reg, OopMapValue::oop_types type) override {
intptr_t* p = (intptr_t*)_fr->oopmapreg_to_location(reg, _reg_map);
if (p != NULL && (((intptr_t)p & WordAlignmentMask) == 0)) {
const char* type_name = NULL;
if (p != nullptr && (((intptr_t)p & WordAlignmentMask) == 0)) {
const char* type_name = nullptr;
switch(type) {
case OopMapValue::oop_value: type_name = "oop"; break;
case OopMapValue::narrowoop_value: type_name = "narrow oop"; break;
@ -1309,7 +1309,7 @@ public:
// case OopMapValue::live_value: type_name = "live"; break;
default: break;
}
if (type_name != NULL) {
if (type_name != nullptr) {
_values.describe(_frame_no, p, err_msg("%s for #%d", type_name, _frame_no));
}
}
@ -1344,10 +1344,10 @@ void frame::describe(FrameValues& values, int frame_no, const RegisterMap* reg_m
// Label the method and current bci
values.describe(-1, info_address,
FormatBuffer<1024>("#%d method %s @ %d", frame_no, m->name_and_sig_as_C_string(), bci), 3);
if (desc != NULL) {
if (desc != nullptr) {
values.describe(-1, info_address, err_msg("- %s codelet: %s",
desc->bytecode() >= 0 ? Bytecodes::name(desc->bytecode()) : "",
desc->description() != NULL ? desc->description() : "?"), 2);
desc->description() != nullptr ? desc->description() : "?"), 2);
}
values.describe(-1, info_address,
err_msg("- %d locals %d max stack", m->max_locals(), m->max_stack()), 2);
@ -1373,20 +1373,20 @@ void frame::describe(FrameValues& values, int frame_no, const RegisterMap* reg_m
// Compute the actual expression stack size
InterpreterOopMap mask;
OopMapCache::compute_one_oop_map(methodHandle(Thread::current(), m), bci, &mask);
intptr_t* tos = NULL;
intptr_t* tos = nullptr;
// Report each stack element and mark as owned by this frame
for (int e = 0; e < mask.expression_stack_size(); e++) {
tos = MAX2(tos, interpreter_frame_expression_stack_at(e));
values.describe(frame_no, interpreter_frame_expression_stack_at(e),
err_msg("stack %d", e), 1);
}
if (tos != NULL) {
if (tos != nullptr) {
values.describe(-1, tos, err_msg("expression stack for #%d", frame_no), 2);
}
if (reg_map != NULL) {
if (reg_map != nullptr) {
FrameValuesOopClosure oopsFn;
oops_do(&oopsFn, NULL, &oopsFn, reg_map);
oops_do(&oopsFn, nullptr, &oopsFn, reg_map);
oopsFn.describe(values, frame_no);
}
} else if (is_entry_frame()) {
@ -1456,29 +1456,29 @@ void frame::describe(FrameValues& values, int frame_no, const RegisterMap* reg_m
}
}
if (reg_map != NULL && is_java_frame()) {
if (reg_map != nullptr && is_java_frame()) {
int scope_no = 0;
for (ScopeDesc* scope = cm->scope_desc_at(pc()); scope != NULL; scope = scope->sender(), scope_no++) {
for (ScopeDesc* scope = cm->scope_desc_at(pc()); scope != nullptr; scope = scope->sender(), scope_no++) {
Method* m = scope->method();
int bci = scope->bci();
values.describe(-1, info_address, err_msg("- #%d scope %s @ %d", scope_no, m->name_and_sig_as_C_string(), bci), 2);
{ // mark locals
GrowableArray<ScopeValue*>* scvs = scope->locals();
int scvs_length = scvs != NULL ? scvs->length() : 0;
int scvs_length = scvs != nullptr ? scvs->length() : 0;
for (int i = 0; i < scvs_length; i++) {
intptr_t* stack_address = (intptr_t*)StackValue::stack_value_address(this, reg_map, scvs->at(i));
if (stack_address != NULL) {
if (stack_address != nullptr) {
values.describe(frame_no, stack_address, err_msg("local %d for #%d (scope %d)", i, frame_no, scope_no), 1);
}
}
}
{ // mark expression stack
GrowableArray<ScopeValue*>* scvs = scope->expressions();
int scvs_length = scvs != NULL ? scvs->length() : 0;
int scvs_length = scvs != nullptr ? scvs->length() : 0;
for (int i = 0; i < scvs_length; i++) {
intptr_t* stack_address = (intptr_t*)StackValue::stack_value_address(this, reg_map, scvs->at(i));
if (stack_address != NULL) {
if (stack_address != nullptr) {
values.describe(frame_no, stack_address, err_msg("stack %d for #%d (scope %d)", i, frame_no, scope_no), 1);
}
}
@ -1486,10 +1486,10 @@ void frame::describe(FrameValues& values, int frame_no, const RegisterMap* reg_m
}
FrameValuesOopClosure oopsFn;
oops_do(&oopsFn, NULL, &oopsFn, reg_map);
oops_do(&oopsFn, nullptr, &oopsFn, reg_map);
oopsFn.describe(values, frame_no);
if (oop_map() != NULL) {
if (oop_map() != nullptr) {
FrameValuesOopMapClosure valuesFn(this, reg_map, values, frame_no);
// also OopMapValue::live_value ??
oop_map()->all_type_do(this, OopMapValue::callee_saved_value, &valuesFn);
@ -1509,8 +1509,8 @@ void frame::describe(FrameValues& values, int frame_no, const RegisterMap* reg_m
} else {
// provide default info if not handled before
char *info = (char *) "special frame";
if ((_cb != NULL) &&
(_cb->name() != NULL)) {
if ((_cb != nullptr) &&
(_cb->name() != nullptr)) {
info = (char *)_cb->name();
}
values.describe(-1, info_address, err_msg("#%d <%s>", frame_no, info), 2);
@ -1575,7 +1575,7 @@ void FrameValues::print_on(JavaThread* thread, outputStream* st) {
intptr_t* v0 = _values.at(min_index).location;
intptr_t* v1 = _values.at(max_index).location;
if (thread != NULL) {
if (thread != nullptr) {
if (thread == Thread::current()) {
while (!thread->is_in_live_stack((address)v0)) v0 = _values.at(++min_index).location;
while (!thread->is_in_live_stack((address)v1)) v1 = _values.at(--max_index).location;
@ -1608,7 +1608,7 @@ void FrameValues::print_on(outputStream* st, int min_index, int max_index, intpt
intptr_t* min = MIN2(v0, v1);
intptr_t* max = MAX2(v0, v1);
intptr_t* cur = max;
intptr_t* last = NULL;
intptr_t* last = nullptr;
for (int i = max_index; i >= min_index; i--) {
FrameValue fv = _values.at(i);
while (cur > fv.location) {

View File

@ -147,7 +147,7 @@ class frame {
// inline void set_cb(CodeBlob* cb);
const ImmutableOopMap* oop_map() const {
if (_oop_map == NULL) {
if (_oop_map == nullptr) {
_oop_map = get_oop_map();
}
return _oop_map;
@ -159,10 +159,10 @@ class frame {
// Every frame needs to return a unique id which distinguishes it from all other frames.
// For sparc and ia32 use sp. ia64 can have memory frames that are empty so multiple frames
// will have identical sp values. For ia64 the bsp (fp) value will serve. No real frame
// should have an id() of NULL so it is a distinguishing value for an unmatchable frame.
// should have an id() of null so it is a distinguishing value for an unmatchable frame.
// We also have relationals which allow comparing a frame to another frame's id(), allowing
// us to distinguish younger (more recent activations) from older (less recent activations)
// A NULL id is only valid when comparing for equality.
// A null id is only valid when comparing for equality.
intptr_t* id(void) const;
bool is_younger(intptr_t* id) const;
@ -175,7 +175,7 @@ class frame {
bool equal(frame other) const;
// type testers
bool is_empty() const { return _pc == NULL; }
bool is_empty() const { return _pc == nullptr; }
bool is_interpreted_frame() const;
bool is_java_frame() const;
bool is_entry_frame() const; // Java frame called from C?
@ -415,7 +415,7 @@ class frame {
void describe_pd(FrameValues& values, int frame_no);
public:
void print_value() const { print_value_on(tty,NULL); }
void print_value() const { print_value_on(tty,nullptr); }
void print_value_on(outputStream* st, JavaThread *thread) const;
void print_on(outputStream* st) const;
void interpreter_frame_print_on(outputStream* st) const;
@ -423,7 +423,7 @@ class frame {
static void print_C_frame(outputStream* st, char* buf, int buflen, address pc);
// Add annotated descriptions of memory locations belonging to this frame to values
void describe(FrameValues& values, int frame_no, const RegisterMap* reg_map=NULL);
void describe(FrameValues& values, int frame_no, const RegisterMap* reg_map=nullptr);
// Conversion from a VMReg to physical stack location
template <typename RegisterMapT>
@ -457,7 +457,7 @@ class frame {
#else
DerivedPointerIterationMode dpim = DerivedPointerIterationMode::_ignore;
#endif
oops_do_internal(f, cf, NULL, dpim, map, true);
oops_do_internal(f, cf, nullptr, dpim, map, true);
}
void oops_do(OopClosure* f, CodeBlobClosure* cf, DerivedOopClosure* df, const RegisterMap* map) {
@ -466,7 +466,7 @@ class frame {
void oops_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* map,
DerivedPointerIterationMode derived_mode) const {
oops_do_internal(f, cf, NULL, derived_mode, map, true);
oops_do_internal(f, cf, nullptr, derived_mode, map, true);
}
void nmethods_do(CodeBlobClosure* cf) const;
@ -494,8 +494,8 @@ class FrameValue {
int priority;
FrameValue() {
location = NULL;
description = NULL;
location = nullptr;
description = nullptr;
owner = -1;
priority = 0;
}
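
The id() contract spelled out in frame.hpp above fits a tiny sketch (simplified and hypothetical): no live frame ever has a null id, so null works as an unmatchable sentinel, and only equality comparison is meaningful for it.

#include <cstdint>

struct FrameId {
  intptr_t* value;  // opaque; typically the frame's sp

  bool equals(FrameId other) const { return value == other.value; }
  // Relational queries assume both ids come from real frames (stack grows down).
  bool is_older(FrameId other) const { return value > other.value; }
};

int main() {
  intptr_t stack[2];
  FrameId young{&stack[0]}, older{&stack[1]}, unmatchable{nullptr};
  return older.is_older(young)           // relational: both ids are real
      && !young.equals(unmatchable)      // sentinel: equality only
      ? 0 : 1;
}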

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -49,7 +49,7 @@ inline bool frame::is_entry_frame() const {
}
inline bool frame::is_stub_frame() const {
return StubRoutines::is_stub_code(pc()) || (_cb != NULL && _cb->is_adapter_blob());
return StubRoutines::is_stub_code(pc()) || (_cb != nullptr && _cb->is_adapter_blob());
}
inline bool frame::is_first_frame() const {
@ -59,11 +59,11 @@ inline bool frame::is_first_frame() const {
}
inline bool frame::is_upcall_stub_frame() const {
return _cb != NULL && _cb->is_upcall_stub();
return _cb != nullptr && _cb->is_upcall_stub();
}
inline bool frame::is_compiled_frame() const {
if (_cb != NULL &&
if (_cb != nullptr &&
_cb->is_compiled() &&
((CompiledMethod*)_cb)->is_java_method()) {
return true;
@ -82,7 +82,7 @@ inline address frame::oopmapreg_to_location(VMReg reg, const RegisterMapT* reg_m
return (address)((intptr_t)reg_map->as_RegisterMap()->stack_chunk()->relativize_usp_offset(*this, sp_offset_in_bytes));
}
address usp = (address)unextended_sp();
assert(reg_map->thread() == NULL || reg_map->thread()->is_in_usable_stack(usp), INTPTR_FORMAT, p2i(usp));
assert(reg_map->thread() == nullptr || reg_map->thread()->is_in_usable_stack(usp), INTPTR_FORMAT, p2i(usp));
return (usp + sp_offset_in_bytes);
}
}
@ -93,11 +93,11 @@ inline oop* frame::oopmapreg_to_oop_location(VMReg reg, const RegisterMapT* reg_
}
inline CodeBlob* frame::get_cb() const {
// if (_cb == NULL) _cb = CodeCache::find_blob(_pc);
if (_cb == NULL) {
// if (_cb == nullptr) _cb = CodeCache::find_blob(_pc);
if (_cb == nullptr) {
int slot;
_cb = CodeCache::find_blob_and_oopmap(_pc, slot);
if (_oop_map == NULL && slot >= 0) {
if (_oop_map == nullptr && slot >= 0) {
_oop_map = _cb->oop_map_for_slot(slot, _pc);
}
}

View File

@ -544,7 +544,7 @@ const int ObjectAlignmentInBytes = 8;
"Dump heap to file when java.lang.OutOfMemoryError is thrown " \
"from JVM") \
\
product(ccstr, HeapDumpPath, NULL, MANAGEABLE, \
product(ccstr, HeapDumpPath, nullptr, MANAGEABLE, \
"When HeapDumpOnOutOfMemoryError is on, the path (filename or " \
"directory) of the dump file (defaults to java_pid<pid>.hprof " \
"in the working directory)") \
@ -598,7 +598,7 @@ const int ObjectAlignmentInBytes = 8;
product(bool, PrintAssembly, false, DIAGNOSTIC, \
"Print assembly code (using external disassembler.so)") \
\
product(ccstr, PrintAssemblyOptions, NULL, DIAGNOSTIC, \
product(ccstr, PrintAssemblyOptions, nullptr, DIAGNOSTIC, \
"Print options string passed to disassembler.so") \
\
notproduct(bool, PrintNMethodStatistics, false, \
@ -626,7 +626,7 @@ const int ObjectAlignmentInBytes = 8;
"Exercise compiled exception handlers") \
\
develop(bool, InterceptOSException, false, \
"Start debugger when an implicit OS (e.g. NULL) " \
"Start debugger when an implicit OS (e.g. nullptr) " \
"exception happens") \
\
product(bool, PrintCodeCache, false, \
@ -832,7 +832,7 @@ const int ObjectAlignmentInBytes = 8;
develop(bool, StressRewriter, false, \
"Stress linktime bytecode rewriting") \
\
product(ccstr, TraceJVMTI, NULL, \
product(ccstr, TraceJVMTI, nullptr, \
"Trace flags for JVMTI functions and events") \
\
product(bool, StressLdcRewrite, false, DIAGNOSTIC, \
@ -1032,11 +1032,11 @@ const int ObjectAlignmentInBytes = 8;
product(bool, LogVMOutput, false, DIAGNOSTIC, \
"Save VM output to LogFile") \
\
product(ccstr, LogFile, NULL, DIAGNOSTIC, \
product(ccstr, LogFile, nullptr, DIAGNOSTIC, \
"If LogVMOutput or LogCompilation is on, save VM output to " \
"this file [default: ./hotspot_pid%p.log] (%p replaced with pid)")\
\
product(ccstr, ErrorFile, NULL, \
product(ccstr, ErrorFile, nullptr, \
"If an error occurs, save the error data to this file " \
"[default: ./hs_err_pid%p.log] (%p replaced with pid)") \
\
@ -1073,11 +1073,11 @@ const int ObjectAlignmentInBytes = 8;
notproduct(bool, PrintSymbolTableSizeHistogram, false, \
"print histogram of the symbol table") \
\
product(ccstr, AbortVMOnException, NULL, DIAGNOSTIC, \
product(ccstr, AbortVMOnException, nullptr, DIAGNOSTIC, \
"Call fatal if this exception is thrown. Example: " \
"java -XX:AbortVMOnException=java.lang.NullPointerException Foo") \
\
product(ccstr, AbortVMOnExceptionMessage, NULL, DIAGNOSTIC, \
product(ccstr, AbortVMOnExceptionMessage, nullptr, DIAGNOSTIC, \
"Call fatal if the exception pointed by AbortVMOnException " \
"has this message") \
\
@ -1741,7 +1741,7 @@ const int ObjectAlignmentInBytes = 8;
product(bool, PerfDataSaveToFile, false, \
"Save PerfData memory to hsperfdata_<pid> file on exit") \
\
product(ccstr, PerfDataSaveFile, NULL, \
product(ccstr, PerfDataSaveFile, nullptr, \
"Save PerfData memory to the specified absolute pathname. " \
"The string %p in the file name (if present) " \
"will be replaced by pid") \
@ -1821,7 +1821,7 @@ const int ObjectAlignmentInBytes = 8;
"Causes the VM to pause at startup time and wait for the pause " \
"file to be removed (default: ./vm.paused.<pid>)") \
\
product(ccstr, PauseAtStartupFile, NULL, DIAGNOSTIC, \
product(ccstr, PauseAtStartupFile, nullptr, DIAGNOSTIC, \
"The file to create and for whose removal to await when pausing " \
"at startup. (default: ./vm.paused.<pid>)") \
\
@ -1928,7 +1928,7 @@ const int ObjectAlignmentInBytes = 8;
range(0, max_intx) \
constraint(InitArrayShortSizeConstraintFunc, AfterErgo) \
\
product(ccstr, AllocateHeapAt, NULL, \
product(ccstr, AllocateHeapAt, nullptr, \
"Path to the directory where a temporary file will be created " \
"to use as the backing store for Java Heap.") \
\
@ -1963,10 +1963,10 @@ const int ObjectAlignmentInBytes = 8;
JFR_ONLY(product(bool, FlightRecorder, false, \
"(Deprecated) Enable Flight Recorder")) \
\
JFR_ONLY(product(ccstr, FlightRecorderOptions, NULL, \
JFR_ONLY(product(ccstr, FlightRecorderOptions, nullptr, \
"Flight Recorder options")) \
\
JFR_ONLY(product(ccstr, StartFlightRecording, NULL, \
JFR_ONLY(product(ccstr, StartFlightRecording, nullptr, \
"Start flight recording with options")) \
\
product(bool, UseFastUnorderedTimeStamps, false, EXPERIMENTAL, \

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -44,7 +44,7 @@ oop* HandleArea::allocate_handle(oop obj) {
oop* HandleArea::allocate_null_handle() {
assert_handle_mark_nesting();
return real_allocate_handle(NULL);
return real_allocate_handle(nullptr);
}
#endif
@ -53,9 +53,9 @@ oop* HandleArea::allocate_null_handle() {
#define DEF_METADATA_HANDLE_FN_NOINLINE(name, type) \
name##Handle::name##Handle(const name##Handle &h) { \
_value = h._value; \
if (_value != NULL) { \
if (_value != nullptr) { \
assert(_value->is_valid(), "obj is valid"); \
if (h._thread != NULL) { \
if (h._thread != nullptr) { \
assert(h._thread == Thread::current(), "thread must be current");\
_thread = h._thread; \
} else { \
@ -64,15 +64,15 @@ name##Handle::name##Handle(const name##Handle &h) { \
assert(_thread->is_in_live_stack((address)this), "not on stack?"); \
_thread->metadata_handles()->push((Metadata*)_value); \
} else { \
_thread = NULL; \
_thread = nullptr; \
} \
} \
name##Handle& name##Handle::operator=(const name##Handle &s) { \
remove(); \
_value = s._value; \
if (_value != NULL) { \
if (_value != nullptr) { \
assert(_value->is_valid(), "obj is valid"); \
if (s._thread != NULL) { \
if (s._thread != nullptr) { \
assert(s._thread == Thread::current(), "thread must be current");\
_thread = s._thread; \
} else { \
@ -81,12 +81,12 @@ name##Handle& name##Handle::operator=(const name##Handle &s) { \
assert(_thread->is_in_live_stack((address)this), "not on stack?"); \
_thread->metadata_handles()->push((Metadata*)_value); \
} else { \
_thread = NULL; \
_thread = nullptr; \
} \
return *this; \
} \
inline void name##Handle::remove() { \
if (_value != NULL) { \
if (_value != nullptr) { \
int i = _thread->metadata_handles()->find_from_end((Metadata*)_value); \
assert(i!=-1, "not in metadata_handles list"); \
_thread->metadata_handles()->remove_at(i); \
@ -122,7 +122,7 @@ void HandleArea::oops_do(OopClosure* f) {
k = k->next();
}
if (_prev != NULL) _prev->oops_do(f);
if (_prev != nullptr) _prev->oops_do(f);
}
void HandleMark::initialize(Thread* thread) {
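
The DEF_METADATA_HANDLE_FN_NOINLINE machinery above keeps a per-thread list of live metadata values so they can be walked; construction, copy, and assignment all maintain that list. A hedged, greatly simplified sketch of the same bookkeeping (one thread_local list, hypothetical types):

#include <algorithm>
#include <cassert>
#include <vector>

thread_local std::vector<void*> g_metadata_handles;

class MetaHandle {
  void* _value;
  void add() { if (_value != nullptr) g_metadata_handles.push_back(_value); }
  void remove() {
    if (_value == nullptr) return;
    // Search from the end, mirroring find_from_end above.
    auto it = std::find(g_metadata_handles.rbegin(), g_metadata_handles.rend(), _value);
    assert(it != g_metadata_handles.rend() && "not in metadata_handles list");
    if (it != g_metadata_handles.rend()) {
      g_metadata_handles.erase(std::next(it).base());
    }
  }
 public:
  explicit MetaHandle(void* v = nullptr) : _value(v) { add(); }
  MetaHandle(const MetaHandle& h) : _value(h._value) { add(); }
  MetaHandle& operator=(const MetaHandle& h) {
    remove();           // drop the old registration first, as the macro does
    _value = h._value;
    add();
    return *this;
  }
  ~MetaHandle() { remove(); }
  void* value() const { return _value; }
};

int main() {
  int dummy;
  MetaHandle a(&dummy);
  MetaHandle b = a;     // copy registers a second entry
  b = MetaHandle();     // reassignment unregisters b's old entry
  return g_metadata_handles.size() == 1 ? 0 : 1;
}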

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -67,12 +67,12 @@ class Handle {
oop* _handle;
protected:
oop obj() const { return _handle == NULL ? (oop)NULL : *_handle; }
oop non_null_obj() const { assert(_handle != NULL, "resolving NULL handle"); return *_handle; }
oop obj() const { return _handle == nullptr ? (oop)nullptr : *_handle; }
oop non_null_obj() const { assert(_handle != nullptr, "resolving nullptr handle"); return *_handle; }
public:
// Constructors
Handle() { _handle = NULL; }
Handle() { _handle = nullptr; }
inline Handle(Thread* thread, oop obj);
// General access
@ -85,8 +85,8 @@ class Handle {
bool operator != (const Handle& h) const { return obj() != h.obj(); }
// Null checks
bool is_null() const { return _handle == NULL; }
bool not_null() const { return _handle != NULL; }
bool is_null() const { return _handle == nullptr; }
bool not_null() const { return _handle != nullptr; }
// Debugging
void print() { obj()->print(); }
@ -99,7 +99,7 @@ class Handle {
// Raw handle access. Allows easy duplication of Handles. This can be very unsafe
// since duplicates is only valid as long as original handle is alive.
oop* raw_value() const { return _handle; }
static oop raw_resolve(oop *handle) { return handle == NULL ? (oop)NULL : *handle; }
static oop raw_resolve(oop *handle) { return handle == nullptr ? (oop)nullptr : *handle; }
inline void replace(oop obj);
};
@ -144,11 +144,11 @@ DEF_HANDLE(typeArray , is_typeArray_noinline )
Thread* _thread; \
protected: \
type* obj() const { return _value; } \
type* non_null_obj() const { assert(_value != NULL, "resolving NULL _value"); return _value; } \
type* non_null_obj() const { assert(_value != nullptr, "resolving nullptr _value"); return _value; } \
\
public: \
/* Constructors */ \
name##Handle () : _value(NULL), _thread(NULL) {} \
name##Handle () : _value(nullptr), _thread(nullptr) {} \
name##Handle (Thread* thread, type* obj); \
\
name##Handle (const name##Handle &h); \
@ -166,8 +166,8 @@ DEF_HANDLE(typeArray , is_typeArray_noinline )
bool operator == (const name##Handle& h) const { return obj() == h.obj(); } \
\
/* Null checks */ \
bool is_null() const { return _value == NULL; } \
bool not_null() const { return _value != NULL; } \
bool is_null() const { return _value == nullptr; } \
bool not_null() const { return _value != nullptr; } \
};
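
The Handle class above is a double indirection: the handle stores the address of a slot that holds the object pointer. A minimal sketch of why that matters (assumed simplification; oop is just void* here): when a collector moves the object it updates the slot, and every handle observes the new location without being touched itself.

#include <cstdio>

using oop = void*;  // stand-in for the real oop type

struct Handle {
  oop* _handle;
  Handle() : _handle(nullptr) {}
  explicit Handle(oop* slot) : _handle(slot) {}
  oop  obj()     const { return _handle == nullptr ? nullptr : *_handle; }
  bool is_null() const { return _handle == nullptr; }
};

int main() {
  int object_v1, object_v2;   // two successive "locations" of one object
  oop slot = &object_v1;      // slot tracked by the runtime
  Handle h(&slot);
  std::printf("before move: %p\n", h.obj());
  slot = &object_v2;          // a moving "GC" updates the slot only
  std::printf("after move:  %p\n", h.obj());  // handle sees the new address
  return 0;
}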

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -36,8 +36,8 @@
inline Handle::Handle(Thread* thread, oop obj) {
assert(thread == Thread::current(), "sanity check");
if (obj == NULL) {
_handle = NULL;
if (obj == nullptr) {
_handle = nullptr;
} else {
_handle = thread->handle_area()->allocate_handle(obj);
}
@ -47,7 +47,7 @@ inline void Handle::replace(oop obj) {
// Unlike in OopHandle::replace, we shouldn't use a barrier here.
// OopHandle has its storage in OopStorage, which is walked concurrently and uses barriers.
// Handle is thread private, and iterated by Thread::oops_do, which is why it shouldn't have any barriers at all.
assert(_handle != NULL, "should not use replace");
assert(_handle != nullptr, "should not use replace");
*_handle = obj;
}
@ -65,7 +65,7 @@ DEF_HANDLE_CONSTR(typeArray, is_typeArray_noinline)
// Constructor for metadata handles
#define DEF_METADATA_HANDLE_FN(name, type) \
inline name##Handle::name##Handle(Thread* thread, type* obj) : _value(obj), _thread(thread) { \
if (obj != NULL) { \
if (obj != nullptr) { \
assert(((Metadata*)obj)->is_valid(), "obj is valid"); \
assert(_thread == Thread::current(), "thread must be current"); \
assert(_thread->is_in_live_stack((address)this), "not on stack?"); \
@ -85,7 +85,7 @@ inline void HandleMark::push() {
inline void HandleMark::pop_and_restore() {
// Delete later chunks
if(_chunk->next() != NULL) {
if(_chunk->next() != nullptr) {
assert(_area->size_in_bytes() > size_in_bytes(), "Sanity check");
chop_later_chunks();
} else {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -90,7 +90,7 @@ class AsyncHandshakeOperation : public HandshakeOperation {
jlong _start_time_ns;
public:
AsyncHandshakeOperation(AsyncHandshakeClosure* cl, JavaThread* target, jlong start_ns)
: HandshakeOperation(cl, target, NULL), _start_time_ns(start_ns) {}
: HandshakeOperation(cl, target, nullptr), _start_time_ns(start_ns) {}
virtual ~AsyncHandshakeOperation() { delete _handshake_cl; }
jlong start_time() const { return _start_time_ns; }
};
@ -188,7 +188,7 @@ static void handle_timeout(HandshakeOperation* op, JavaThread* target) {
log_error(handshake)("Handshake timeout: %s(" INTPTR_FORMAT "), pending threads: " INT32_FORMAT,
op->name(), p2i(op), op->pending_threads());
if (target == NULL) {
if (target == nullptr) {
for ( ; JavaThread* thr = jtiwh.next(); ) {
if (thr->handshake_state()->operation_pending(op)) {
log_error(handshake)("JavaThread " INTPTR_FORMAT " has not cleared handshake op: " INTPTR_FORMAT, p2i(thr), p2i(op));
@ -200,7 +200,7 @@ static void handle_timeout(HandshakeOperation* op, JavaThread* target) {
log_error(handshake)("JavaThread " INTPTR_FORMAT " has not cleared handshake op: " INTPTR_FORMAT, p2i(target), p2i(op));
}
if (target != NULL) {
if (target != nullptr) {
if (os::signal_thread(target, SIGILL, "cannot be handshaked")) {
// Give target a chance to report the error and terminate the VM.
os::naked_sleep(3000);
@ -211,7 +211,7 @@ static void handle_timeout(HandshakeOperation* op, JavaThread* target) {
fatal("Handshake timeout");
}
static void check_handshake_timeout(jlong start_time, HandshakeOperation* op, JavaThread* target = NULL) {
static void check_handshake_timeout(jlong start_time, HandshakeOperation* op, JavaThread* target = nullptr) {
// Check if handshake operation has timed out
jlong timeout_ns = millis_to_nanos(HandshakeTimeout);
if (timeout_ns > 0) {
@ -221,15 +221,15 @@ static void check_handshake_timeout(jlong start_time, HandshakeOperation* op, Ja
}
}
static void log_handshake_info(jlong start_time_ns, const char* name, int targets, int emitted_handshakes_executed, const char* extra = NULL) {
static void log_handshake_info(jlong start_time_ns, const char* name, int targets, int emitted_handshakes_executed, const char* extra = nullptr) {
if (log_is_enabled(Info, handshake)) {
jlong completion_time = os::javaTimeNanos() - start_time_ns;
log_info(handshake)("Handshake \"%s\", Targeted threads: %d, Executed by requesting thread: %d, Total completion time: " JLONG_FORMAT " ns%s%s",
name, targets,
emitted_handshakes_executed,
completion_time,
extra != NULL ? ", " : "",
extra != NULL ? extra : "");
extra != nullptr ? ", " : "",
extra != nullptr ? extra : "");
}
}
@ -245,7 +245,7 @@ class VM_HandshakeAllThreads: public VM_Operation {
JavaThreadIteratorWithHandle jtiwh;
int number_of_threads_issued = 0;
for (JavaThread* thr = jtiwh.next(); thr != NULL; thr = jtiwh.next()) {
for (JavaThread* thr = jtiwh.next(); thr != nullptr; thr = jtiwh.next()) {
thr->handshake_state()->add_operation(_op);
number_of_threads_issued++;
}
@ -273,7 +273,7 @@ class VM_HandshakeAllThreads: public VM_Operation {
// Observing a blocked state may of course be transient but the processing is guarded
// by mutexes and we optimistically begin by working on the blocked threads
jtiwh.rewind();
for (JavaThread* thr = jtiwh.next(); thr != NULL; thr = jtiwh.next()) {
for (JavaThread* thr = jtiwh.next(); thr != nullptr; thr = jtiwh.next()) {
// A new thread on the ThreadsList will not have an operation,
// hence it is skipped in handshake_try_process.
HandshakeState::ProcessResult pr = thr->handshake_state()->try_process(_op);
@ -306,7 +306,7 @@ void HandshakeOperation::prepare(JavaThread* current_target, Thread* executing_t
// Only when the target is not executing the handshake itself.
StackWatermarkSet::start_processing(current_target, StackWatermarkKind::gc);
}
if (_requester != NULL && _requester != executing_thread && _requester->is_Java_thread()) {
if (_requester != nullptr && _requester != executing_thread && _requester->is_Java_thread()) {
// The handshake closure may contain oop Handles from the _requester.
// We must make sure we can use them.
StackWatermarkSet::start_processing(JavaThread::cast(_requester), StackWatermarkKind::gc);
@ -343,7 +343,7 @@ void HandshakeOperation::do_handshake(JavaThread* thread) {
}
void Handshake::execute(HandshakeClosure* hs_cl) {
HandshakeOperation cto(hs_cl, NULL, Thread::current());
HandshakeOperation cto(hs_cl, nullptr, Thread::current());
VM_HandshakeAllThreads handshake(&cto);
VMThread::execute(&handshake);
}
@ -496,13 +496,13 @@ HandshakeOperation* HandshakeState::get_op_for_self(bool allow_suspend, bool che
bool HandshakeState::has_operation(bool allow_suspend, bool check_async_exception) {
MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
return get_op_for_self(allow_suspend, check_async_exception) != NULL;
return get_op_for_self(allow_suspend, check_async_exception) != nullptr;
}
bool HandshakeState::has_async_exception_operation() {
if (!has_operation()) return false;
MutexLocker ml(_lock.owned_by_self() ? NULL : &_lock, Mutex::_no_safepoint_check_flag);
return _queue.peek(async_exception_filter) != NULL;
MutexLocker ml(_lock.owned_by_self() ? nullptr : &_lock, Mutex::_no_safepoint_check_flag);
return _queue.peek(async_exception_filter) != nullptr;
}
void HandshakeState::clean_async_exception_operation() {
@ -548,8 +548,8 @@ bool HandshakeState::process_by_self(bool allow_suspend, bool check_async_except
MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
HandshakeOperation* op = get_op_for_self(allow_suspend, check_async_exception);
if (op != NULL) {
assert(op->_target == NULL || op->_target == Thread::current(), "Wrong thread");
if (op != nullptr) {
assert(op->_target == nullptr || op->_target == Thread::current(), "Wrong thread");
bool async = op->is_async();
log_trace(handshake)("Proc handshake %s " INTPTR_FORMAT " on " INTPTR_FORMAT " by self",
async ? "asynchronous" : "synchronous", p2i(op), p2i(_handshakee));
@ -646,9 +646,9 @@ HandshakeState::ProcessResult HandshakeState::try_process(HandshakeOperation* ma
HandshakeOperation* op = get_op();
assert(op != NULL, "Must have an op");
assert(op != nullptr, "Must have an op");
assert(SafepointMechanism::local_poll_armed(_handshakee), "Must be");
assert(op->_target == NULL || _handshakee == op->_target, "Wrong thread");
assert(op->_target == nullptr || _handshakee == op->_target, "Wrong thread");
log_trace(handshake)("Processing handshake " INTPTR_FORMAT " by %s(%s)", p2i(op),
op == match_op ? "handshaker" : "cooperative",
@ -658,7 +658,7 @@ HandshakeState::ProcessResult HandshakeState::try_process(HandshakeOperation* ma
set_active_handshaker(current_thread);
op->do_handshake(_handshakee); // acquire, op removed after
set_active_handshaker(NULL);
set_active_handshaker(nullptr);
remove_op(op);
_lock.unlock();
@ -703,7 +703,7 @@ class ThreadSelfSuspensionHandshake : public AsyncHandshakeClosure {
};
bool HandshakeState::suspend_with_handshake() {
assert(_handshakee->threadObj() != NULL, "cannot suspend with a NULL threadObj");
assert(_handshakee->threadObj() != nullptr, "cannot suspend with a null threadObj");
if (_handshakee->is_exiting()) {
log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " exiting", p2i(_handshakee));
return false;
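
The handshake code above coordinates a requester and its targets so that each operation runs exactly once per target, by whichever side gets there first. A greatly simplified, hedged sketch of that protocol (not HotSpot's implementation; a per-target mutex stands in for the real operation queue and safepoint polling):

#include <atomic>
#include <cstdio>
#include <functional>
#include <mutex>
#include <thread>
#include <vector>

// An operation is attached to every target and runs exactly once per target,
// either by the target itself at a poll point or by the requester on the
// target's behalf. The per-target mutex prevents the closure from running
// twice or concurrently for one target.
struct TargetState {
  std::mutex lock;
  std::function<void(int)>* op = nullptr;  // pending operation, if any

  void poll(int id) {                      // called by the target thread
    std::lock_guard<std::mutex> g(lock);
    if (op != nullptr) { (*op)(id); op = nullptr; }
  }
  bool try_process(int id) {               // called by the requester
    std::lock_guard<std::mutex> g(lock);
    if (op == nullptr) return false;
    (*op)(id);
    op = nullptr;
    return true;
  }
};

int main() {
  constexpr int kThreads = 4;
  std::vector<TargetState> targets(kThreads);
  std::atomic<int> pending{kThreads};
  std::function<void(int)> closure = [&](int id) {
    std::printf("handshake executed for target %d\n", id);
    pending.fetch_sub(1);
  };
  for (auto& t : targets) {                // attach the operation to everyone
    std::lock_guard<std::mutex> g(t.lock);
    t.op = &closure;
  }
  std::atomic<bool> stop{false};
  std::vector<std::thread> workers;
  for (int i = 0; i < kThreads; i++) {
    workers.emplace_back([&, i] {
      while (!stop.load()) { targets[i].poll(i); std::this_thread::yield(); }
    });
  }
  while (pending.load() > 0) {             // help out, like the requester does
    for (int i = 0; i < kThreads; i++) targets[i].try_process(i);
  }
  stop.store(true);
  for (auto& w : workers) w.join();
  return 0;
}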

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,14 +29,14 @@
#include "utilities/align.hpp"
// The flush stub function address
AbstractICache::flush_icache_stub_t AbstractICache::_flush_icache_stub = NULL;
AbstractICache::flush_icache_stub_t AbstractICache::_flush_icache_stub = nullptr;
void AbstractICache::initialize() {
// Making this stub must be FIRST use of assembler
ResourceMark rm;
BufferBlob* b = BufferBlob::create("flush_icache_stub", ICache::stub_size);
if (b == NULL) {
if (b == nullptr) {
vm_exit_out_of_memory(ICache::stub_size, OOM_MALLOC_ERROR, "CodeCache: no space for flush_icache_stub");
}
CodeBuffer c(b);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -86,7 +86,7 @@ void InterfaceSupport::gc_alot() {
if (!thread->is_Java_thread()) return; // Avoid concurrent calls
// Check for new, not quite initialized thread. A thread in new mode cannot initiate a GC.
JavaThread *current_thread = JavaThread::cast(thread);
if (current_thread->active_handles() == NULL) return;
if (current_thread->active_handles() == nullptr) return;
// Short-circuit any possible re-entrant gc-a-lot attempt
if (thread->skip_gcalot()) return;
@ -220,7 +220,7 @@ void InterfaceSupport::verify_stack() {
// In case of exceptions we might not have a runtime_stub on
// top of stack, hence, all callee-saved registers are not going
// to be setup correctly, hence, we cannot do stack verify
if (cb != NULL && !(cb->is_runtime_stub() || cb->is_uncommon_trap_stub())) return;
if (cb != nullptr && !(cb->is_runtime_stub() || cb->is_uncommon_trap_stub())) return;
for (; !sfs.is_done(); sfs.next()) {
sfs.current()->verify(sfs.register_map());

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -80,7 +80,7 @@ class ThreadStateTransition : public StackObj {
public:
ThreadStateTransition(JavaThread *thread) : _thread(thread) {
assert(thread != NULL, "must be active Java thread");
assert(thread != nullptr, "must be active Java thread");
assert(thread == Thread::current(), "must be current thread");
}
@ -145,7 +145,7 @@ class ThreadInVMfromJava : public ThreadStateTransition {
class ThreadInVMfromUnknown {
JavaThread* _thread;
public:
ThreadInVMfromUnknown() : _thread(NULL) {
ThreadInVMfromUnknown() : _thread(nullptr) {
Thread* t = Thread::current();
if (t->is_Java_thread()) {
JavaThread* t2 = JavaThread::cast(t);
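
ThreadStateTransition and ThreadInVMfromUnknown above are RAII guards: the constructor switches the thread's state and the destructor restores it on scope exit. A toy sketch of the idiom (hypothetical states, none of HotSpot's safepoint checks):

#include <cassert>

enum ThreadState { kInJava, kInVM };

struct Thread {
  ThreadState state = kInJava;
};

class ThreadInVMfromJava {
  Thread* _thread;
 public:
  explicit ThreadInVMfromJava(Thread* t) : _thread(t) {
    assert(t != nullptr && "must be an active Java thread");
    _thread->state = kInVM;     // enter the VM on construction
  }
  ~ThreadInVMfromJava() {
    _thread->state = kInJava;   // restore on scope exit
  }
};

int main() {
  Thread t;
  {
    ThreadInVMfromJava guard(&t);
    assert(t.state == kInVM);
  }
  return t.state == kInJava ? 0 : 1;
}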

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -107,7 +107,7 @@ int compare_methods(Method** a, Method** b) {
void collect_profiled_methods(Method* m) {
Thread* thread = Thread::current();
methodHandle mh(thread, m);
if ((m->method_data() != NULL) &&
if ((m->method_data() != nullptr) &&
(PrintMethodData || CompilerOracle::should_print(mh))) {
collected_profiled_methods->push(m);
}
@ -132,7 +132,7 @@ void print_method_profiling_data() {
tty->print_cr(" mdo size: %d bytes", m->method_data()->size_in_bytes());
tty->cr();
// Dump data on parameters if any
if (m->method_data() != NULL && m->method_data()->parameters_type_data() != NULL) {
if (m->method_data() != nullptr && m->method_data()->parameters_type_data() != nullptr) {
tty->fill_to(2);
m->method_data()->parameters_type_data()->print_data_on(tty);
}
@ -295,7 +295,7 @@ void print_statistics() {
// CodeHeap State Analytics.
if (PrintCodeHeapAnalytics) {
CompileBroker::print_heapinfo(NULL, "all", 4096); // details
CompileBroker::print_heapinfo(nullptr, "all", 4096); // details
}
if (PrintCodeCache2) {
@ -360,7 +360,7 @@ void print_statistics() {
// CodeHeap State Analytics.
if (PrintCodeHeapAnalytics) {
CompileBroker::print_heapinfo(NULL, "all", 4096); // details
CompileBroker::print_heapinfo(nullptr, "all", 4096); // details
}
#ifdef COMPILER2
@ -491,7 +491,7 @@ void before_exit(JavaThread* thread, bool halt) {
#if INCLUDE_CDS
if (DynamicArchive::should_dump_at_vm_exit()) {
assert(ArchiveClassesAtExit != NULL, "Must be already set");
assert(ArchiveClassesAtExit != nullptr, "Must be already set");
ExceptionMark em(thread);
DynamicArchive::dump(ArchiveClassesAtExit, thread);
if (thread->has_pending_exception()) {
@ -527,8 +527,8 @@ void before_exit(JavaThread* thread, bool halt) {
void vm_exit(int code) {
Thread* thread =
ThreadLocalStorage::is_initialized() ? Thread::current_or_null() : NULL;
if (thread == NULL) {
ThreadLocalStorage::is_initialized() ? Thread::current_or_null() : nullptr;
if (thread == nullptr) {
// very early initialization failure -- just exit
vm_direct_exit(code);
}
@ -538,7 +538,7 @@ void vm_exit(int code) {
// XML termination logging safe is tied to the termination of the
// VMThread, and it doesn't terminate on this exit path. See 8222534.
if (VMThread::vm_thread() != NULL) {
if (VMThread::vm_thread() != nullptr) {
if (thread->is_Java_thread()) {
// We must be "in_vm" for the code below to work correctly.
// Historically there must have been some exit path for which
@ -589,7 +589,7 @@ void vm_direct_exit(int code, const char* message) {
void vm_perform_shutdown_actions() {
if (is_init_completed()) {
Thread* thread = Thread::current_or_null();
if (thread != NULL && thread->is_Java_thread()) {
if (thread != nullptr && thread->is_Java_thread()) {
// We are leaving the VM, set state to native (in case any OS exit
// handlers call back to the VM)
JavaThread* jt = JavaThread::cast(thread);
@ -622,10 +622,10 @@ void vm_abort(bool dump_core) {
}
void vm_notify_during_cds_dumping(const char* error, const char* message) {
if (error != NULL) {
if (error != nullptr) {
tty->print_cr("Error occurred during CDS dumping");
tty->print("%s", error);
if (message != NULL) {
if (message != nullptr) {
tty->print_cr(": %s", message);
}
else {
@ -642,10 +642,10 @@ void vm_exit_during_cds_dumping(const char* error, const char* message) {
}
void vm_notify_during_shutdown(const char* error, const char* message) {
if (error != NULL) {
if (error != nullptr) {
tty->print_cr("Error occurred during initialization of VM");
tty->print("%s", error);
if (message != NULL) {
if (message != nullptr) {
tty->print_cr(": %s", message);
}
else {
@ -658,7 +658,7 @@ void vm_notify_during_shutdown(const char* error, const char* message) {
}
void vm_exit_during_initialization() {
vm_notify_during_shutdown(NULL, NULL);
vm_notify_during_shutdown(nullptr, nullptr);
// Failure during initialization, we don't want to dump core
vm_abort(false);
@ -669,13 +669,13 @@ void vm_exit_during_initialization(Handle exception) {
// If there are exceptions on this thread it must be cleared
// first and here. Any future calls to EXCEPTION_MARK requires
// that no pending exceptions exist.
JavaThread* THREAD = JavaThread::current(); // can't be NULL
JavaThread* THREAD = JavaThread::current(); // can't be null
if (HAS_PENDING_EXCEPTION) {
CLEAR_PENDING_EXCEPTION;
}
java_lang_Throwable::print_stack_trace(exception, tty);
tty->cr();
vm_notify_during_shutdown(NULL, NULL);
vm_notify_during_shutdown(nullptr, nullptr);
// Failure during initialization, we don't want to dump core
vm_abort(false);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -54,10 +54,10 @@ extern void notify_vm_shutdown();
extern void vm_exit_during_initialization();
extern void vm_exit_during_initialization(Handle exception);
extern void vm_exit_during_initialization(Symbol* exception_name, const char* message);
extern void vm_exit_during_initialization(const char* error, const char* message = NULL);
extern void vm_shutdown_during_initialization(const char* error, const char* message = NULL);
extern void vm_exit_during_initialization(const char* error, const char* message = nullptr);
extern void vm_shutdown_during_initialization(const char* error, const char* message = nullptr);
extern void vm_exit_during_cds_dumping(const char* error, const char* message = NULL);
extern void vm_exit_during_cds_dumping(const char* error, const char* message = nullptr);
/**
* With the integration of the changes to handle the version string

View File

@ -85,7 +85,7 @@ JavaCallWrapper::JavaCallWrapper(const methodHandle& callee_method, Handle recei
_handles = _thread->active_handles(); // save previous handle block & Java frame linkage
// For the profiler, the last_Java_frame information in thread must always be in
// legal state. We have no last Java frame if last_Java_sp == NULL so
// legal state. We have no last Java frame if last_Java_sp == nullptr so
// the valid transition is to clear _last_Java_sp and then reset the rest of
// the (platform specific) state.
@ -174,7 +174,7 @@ static BasicType runtime_type_from(JavaValue* result) {
void JavaCalls::call_virtual(JavaValue* result, Klass* spec_klass, Symbol* name, Symbol* signature, JavaCallArguments* args, TRAPS) {
CallInfo callinfo;
Handle receiver = args->receiver();
Klass* recvrKlass = receiver.is_null() ? (Klass*)NULL : receiver->klass();
Klass* recvrKlass = receiver.is_null() ? (Klass*)nullptr : receiver->klass();
LinkInfo link_info(spec_klass, name, signature);
LinkResolver::resolve_virtual_call(
callinfo, receiver, recvrKlass, link_info, true, CHECK);
@ -405,8 +405,8 @@ void JavaCalls::call_helper(JavaValue* result, const methodHandle& method, JavaC
// Must extract verified entry point from HotSpotNmethod after VM to Java
// transition in JavaCallWrapper constructor so that it is safe with
// respect to nmethod sweeping.
address verified_entry_point = (address) HotSpotJVMCI::InstalledCode::entryPoint(NULL, alternative_target());
if (verified_entry_point != NULL) {
address verified_entry_point = (address) HotSpotJVMCI::InstalledCode::entryPoint(nullptr, alternative_target());
if (verified_entry_point != nullptr) {
thread->set_jvmci_alternate_call_target(verified_entry_point);
entry_point = method->adapter()->get_i2c_entry();
}
@ -439,7 +439,7 @@ void JavaCalls::call_helper(JavaValue* result, const methodHandle& method, JavaC
// Restore possible oop return
if (oop_result_flag) {
result->set_oop(thread->vm_result());
thread->set_vm_result(NULL);
thread->set_vm_result(nullptr);
}
}
@ -471,7 +471,7 @@ inline oop resolve_indirect_oop(intptr_t value, uint state) {
default:
ShouldNotReachHere();
return NULL;
return nullptr;
}
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -66,7 +66,7 @@ class JavaCallWrapper: StackObj {
Method* callee_method() { return _callee_method; }
void oops_do(OopClosure* f);
bool is_first_frame() const { return _anchor.last_Java_sp() == NULL; }
bool is_first_frame() const { return _anchor.last_Java_sp() == nullptr; }
};


@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -57,7 +57,7 @@ friend class UpcallLinker;
private:
//
// Whenever _last_Java_sp != NULL other anchor fields MUST be valid!
// Whenever _last_Java_sp != nullptr other anchor fields MUST be valid!
// The stack may not be walkable [check with walkable() ] but the values must be valid.
// The profiler apparently depends on this.
//
@ -72,14 +72,14 @@ friend class UpcallLinker;
volatile address _last_Java_pc;
// tells whether the last Java frame is set
// It is important that when last_Java_sp != NULL that the rest of the frame
// It is important that when last_Java_sp != nullptr that the rest of the frame
// anchor (including platform specific) all be valid.
bool has_last_Java_frame() const { return _last_Java_sp != NULL; }
// This is very dangerous unless sp == NULL
bool has_last_Java_frame() const { return _last_Java_sp != nullptr; }
// This is very dangerous unless sp == nullptr
// Invalidate the anchor so that has_last_frame is false
// and no one should look at the other fields.
void zap(void) { _last_Java_sp = NULL; }
void zap(void) { _last_Java_sp = nullptr; }
#include CPU_HEADER(javaFrameAnchor)
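Read together with the hunk above, the invariant implies a mirror-image ordering when a frame is installed: make the dependent fields valid first and write sp last, so the anchor never looks set while half-initialized. A sketch under the same invented names as before:

  struct AnchorSetSketch {
    intptr_t*      _last_Java_sp;
    unsigned char* _last_Java_pc;
    void set(intptr_t* sp, unsigned char* pc) {
      _last_Java_pc = pc;           // dependent fields become valid first
      _last_Java_sp = sp;           // publishing sp last preserves the invariant
    }
    bool has_last_Java_frame() const { return _last_Java_sp != nullptr; }
  };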


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -146,19 +146,19 @@ void JavaThread::smr_delete() {
}
// Initialized by VMThread at vm_global_init
OopStorage* JavaThread::_thread_oop_storage = NULL;
OopStorage* JavaThread::_thread_oop_storage = nullptr;
OopStorage* JavaThread::thread_oop_storage() {
assert(_thread_oop_storage != NULL, "not yet initialized");
assert(_thread_oop_storage != nullptr, "not yet initialized");
return _thread_oop_storage;
}
void JavaThread::set_threadOopHandles(oop p) {
assert(_thread_oop_storage != NULL, "not yet initialized");
assert(_thread_oop_storage != nullptr, "not yet initialized");
_threadObj = OopHandle(_thread_oop_storage, p);
_vthread = OopHandle(_thread_oop_storage, p);
_jvmti_vthread = OopHandle(_thread_oop_storage, NULL);
_scopedValueCache = OopHandle(_thread_oop_storage, NULL);
_jvmti_vthread = OopHandle(_thread_oop_storage, nullptr);
_scopedValueCache = OopHandle(_thread_oop_storage, nullptr);
}
oop JavaThread::threadObj() const {
@ -174,7 +174,7 @@ oop JavaThread::vthread() const {
}
void JavaThread::set_vthread(oop p) {
assert(_thread_oop_storage != NULL, "not yet initialized");
assert(_thread_oop_storage != nullptr, "not yet initialized");
_vthread.replace(p);
}
@ -183,7 +183,7 @@ oop JavaThread::jvmti_vthread() const {
}
void JavaThread::set_jvmti_vthread(oop p) {
assert(_thread_oop_storage != NULL, "not yet initialized");
assert(_thread_oop_storage != nullptr, "not yet initialized");
_jvmti_vthread.replace(p);
}
@ -192,19 +192,19 @@ oop JavaThread::scopedValueCache() const {
}
void JavaThread::set_scopedValueCache(oop p) {
if (_scopedValueCache.ptr_raw() != NULL) { // i.e. if the OopHandle has been allocated
if (_scopedValueCache.ptr_raw() != nullptr) { // i.e. if the OopHandle has been allocated
_scopedValueCache.replace(p);
} else {
assert(p == NULL, "not yet initialized");
assert(p == nullptr, "not yet initialized");
}
}
void JavaThread::clear_scopedValueBindings() {
set_scopedValueCache(NULL);
set_scopedValueCache(nullptr);
oop vthread_oop = vthread();
// vthread may be null here if we get a VM error during startup,
// before the java.lang.Thread instance has been created.
if (vthread_oop != NULL) {
if (vthread_oop != nullptr) {
java_lang_Thread::clear_scopedValueBindings(vthread_oop);
}
}
@ -212,7 +212,7 @@ void JavaThread::clear_scopedValueBindings() {
void JavaThread::allocate_threadObj(Handle thread_group, const char* thread_name,
bool daemon, TRAPS) {
assert(thread_group.not_null(), "thread group should be specified");
assert(threadObj() == NULL, "should only create Java thread object once");
assert(threadObj() == nullptr, "should only create Java thread object once");
InstanceKlass* ik = vmClasses::Thread_klass();
assert(ik->is_initialized(), "must be");
@ -225,7 +225,7 @@ void JavaThread::allocate_threadObj(Handle thread_group, const char* thread_name
set_threadOopHandles(thread_oop());
JavaValue result(T_VOID);
if (thread_name != NULL) {
if (thread_name != nullptr) {
Handle name = java_lang_String::create_from_str(thread_name, CHECK);
// Thread gets assigned specified name and null target
JavaCalls::call_special(&result,
@ -282,10 +282,10 @@ void JavaThread::collect_counters(jlong* array, int length) {
// Attempt to enlarge the array for per thread counters.
jlong* resize_counters_array(jlong* old_counters, int current_size, int new_size) {
jlong* new_counters = NEW_C_HEAP_ARRAY_RETURN_NULL(jlong, new_size, mtJVMCI);
if (new_counters == NULL) {
return NULL;
if (new_counters == nullptr) {
return nullptr;
}
if (old_counters == NULL) {
if (old_counters == nullptr) {
old_counters = new_counters;
memset(old_counters, 0, sizeof(jlong) * new_size);
} else {
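A hedged restatement of the resize contract visible in this hunk and the ones that follow: the new array inherits the old values (or zeros when there were none), newly added slots are zeroed, and nullptr propagates to the caller on allocation failure. A standalone sketch using plain malloc in place of NEW_C_HEAP_ARRAY_RETURN_NULL and long long standing in for jlong:

  #include <cstdlib>
  #include <cstring>

  long long* resize_counters_sketch(long long* old_counters,
                                    int current_size, int new_size) {
    long long* nc = (long long*) malloc(sizeof(long long) * new_size);
    if (nc == nullptr) return nullptr;            // caller reports failure
    int keep = current_size < new_size ? current_size : new_size;
    if (old_counters == nullptr) {
      memset(nc, 0, sizeof(long long) * new_size);
    } else {
      memcpy(nc, old_counters, sizeof(long long) * keep);
      memset(nc + keep, 0, sizeof(long long) * (new_size - keep));
      free(old_counters);
    }
    return nc;
  }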
@ -303,7 +303,7 @@ jlong* resize_counters_array(jlong* old_counters, int current_size, int new_size
// Attempt to enlarge the array for per thread counters.
bool JavaThread::resize_counters(int current_size, int new_size) {
jlong* new_counters = resize_counters_array(_jvmci_counters, current_size, new_size);
if (new_counters == NULL) {
if (new_counters == nullptr) {
return false;
} else {
_jvmci_counters = new_counters;
@ -323,7 +323,7 @@ class VM_JVMCIResizeCounters : public VM_Operation {
void doit() {
// Resize the old thread counters array
jlong* new_counters = resize_counters_array(JavaThread::_jvmci_old_thread_counters, JVMCICounterSize, _new_size);
if (new_counters == NULL) {
if (new_counters == nullptr) {
_failed = true;
return;
} else {
@ -409,11 +409,11 @@ JavaThread::JavaThread() :
_vm_result(nullptr),
_vm_result_2(nullptr),
_current_pending_monitor(NULL),
_current_pending_monitor(nullptr),
_current_pending_monitor_is_from_java(true),
_current_waiting_monitor(NULL),
_active_handles(NULL),
_free_handle_block(NULL),
_current_waiting_monitor(nullptr),
_active_handles(nullptr),
_free_handle_block(nullptr),
_Stalled(0),
_monitor_chunks(nullptr),
@ -541,9 +541,9 @@ void JavaThread::interrupt() {
bool JavaThread::is_interrupted(bool clear_interrupted) {
debug_only(check_for_dangling_thread_pointer(this);)
if (_threadObj.peek() == NULL) {
if (_threadObj.peek() == nullptr) {
// If there is no j.l.Thread then it is impossible to have
// been interrupted. We can find NULL during VM initialization
// been interrupted. We can find null during VM initialization
// or when a JNI thread is still in the process of attaching.
// In such cases this must be the current thread.
assert(this == Thread::current(), "invariant");
@ -598,7 +598,7 @@ JavaThread::JavaThread(ThreadFunction entry_point, size_t stack_sz) : JavaThread
thr_type = entry_point == &CompilerThread::thread_entry ? os::compiler_thread :
os::java_thread;
os::create_thread(this, thr_type, stack_sz);
// The _osthread may be NULL here because we ran out of memory (too many threads active).
// The _osthread may be null here because we ran out of memory (too many threads active).
// We need to throw an OutOfMemoryError - however we cannot do this here because the caller
// may hold a lock and all locks must be unlocked before throwing the exception (throwing
// the exception consists of creating the exception object & initializing it, initialization
@ -617,30 +617,30 @@ JavaThread::~JavaThread() {
// Return the sleep event to the free list
ParkEvent::Release(_SleepEvent);
_SleepEvent = NULL;
_SleepEvent = nullptr;
// Free any remaining previous UnrollBlock
vframeArray* old_array = vframe_array_last();
if (old_array != NULL) {
if (old_array != nullptr) {
Deoptimization::UnrollBlock* old_info = old_array->unroll_block();
old_array->set_unroll_block(NULL);
old_array->set_unroll_block(nullptr);
delete old_info;
delete old_array;
}
JvmtiDeferredUpdates* updates = deferred_updates();
if (updates != NULL) {
if (updates != nullptr) {
// This can only happen if thread is destroyed before deoptimization occurs.
assert(updates->count() > 0, "Updates holder not deleted");
// free deferred updates.
delete updates;
set_deferred_updates(NULL);
set_deferred_updates(nullptr);
}
// All Java related clean up happens in exit
ThreadSafepointState::destroy(this);
if (_thread_stat != NULL) delete _thread_stat;
if (_thread_stat != nullptr) delete _thread_stat;
#if INCLUDE_JVMCI
if (JVMCICounterSize > 0) {
@ -697,7 +697,7 @@ void JavaThread::run() {
void JavaThread::thread_main_inner() {
assert(JavaThread::current() == this, "sanity check");
assert(_threadObj.peek() != NULL, "just checking");
assert(_threadObj.peek() != nullptr, "just checking");
// Execute thread entry point unless this thread has a pending exception.
// Note: Due to JVMTI StopThread we can have pending exceptions already!
@ -733,14 +733,14 @@ static void ensure_join(JavaThread* thread) {
java_lang_Thread::set_thread_status(threadObj(), JavaThreadStatus::TERMINATED);
// Clear the native thread instance - this makes isAlive return false and allows the join()
// to complete once we've done the notify_all below
java_lang_Thread::set_thread(threadObj(), NULL);
java_lang_Thread::set_thread(threadObj(), nullptr);
lock.notify_all(thread);
// Ignore pending exception, since we are exiting anyway
thread->clear_pending_exception();
}
static bool is_daemon(oop threadObj) {
return (threadObj != NULL && java_lang_Thread::is_daemon(threadObj));
return (threadObj != nullptr && java_lang_Thread::is_daemon(threadObj));
}
// For any new cleanup additions, please check to see if they need to be applied to
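ensure_join above is an instance of the classic publish-then-notify idiom: flip the state that waiters re-check, then notify under the same lock. A minimal standalone sketch of the same pattern with a std::condition_variable (not HotSpot's ObjectLocker machinery):

  #include <mutex>
  #include <condition_variable>

  struct JoinSketch {
    std::mutex lock;
    std::condition_variable cv;
    bool alive = true;

    void on_exit() {                        // the ensure_join role
      std::lock_guard<std::mutex> g(lock);
      alive = false;                        // joiners re-check this under the lock
      cv.notify_all();                      // wake every blocked join()
    }
    void join() {                           // the Thread.join() role
      std::unique_lock<std::mutex> g(lock);
      cv.wait(g, [&]{ return !alive; });
    }
  };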
@ -878,15 +878,15 @@ void JavaThread::exit(bool destroy_vm, ExitType exit_type) {
// is in a consistent state, in case GC happens
JFR_ONLY(Jfr::on_thread_exit(this);)
if (active_handles() != NULL) {
if (active_handles() != nullptr) {
JNIHandleBlock* block = active_handles();
set_active_handles(NULL);
set_active_handles(nullptr);
JNIHandleBlock::release_block(block);
}
if (free_handle_block() != NULL) {
if (free_handle_block() != nullptr) {
JNIHandleBlock* block = free_handle_block();
set_free_handle_block(NULL);
set_free_handle_block(nullptr);
JNIHandleBlock::release_block(block);
}
@ -903,7 +903,7 @@ void JavaThread::exit(bool destroy_vm, ExitType exit_type) {
// We need to cache the thread name for logging purposes below as once
// we have called on_thread_detach this thread must not access any oops.
char* thread_name = NULL;
char* thread_name = nullptr;
if (log_is_enabled(Debug, os, thread, timer)) {
ResourceMark rm(this);
thread_name = os::strdup(name());
@ -949,15 +949,15 @@ void JavaThread::exit(bool destroy_vm, ExitType exit_type) {
}
void JavaThread::cleanup_failed_attach_current_thread(bool is_daemon) {
if (active_handles() != NULL) {
if (active_handles() != nullptr) {
JNIHandleBlock* block = active_handles();
set_active_handles(NULL);
set_active_handles(nullptr);
JNIHandleBlock::release_block(block);
}
if (free_handle_block() != NULL) {
if (free_handle_block() != nullptr) {
JNIHandleBlock* block = free_handle_block();
set_free_handle_block(NULL);
set_free_handle_block(nullptr);
JNIHandleBlock::release_block(block);
}
@ -979,7 +979,7 @@ JavaThread* JavaThread::active() {
} else {
assert(thread->is_VM_thread(), "this must be a vm thread");
VM_Operation* op = ((VMThread*) thread)->vm_operation();
JavaThread *ret = op == NULL ? NULL : JavaThread::cast(op->calling_thread());
JavaThread *ret = op == nullptr ? nullptr : JavaThread::cast(op->calling_thread());
return ret;
}
}
@ -987,7 +987,7 @@ JavaThread* JavaThread::active() {
bool JavaThread::is_lock_owned(address adr) const {
if (Thread::is_lock_owned(adr)) return true;
for (MonitorChunk* chunk = monitor_chunks(); chunk != NULL; chunk = chunk->next()) {
for (MonitorChunk* chunk = monitor_chunks(); chunk != nullptr; chunk = chunk->next()) {
if (chunk->contains(adr)) return true;
}
@ -1008,7 +1008,7 @@ void JavaThread::add_monitor_chunk(MonitorChunk* chunk) {
}
void JavaThread::remove_monitor_chunk(MonitorChunk* chunk) {
guarantee(monitor_chunks() != NULL, "must be non empty");
guarantee(monitor_chunks() != nullptr, "must be non empty");
if (monitor_chunks() == chunk) {
set_monitor_chunks(chunk->next());
} else {
@ -1030,7 +1030,7 @@ void JavaThread::handle_special_runtime_exit_condition() {
// Asynchronous exceptions support
//
void JavaThread::handle_async_exception(oop java_throwable) {
assert(java_throwable != NULL, "should have an _async_exception to throw");
assert(java_throwable != nullptr, "should have an _async_exception to throw");
assert(!is_at_poll_safepoint(), "should have never called this method");
if (has_last_Java_frame()) {
@ -1242,7 +1242,7 @@ void JavaThread::deoptimize() {
jio_snprintf(buffer, sizeof(buffer), "%d", sd->bci());
size_t len = strlen(buffer);
const char * found = strstr(DeoptimizeOnlyAt, buffer);
while (found != NULL) {
while (found != nullptr) {
if ((found[len] == ',' || found[len] == '\n' || found[len] == '\0') &&
(found == DeoptimizeOnlyAt || found[-1] == ',' || found[-1] == '\n')) {
// Check that the bci found is bracketed by terminators.
@ -1310,7 +1310,7 @@ void JavaThread::push_jni_handle_block() {
// Inlined code from jni_PushLocalFrame()
JNIHandleBlock* old_handles = active_handles();
JNIHandleBlock* new_handles = JNIHandleBlock::allocate_block(this);
assert(old_handles != NULL && new_handles != NULL, "should not be NULL");
assert(old_handles != nullptr && new_handles != nullptr, "should not be null");
new_handles->set_pop_frame_link(old_handles); // make sure java handles get gc'd.
set_active_handles(new_handles);
}
@ -1322,7 +1322,7 @@ void JavaThread::pop_jni_handle_block() {
JNIHandleBlock* new_handles = old_handles->pop_frame_link();
assert(new_handles != nullptr, "should never set active handles to null");
set_active_handles(new_handles);
old_handles->set_pop_frame_link(NULL);
old_handles->set_pop_frame_link(nullptr);
JNIHandleBlock::release_block(old_handles, this);
}
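The two hunks above form a bracket pair mirroring jni_PushLocalFrame / jni_PopLocalFrame. A hedged sketch of the intended nesting (error handling elided):

  void with_local_frame_sketch(JavaThread* thread) {
    thread->push_jni_handle_block();  // new block; old one kept on pop_frame_link
    // ... allocate temporary local handles against the new block ...
    thread->pop_jni_handle_block();   // restore the old block, release the new one
  }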
@ -1333,7 +1333,7 @@ void JavaThread::oops_do_no_frames(OopClosure* f, CodeBlobClosure* cf) {
// Traverse the GCHandles
Thread::oops_do_no_frames(f, cf);
if (active_handles() != NULL) {
if (active_handles() != nullptr) {
active_handles()->oops_do(f);
}
@ -1341,16 +1341,16 @@ void JavaThread::oops_do_no_frames(OopClosure* f, CodeBlobClosure* cf) {
if (has_last_Java_frame()) {
// Traverse the monitor chunks
for (MonitorChunk* chunk = monitor_chunks(); chunk != NULL; chunk = chunk->next()) {
for (MonitorChunk* chunk = monitor_chunks(); chunk != nullptr; chunk = chunk->next()) {
chunk->oops_do(f);
}
}
assert(vframe_array_head() == NULL, "deopt in progress at a safepoint!");
assert(vframe_array_head() == nullptr, "deopt in progress at a safepoint!");
// If we have deferred set_locals there might be oops waiting to be
// written
GrowableArray<jvmtiDeferredLocalVariableSet*>* list = JvmtiDeferredUpdates::deferred_locals(this);
if (list != NULL) {
if (list != nullptr) {
for (int i = 0; i < list->length(); i++) {
list->at(i)->oops_do(f);
}
@ -1364,7 +1364,7 @@ void JavaThread::oops_do_no_frames(OopClosure* f, CodeBlobClosure* cf) {
f->do_oop((oop*) &_jvmci_reserved_oop0);
#endif
if (jvmti_thread_state() != NULL) {
if (jvmti_thread_state() != nullptr) {
jvmti_thread_state()->oops_do(f, cf);
}
@ -1385,7 +1385,7 @@ void JavaThread::oops_do_frames(OopClosure* f, CodeBlobClosure* cf) {
return;
}
// Finish any pending lazy GC activity for the frames
StackWatermarkSet::finish_processing(this, NULL /* context */, StackWatermarkKind::gc);
StackWatermarkSet::finish_processing(this, nullptr /* context */, StackWatermarkKind::gc);
// Traverse the execution stack
for (StackFrameStream fst(this, true /* update */, false /* process_frames */); !fst.is_done(); fst.next()) {
fst.current()->oops_do(f, cf, fst.register_map());
@ -1410,7 +1410,7 @@ void JavaThread::nmethods_do(CodeBlobClosure* cf) {
}
}
if (jvmti_thread_state() != NULL) {
if (jvmti_thread_state() != nullptr) {
jvmti_thread_state()->nmethods_do(cf);
}
}
@ -1424,11 +1424,11 @@ void JavaThread::metadata_do(MetadataClosure* f) {
} else if (is_Compiler_thread()) {
// need to walk ciMetadata in current compile tasks to keep alive.
CompilerThread* ct = (CompilerThread*)this;
if (ct->env() != NULL) {
if (ct->env() != nullptr) {
ct->env()->metadata_do(f);
}
CompileTask* task = ct->task();
if (task != NULL) {
if (task != nullptr) {
task->metadata_do(f);
}
}
@ -1462,7 +1462,7 @@ void JavaThread::print_on(outputStream *st, bool print_extended_info) const {
st->print_raw(name());
st->print_raw("\" ");
oop thread_oop = threadObj();
if (thread_oop != NULL) {
if (thread_oop != nullptr) {
st->print("#" INT64_FORMAT " [%ld] ", (int64_t)java_lang_Thread::thread_id(thread_oop), (long) osthread()->thread_id());
if (java_lang_Thread::is_daemon(thread_oop)) st->print("daemon ");
st->print("prio=%d ", java_lang_Thread::priority(thread_oop));
@ -1470,10 +1470,10 @@ void JavaThread::print_on(outputStream *st, bool print_extended_info) const {
Thread::print_on(st, print_extended_info);
// print guess for valid stack memory region (assume 4K pages); helps lock debugging
st->print_cr("[" INTPTR_FORMAT "]", (intptr_t)last_Java_sp() & ~right_n_bits(12));
if (thread_oop != NULL) {
if (thread_oop != nullptr) {
if (is_vthread_mounted()) {
oop vt = vthread();
assert(vt != NULL, "");
assert(vt != nullptr, "");
st->print_cr(" Carrying virtual thread #" INT64_FORMAT, (int64_t)java_lang_Thread::thread_id(vt));
} else {
st->print_cr(" java.lang.Thread.State: %s", java_lang_Thread::thread_status_name(thread_oop));
@ -1484,9 +1484,9 @@ void JavaThread::print_on(outputStream *st, bool print_extended_info) const {
#endif // PRODUCT
if (is_Compiler_thread()) {
CompileTask *task = ((CompilerThread*)this)->task();
if (task != NULL) {
if (task != nullptr) {
st->print(" Compiling: ");
task->print(st, NULL, true, false);
task->print(st, nullptr, true, false);
} else {
st->print(" No compile task");
}
@ -1544,19 +1544,19 @@ static void frame_verify(frame* f, const RegisterMap *map) { f->verify(map); }
void JavaThread::verify() {
// Verify oops in the thread.
oops_do(&VerifyOopClosure::verify_oop, NULL);
oops_do(&VerifyOopClosure::verify_oop, nullptr);
// Verify the stack frames.
frames_do(frame_verify);
}
// CR 6300358 (sub-CR 2137150)
// Most callers of this method assume that it can't return NULL but a
// Most callers of this method assume that it can't return null but a
// thread may not have a name whilst it is in the process of attaching to
// the VM - see CR 6412693, and there are places where a JavaThread can be
// seen prior to having its threadObj set (e.g., JNI attaching threads and
// if vm exit occurs during initialization). These cases can all be accounted
// for such that this method never returns NULL.
// for such that this method never returns null.
const char* JavaThread::name() const {
if (Thread::is_JavaThread_protected(/* target */ this)) {
// The target JavaThread is protected so get_thread_name_string() is safe:
@ -1567,7 +1567,7 @@ const char* JavaThread::name() const {
return Thread::name();
}
// Returns a non-NULL representation of this thread's name, or a suitable
// Returns a non-null representation of this thread's name, or a suitable
// descriptive string if there is no set name.
const char* JavaThread::get_thread_name_string(char* buf, int buflen) const {
const char* name_str;
@ -1579,10 +1579,10 @@ const char* JavaThread::get_thread_name_string(char* buf, int buflen) const {
// or if it is a JavaThread that can safely access oops.
#endif
oop thread_obj = threadObj();
if (thread_obj != NULL) {
if (thread_obj != nullptr) {
oop name = java_lang_Thread::name(thread_obj);
if (name != NULL) {
if (buf == NULL) {
if (name != nullptr) {
if (buf == nullptr) {
name_str = java_lang_String::as_utf8_string(name);
} else {
name_str = java_lang_String::as_utf8_string(name, buf, buflen);
@ -1608,16 +1608,16 @@ const char* JavaThread::get_thread_name_string(char* buf, int buflen) const {
}
}
#endif
assert(name_str != NULL, "unexpected NULL thread name");
assert(name_str != nullptr, "unexpected null thread name");
return name_str;
}
// Helper to extract the name from the thread oop for logging.
const char* JavaThread::name_for(oop thread_obj) {
assert(thread_obj != NULL, "precondition");
assert(thread_obj != nullptr, "precondition");
oop name = java_lang_Thread::name(thread_obj);
const char* name_str;
if (name != NULL) {
if (name != nullptr) {
name_str = java_lang_String::as_utf8_string(name);
} else {
name_str = "<un-named>";
@ -1668,10 +1668,10 @@ void JavaThread::prepare(jobject jni_thread, ThreadPriority prio) {
oop JavaThread::current_park_blocker() {
// Support for JSR-166 locks
oop thread_oop = threadObj();
if (thread_oop != NULL) {
if (thread_oop != nullptr) {
return java_lang_Thread::park_blocker(thread_oop);
}
return NULL;
return nullptr;
}
// Print current stack trace for checked JNI warnings and JNI fatal errors.
@ -1707,7 +1707,7 @@ void JavaThread::print_stack_on(outputStream* st) {
RegisterMap::WalkContinuation::skip);
vframe* start_vf = platform_thread_last_java_vframe(&reg_map);
int count = 0;
for (vframe* f = start_vf; f != NULL; f = f->sender()) {
for (vframe* f = start_vf; f != nullptr; f = f->sender()) {
if (f->is_java_frame()) {
javaVFrame* jvf = javaVFrame::cast(f);
java_lang_Throwable::print_stack_element(st, jvf->method(), jvf->bci());
@ -1741,7 +1741,7 @@ void JavaThread::print_vthread_stack_on(outputStream* st) {
ContinuationEntry* cont_entry = last_continuation();
vframe* start_vf = last_java_vframe(&reg_map);
int count = 0;
for (vframe* f = start_vf; f != NULL; f = f->sender()) {
for (vframe* f = start_vf; f != nullptr; f = f->sender()) {
// Watch for end of vthread stack
if (Continuation::is_continuation_enterSpecial(f->fr())) {
assert(cont_entry == Continuation::get_continuation_entry_for_entry_frame(this, f->fr()), "");
@ -1793,7 +1793,7 @@ JvmtiThreadState* JavaThread::rebind_to_jvmti_thread_state_of(oop thread_oop) {
// JVMTI PopFrame support
void JavaThread::popframe_preserve_args(ByteSize size_in_bytes, void* start) {
assert(_popframe_preserved_args == NULL, "should not wipe out old PopFrame preserved arguments");
assert(_popframe_preserved_args == nullptr, "should not wipe out old PopFrame preserved arguments");
if (in_bytes(size_in_bytes) != 0) {
_popframe_preserved_args = NEW_C_HEAP_ARRAY(char, in_bytes(size_in_bytes), mtThread);
_popframe_preserved_args_size = in_bytes(size_in_bytes);
@ -1816,9 +1816,9 @@ WordSize JavaThread::popframe_preserved_args_size_in_words() {
}
void JavaThread::popframe_free_preserved_args() {
assert(_popframe_preserved_args != NULL, "should not free PopFrame preserved arguments twice");
assert(_popframe_preserved_args != nullptr, "should not free PopFrame preserved arguments twice");
FREE_C_HEAP_ARRAY(char, (char*)_popframe_preserved_args);
_popframe_preserved_args = NULL;
_popframe_preserved_args = nullptr;
_popframe_preserved_args_size = 0;
}
@ -1838,7 +1838,7 @@ class PrintAndVerifyOopClosure: public OopClosure {
protected:
template <class T> inline void do_oop_work(T* p) {
oop obj = RawAccess<>::oop_load(p);
if (obj == NULL) return;
if (obj == nullptr) return;
tty->print(INTPTR_FORMAT ": ", p2i(p));
if (oopDesc::is_oop_or_null(obj)) {
if (obj->is_objArray()) {
@ -1938,7 +1938,7 @@ frame JavaThread::vthread_last_frame() {
frame JavaThread::carrier_last_frame(RegisterMap* reg_map) {
const ContinuationEntry* entry = vthread_continuation();
guarantee (entry != NULL, "Not a carrier thread");
guarantee (entry != nullptr, "Not a carrier thread");
frame f = entry->to_frame();
if (reg_map->process_frames()) {
entry->flush_stack_processing(this);
@ -1952,11 +1952,11 @@ frame JavaThread::platform_thread_last_frame(RegisterMap* reg_map) {
}
javaVFrame* JavaThread::last_java_vframe(const frame f, RegisterMap *reg_map) {
assert(reg_map != NULL, "a map must be given");
assert(reg_map != nullptr, "a map must be given");
for (vframe* vf = vframe::new_vframe(&f, reg_map, this); vf; vf = vf->sender()) {
if (vf->is_java_frame()) return javaVFrame::cast(vf);
}
return NULL;
return nullptr;
}
Klass* JavaThread::security_get_caller_class(int depth) {
@ -1968,7 +1968,7 @@ Klass* JavaThread::security_get_caller_class(int depth) {
if (!vfst.at_end()) {
return vfst.method()->method_holder();
}
return NULL;
return nullptr;
}
// java.lang.Thread.sleep support
@ -2043,7 +2043,7 @@ void JavaThread::invoke_shutdown_hooks() {
Klass* shutdown_klass =
SystemDictionary::resolve_or_null(vmSymbols::java_lang_Shutdown(),
THREAD);
if (shutdown_klass != NULL) {
if (shutdown_klass != nullptr) {
// SystemDictionary::resolve_or_null will return null if there was
// an exception. If we cannot load the Shutdown class, just don't
// call Shutdown.shutdown() at all. This will mean the shutdown hooks
@ -2092,7 +2092,7 @@ Handle JavaThread::create_system_thread_object(const char* name,
void JavaThread::start_internal_daemon(JavaThread* current, JavaThread* target,
Handle thread_oop, ThreadPriority prio) {
assert(target->osthread() != NULL, "target thread is not properly initialized");
assert(target->osthread() != nullptr, "target thread is not properly initialized");
MutexLocker mu(current, Threads_lock);


@ -131,7 +131,7 @@ class JavaThread: public Thread {
// adapter to store the callee Method*. This value is NEVER live
// across a gc point so it does NOT have to be gc'd
// The handshake is open ended since we can't be certain that it will
// be NULLed. This is because we rarely ever see the race and end up
// be nulled. This is because we rarely ever see the race and end up
// in handle_wrong_method which is the backend of the handshake. See
// code in i2c adapters and handle_wrong_method.
@ -163,7 +163,7 @@ class JavaThread: public Thread {
ObjectMonitor* current_pending_monitor() {
// Use Atomic::load() to prevent data race between concurrent modification and
// concurrent readers, e.g. ThreadService::get_current_contended_monitor().
// Especially, reloading pointer from thread after NULL check must be prevented.
// Especially, reloading pointer from thread after null check must be prevented.
return Atomic::load(&_current_pending_monitor);
}
void set_current_pending_monitor(ObjectMonitor* monitor) {
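The comment in this hunk is the whole point of the accessor: a racing reader must take one snapshot and use it, never re-read the field after a null check. A sketch of the caller-side discipline, with ObjectMonitor used opaquely:

  // Broken shape: two reads; the second may observe nullptr even though
  // the first null check passed.
  //   if (t->current_pending_monitor() != nullptr) {
  //     use(t->current_pending_monitor());   // may now be nullptr
  //   }

  void report_sketch(JavaThread* t) {
    ObjectMonitor* m = t->current_pending_monitor();  // single atomic snapshot
    if (m != nullptr) {
      // m is a stable snapshot for the duration of this scope
    }
  }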
@ -224,7 +224,7 @@ class JavaThread: public Thread {
friend class AsyncExceptionHandshake;
friend class HandshakeState;
void install_async_exception(AsyncExceptionHandshake* aec = NULL);
void install_async_exception(AsyncExceptionHandshake* aec = nullptr);
void handle_async_exception(oop java_throwable);
public:
bool has_async_exception_condition();
@ -514,7 +514,7 @@ private:
return on_thread_list() && !is_terminated();
}
// Thread oop. threadObj() can be NULL for initial JavaThread
// Thread oop. threadObj() can be null for initial JavaThread
// (or for threads attached via JNI)
oop threadObj() const;
void set_threadOopHandles(oop p);
@ -536,7 +536,7 @@ private:
ThreadFunction entry_point() const { return _entry_point; }
// Allocates a new Java level thread object for this thread. thread_name may be NULL.
// Allocates a new Java level thread object for this thread. thread_name may be null.
void allocate_threadObj(Handle thread_group, const char* thread_name, bool daemon, TRAPS);
// Last frame anchor routines
@ -595,7 +595,7 @@ private:
void push_cont_fastpath(intptr_t* sp) { if (sp > _cont_fastpath) _cont_fastpath = sp; }
void set_cont_fastpath_thread_state(bool x) { _cont_fastpath_thread_state = (int)x; }
intptr_t* raw_cont_fastpath() const { return _cont_fastpath; }
bool cont_fastpath() const { return _cont_fastpath == NULL && _cont_fastpath_thread_state != 0; }
bool cont_fastpath() const { return _cont_fastpath == nullptr && _cont_fastpath_thread_state != 0; }
bool cont_fastpath_thread_state() const { return _cont_fastpath_thread_state != 0; }
void inc_held_monitor_count(int i = 1, bool jni = false);
@ -706,8 +706,8 @@ private:
void set_pending_deoptimization(int reason) { _pending_deoptimization = reason; }
void set_pending_failed_speculation(jlong failed_speculation) { _pending_failed_speculation = failed_speculation; }
void set_pending_transfer_to_interpreter(bool b) { _pending_transfer_to_interpreter = b; }
void set_jvmci_alternate_call_target(address a) { assert(_jvmci._alternate_call_target == NULL, "must be"); _jvmci._alternate_call_target = a; }
void set_jvmci_implicit_exception_pc(address a) { assert(_jvmci._implicit_exception_pc == NULL, "must be"); _jvmci._implicit_exception_pc = a; }
void set_jvmci_alternate_call_target(address a) { assert(_jvmci._alternate_call_target == nullptr, "must be"); _jvmci._alternate_call_target = a; }
void set_jvmci_implicit_exception_pc(address a) { assert(_jvmci._implicit_exception_pc == nullptr, "must be"); _jvmci._implicit_exception_pc = a; }
virtual bool in_retryable_allocation() const { return _in_retryable_allocation; }
void set_in_retryable_allocation(bool b) { _in_retryable_allocation = b; }
@ -729,8 +729,8 @@ private:
void set_is_method_handle_return(bool value) { _is_method_handle_return = value ? 1 : 0; }
void clear_exception_oop_and_pc() {
set_exception_oop(NULL);
set_exception_pc(NULL);
set_exception_oop(nullptr);
set_exception_pc(nullptr);
}
// Check if address is in the usable part of the stack (excludes protected
@ -847,8 +847,8 @@ private:
// pending check, this is done for Native->Java transitions (i.e. user JNI code).
// VM->Java transitions are not cleared, it is expected that JNI code enclosed
// within ThreadToNativeFromVM makes proper exception checks (i.e. VM internal).
bool is_pending_jni_exception_check() const { return _pending_jni_exception_check_fn != NULL; }
void clear_pending_jni_exception_check() { _pending_jni_exception_check_fn = NULL; }
bool is_pending_jni_exception_check() const { return _pending_jni_exception_check_fn != nullptr; }
void clear_pending_jni_exception_check() { _pending_jni_exception_check_fn = nullptr; }
const char* get_pending_jni_exception_check() const { return _pending_jni_exception_check_fn; }
void set_pending_jni_exception_check(const char* fn_name) { _pending_jni_exception_check_fn = (char*) fn_name; }
@ -876,7 +876,7 @@ private:
void set_entry_point(ThreadFunction entry_point) { _entry_point = entry_point; }
// factor out low-level mechanics for use in both normal and error cases
const char* get_thread_name_string(char* buf = NULL, int buflen = 0) const;
const char* get_thread_name_string(char* buf = nullptr, int buflen = 0) const;
public:
@ -969,7 +969,7 @@ private:
return JavaThread::cast(Thread::current());
}
// Returns the current thread as a JavaThread, or NULL if not attached
// Returns the current thread as a JavaThread, or nullptr if not attached
static inline JavaThread* current_or_null();
// Casts
@ -1000,10 +1000,10 @@ private:
void set_jvmti_thread_state(JvmtiThreadState *value) { _jvmti_thread_state = value; }
// A JvmtiThreadState is lazily allocated. This jvmti_thread_state()
// getter is used to get this JavaThread's JvmtiThreadState if it has
// one which means NULL can be returned. JvmtiThreadState::state_for()
// one which means null can be returned. JvmtiThreadState::state_for()
// is used to get the specified JavaThread's JvmtiThreadState if it has
// one or it allocates a new JvmtiThreadState for the JavaThread and
// returns it. JvmtiThreadState::state_for() will return NULL only if
// returns it. JvmtiThreadState::state_for() will return null only if
// the specified JavaThread is exiting.
JvmtiThreadState *jvmti_thread_state() const { return _jvmti_thread_state; }
static ByteSize jvmti_thread_state_offset() { return byte_offset_of(JavaThread, _jvmti_thread_state); }
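A hedged usage sketch of the two accessors the comment above distinguishes; JvmtiThreadState::state_for is taken from the comment itself, everything else is illustrative:

  void touch_jvmti_state_sketch(JavaThread* jt) {
    JvmtiThreadState* state = jt->jvmti_thread_state();  // lazy: may be null
    if (state == nullptr) {
      state = JvmtiThreadState::state_for(jt);  // allocates; null only if jt is exiting
    }
    if (state != nullptr) {
      // ... safe to use the state here ...
    }
  }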


@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -148,7 +148,7 @@ inline JavaThreadState JavaThread::thread_state() const {
}
inline void JavaThread::set_thread_state(JavaThreadState s) {
assert(current_or_null() == NULL || current_or_null() == this,
assert(current_or_null() == nullptr || current_or_null() == this,
"state change should only be called by the current thread");
#if defined(PPC64) || defined (AARCH64) || defined(RISCV64)
// Use membars when accessing volatile _thread_state. See
@ -225,8 +225,8 @@ inline void JavaThread::set_terminated(TerminatedTypes t) {
// Allow tracking of class initialization monitor use
inline void JavaThread::set_class_to_be_initialized(InstanceKlass* k) {
assert((k == NULL && _class_to_be_initialized != NULL) ||
(k != NULL && _class_to_be_initialized == NULL), "incorrect usage");
assert((k == nullptr && _class_to_be_initialized != nullptr) ||
(k != nullptr && _class_to_be_initialized == nullptr), "incorrect usage");
assert(this == Thread::current(), "Only the current thread can set this field");
_class_to_be_initialized = k;
}


@ -47,8 +47,8 @@ OopStorage* JNIHandles::weak_global_handles() {
}
// Serviceability agent support.
OopStorage* JNIHandles::_global_handles = NULL;
OopStorage* JNIHandles::_weak_global_handles = NULL;
OopStorage* JNIHandles::_global_handles = nullptr;
OopStorage* JNIHandles::_weak_global_handles = nullptr;
void jni_handles_init() {
JNIHandles::_global_handles = OopStorageSet::create_strong("JNI Global", mtInternal);
@ -59,10 +59,10 @@ jobject JNIHandles::make_local(oop obj) {
return make_local(JavaThread::current(), obj);
}
// Used by NewLocalRef which requires NULL on out-of-memory
// Used by NewLocalRef which requires null on out-of-memory
jobject JNIHandles::make_local(JavaThread* thread, oop obj, AllocFailType alloc_failmode) {
if (obj == NULL) {
return NULL; // ignore null handles
if (obj == nullptr) {
return nullptr; // ignore null handles
} else {
assert(oopDesc::is_oop(obj), "not an oop");
assert(!current_thread_in_native(), "must not be in native");
@ -85,14 +85,14 @@ static void report_handle_allocation_failure(AllocFailType alloc_failmode,
jobject JNIHandles::make_global(Handle obj, AllocFailType alloc_failmode) {
assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
assert(!current_thread_in_native(), "must not be in native");
jobject res = NULL;
jobject res = nullptr;
if (!obj.is_null()) {
// ignore null handles
assert(oopDesc::is_oop(obj()), "not an oop");
oop* ptr = global_handles()->allocate();
// Return NULL on allocation failure.
if (ptr != NULL) {
assert(NativeAccess<AS_NO_KEEPALIVE>::oop_load(ptr) == oop(NULL), "invariant");
// Return null on allocation failure.
if (ptr != nullptr) {
assert(NativeAccess<AS_NO_KEEPALIVE>::oop_load(ptr) == oop(nullptr), "invariant");
NativeAccess<>::oop_store(ptr, obj());
char* tptr = reinterpret_cast<char*>(ptr) + TypeTag::global;
res = reinterpret_cast<jobject>(tptr);
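The + TypeTag::global arithmetic above encodes the handle's category in the low bits of the jobject itself, which is what lets handle_type() and the resolve paths dispatch without extra metadata. A standalone sketch with assumed tag values (the real TypeTag constants live in jniHandles.hpp):

  #include <cstdint>

  namespace TagSketch {
    const uintptr_t weak_global = 0b01;   // assumed values, for illustration only
    const uintptr_t global      = 0b10;
    const uintptr_t mask        = 0b11;
  }

  inline void* tag_global_sketch(void* ptr) {
    return (char*)ptr + TagSketch::global;
  }
  inline void* untag_global_sketch(void* handle) {
    return (char*)handle - TagSketch::global;
  }
  inline bool is_global_tagged_sketch(void* handle) {
    return ((uintptr_t)handle & TagSketch::mask) == TagSketch::global;
  }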
@ -107,14 +107,14 @@ jobject JNIHandles::make_global(Handle obj, AllocFailType alloc_failmode) {
jweak JNIHandles::make_weak_global(Handle obj, AllocFailType alloc_failmode) {
assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
assert(!current_thread_in_native(), "must not be in native");
jweak res = NULL;
jweak res = nullptr;
if (!obj.is_null()) {
// ignore null handles
assert(oopDesc::is_oop(obj()), "not an oop");
oop* ptr = weak_global_handles()->allocate();
// Return NULL on allocation failure.
if (ptr != NULL) {
assert(NativeAccess<AS_NO_KEEPALIVE>::oop_load(ptr) == oop(NULL), "invariant");
// Return nullptr on allocation failure.
if (ptr != nullptr) {
assert(NativeAccess<AS_NO_KEEPALIVE>::oop_load(ptr) == oop(nullptr), "invariant");
NativeAccess<ON_PHANTOM_OOP_REF>::oop_store(ptr, obj());
char* tptr = reinterpret_cast<char*>(ptr) + TypeTag::weak_global;
res = reinterpret_cast<jweak>(tptr);
@ -125,38 +125,38 @@ jweak JNIHandles::make_weak_global(Handle obj, AllocFailType alloc_failmode) {
return res;
}
// Resolve some erroneous cases to NULL, rather than treating them as
// Resolve some erroneous cases to null, rather than treating them as
// possibly unchecked errors. In particular, deleted handles are
// treated as NULL (though a deleted and later reallocated handle
// treated as null (though a deleted and later reallocated handle
// isn't detected).
oop JNIHandles::resolve_external_guard(jobject handle) {
oop result = NULL;
if (handle != NULL) {
oop result = nullptr;
if (handle != nullptr) {
result = resolve_impl<DECORATORS_NONE, true /* external_guard */>(handle);
}
return result;
}
bool JNIHandles::is_weak_global_cleared(jweak handle) {
assert(handle != NULL, "precondition");
assert(handle != nullptr, "precondition");
oop* oop_ptr = weak_global_ptr(handle);
oop value = NativeAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(oop_ptr);
return value == NULL;
return value == nullptr;
}
void JNIHandles::destroy_global(jobject handle) {
if (handle != NULL) {
if (handle != nullptr) {
oop* oop_ptr = global_ptr(handle);
NativeAccess<>::oop_store(oop_ptr, (oop)NULL);
NativeAccess<>::oop_store(oop_ptr, (oop)nullptr);
global_handles()->release(oop_ptr);
}
}
void JNIHandles::destroy_weak_global(jweak handle) {
if (handle != NULL) {
if (handle != nullptr) {
oop* oop_ptr = weak_global_ptr(handle);
NativeAccess<ON_PHANTOM_OOP_REF>::oop_store(oop_ptr, (oop)NULL);
NativeAccess<ON_PHANTOM_OOP_REF>::oop_store(oop_ptr, (oop)nullptr);
weak_global_handles()->release(oop_ptr);
}
}
@ -181,7 +181,7 @@ inline bool is_storage_handle(const OopStorage* storage, const oop* ptr) {
jobjectRefType JNIHandles::handle_type(JavaThread* thread, jobject handle) {
assert(handle != NULL, "precondition");
assert(handle != nullptr, "precondition");
jobjectRefType result = JNIInvalidRefType;
if (is_weak_global_tagged(handle)) {
if (is_storage_handle(weak_global_handles(), weak_global_ptr(handle))) {
@ -212,11 +212,11 @@ jobjectRefType JNIHandles::handle_type(JavaThread* thread, jobject handle) {
bool JNIHandles::is_local_handle(JavaThread* thread, jobject handle) {
assert(handle != NULL, "precondition");
assert(handle != nullptr, "precondition");
JNIHandleBlock* block = thread->active_handles();
// Look back past possible native calls to jni_PushLocalFrame.
while (block != NULL) {
while (block != nullptr) {
if (block->chain_contains(handle)) {
return true;
}
@ -231,7 +231,7 @@ bool JNIHandles::is_local_handle(JavaThread* thread, jobject handle) {
// come from, so we'll check the whole stack.
bool JNIHandles::is_frame_handle(JavaThread* thr, jobject handle) {
assert(handle != NULL, "precondition");
assert(handle != nullptr, "precondition");
// If there is no java frame, then this must be top level code, such
// as the java command executable, in which case, this type of handle
// is not permitted.
@ -241,14 +241,14 @@ bool JNIHandles::is_frame_handle(JavaThread* thr, jobject handle) {
bool JNIHandles::is_global_handle(jobject handle) {
assert(handle != NULL, "precondition");
assert(handle != nullptr, "precondition");
assert(!is_global_tagged(handle) || is_storage_handle(global_handles(), global_ptr(handle)), "invalid storage");
return is_global_tagged(handle);
}
bool JNIHandles::is_weak_global_handle(jobject handle) {
assert(handle != NULL, "precondition");
assert(handle != nullptr, "precondition");
assert(!is_weak_global_tagged(handle) || is_storage_handle(weak_global_handles(), weak_global_ptr(handle)), "invalid storage");
return is_weak_global_tagged(handle);
}
@ -315,7 +315,7 @@ void JNIHandleBlock::zap() {
// Zap block values
_top = 0;
for (int index = 0; index < block_size_in_oops; index++) {
// NOT using Access here; just bare clobbering to NULL, since the
// NOT using Access here; just bare clobbering to null, since the
// block no longer contains valid oops.
_handles[index] = 0;
}
@ -324,20 +324,20 @@ void JNIHandleBlock::zap() {
JNIHandleBlock* JNIHandleBlock::allocate_block(JavaThread* thread, AllocFailType alloc_failmode) {
// The VM thread can allocate a handle block on behalf of another thread during a safepoint.
assert(thread == NULL || thread == Thread::current() || SafepointSynchronize::is_at_safepoint(),
assert(thread == nullptr || thread == Thread::current() || SafepointSynchronize::is_at_safepoint(),
"sanity check");
JNIHandleBlock* block;
// Check the thread-local free list for a block so we don't
// have to acquire a mutex.
if (thread != NULL && thread->free_handle_block() != NULL) {
if (thread != nullptr && thread->free_handle_block() != nullptr) {
block = thread->free_handle_block();
thread->set_free_handle_block(block->_next);
} else {
// Allocate new block
if (alloc_failmode == AllocFailStrategy::RETURN_NULL) {
block = new (std::nothrow) JNIHandleBlock();
if (block == NULL) {
return NULL;
if (block == nullptr) {
return nullptr;
}
} else {
block = new JNIHandleBlock();
@ -346,46 +346,46 @@ JNIHandleBlock* JNIHandleBlock::allocate_block(JavaThread* thread, AllocFailType
block->zap();
}
block->_top = 0;
block->_next = NULL;
block->_pop_frame_link = NULL;
block->_next = nullptr;
block->_pop_frame_link = nullptr;
// _last, _free_list & _allocate_before_rebuild initialized in allocate_handle
debug_only(block->_last = NULL);
debug_only(block->_free_list = NULL);
debug_only(block->_last = nullptr);
debug_only(block->_free_list = nullptr);
debug_only(block->_allocate_before_rebuild = -1);
return block;
}
void JNIHandleBlock::release_block(JNIHandleBlock* block, JavaThread* thread) {
assert(thread == NULL || thread == Thread::current(), "sanity check");
assert(thread == nullptr || thread == Thread::current(), "sanity check");
JNIHandleBlock* pop_frame_link = block->pop_frame_link();
// Put returned block at the beginning of the thread-local free list.
// Note that if thread == NULL, we use it as an implicit argument that
// Note that if thread == nullptr, we use it as an implicit argument that
// we _don't_ want the block to be kept on the free_handle_block.
// See for instance JavaThread::exit().
if (thread != NULL ) {
if (thread != nullptr ) {
block->zap();
JNIHandleBlock* freelist = thread->free_handle_block();
block->_pop_frame_link = NULL;
block->_pop_frame_link = nullptr;
thread->set_free_handle_block(block);
// Add original freelist to end of chain
if ( freelist != NULL ) {
while ( block->_next != NULL ) block = block->_next;
if ( freelist != nullptr ) {
while ( block->_next != nullptr ) block = block->_next;
block->_next = freelist;
}
block = NULL;
block = nullptr;
} else {
DEBUG_ONLY(block->set_pop_frame_link(NULL));
while (block != NULL) {
DEBUG_ONLY(block->set_pop_frame_link(nullptr));
while (block != nullptr) {
JNIHandleBlock* next = block->_next;
Atomic::dec(&_blocks_allocated);
assert(block->pop_frame_link() == NULL, "pop_frame_link should be NULL");
assert(block->pop_frame_link() == nullptr, "pop_frame_link should be nullptr");
delete block;
block = next;
}
}
if (pop_frame_link != NULL) {
if (pop_frame_link != nullptr) {
// As a sanity check we release blocks pointed to by the pop_frame_link.
// This should never happen (only if PopLocalFrame is not called the
// correct number of times).
@ -398,10 +398,10 @@ void JNIHandleBlock::oops_do(OopClosure* f) {
JNIHandleBlock* current_chain = this;
// Iterate over chain of blocks, followed by chains linked through the
// pop frame links.
while (current_chain != NULL) {
for (JNIHandleBlock* current = current_chain; current != NULL;
while (current_chain != nullptr) {
for (JNIHandleBlock* current = current_chain; current != nullptr;
current = current->_next) {
assert(current == current_chain || current->pop_frame_link() == NULL,
assert(current == current_chain || current->pop_frame_link() == nullptr,
"only blocks first in chain should have pop frame link set");
for (int index = 0; index < current->_top; index++) {
uintptr_t* addr = &(current->_handles)[index];
@ -429,15 +429,15 @@ jobject JNIHandleBlock::allocate_handle(JavaThread* caller, oop obj, AllocFailTy
// This is the first allocation or the initial block got zapped when
// entering a native function. If we have any following blocks they are
// not valid anymore.
for (JNIHandleBlock* current = _next; current != NULL;
for (JNIHandleBlock* current = _next; current != nullptr;
current = current->_next) {
assert(current->_last == NULL, "only first block should have _last set");
assert(current->_free_list == NULL,
assert(current->_last == nullptr, "only first block should have _last set");
assert(current->_free_list == nullptr,
"only first block should have _free_list set");
if (current->_top == 0) {
// All blocks after the first clear trailing block are already cleared.
#ifdef ASSERT
for (current = current->_next; current != NULL; current = current->_next) {
for (current = current->_next; current != nullptr; current = current->_next) {
assert(current->_top == 0, "trailing blocks must already be cleared");
}
#endif
@ -447,7 +447,7 @@ jobject JNIHandleBlock::allocate_handle(JavaThread* caller, oop obj, AllocFailTy
current->zap();
}
// Clear initial block
_free_list = NULL;
_free_list = nullptr;
_allocate_before_rebuild = 0;
_last = this;
zap();
@ -461,14 +461,14 @@ jobject JNIHandleBlock::allocate_handle(JavaThread* caller, oop obj, AllocFailTy
}
// Try free list
if (_free_list != NULL) {
if (_free_list != nullptr) {
oop* handle = (oop*)_free_list;
_free_list = (uintptr_t*) untag_free_list(*_free_list);
*handle = obj;
return (jobject) handle;
}
// Check if an unused block follows last
if (_last->_next != NULL) {
if (_last->_next != nullptr) {
// update last and retry
_last = _last->_next;
return allocate_handle(caller, obj, alloc_failmode);
@ -479,8 +479,8 @@ jobject JNIHandleBlock::allocate_handle(JavaThread* caller, oop obj, AllocFailTy
rebuild_free_list(); // updates _allocate_before_rebuild counter
} else {
_last->_next = JNIHandleBlock::allocate_block(caller, alloc_failmode);
if (_last->_next == NULL) {
return NULL;
if (_last->_next == nullptr) {
return nullptr;
}
_last = _last->_next;
_allocate_before_rebuild--;
@ -489,15 +489,15 @@ jobject JNIHandleBlock::allocate_handle(JavaThread* caller, oop obj, AllocFailTy
}
void JNIHandleBlock::rebuild_free_list() {
assert(_allocate_before_rebuild == 0 && _free_list == NULL, "just checking");
assert(_allocate_before_rebuild == 0 && _free_list == nullptr, "just checking");
int free = 0;
int blocks = 0;
for (JNIHandleBlock* current = this; current != NULL; current = current->_next) {
for (JNIHandleBlock* current = this; current != nullptr; current = current->_next) {
for (int index = 0; index < current->_top; index++) {
uintptr_t* handle = &(current->_handles)[index];
if (*handle == 0) {
// this handle was cleared out by a delete call, reuse it
*handle = _free_list == NULL ? 0 : tag_free_list((uintptr_t)_free_list);
*handle = _free_list == nullptr ? 0 : tag_free_list((uintptr_t)_free_list);
_free_list = handle;
free++;
}
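rebuild_free_list threads the free list through the handle slots themselves: a cleared slot stores a tagged pointer to the previous head, so no side allocation is needed. A sketch of the helpers it relies on, renamed to make clear the real bit layout of tag_free_list/untag_free_list is an implementation detail of jniHandles:

  // Assumed single-bit tag distinguishing "free-list link" from a stored oop.
  const uintptr_t free_list_tag = 1;

  inline uintptr_t tag_free_list_sketch(uintptr_t value)   { return value |  free_list_tag; }
  inline uintptr_t untag_free_list_sketch(uintptr_t value) { return value & ~free_list_tag; }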
@ -524,7 +524,7 @@ bool JNIHandleBlock::contains(jobject handle) const {
bool JNIHandleBlock::chain_contains(jobject handle) const {
for (JNIHandleBlock* current = (JNIHandleBlock*) this; current != NULL; current = current->_next) {
for (JNIHandleBlock* current = (JNIHandleBlock*) this; current != nullptr; current = current->_next) {
if (current->contains(handle)) {
return true;
}


@ -112,13 +112,13 @@ public:
static void print_on(outputStream* st);
static void print();
static void verify();
// The category predicates all require handle != NULL.
// The category predicates all require handle != nullptr.
static bool is_local_handle(JavaThread* thread, jobject handle);
static bool is_frame_handle(JavaThread* thread, jobject handle);
static bool is_global_handle(jobject handle);
static bool is_weak_global_handle(jobject handle);
// precondition: handle != NULL.
// precondition: handle != nullptr.
static jobjectRefType handle_type(JavaThread* thread, jobject handle);
// Garbage collection support(global handles only, local handles are traversed from thread)
@ -170,8 +170,8 @@ class JNIHandleBlock : public CHeapObj<mtInternal> {
jobject allocate_handle(JavaThread* caller, oop obj, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
// Block allocation and block free list management
static JNIHandleBlock* allocate_block(JavaThread* thread = NULL, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
static void release_block(JNIHandleBlock* block, JavaThread* thread = NULL);
static JNIHandleBlock* allocate_block(JavaThread* thread = nullptr, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
static void release_block(JNIHandleBlock* block, JavaThread* thread = nullptr);
// JNI PushLocalFrame/PopLocalFrame support
JNIHandleBlock* pop_frame_link() const { return _pop_frame_link; }


@ -69,7 +69,7 @@ inline oop* JNIHandles::weak_global_ptr(jweak handle) {
// external_guard is true if called from resolve_external_guard.
template <DecoratorSet decorators, bool external_guard>
inline oop JNIHandles::resolve_impl(jobject handle) {
assert(handle != NULL, "precondition");
assert(handle != nullptr, "precondition");
assert(!current_thread_in_native(), "must not be in native");
oop result;
if (is_weak_global_tagged(handle)) { // Unlikely
@ -78,27 +78,27 @@ inline oop JNIHandles::resolve_impl(jobject handle) {
result = NativeAccess<decorators>::oop_load(global_ptr(handle));
// Construction of jobjects canonicalizes a null value into a null
// jobject, so for non-jweak the pointee should never be null.
assert(external_guard || result != NULL, "Invalid JNI handle");
assert(external_guard || result != nullptr, "Invalid JNI handle");
} else {
result = *local_ptr(handle);
// Construction of jobjects canonicalizes a null value into a null
// jobject, so for non-jweak the pointee should never be null.
assert(external_guard || result != NULL, "Invalid JNI handle");
assert(external_guard || result != nullptr, "Invalid JNI handle");
}
return result;
}
inline oop JNIHandles::resolve(jobject handle) {
oop result = NULL;
if (handle != NULL) {
oop result = nullptr;
if (handle != nullptr) {
result = resolve_impl<DECORATORS_NONE, false /* external_guard */>(handle);
}
return result;
}
inline oop JNIHandles::resolve_no_keepalive(jobject handle) {
oop result = NULL;
if (handle != NULL) {
oop result = nullptr;
if (handle != nullptr) {
result = resolve_impl<AS_NO_KEEPALIVE, false /* external_guard */>(handle);
}
return result;
@ -111,15 +111,15 @@ inline bool JNIHandles::is_same_object(jobject handle1, jobject handle2) {
}
inline oop JNIHandles::resolve_non_null(jobject handle) {
assert(handle != NULL, "JNI handle should not be null");
assert(handle != nullptr, "JNI handle should not be null");
oop result = resolve_impl<DECORATORS_NONE, false /* external_guard */>(handle);
assert(result != NULL, "NULL read from jni handle");
assert(result != nullptr, "nullptr read from jni handle");
return result;
}
inline void JNIHandles::destroy_local(jobject handle) {
if (handle != NULL) {
*local_ptr(handle) = NULL;
if (handle != nullptr) {
*local_ptr(handle) = nullptr;
}
}
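Taken together, the hunks in this file give three resolution flavors with different null contracts. A hedged caller-side summary:

  void resolve_flavors_sketch(jobject h) {
    oop o1 = JNIHandles::resolve(h);              // h may be null; result may be null
    oop o2 = JNIHandles::resolve_no_keepalive(h); // same, without keeping the object alive
    oop o3 = JNIHandles::resolve_non_null(h);     // asserts h != nullptr and result != nullptr
  }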


@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -39,7 +39,7 @@ class JniPeriodicCheckerTask : public PeriodicTask {
//----------------------------------------------------------
// Implementation of JniPeriodicChecker
JniPeriodicCheckerTask* JniPeriodicChecker::_task = NULL;
JniPeriodicCheckerTask* JniPeriodicChecker::_task = nullptr;
/*
* The engage() method is called at initialization time via


@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -44,7 +44,7 @@ class JniPeriodicChecker : AllStatic {
public:
// Start/stop task
static void engage();
static bool is_active() { return _task != NULL; }
static bool is_active() { return _task != nullptr; }
};
#endif // SHARE_RUNTIME_JNIPERIODICCHECKER_HPP


@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -39,7 +39,7 @@ KeepStackGCProcessedMark::KeepStackGCProcessedMark(JavaThread* jt) :
return;
}
StackWatermark* our_watermark = StackWatermarkSet::get(JavaThread::current(), StackWatermarkKind::gc);
if (our_watermark == NULL) {
if (our_watermark == nullptr) {
_active = false;
return;
}
@ -56,5 +56,5 @@ KeepStackGCProcessedMark::~KeepStackGCProcessedMark() {
}
void KeepStackGCProcessedMark::finish_processing() {
StackWatermarkSet::finish_processing(_jt, NULL /* context */, StackWatermarkKind::gc);
StackWatermarkSet::finish_processing(_jt, nullptr /* context */, StackWatermarkKind::gc);
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,7 +30,7 @@
MonitorChunk::MonitorChunk(int number_on_monitors) {
_number_of_monitors = number_on_monitors;
_monitors = NEW_C_HEAP_ARRAY(BasicObjectLock, number_on_monitors, mtSynchronizer);
_next = NULL;
_next = nullptr;
}


@ -40,13 +40,13 @@ class InFlightMutexRelease {
Mutex* _in_flight_mutex;
public:
InFlightMutexRelease(Mutex* in_flight_mutex) : _in_flight_mutex(in_flight_mutex) {
assert(in_flight_mutex != NULL, "must be");
assert(in_flight_mutex != nullptr, "must be");
}
void operator()(JavaThread* current) {
_in_flight_mutex->release_for_safepoint();
_in_flight_mutex = NULL;
_in_flight_mutex = nullptr;
}
bool not_released() { return _in_flight_mutex != NULL; }
bool not_released() { return _in_flight_mutex != nullptr; }
};
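A hedged sketch of how this functor is used a little further down in Monitor::wait (visible in the later hunks): it is handed to the safepoint-blocking transition, which invokes operator() only when the thread had to give the lock up for a safepoint, and not_released() tells the waiter which case occurred:

  // Illustrative shape only; the real call site wires ifmr into
  // ThreadBlockInVMPreprocess, as the wait() hunks below show.
  void wait_shape_sketch(Mutex* in_flight_mutex, JavaThread* current) {
    InFlightMutexRelease ifmr(in_flight_mutex);
    // ... block; the transition may invoke ifmr(current) for a safepoint ...
    if (ifmr.not_released()) {
      // still owned: conceptually reestablish ownership
    } else {
      // released for a safepoint: must re-acquire before returning
    }
  }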
#ifdef ASSERT
@ -124,7 +124,7 @@ void Mutex::lock(Thread* self) {
lock_contended(self);
}
assert_owner(NULL);
assert_owner(nullptr);
set_owner(self);
}
@ -145,7 +145,7 @@ void Mutex::lock_without_safepoint_check(Thread * self) {
check_rank(self);
_lock.lock();
assert_owner(NULL);
assert_owner(nullptr);
set_owner(self);
}
@ -171,7 +171,7 @@ bool Mutex::try_lock_inner(bool do_rank_checks) {
check_block_state(self);
if (_lock.try_lock()) {
assert_owner(NULL);
assert_owner(nullptr);
set_owner(self);
return true;
}
@ -189,13 +189,13 @@ bool Mutex::try_lock_without_rank_check() {
}
void Mutex::release_for_safepoint() {
assert_owner(NULL);
assert_owner(nullptr);
_lock.unlock();
}
void Mutex::unlock() {
DEBUG_ONLY(assert_owner(Thread::current()));
set_owner(NULL);
set_owner(nullptr);
_lock.unlock();
}
@ -216,9 +216,9 @@ bool Monitor::wait_without_safepoint_check(uint64_t timeout) {
assert_owner(self);
check_rank(self);
// conceptually set the owner to NULL in anticipation of
// conceptually set the owner to null in anticipation of
// abdicating the lock in wait
set_owner(NULL);
set_owner(nullptr);
// Check safepoint state after resetting owner and possible NSV.
check_no_safepoint_state(self);
@ -237,9 +237,9 @@ bool Monitor::wait(uint64_t timeout) {
assert_owner(self);
check_rank(self);
// conceptually set the owner to NULL in anticipation of
// conceptually set the owner to null in anticipation of
// abdicating the lock in wait
set_owner(NULL);
set_owner(nullptr);
// Check safepoint state after resetting owner and possible NSV.
check_safepoint_state(self);
@ -256,7 +256,7 @@ bool Monitor::wait(uint64_t timeout) {
if (ifmr.not_released()) {
// Not unlocked by ~ThreadBlockInVMPreprocess
assert_owner(NULL);
assert_owner(nullptr);
// Conceptually reestablish ownership of the lock.
set_owner(self);
} else {
@ -267,13 +267,13 @@ bool Monitor::wait(uint64_t timeout) {
}
Mutex::~Mutex() {
assert_owner(NULL);
assert_owner(nullptr);
os::free(const_cast<char*>(_name));
}
Mutex::Mutex(Rank rank, const char * name, bool allow_vm_block) : _owner(NULL) {
Mutex::Mutex(Rank rank, const char * name, bool allow_vm_block) : _owner(nullptr) {
assert(os::mutex_init_done(), "Too early!");
assert(name != NULL, "Mutex requires a name");
assert(name != nullptr, "Mutex requires a name");
_name = os::strdup(name, mtInternal);
#ifdef ASSERT
_allow_vm_block = allow_vm_block;
@ -363,7 +363,7 @@ void Mutex::print() const {
#ifdef ASSERT
void Mutex::assert_owner(Thread * expected) {
const char* msg = "invalid owner";
if (expected == NULL) {
if (expected == nullptr) {
msg = "should be un-owned";
}
else if (expected == Thread::current()) {
@ -376,7 +376,7 @@ void Mutex::assert_owner(Thread * expected) {
Mutex* Mutex::get_least_ranked_lock(Mutex* locks) {
Mutex *res, *tmp;
for (res = tmp = locks; tmp != NULL; tmp = tmp->next()) {
for (res = tmp = locks; tmp != nullptr; tmp = tmp->next()) {
if (tmp->rank() < res->rank()) {
res = tmp;
}
@ -386,8 +386,8 @@ Mutex* Mutex::get_least_ranked_lock(Mutex* locks) {
Mutex* Mutex::get_least_ranked_lock_besides_this(Mutex* locks) {
Mutex *res, *tmp;
for (res = NULL, tmp = locks; tmp != NULL; tmp = tmp->next()) {
if (tmp != this && (res == NULL || tmp->rank() < res->rank())) {
for (res = nullptr, tmp = locks; tmp != nullptr; tmp = tmp->next()) {
if (tmp != this && (res == nullptr || tmp->rank() < res->rank())) {
res = tmp;
}
}
@ -401,8 +401,8 @@ void Mutex::check_rank(Thread* thread) {
// We expect the locks already acquired to be in increasing rank order,
// modulo locks acquired in try_lock_without_rank_check()
for (Mutex* tmp = locks_owned; tmp != NULL; tmp = tmp->next()) {
if (tmp->next() != NULL) {
for (Mutex* tmp = locks_owned; tmp != nullptr; tmp = tmp->next()) {
if (tmp->next() != nullptr) {
assert(tmp->rank() < tmp->next()->rank()
|| tmp->skip_rank_check(), "mutex rank anomaly?");
}
@ -416,7 +416,7 @@ void Mutex::check_rank(Thread* thread) {
// able to check for safepoints first with a TBIVM.
// For all threads, we enforce not holding the tty lock or below, since this could block progress also.
// Also "this" should be the monitor with lowest rank owned by this thread.
if (least != NULL && ((least->rank() <= Mutex::nosafepoint && thread->is_Java_thread()) ||
if (least != nullptr && ((least->rank() <= Mutex::nosafepoint && thread->is_Java_thread()) ||
least->rank() <= Mutex::tty ||
least->rank() <= this->rank())) {
ResourceMark rm(thread);
@ -436,7 +436,7 @@ void Mutex::check_rank(Thread* thread) {
// that the thread holds and m2 is the mutex the thread is trying
// to acquire, then deadlock prevention rules require that the rank
// of m2 be less than the rank of m1. This prevents circular waits.
if (least != NULL && least->rank() <= this->rank()) {
if (least != nullptr && least->rank() <= this->rank()) {
ResourceMark rm(thread);
if (least->rank() > Mutex::tty) {
// Printing owned locks acquires tty lock. If the least rank was below or equal
@ -461,15 +461,15 @@ void Mutex::set_owner_implementation(Thread *new_owner) {
// It uses the Mutex::_owner, Mutex::_next, and
// Thread::_owned_locks fields, and no other function
// changes those fields.
// It is illegal to set the mutex from one non-NULL
// owner to another--it must be owned by NULL as an
// It is illegal to set the mutex from one non-null
// owner to another--it must be owned by null as an
// intermediate state.
if (new_owner != NULL) {
if (new_owner != nullptr) {
// the thread is acquiring this lock
assert(new_owner == Thread::current(), "Should I be doing this?");
assert(owner() == NULL, "setting the owner thread of an already owned mutex");
assert(owner() == nullptr, "setting the owner thread of an already owned mutex");
raw_set_owner(new_owner); // set the owner
// link "this" into the owned locks list
@ -490,30 +490,30 @@ void Mutex::set_owner_implementation(Thread *new_owner) {
_last_owner = old_owner;
_skip_rank_check = false;
assert(old_owner != NULL, "removing the owner thread of an unowned mutex");
assert(old_owner != nullptr, "removing the owner thread of an unowned mutex");
assert(old_owner == Thread::current(), "removing the owner thread of an unowned mutex");
raw_set_owner(NULL); // set the owner
raw_set_owner(nullptr); // set the owner
Mutex* locks = old_owner->owned_locks();
// remove "this" from the owned locks list
Mutex* prev = NULL;
Mutex* prev = nullptr;
bool found = false;
for (; locks != NULL; prev = locks, locks = locks->next()) {
for (; locks != nullptr; prev = locks, locks = locks->next()) {
if (locks == this) {
found = true;
break;
}
}
assert(found, "Removing a lock not owned");
if (prev == NULL) {
if (prev == nullptr) {
old_owner->_owned_locks = _next;
} else {
prev->_next = _next;
}
_next = NULL;
_next = nullptr;
// ~NSV implied with locking allow_vm_block flag.
if (old_owner->is_Java_thread() && _allow_vm_block && this != tty_lock) {
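
check_rank() above implements the classic lock-ordering discipline: a thread may only acquire a mutex that ranks below every mutex it already holds, which makes a circular wait impossible. A self-contained sketch of that rule with std::mutex and a thread-local stack of held locks (RankedMutex and _held are illustrative names; HotSpot's special ranks and the try_lock_without_rank_check() escape hatch are omitted):

#include <cassert>
#include <mutex>
#include <vector>

class RankedMutex {
  std::mutex _lock;
  const int  _rank;
  static thread_local std::vector<const RankedMutex*> _held;
 public:
  explicit RankedMutex(int rank) : _rank(rank) {}
  int rank() const { return _rank; }

  void lock() {
    // Deadlock prevention: every lock already held must out-rank
    // the one being acquired.
    for (const RankedMutex* m : _held) {
      assert(m->rank() > _rank && "mutex rank anomaly?");
    }
    _lock.lock();
    _held.push_back(this);
  }

  void unlock() {
    // The sketch assumes LIFO unlock order for simplicity.
    assert(!_held.empty() && _held.back() == this);
    _held.pop_back();
    _lock.unlock();
  }
};

thread_local std::vector<const RankedMutex*> RankedMutex::_held;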

View File

@ -89,7 +89,7 @@ class Mutex : public CHeapObj<mtSynchronizer> {
private:
// The _owner field is only set by the current thread, either to itself after it has acquired
// the low-level _lock, or to NULL before it has released the _lock. Accesses by any thread other
// the low-level _lock, or to null before it has released the _lock. Accesses by any thread other
// than the lock owner are inherently racy.
Thread* volatile _owner;
void raw_set_owner(Thread* new_owner) { Atomic::store(&_owner, new_owner); }
@ -168,7 +168,7 @@ class Mutex : public CHeapObj<mtSynchronizer> {
void lock(); // prints out warning if VM thread blocks
void lock(Thread *thread); // overloaded with current thread
void unlock();
bool is_locked() const { return owner() != NULL; }
bool is_locked() const { return owner() != nullptr; }
bool try_lock(); // Like lock(), but unblocking. It returns false instead
private:

View File

@ -42,126 +42,126 @@
// eliminating the indirection and using instances instead.
// Consider using GCC's __read_mostly.
Mutex* Patching_lock = NULL;
Mutex* CompiledMethod_lock = NULL;
Monitor* SystemDictionary_lock = NULL;
Mutex* InvokeMethodTable_lock = NULL;
Mutex* SharedDictionary_lock = NULL;
Monitor* ClassInitError_lock = NULL;
Mutex* Module_lock = NULL;
Mutex* CompiledIC_lock = NULL;
Mutex* InlineCacheBuffer_lock = NULL;
Mutex* VMStatistic_lock = NULL;
Mutex* JmethodIdCreation_lock = NULL;
Mutex* JfieldIdCreation_lock = NULL;
Monitor* JNICritical_lock = NULL;
Mutex* JvmtiThreadState_lock = NULL;
Monitor* EscapeBarrier_lock = NULL;
Monitor* JvmtiVTMSTransition_lock = NULL;
Monitor* Heap_lock = NULL;
Mutex* Patching_lock = nullptr;
Mutex* CompiledMethod_lock = nullptr;
Monitor* SystemDictionary_lock = nullptr;
Mutex* InvokeMethodTable_lock = nullptr;
Mutex* SharedDictionary_lock = nullptr;
Monitor* ClassInitError_lock = nullptr;
Mutex* Module_lock = nullptr;
Mutex* CompiledIC_lock = nullptr;
Mutex* InlineCacheBuffer_lock = nullptr;
Mutex* VMStatistic_lock = nullptr;
Mutex* JmethodIdCreation_lock = nullptr;
Mutex* JfieldIdCreation_lock = nullptr;
Monitor* JNICritical_lock = nullptr;
Mutex* JvmtiThreadState_lock = nullptr;
Monitor* EscapeBarrier_lock = nullptr;
Monitor* JvmtiVTMSTransition_lock = nullptr;
Monitor* Heap_lock = nullptr;
#ifdef INCLUDE_PARALLELGC
Mutex* PSOldGenExpand_lock = NULL;
Mutex* PSOldGenExpand_lock = nullptr;
#endif
Mutex* AdapterHandlerLibrary_lock = NULL;
Mutex* SignatureHandlerLibrary_lock = NULL;
Mutex* VtableStubs_lock = NULL;
Mutex* SymbolArena_lock = NULL;
Monitor* StringDedup_lock = NULL;
Mutex* StringDedupIntern_lock = NULL;
Monitor* CodeCache_lock = NULL;
Mutex* TouchedMethodLog_lock = NULL;
Mutex* RetData_lock = NULL;
Monitor* VMOperation_lock = NULL;
Monitor* Threads_lock = NULL;
Mutex* NonJavaThreadsList_lock = NULL;
Mutex* NonJavaThreadsListSync_lock = NULL;
Monitor* CGC_lock = NULL;
Monitor* STS_lock = NULL;
Monitor* G1OldGCCount_lock = NULL;
Mutex* G1DetachedRefinementStats_lock = NULL;
Mutex* MarkStackFreeList_lock = NULL;
Mutex* MarkStackChunkList_lock = NULL;
Mutex* MonitoringSupport_lock = NULL;
Mutex* ParGCRareEvent_lock = NULL;
Monitor* ConcurrentGCBreakpoints_lock = NULL;
Mutex* Compile_lock = NULL;
Monitor* MethodCompileQueue_lock = NULL;
Monitor* CompileThread_lock = NULL;
Monitor* Compilation_lock = NULL;
Mutex* CompileTaskAlloc_lock = NULL;
Mutex* CompileStatistics_lock = NULL;
Mutex* DirectivesStack_lock = NULL;
Mutex* MultiArray_lock = NULL;
Monitor* Terminator_lock = NULL;
Monitor* InitCompleted_lock = NULL;
Monitor* BeforeExit_lock = NULL;
Monitor* Notify_lock = NULL;
Mutex* ExceptionCache_lock = NULL;
Mutex* AdapterHandlerLibrary_lock = nullptr;
Mutex* SignatureHandlerLibrary_lock = nullptr;
Mutex* VtableStubs_lock = nullptr;
Mutex* SymbolArena_lock = nullptr;
Monitor* StringDedup_lock = nullptr;
Mutex* StringDedupIntern_lock = nullptr;
Monitor* CodeCache_lock = nullptr;
Mutex* TouchedMethodLog_lock = nullptr;
Mutex* RetData_lock = nullptr;
Monitor* VMOperation_lock = nullptr;
Monitor* Threads_lock = nullptr;
Mutex* NonJavaThreadsList_lock = nullptr;
Mutex* NonJavaThreadsListSync_lock = nullptr;
Monitor* CGC_lock = nullptr;
Monitor* STS_lock = nullptr;
Monitor* G1OldGCCount_lock = nullptr;
Mutex* G1DetachedRefinementStats_lock = nullptr;
Mutex* MarkStackFreeList_lock = nullptr;
Mutex* MarkStackChunkList_lock = nullptr;
Mutex* MonitoringSupport_lock = nullptr;
Mutex* ParGCRareEvent_lock = nullptr;
Monitor* ConcurrentGCBreakpoints_lock = nullptr;
Mutex* Compile_lock = nullptr;
Monitor* MethodCompileQueue_lock = nullptr;
Monitor* CompileThread_lock = nullptr;
Monitor* Compilation_lock = nullptr;
Mutex* CompileTaskAlloc_lock = nullptr;
Mutex* CompileStatistics_lock = nullptr;
Mutex* DirectivesStack_lock = nullptr;
Mutex* MultiArray_lock = nullptr;
Monitor* Terminator_lock = nullptr;
Monitor* InitCompleted_lock = nullptr;
Monitor* BeforeExit_lock = nullptr;
Monitor* Notify_lock = nullptr;
Mutex* ExceptionCache_lock = nullptr;
#ifndef PRODUCT
Mutex* FullGCALot_lock = NULL;
Mutex* FullGCALot_lock = nullptr;
#endif
Mutex* tty_lock = NULL;
Mutex* tty_lock = nullptr;
Mutex* RawMonitor_lock = NULL;
Mutex* PerfDataMemAlloc_lock = NULL;
Mutex* PerfDataManager_lock = NULL;
Mutex* OopMapCacheAlloc_lock = NULL;
Mutex* RawMonitor_lock = nullptr;
Mutex* PerfDataMemAlloc_lock = nullptr;
Mutex* PerfDataManager_lock = nullptr;
Mutex* OopMapCacheAlloc_lock = nullptr;
Mutex* FreeList_lock = NULL;
Mutex* OldSets_lock = NULL;
Mutex* Uncommit_lock = NULL;
Monitor* RootRegionScan_lock = NULL;
Mutex* FreeList_lock = nullptr;
Mutex* OldSets_lock = nullptr;
Mutex* Uncommit_lock = nullptr;
Monitor* RootRegionScan_lock = nullptr;
Mutex* Management_lock = NULL;
Monitor* MonitorDeflation_lock = NULL;
Monitor* Service_lock = NULL;
Monitor* Notification_lock = NULL;
Monitor* PeriodicTask_lock = NULL;
Monitor* RedefineClasses_lock = NULL;
Mutex* Verify_lock = NULL;
Monitor* Zip_lock = NULL;
Mutex* Management_lock = nullptr;
Monitor* MonitorDeflation_lock = nullptr;
Monitor* Service_lock = nullptr;
Monitor* Notification_lock = nullptr;
Monitor* PeriodicTask_lock = nullptr;
Monitor* RedefineClasses_lock = nullptr;
Mutex* Verify_lock = nullptr;
Monitor* Zip_lock = nullptr;
#if INCLUDE_JFR
Mutex* JfrStacktrace_lock = NULL;
Monitor* JfrMsg_lock = NULL;
Mutex* JfrBuffer_lock = NULL;
Monitor* JfrThreadSampler_lock = NULL;
Mutex* JfrStacktrace_lock = nullptr;
Monitor* JfrMsg_lock = nullptr;
Mutex* JfrBuffer_lock = nullptr;
Monitor* JfrThreadSampler_lock = nullptr;
#endif
#ifndef SUPPORTS_NATIVE_CX8
Mutex* UnsafeJlong_lock = NULL;
Mutex* UnsafeJlong_lock = nullptr;
#endif
Mutex* CodeHeapStateAnalytics_lock = NULL;
Mutex* CodeHeapStateAnalytics_lock = nullptr;
Monitor* ContinuationRelativize_lock = NULL;
Monitor* ContinuationRelativize_lock = nullptr;
Mutex* Metaspace_lock = NULL;
Monitor* MetaspaceCritical_lock = NULL;
Mutex* ClassLoaderDataGraph_lock = NULL;
Monitor* ThreadsSMRDelete_lock = NULL;
Mutex* ThreadIdTableCreate_lock = NULL;
Mutex* SharedDecoder_lock = NULL;
Mutex* DCmdFactory_lock = NULL;
Mutex* NMTQuery_lock = NULL;
Mutex* Metaspace_lock = nullptr;
Monitor* MetaspaceCritical_lock = nullptr;
Mutex* ClassLoaderDataGraph_lock = nullptr;
Monitor* ThreadsSMRDelete_lock = nullptr;
Mutex* ThreadIdTableCreate_lock = nullptr;
Mutex* SharedDecoder_lock = nullptr;
Mutex* DCmdFactory_lock = nullptr;
Mutex* NMTQuery_lock = nullptr;
#if INCLUDE_CDS
#if INCLUDE_JVMTI
Mutex* CDSClassFileStream_lock = NULL;
Mutex* CDSClassFileStream_lock = nullptr;
#endif
Mutex* DumpTimeTable_lock = NULL;
Mutex* CDSLambda_lock = NULL;
Mutex* DumpRegion_lock = NULL;
Mutex* ClassListFile_lock = NULL;
Mutex* UnregisteredClassesTable_lock= NULL;
Mutex* LambdaFormInvokers_lock = NULL;
Mutex* ScratchObjects_lock = NULL;
Mutex* DumpTimeTable_lock = nullptr;
Mutex* CDSLambda_lock = nullptr;
Mutex* DumpRegion_lock = nullptr;
Mutex* ClassListFile_lock = nullptr;
Mutex* UnregisteredClassesTable_lock= nullptr;
Mutex* LambdaFormInvokers_lock = nullptr;
Mutex* ScratchObjects_lock = nullptr;
#endif // INCLUDE_CDS
Mutex* Bootclasspath_lock = NULL;
Mutex* Bootclasspath_lock = nullptr;
#if INCLUDE_JVMCI
Monitor* JVMCI_lock = NULL;
Monitor* JVMCIRuntime_lock = NULL;
Monitor* JVMCI_lock = nullptr;
Monitor* JVMCIRuntime_lock = nullptr;
#endif
@ -172,7 +172,7 @@ static int _num_mutex;
#ifdef ASSERT
void assert_locked_or_safepoint(const Mutex* lock) {
// check if this thread owns the lock (common case)
assert(lock != NULL, "Need non-NULL lock");
assert(lock != nullptr, "Need non-null lock");
if (lock->owned_by_self()) return;
if (SafepointSynchronize::is_at_safepoint()) return;
if (!Universe::is_fully_initialized()) return;
@ -181,7 +181,7 @@ void assert_locked_or_safepoint(const Mutex* lock) {
// a weaker assertion than the above
void assert_locked_or_safepoint_weak(const Mutex* lock) {
assert(lock != NULL, "Need non-NULL lock");
assert(lock != nullptr, "Need non-null lock");
if (lock->is_locked()) return;
if (SafepointSynchronize::is_at_safepoint()) return;
if (!Universe::is_fully_initialized()) return;
@ -190,7 +190,7 @@ void assert_locked_or_safepoint_weak(const Mutex* lock) {
// a stronger assertion than the above
void assert_lock_strong(const Mutex* lock) {
assert(lock != NULL, "Need non-NULL lock");
assert(lock != nullptr, "Need non-null lock");
if (lock->owned_by_self()) return;
fatal("must own lock %s", lock->name());
}
@ -405,7 +405,7 @@ void print_owned_locks_on_error(outputStream* st) {
bool none = true;
for (int i = 0; i < _num_mutex; i++) {
// see if it has an owner
if (_mutex_array[i]->owner() != NULL) {
if (_mutex_array[i]->owner() != nullptr) {
if (none) {
// print format used by Mutex::print_on_error()
st->print_cr(" ([mutex/lock_event])");

View File

@ -195,7 +195,7 @@ class MutexLocker: public StackObj {
MutexLocker(Mutex* mutex, Mutex::SafepointCheckFlag flag = Mutex::_safepoint_check_flag) :
_mutex(mutex) {
bool no_safepoint_check = flag == Mutex::_no_safepoint_check_flag;
if (_mutex != NULL) {
if (_mutex != nullptr) {
if (no_safepoint_check) {
_mutex->lock_without_safepoint_check();
} else {
@ -207,7 +207,7 @@ class MutexLocker: public StackObj {
MutexLocker(Thread* thread, Mutex* mutex, Mutex::SafepointCheckFlag flag = Mutex::_safepoint_check_flag) :
_mutex(mutex) {
bool no_safepoint_check = flag == Mutex::_no_safepoint_check_flag;
if (_mutex != NULL) {
if (_mutex != nullptr) {
if (no_safepoint_check) {
_mutex->lock_without_safepoint_check(thread);
} else {
@ -217,7 +217,7 @@ class MutexLocker: public StackObj {
}
~MutexLocker() {
if (_mutex != NULL) {
if (_mutex != nullptr) {
assert_lock_strong(_mutex);
_mutex->unlock();
}
@ -228,7 +228,7 @@ class MutexLocker: public StackObj {
// A MonitorLocker is like a MutexLocker above, except it allows
// wait/notify as well which are delegated to the underlying Monitor.
// It also disallows NULL.
// It also disallows null.
class MonitorLocker: public MutexLocker {
Mutex::SafepointCheckFlag _flag;
@ -242,13 +242,13 @@ class MonitorLocker: public MutexLocker {
MonitorLocker(Monitor* monitor, Mutex::SafepointCheckFlag flag = Mutex::_safepoint_check_flag) :
MutexLocker(monitor, flag), _flag(flag) {
// Superclass constructor did locking
assert(monitor != NULL, "NULL monitor not allowed");
assert(monitor != nullptr, "null monitor not allowed");
}
MonitorLocker(Thread* thread, Monitor* monitor, Mutex::SafepointCheckFlag flag = Mutex::_safepoint_check_flag) :
MutexLocker(thread, monitor, flag), _flag(flag) {
// Superclass constructor did locking
assert(monitor != NULL, "NULL monitor not allowed");
assert(monitor != nullptr, "null monitor not allowed");
}
bool wait(int64_t timeout = 0) {
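
MutexLocker above tolerates being handed a null mutex while MonitorLocker insists on a real one; the null-tolerant form lets bootstrap code run before a lock exists. A standalone sketch of that RAII shape with std::mutex standing in for HotSpot's Mutex (MaybeLocker and g_lock are illustrative names):

#include <mutex>

// RAII guard that tolerates a null mutex: lock and unlock become
// no-ops, so early-startup code can share one code path.
class MaybeLocker {
  std::mutex* _mutex;
 public:
  explicit MaybeLocker(std::mutex* m) : _mutex(m) {
    if (_mutex != nullptr) _mutex->lock();
  }
  ~MaybeLocker() {
    if (_mutex != nullptr) _mutex->unlock();
  }
  MaybeLocker(const MaybeLocker&) = delete;
  MaybeLocker& operator=(const MaybeLocker&) = delete;
};

std::mutex* g_lock = nullptr;  // may not be created yet at startup

void guarded_work() {
  MaybeLocker ml(g_lock);      // no-op until g_lock is initialized
  // ... critical section ...
}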

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -47,7 +47,7 @@ public:
NonJavaThread* volatile _head;
SingleWriterSynchronizer _protect;
List() : _head(NULL), _protect() {}
List() : _head(nullptr), _protect() {}
};
NonJavaThread::List NonJavaThread::_the_list;
@ -66,8 +66,8 @@ void NonJavaThread::Iterator::step() {
_current = Atomic::load_acquire(&_current->_next);
}
NonJavaThread::NonJavaThread() : Thread(), _next(NULL) {
assert(BarrierSet::barrier_set() != NULL, "NonJavaThread created too soon!");
NonJavaThread::NonJavaThread() : Thread(), _next(nullptr) {
assert(BarrierSet::barrier_set() != nullptr, "NonJavaThread created too soon!");
}
NonJavaThread::~NonJavaThread() { }
@ -86,7 +86,7 @@ void NonJavaThread::remove_from_the_list() {
// Cleanup BarrierSet-related data before removing from list.
BarrierSet::barrier_set()->on_thread_detach(this);
NonJavaThread* volatile* p = &_the_list._head;
for (NonJavaThread* t = *p; t != NULL; p = &t->_next, t = *p) {
for (NonJavaThread* t = *p; t != nullptr; p = &t->_next, t = *p) {
if (t == this) {
*p = _next;
break;
@ -98,7 +98,7 @@ void NonJavaThread::remove_from_the_list() {
// from NJTList_lock in case an iteration attempts to lock it.
MutexLocker ml(NonJavaThreadsListSync_lock, Mutex::_no_safepoint_check_flag);
_the_list._protect.synchronize();
_next = NULL; // Safe to drop the link now.
_next = nullptr; // Safe to drop the link now.
}
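
remove_from_the_list() walks the list through a pointer-to-pointer so unlinking the head and unlinking an interior node are the same store, and it deliberately clears _next only after synchronize() has flushed out concurrent iterators. The unlink idiom in isolation (plain C++; Node is an illustrative type):

struct Node {
  Node* next;
};

// 'p' always addresses the field that links to the node under
// inspection, so removing the head needs no special case.
void unlink(Node** head, Node* target) {
  for (Node** p = head; *p != nullptr; p = &(*p)->next) {
    if (*p == target) {
      *p = target->next;  // splice the node out
      break;
    }
  }
  // The HotSpot code clears target's next pointer only after the
  // single-writer synchronization, so an iterator that already
  // reached 'target' can still step past it safely.
}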
void NonJavaThread::pre_run() {
@ -106,7 +106,7 @@ void NonJavaThread::pre_run() {
// This is slightly odd in that NamedThread is a subclass, but
// in fact name() is defined in Thread
assert(this->name() != NULL, "thread name was not set before it was started");
assert(this->name() != nullptr, "thread name was not set before it was started");
this->set_native_thread_name(this->name());
}
@ -123,8 +123,8 @@ void NonJavaThread::post_run() {
// uniquely named instances should derive from this.
NamedThread::NamedThread() :
NonJavaThread(),
_name(NULL),
_processed_thread(NULL),
_name(nullptr),
_processed_thread(nullptr),
_gc_id(GCId::undefined())
{}
@ -133,7 +133,7 @@ NamedThread::~NamedThread() {
}
void NamedThread::set_name(const char* format, ...) {
guarantee(_name == NULL, "Only get to set name once.");
guarantee(_name == nullptr, "Only get to set name once.");
_name = NEW_C_HEAP_ARRAY(char, max_name_len, mtThread);
va_list ap;
va_start(ap, format);
@ -154,12 +154,12 @@ void NamedThread::print_on(outputStream* st) const {
// be replaced by an abstraction over whatever native support for
// timer interrupts exists on the platform.
WatcherThread* WatcherThread::_watcher_thread = NULL;
WatcherThread* WatcherThread::_watcher_thread = nullptr;
bool WatcherThread::_startable = false;
volatile bool WatcherThread::_should_terminate = false;
WatcherThread::WatcherThread() : NonJavaThread() {
assert(watcher_thread() == NULL, "we can only allocate one WatcherThread");
assert(watcher_thread() == nullptr, "we can only allocate one WatcherThread");
if (os::create_thread(this, os::watcher_thread)) {
_watcher_thread = this;
@ -285,7 +285,7 @@ void WatcherThread::run() {
// Signal that it is terminated
{
MutexLocker mu(Terminator_lock, Mutex::_no_safepoint_check_flag);
_watcher_thread = NULL;
_watcher_thread = nullptr;
Terminator_lock->notify_all();
}
}
@ -293,7 +293,7 @@ void WatcherThread::run() {
void WatcherThread::start() {
assert(PeriodicTask_lock->owned_by_self(), "PeriodicTask_lock required");
if (watcher_thread() == NULL && _startable) {
if (watcher_thread() == nullptr && _startable) {
_should_terminate = false;
// Create the single instance of WatcherThread
new WatcherThread();
@ -313,7 +313,7 @@ void WatcherThread::stop() {
_should_terminate = true;
WatcherThread* watcher = watcher_thread();
if (watcher != NULL) {
if (watcher != nullptr) {
// unpark the WatcherThread so it can see that it should terminate
watcher->unpark();
}
@ -321,7 +321,7 @@ void WatcherThread::stop() {
MonitorLocker mu(Terminator_lock);
while (watcher_thread() != NULL) {
while (watcher_thread() != nullptr) {
// This wait should make safepoint checks and wait without a timeout.
mu.wait(0);
}
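
stop() is the standard shutdown handshake: raise a termination flag, unpark the worker, then wait on a monitor until the worker clears the global reference and notifies. The same protocol expressed with std::condition_variable (worker_alive and the function names are illustrative, not HotSpot API):

#include <condition_variable>
#include <mutex>

std::mutex              terminator_lock;
std::condition_variable terminator_cv;
bool                    worker_alive = true;

void worker_exit() {             // last thing the worker thread does
  {
    std::lock_guard<std::mutex> lg(terminator_lock);
    worker_alive = false;        // analogous to _watcher_thread = nullptr
  }
  terminator_cv.notify_all();    // Terminator_lock->notify_all()
}

void stop_worker() {             // caller side of WatcherThread::stop()
  std::unique_lock<std::mutex> ul(terminator_lock);
  terminator_cv.wait(ul, [] { return !worker_alive; });  // the wait loop
}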

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -65,7 +65,7 @@ public:
Iterator();
~Iterator();
bool end() const { return _current == NULL; }
bool end() const { return _current == nullptr; }
NonJavaThread* current() const { return _current; }
void step();
};
@ -91,7 +91,7 @@ class NamedThread: public NonJavaThread {
// May only be called once per thread.
void set_name(const char* format, ...) ATTRIBUTE_PRINTF(2, 3);
virtual bool is_Named_thread() const { return true; }
virtual const char* name() const { return _name == NULL ? "Unknown Thread" : _name; }
virtual const char* name() const { return _name == nullptr ? "Unknown Thread" : _name; }
virtual const char* type_name() const { return "NamedThread"; }
Thread *processed_thread() { return _processed_thread; }
void set_processed_thread(Thread *thread) { _processed_thread = thread; }

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -66,11 +66,11 @@
#define DTRACE_MONITOR_PROBE_COMMON(obj, thread) \
char* bytes = NULL; \
char* bytes = nullptr; \
int len = 0; \
jlong jtid = SharedRuntime::get_java_tid(thread); \
Symbol* klassname = obj->klass()->name(); \
if (klassname != NULL) { \
if (klassname != nullptr) { \
bytes = (char*)klassname->bytes(); \
len = klassname->utf8_length(); \
}
@ -121,7 +121,7 @@ static int Knob_PreSpin = 10; // 20-100 likely better
DEBUG_ONLY(static volatile bool InitDone = false;)
OopStorage* ObjectMonitor::_oop_storage = NULL;
OopStorage* ObjectMonitor::_oop_storage = nullptr;
// -----------------------------------------------------------------------------
// Theory of operations -- Monitors lists, thread residency, etc:
@ -259,18 +259,18 @@ static void check_object_context() {
ObjectMonitor::ObjectMonitor(oop object) :
_header(markWord::zero()),
_object(_oop_storage, object),
_owner(NULL),
_owner(nullptr),
_previous_owner_tid(0),
_next_om(NULL),
_next_om(nullptr),
_recursions(0),
_EntryList(NULL),
_cxq(NULL),
_succ(NULL),
_Responsible(NULL),
_EntryList(nullptr),
_cxq(nullptr),
_succ(nullptr),
_Responsible(nullptr),
_Spinner(0),
_SpinDuration(ObjectMonitor::Knob_SpinLimit),
_contentions(0),
_WaitSet(NULL),
_WaitSet(nullptr),
_waiters(0),
_WaitSetLock(0)
{ }
@ -282,14 +282,14 @@ ObjectMonitor::~ObjectMonitor() {
oop ObjectMonitor::object() const {
check_object_context();
if (_object.is_null()) {
return NULL;
return nullptr;
}
return _object.resolve();
}
oop ObjectMonitor::object_peek() const {
if (_object.is_null()) {
return NULL;
return nullptr;
}
return _object.peek();
}
@ -297,7 +297,7 @@ oop ObjectMonitor::object_peek() const {
void ObjectMonitor::ExitOnSuspend::operator()(JavaThread* current) {
if (current->is_suspended()) {
_om->_recursions = 0;
_om->_succ = NULL;
_om->_succ = nullptr;
// Don't need a full fence after clearing successor here because of the call to exit().
_om->exit(current, false /* not_suspended */);
_om_exited = true;
@ -309,7 +309,7 @@ void ObjectMonitor::ExitOnSuspend::operator()(JavaThread* current) {
void ObjectMonitor::ClearSuccOnSuspend::operator()(JavaThread* current) {
if (current->is_suspended()) {
if (_om->_succ == current) {
_om->_succ = NULL;
_om->_succ = nullptr;
OrderAccess::fence(); // always do a full fence when successor is cleared
}
}
@ -322,8 +322,8 @@ bool ObjectMonitor::enter(JavaThread* current) {
// The following code is ordered to check the most common cases first
// and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
void* cur = try_set_owner_from(NULL, current);
if (cur == NULL) {
void* cur = try_set_owner_from(nullptr, current);
if (cur == nullptr) {
assert(_recursions == 0, "invariant");
return true;
}
@ -373,7 +373,7 @@ bool ObjectMonitor::enter(JavaThread* current) {
// above lost the race to async deflation. Undo the work and
// force the caller to retry.
const oop l_object = object();
if (l_object != NULL) {
if (l_object != nullptr) {
// Attempt to restore the header/dmw to the object's header so that
// we only retry once if the deflater thread happens to be slow.
install_displaced_markword_in_object(l_object);
@ -396,7 +396,7 @@ bool ObjectMonitor::enter(JavaThread* current) {
{ // Change java thread status to indicate blocked on monitor enter.
JavaThreadBlockedOnMonitorEnterState jtbmes(current, this);
assert(current->current_pending_monitor() == NULL, "invariant");
assert(current->current_pending_monitor() == nullptr, "invariant");
current->set_current_pending_monitor(this);
DTRACE_MONITOR_PROBE(contended__enter, this, object(), current);
@ -419,7 +419,7 @@ bool ObjectMonitor::enter(JavaThread* current) {
{
ThreadBlockInVMPreprocess<ExitOnSuspend> tbivs(current, eos, true /* allow_suspend */);
EnterI(current);
current->set_current_pending_monitor(NULL);
current->set_current_pending_monitor(nullptr);
// We can go to a safepoint at the end of this block. If we
// do a thread dump during that safepoint, then this thread will show
// as having "-locked" the monitor, but the OS and java.lang.Thread
@ -484,8 +484,8 @@ bool ObjectMonitor::enter(JavaThread* current) {
int ObjectMonitor::TryLock(JavaThread* current) {
void* own = owner_raw();
if (own != NULL) return 0;
if (try_set_owner_from(NULL, current) == NULL) {
if (own != nullptr) return 0;
if (try_set_owner_from(nullptr, current) == nullptr) {
assert(_recursions == 0, "invariant");
return 1;
}
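
TryLock() is one compare-and-swap of the owner field from null to the current thread; a null return from try_set_owner_from() means the CAS installed us. A simplified standalone equivalent with std::atomic (illustrative only; the real TryLock distinguishes more failure cases than this sketch):

#include <atomic>

struct Thread;                       // opaque in this sketch

std::atomic<Thread*> g_owner{nullptr};

// Returns 1 if the lock was acquired, 0 otherwise.
int try_lock(Thread* current) {
  if (g_owner.load(std::memory_order_relaxed) != nullptr) {
    return 0;                        // somebody owns it
  }
  Thread* expected = nullptr;
  if (g_owner.compare_exchange_strong(expected, current,
                                      std::memory_order_acquire)) {
    return 1;                        // CAS succeeded: we are the owner
  }
  return 0;                          // lost the race
}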
@ -527,20 +527,20 @@ bool ObjectMonitor::deflate_monitor() {
const oop obj = object_peek();
if (obj == NULL) {
if (obj == nullptr) {
// If the object died, we can recycle the monitor without racing with
// Java threads. The GC already broke the association with the object.
set_owner_from(NULL, DEFLATER_MARKER);
set_owner_from(nullptr, DEFLATER_MARKER);
assert(contentions() >= 0, "must be non-negative: contentions=%d", contentions());
_contentions = INT_MIN; // minimum negative int
} else {
// Attempt async deflation protocol.
// Set a NULL owner to DEFLATER_MARKER to force any contending thread
// Set a nullptr owner to DEFLATER_MARKER to force any contending thread
// through the slow path. This is just the first part of the async
// deflation dance.
if (try_set_owner_from(NULL, DEFLATER_MARKER) != NULL) {
// The owner field is no longer NULL so we lost the race since the
if (try_set_owner_from(nullptr, DEFLATER_MARKER) != nullptr) {
// The owner field is no longer null so we lost the race since the
// ObjectMonitor is now busy.
return false;
}
@ -549,8 +549,8 @@ bool ObjectMonitor::deflate_monitor() {
// Another thread has raced to enter the ObjectMonitor after
// is_busy() above or has already entered and waited on
// it which makes it busy so no deflation. Restore owner to
// NULL if it is still DEFLATER_MARKER.
if (try_set_owner_from(DEFLATER_MARKER, NULL) != DEFLATER_MARKER) {
// null if it is still DEFLATER_MARKER.
if (try_set_owner_from(DEFLATER_MARKER, nullptr) != DEFLATER_MARKER) {
// Deferred decrement for the JT EnterI() that cancelled the async deflation.
add_to_contentions(-1);
}
@ -561,9 +561,9 @@ bool ObjectMonitor::deflate_monitor() {
// to retry. This is the second part of the async deflation dance.
if (Atomic::cmpxchg(&_contentions, 0, INT_MIN) != 0) {
// Contentions was no longer 0 so we lost the race since the
// ObjectMonitor is now busy. Restore owner to NULL if it is
// ObjectMonitor is now busy. Restore owner to nullptr if it is
// still DEFLATER_MARKER:
if (try_set_owner_from(DEFLATER_MARKER, NULL) != DEFLATER_MARKER) {
if (try_set_owner_from(DEFLATER_MARKER, nullptr) != DEFLATER_MARKER) {
// Deferred decrement for the JT EnterI() that cancelled the async deflation.
add_to_contentions(-1);
}
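
The dance above is a two-step claim: CAS the owner from null to DEFLATER_MARKER so contenders take the slow path, then CAS contentions from 0 to a large negative value; losing either CAS means the monitor became busy and the deflater backs out, restoring a null owner if the marker is still in place. Compressed into a standalone sketch with std::atomic (DEFLATER is a stand-in sentinel; the deferred contention decrement for a cancelling EnterI() is omitted):

#include <atomic>
#include <climits>

struct Thread;
static Thread* const DEFLATER = reinterpret_cast<Thread*>(-1);

std::atomic<Thread*> owner{nullptr};
std::atomic<int>     contentions{0};

// Returns true if the monitor was successfully claimed for deflation.
bool try_deflate() {
  // Step 1: claim the owner field so contenders take the slow path.
  Thread* expected = nullptr;
  if (!owner.compare_exchange_strong(expected, DEFLATER)) {
    return false;                          // owner was no longer null
  }
  // Step 2: force contentions negative so late arrivals retry.
  int zero = 0;
  if (!contentions.compare_exchange_strong(zero, INT_MIN)) {
    Thread* marker = DEFLATER;             // back out: restore null owner
    owner.compare_exchange_strong(marker, nullptr);
    return false;
  }
  return true;                             // safe to recycle the monitor
}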
@ -576,13 +576,13 @@ bool ObjectMonitor::deflate_monitor() {
guarantee(contentions() < 0, "must be negative: contentions=%d",
contentions());
guarantee(_waiters == 0, "must be 0: waiters=%d", _waiters);
guarantee(_cxq == NULL, "must be no contending threads: cxq="
guarantee(_cxq == nullptr, "must be no contending threads: cxq="
INTPTR_FORMAT, p2i(_cxq));
guarantee(_EntryList == NULL,
guarantee(_EntryList == nullptr,
"must be no entering threads: EntryList=" INTPTR_FORMAT,
p2i(_EntryList));
if (obj != NULL) {
if (obj != nullptr) {
if (log_is_enabled(Trace, monitorinflation)) {
ResourceMark rm;
log_trace(monitorinflation)("deflate_monitor: object=" INTPTR_FORMAT
@ -611,7 +611,7 @@ void ObjectMonitor::install_displaced_markword_in_object(const oop obj) {
// those values could change when the ObjectMonitor gets moved from
// the global free list to a per-thread free list.
guarantee(obj != NULL, "must be non-NULL");
guarantee(obj != nullptr, "must be non-null");
// Separate loads in is_being_async_deflated(), which is almost always
// called before this function, from the load of dmw/header below.
@ -621,7 +621,7 @@ void ObjectMonitor::install_displaced_markword_in_object(const oop obj) {
OrderAccess::loadload_for_IRIW();
const oop l_object = object_peek();
if (l_object == NULL) {
if (l_object == nullptr) {
// ObjectMonitor's object ref has already been cleared by async
// deflation or GC so we're done here.
return;
@ -630,7 +630,7 @@ void ObjectMonitor::install_displaced_markword_in_object(const oop obj) {
INTPTR_FORMAT, p2i(l_object), p2i(obj));
markWord dmw = header();
// The dmw has to be neutral (not NULL, not locked and not marked).
// The dmw has to be neutral (not null, not locked and not marked).
assert(dmw.is_neutral(), "must be neutral: dmw=" INTPTR_FORMAT, dmw.value());
// Install displaced mark word if the object's header still points
@ -666,7 +666,7 @@ const char* ObjectMonitor::is_busy_to_string(stringStream* ss) {
if (!owner_is_DEFLATER_MARKER()) {
ss->print("owner=" INTPTR_FORMAT, p2i(owner_raw()));
} else {
// We report NULL instead of DEFLATER_MARKER here because is_busy()
// We report nullptr instead of DEFLATER_MARKER here because is_busy()
// ignores DEFLATER_MARKER values.
ss->print("owner=" INTPTR_FORMAT, NULL_WORD);
}
@ -783,7 +783,7 @@ void ObjectMonitor::EnterI(JavaThread* current) {
// timer scalability issues we see on some platforms as we'd only have one thread
// -- the checker -- parked on a timer.
if (nxt == NULL && _EntryList == NULL) {
if (nxt == nullptr && _EntryList == nullptr) {
// Try to assume the role of responsible thread for the monitor.
// CONSIDER: ST vs CAS vs { if (Responsible==null) Responsible=current }
Atomic::replace_if_null(&_Responsible, current);
@ -861,7 +861,7 @@ void ObjectMonitor::EnterI(JavaThread* current) {
// just spin again. This pattern can repeat, leaving _succ to simply
// spin on a CPU.
if (_succ == current) _succ = NULL;
if (_succ == current) _succ = nullptr;
// Invariant: after clearing _succ a thread *must* retry _owner before parking.
OrderAccess::fence();
@ -878,11 +878,11 @@ void ObjectMonitor::EnterI(JavaThread* current) {
assert(owner_raw() == current, "invariant");
UnlinkAfterAcquire(current, &node);
if (_succ == current) _succ = NULL;
if (_succ == current) _succ = nullptr;
assert(_succ != current, "invariant");
if (_Responsible == current) {
_Responsible = NULL;
_Responsible = nullptr;
OrderAccess::fence(); // Dekker pivot-point
// We may leave threads on cxq|EntryList without a designated
@ -938,8 +938,8 @@ void ObjectMonitor::EnterI(JavaThread* current) {
// In the future we should reconcile EnterI() and ReenterI().
void ObjectMonitor::ReenterI(JavaThread* current, ObjectWaiter* currentNode) {
assert(current != NULL, "invariant");
assert(currentNode != NULL, "invariant");
assert(current != nullptr, "invariant");
assert(currentNode != nullptr, "invariant");
assert(currentNode->_thread == current, "invariant");
assert(_waiters > 0, "invariant");
assert(object()->mark() == markWord::encode(this), "invariant");
@ -981,7 +981,7 @@ void ObjectMonitor::ReenterI(JavaThread* current, ObjectWaiter* currentNode) {
// Assuming this is not a spurious wakeup we'll normally
// find that _succ == current.
if (_succ == current) _succ = NULL;
if (_succ == current) _succ = nullptr;
// Invariant: after clearing _succ a contending thread
// *must* retry _owner before parking.
@ -1003,7 +1003,7 @@ void ObjectMonitor::ReenterI(JavaThread* current, ObjectWaiter* currentNode) {
assert(owner_raw() == current, "invariant");
assert(object()->mark() == markWord::encode(this), "invariant");
UnlinkAfterAcquire(current, currentNode);
if (_succ == current) _succ = NULL;
if (_succ == current) _succ = nullptr;
assert(_succ != current, "invariant");
currentNode->TState = ObjectWaiter::TS_RUN;
OrderAccess::fence(); // see comments at the end of EnterI()
@ -1022,11 +1022,11 @@ void ObjectMonitor::UnlinkAfterAcquire(JavaThread* current, ObjectWaiter* curren
// This is a constant-time operation.
ObjectWaiter* nxt = currentNode->_next;
ObjectWaiter* prv = currentNode->_prev;
if (nxt != NULL) nxt->_prev = prv;
if (prv != NULL) prv->_next = nxt;
if (nxt != nullptr) nxt->_prev = prv;
if (prv != nullptr) prv->_next = nxt;
if (currentNode == _EntryList) _EntryList = nxt;
assert(nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant");
assert(prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant");
assert(nxt == nullptr || nxt->TState == ObjectWaiter::TS_ENTER, "invariant");
assert(prv == nullptr || prv->TState == ObjectWaiter::TS_ENTER, "invariant");
} else {
assert(currentNode->TState == ObjectWaiter::TS_CXQ, "invariant");
// Inopportune interleaving -- current is still on the cxq.
@ -1043,7 +1043,7 @@ void ObjectMonitor::UnlinkAfterAcquire(JavaThread* current, ObjectWaiter* curren
// so it might as well be now.
ObjectWaiter* v = _cxq;
assert(v != NULL, "invariant");
assert(v != nullptr, "invariant");
if (v != currentNode || Atomic::cmpxchg(&_cxq, v, currentNode->_next) != v) {
// The CAS above can fail from interference IFF a "RAT" arrived.
// In that case current must be in the interior and can no longer be
@ -1053,15 +1053,15 @@ void ObjectMonitor::UnlinkAfterAcquire(JavaThread* current, ObjectWaiter* curren
v = _cxq; // CAS above failed - start scan at head of list
}
ObjectWaiter* p;
ObjectWaiter* q = NULL;
for (p = v; p != NULL && p != currentNode; p = p->_next) {
ObjectWaiter* q = nullptr;
for (p = v; p != nullptr && p != currentNode; p = p->_next) {
q = p;
assert(p->TState == ObjectWaiter::TS_CXQ, "invariant");
}
assert(v != currentNode, "invariant");
assert(p == currentNode, "Node not found on cxq");
assert(p != _cxq, "invariant");
assert(q != NULL, "invariant");
assert(q != nullptr, "invariant");
assert(q->_next == p, "invariant");
q->_next = p->_next;
}
@ -1168,7 +1168,7 @@ void ObjectMonitor::exit(JavaThread* current, bool not_suspended) {
// Invariant: after setting Responsible=null a thread must execute
// a MEMBAR or other serializing instruction before fetching EntryList|cxq.
_Responsible = NULL;
_Responsible = nullptr;
#if INCLUDE_JFR
// get the owner's thread id for the MonitorEnter event
@ -1190,14 +1190,14 @@ void ObjectMonitor::exit(JavaThread* current, bool not_suspended) {
release_clear_owner(current);
OrderAccess::storeload();
if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != nullptr) {
return;
}
// Other threads are blocked trying to acquire the lock.
// Normally the exiting thread is responsible for ensuring succession,
// but if other successors are ready or other entering threads are spinning
// then this thread can simply store NULL into _owner and exit without
// then this thread can simply store null into _owner and exit without
// waking a successor. The existence of spinners or ready successors
// guarantees proper succession (liveness). Responsibility passes to the
// ready or running successors. The exiting thread delegates the duty.
@ -1231,20 +1231,20 @@ void ObjectMonitor::exit(JavaThread* current, bool not_suspended) {
// to reacquire the lock the responsibility for ensuring succession
// falls to the new owner.
//
if (try_set_owner_from(NULL, current) != NULL) {
if (try_set_owner_from(nullptr, current) != nullptr) {
return;
}
guarantee(owner_raw() == current, "invariant");
ObjectWaiter* w = NULL;
ObjectWaiter* w = nullptr;
w = _EntryList;
if (w != NULL) {
if (w != nullptr) {
// I'd like to write: guarantee (w->_thread != current).
// But in practice an exiting thread may find itself on the EntryList.
// Let's say thread T1 calls O.wait(). Wait() enqueues T1 on O's waitset and
// then calls exit(). Exit releases the lock by setting O._owner to NULL.
// then calls exit(). Exit releases the lock by setting O._owner to null.
// Let's say T1 then stalls. T2 acquires O and calls O.notify(). The
// notify() operation moves T1 from O's waitset to O's EntryList. T2 then
// releases the lock "O". T2 resumes immediately after the ST of null into
@ -1260,20 +1260,20 @@ void ObjectMonitor::exit(JavaThread* current, bool not_suspended) {
// If we find that both _cxq and EntryList are null then just
// re-run the exit protocol from the top.
w = _cxq;
if (w == NULL) continue;
if (w == nullptr) continue;
// Drain _cxq into EntryList - bulk transfer.
// First, detach _cxq.
// The following loop is tantamount to: w = swap(&cxq, NULL)
// The following loop is tantamount to: w = swap(&cxq, nullptr)
for (;;) {
assert(w != NULL, "Invariant");
ObjectWaiter* u = Atomic::cmpxchg(&_cxq, w, (ObjectWaiter*)NULL);
assert(w != nullptr, "Invariant");
ObjectWaiter* u = Atomic::cmpxchg(&_cxq, w, (ObjectWaiter*)nullptr);
if (u == w) break;
w = u;
}
assert(w != NULL, "invariant");
assert(_EntryList == NULL, "invariant");
assert(w != nullptr, "invariant");
assert(_EntryList == nullptr, "invariant");
// Convert the LIFO SLL anchored by _cxq into a DLL.
// The list reorganization step operates in O(LENGTH(w)) time.
@ -1285,25 +1285,25 @@ void ObjectMonitor::exit(JavaThread* current, bool not_suspended) {
// we have faster access to the tail.
_EntryList = w;
ObjectWaiter* q = NULL;
ObjectWaiter* q = nullptr;
ObjectWaiter* p;
for (p = w; p != NULL; p = p->_next) {
for (p = w; p != nullptr; p = p->_next) {
guarantee(p->TState == ObjectWaiter::TS_CXQ, "Invariant");
p->TState = ObjectWaiter::TS_ENTER;
p->_prev = q;
q = p;
}
// In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL
// In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = nullptr
// The MEMBAR is satisfied by the release_store() operation in ExitEpilog().
// See if we can abdicate to a spinner instead of waking a thread.
// A primary goal of the implementation is to reduce the
// context-switch rate.
if (_succ != NULL) continue;
if (_succ != nullptr) continue;
w = _EntryList;
if (w != NULL) {
if (w != nullptr) {
guarantee(w->TState == ObjectWaiter::TS_ENTER, "invariant");
ExitEpilog(current, w);
return;
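
The drain detaches the whole cxq with a swap-to-null CAS loop, then threads prev pointers through the LIFO chain to build the doubly linked EntryList. The two steps in isolation (plain C++; Waiter carries just the link fields):

#include <atomic>

struct Waiter {
  Waiter* next = nullptr;
  Waiter* prev = nullptr;
};

std::atomic<Waiter*> cxq{nullptr};

// Equivalent to: w = swap(&cxq, nullptr), then convert the LIFO
// singly linked chain into a doubly linked list and return its head.
Waiter* drain_cxq() {
  Waiter* w = cxq.load();
  while (w != nullptr && !cxq.compare_exchange_weak(w, nullptr)) {
    // on failure compare_exchange_weak reloads w; retry
  }
  Waiter* q = nullptr;
  for (Waiter* p = w; p != nullptr; p = p->next) {
    p->prev = q;                 // O(LENGTH(w)) list reorganization
    q = p;
  }
  return w;
}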
@ -1317,16 +1317,16 @@ void ObjectMonitor::ExitEpilog(JavaThread* current, ObjectWaiter* Wakee) {
// Exit protocol:
// 1. ST _succ = wakee
// 2. membar #loadstore|#storestore;
// 2. ST _owner = NULL
// 2. ST _owner = nullptr
// 3. unpark(wakee)
_succ = Wakee->_thread;
ParkEvent * Trigger = Wakee->_event;
// Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
// Hygiene -- once we've set _owner = nullptr we can't safely dereference Wakee again.
// The thread associated with Wakee may have grabbed the lock and "Wakee" may be
// out-of-scope (non-extant).
Wakee = NULL;
Wakee = nullptr;
// Drop the lock.
// Uses a fence to separate release_store(owner) from the LD in unpark().
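
ExitEpilog's ordering is the point: publish the successor, release-store null into the owner, and only then unpark; after the owner store the wakee may already be running, which is why Wakee must not be touched again. The ordering as a std::atomic sketch (park/unpark machinery omitted):

#include <atomic>

struct Thread;

std::atomic<Thread*> owner;
std::atomic<Thread*> succ;

void exit_epilog(Thread* wakee) {
  succ.store(wakee, std::memory_order_relaxed);     // 1. ST _succ = wakee
  owner.store(nullptr, std::memory_order_release);  // 2. drop the lock
  std::atomic_thread_fence(std::memory_order_seq_cst); // order vs. unpark
  // 3. unpark(wakee) -- from here on 'wakee' may have grabbed the lock
  //    and returned, so it must not be dereferenced.
}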
@ -1427,8 +1427,8 @@ static void post_monitor_wait_event(EventJavaMonitorWait* event,
uint64_t notifier_tid,
jlong timeout,
bool timedout) {
assert(event != NULL, "invariant");
assert(monitor != NULL, "invariant");
assert(event != nullptr, "invariant");
assert(monitor != nullptr, "invariant");
const Klass* monitor_klass = monitor->object()->klass();
if (is_excluded(monitor_klass)) {
return;
@ -1504,7 +1504,7 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
AddWaiter(&node);
Thread::SpinRelease(&_WaitSetLock);
_Responsible = NULL;
_Responsible = nullptr;
intx save = _recursions; // record the old recursion count
_waiters++; // increment the number of waiters
@ -1576,7 +1576,7 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
// No other threads will asynchronously modify TState.
guarantee(node.TState != ObjectWaiter::TS_WAIT, "invariant");
OrderAccess::loadload();
if (_succ == current) _succ = NULL;
if (_succ == current) _succ = nullptr;
WasNotified = node._notified;
// Reentry phase -- reacquire the monitor.
@ -1638,7 +1638,7 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
assert(_succ != current, "invariant");
} // OSThreadWaitState()
current->set_current_waiting_monitor(NULL);
current->set_current_waiting_monitor(nullptr);
guarantee(_recursions == 0, "invariant");
int relock_count = JvmtiDeferredUpdates::get_and_reset_relock_count_after_wait(current);
@ -1674,7 +1674,7 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
void ObjectMonitor::INotify(JavaThread* current) {
Thread::SpinAcquire(&_WaitSetLock, "WaitSet - notify");
ObjectWaiter* iterator = DequeueWaiter();
if (iterator != NULL) {
if (iterator != nullptr) {
guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
guarantee(iterator->_notified == 0, "invariant");
// Disposition - what might we do with iterator ?
@ -1689,15 +1689,15 @@ void ObjectMonitor::INotify(JavaThread* current) {
iterator->_notifier_tid = JFR_THREAD_ID(current);
ObjectWaiter* list = _EntryList;
if (list != NULL) {
assert(list->_prev == NULL, "invariant");
if (list != nullptr) {
assert(list->_prev == nullptr, "invariant");
assert(list->TState == ObjectWaiter::TS_ENTER, "invariant");
assert(list != iterator, "invariant");
}
// prepend to cxq
if (list == NULL) {
iterator->_next = iterator->_prev = NULL;
if (list == nullptr) {
iterator->_next = iterator->_prev = nullptr;
_EntryList = iterator;
} else {
iterator->TState = ObjectWaiter::TS_CXQ;
@ -1736,7 +1736,7 @@ void ObjectMonitor::INotify(JavaThread* current) {
void ObjectMonitor::notify(TRAPS) {
JavaThread* current = THREAD;
CHECK_OWNER(); // Throws IMSE if not owner.
if (_WaitSet == NULL) {
if (_WaitSet == nullptr) {
return;
}
DTRACE_MONITOR_PROBE(notify, this, object(), current);
@ -1755,13 +1755,13 @@ void ObjectMonitor::notify(TRAPS) {
void ObjectMonitor::notifyAll(TRAPS) {
JavaThread* current = THREAD;
CHECK_OWNER(); // Throws IMSE if not owner.
if (_WaitSet == NULL) {
if (_WaitSet == nullptr) {
return;
}
DTRACE_MONITOR_PROBE(notifyAll, this, object(), current);
int tally = 0;
while (_WaitSet != NULL) {
while (_WaitSet != nullptr) {
tally++;
INotify(current);
}
@ -1887,10 +1887,10 @@ int ObjectMonitor::TrySpin(JavaThread* current) {
// CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades
// when preparing to LD...CAS _owner, etc and the CAS is likely
// to succeed.
if (_succ == NULL) {
if (_succ == nullptr) {
_succ = current;
}
Thread* prv = NULL;
Thread* prv = nullptr;
// There are three ways to exit the following loop:
// 1. A successful spin where this thread has acquired the lock.
@ -1929,13 +1929,13 @@ int ObjectMonitor::TrySpin(JavaThread* current) {
// spin count-down variable "ctr", reducing it by 100, say.
JavaThread* ox = static_cast<JavaThread*>(owner_raw());
if (ox == NULL) {
ox = static_cast<JavaThread*>(try_set_owner_from(NULL, current));
if (ox == NULL) {
if (ox == nullptr) {
ox = static_cast<JavaThread*>(try_set_owner_from(nullptr, current));
if (ox == nullptr) {
// The CAS succeeded -- this thread acquired ownership
// Take care of some bookkeeping to exit spin state.
if (_succ == current) {
_succ = NULL;
_succ = nullptr;
}
// Increase _SpinDuration :
@ -1963,7 +1963,7 @@ int ObjectMonitor::TrySpin(JavaThread* current) {
}
// Did lock ownership change hands ?
if (ox != prv && prv != NULL) {
if (ox != prv && prv != nullptr) {
goto Abort;
}
prv = ox;
@ -1975,7 +1975,7 @@ int ObjectMonitor::TrySpin(JavaThread* current) {
if (NotRunnable(current, ox)) {
goto Abort;
}
if (_succ == NULL) {
if (_succ == nullptr) {
_succ = current;
}
}
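
TrySpin interleaves a bounded spin with ownership CASes and aborts early if the lock changes hands, since frequent handoffs suggest the spinner will keep losing. A stripped-down spin-then-park decision (std::atomic; the adaptive _SpinDuration feedback and _succ bookkeeping are omitted):

#include <atomic>

struct Thread;
std::atomic<Thread*> owner{nullptr};

// Returns true if acquired while spinning, false if the caller
// should give up and park.
bool try_spin(Thread* self, int max_spins) {
  Thread* last_seen = nullptr;
  for (int ctr = max_spins; ctr > 0; --ctr) {
    Thread* ox = owner.load(std::memory_order_relaxed);
    if (ox == nullptr) {
      Thread* expected = nullptr;
      if (owner.compare_exchange_strong(expected, self,
                                        std::memory_order_acquire)) {
        return true;             // acquired mid-spin
      }
    } else if (last_seen != nullptr && ox != last_seen) {
      return false;              // ownership changed hands: abort
    } else {
      last_seen = ox;
    }
  }
  return false;                  // spin budget exhausted
}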
@ -1996,7 +1996,7 @@ int ObjectMonitor::TrySpin(JavaThread* current) {
Abort:
if (_succ == current) {
_succ = NULL;
_succ = nullptr;
// Invariant: after setting succ=null a contending thread
// must recheck-retry _owner before parking. This usually happens
// in the normal usage of TrySpin(), but it's safest
@ -2041,7 +2041,7 @@ int ObjectMonitor::TrySpin(JavaThread* current) {
int ObjectMonitor::NotRunnable(JavaThread* current, JavaThread* ox) {
// Check ox->TypeTag == 2BAD.
if (ox == NULL) return 0;
if (ox == nullptr) return 0;
// Avoid transitive spinning ...
// Say T1 spins or blocks trying to acquire L. T1._Stalled is set to L.
@ -2068,15 +2068,15 @@ int ObjectMonitor::NotRunnable(JavaThread* current, JavaThread* ox) {
// WaitSet management ...
ObjectWaiter::ObjectWaiter(JavaThread* current) {
_next = NULL;
_prev = NULL;
_next = nullptr;
_prev = nullptr;
_notified = 0;
_notifier_tid = 0;
TState = TS_RUN;
_thread = current;
_event = _thread->_ParkEvent;
_active = false;
assert(_event != NULL, "invariant");
assert(_event != nullptr, "invariant");
}
void ObjectWaiter::wait_reenter_begin(ObjectMonitor * const mon) {
@ -2088,11 +2088,11 @@ void ObjectWaiter::wait_reenter_end(ObjectMonitor * const mon) {
}
inline void ObjectMonitor::AddWaiter(ObjectWaiter* node) {
assert(node != NULL, "should not add NULL node");
assert(node->_prev == NULL, "node already in list");
assert(node->_next == NULL, "node already in list");
assert(node != nullptr, "should not add null node");
assert(node->_prev == nullptr, "node already in list");
assert(node->_next == nullptr, "node already in list");
// put node at end of queue (circular doubly linked list)
if (_WaitSet == NULL) {
if (_WaitSet == nullptr) {
_WaitSet = node;
node->_prev = node;
node->_next = node;
@ -2117,16 +2117,16 @@ inline ObjectWaiter* ObjectMonitor::DequeueWaiter() {
}
inline void ObjectMonitor::DequeueSpecificWaiter(ObjectWaiter* node) {
assert(node != NULL, "should not dequeue NULL node");
assert(node->_prev != NULL, "node already removed from list");
assert(node->_next != NULL, "node already removed from list");
assert(node != nullptr, "should not dequeue nullptr node");
assert(node->_prev != nullptr, "node already removed from list");
assert(node->_next != nullptr, "node already removed from list");
// when the waiter has woken up because of interrupt,
// timeout or other spurious wake-up, dequeue the
// waiter from waiting list
ObjectWaiter* next = node->_next;
if (next == node) {
assert(node->_prev == node, "invariant check");
_WaitSet = NULL;
_WaitSet = nullptr;
} else {
ObjectWaiter* prev = node->_prev;
assert(prev->_next == node, "invariant check");
@ -2137,19 +2137,19 @@ inline void ObjectMonitor::DequeueSpecificWaiter(ObjectWaiter* node) {
_WaitSet = next;
}
}
node->_next = NULL;
node->_prev = NULL;
node->_next = nullptr;
node->_prev = nullptr;
}
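
AddWaiter and DequeueSpecificWaiter keep the WaitSet as a circular doubly linked list, so append and arbitrary removal are both O(1), with the single-element case handled specially. The same structure in isolation (plain C++ mirror of the code above):

struct Waiter {
  Waiter* next = nullptr;
  Waiter* prev = nullptr;
};

// Append at the tail; *wait_set is the head and head->prev the tail.
void add_waiter(Waiter** wait_set, Waiter* node) {
  if (*wait_set == nullptr) {
    *wait_set = node;
    node->prev = node;
    node->next = node;
  } else {
    Waiter* head = *wait_set;
    Waiter* tail = head->prev;
    tail->next = node;
    node->prev = tail;
    node->next = head;
    head->prev = node;
  }
}

// Remove 'node' from anywhere in the list in O(1).
void dequeue_waiter(Waiter** wait_set, Waiter* node) {
  Waiter* next = node->next;
  if (next == node) {
    *wait_set = nullptr;         // last element
  } else {
    Waiter* prev = node->prev;
    prev->next = next;
    next->prev = prev;
    if (*wait_set == node) *wait_set = next;
  }
  node->next = nullptr;
  node->prev = nullptr;
}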
// -----------------------------------------------------------------------------
// PerfData support
PerfCounter * ObjectMonitor::_sync_ContendedLockAttempts = NULL;
PerfCounter * ObjectMonitor::_sync_FutileWakeups = NULL;
PerfCounter * ObjectMonitor::_sync_Parks = NULL;
PerfCounter * ObjectMonitor::_sync_Notifications = NULL;
PerfCounter * ObjectMonitor::_sync_Inflations = NULL;
PerfCounter * ObjectMonitor::_sync_Deflations = NULL;
PerfLongVariable * ObjectMonitor::_sync_MonExtant = NULL;
PerfCounter * ObjectMonitor::_sync_ContendedLockAttempts = nullptr;
PerfCounter * ObjectMonitor::_sync_FutileWakeups = nullptr;
PerfCounter * ObjectMonitor::_sync_Parks = nullptr;
PerfCounter * ObjectMonitor::_sync_Notifications = nullptr;
PerfCounter * ObjectMonitor::_sync_Inflations = nullptr;
PerfCounter * ObjectMonitor::_sync_Deflations = nullptr;
PerfLongVariable * ObjectMonitor::_sync_MonExtant = nullptr;
// One-shot global initialization for the sync subsystem.
// We could also defer initialization and initialize on-demand

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -184,12 +184,12 @@ class ObjectMonitor : public CHeapObj<mtObjectMonitor> {
// allocated and if the PerfDataManager has not freed the PerfData
// objects which can happen at normal VM shutdown.
//
#define OM_PERFDATA_OP(f, op_str) \
do { \
if (ObjectMonitor::_sync_ ## f != NULL && \
PerfDataManager::has_PerfData()) { \
ObjectMonitor::_sync_ ## f->op_str; \
} \
#define OM_PERFDATA_OP(f, op_str) \
do { \
if (ObjectMonitor::_sync_ ## f != nullptr && \
PerfDataManager::has_PerfData()) { \
ObjectMonitor::_sync_ ## f->op_str; \
} \
} while (0)
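
With both guards folded into the macro, a counter update at a call site stays a one-liner; for example (a representative use, not part of this hunk):

// Expands to the guarded '_sync_Inflations->inc()' update above.
OM_PERFDATA_OP(Inflations, inc());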
static PerfCounter * _sync_ContendedLockAttempts;
@ -246,7 +246,7 @@ class ObjectMonitor : public CHeapObj<mtObjectMonitor> {
// Returns true if this OM has an owner, false otherwise.
bool has_owner() const;
void* owner() const; // Returns NULL if DEFLATER_MARKER is observed.
void* owner() const; // Returns null if DEFLATER_MARKER is observed.
void* owner_raw() const;
// Returns true if owner field == DEFLATER_MARKER and false otherwise.
bool owner_is_DEFLATER_MARKER() const;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -58,13 +58,13 @@ inline int ObjectMonitor::waiters() const {
inline bool ObjectMonitor::has_owner() const {
void* owner = owner_raw();
return owner != NULL && owner != DEFLATER_MARKER;
return owner != nullptr && owner != DEFLATER_MARKER;
}
// Returns NULL if DEFLATER_MARKER is observed.
// Returns null if DEFLATER_MARKER is observed.
inline void* ObjectMonitor::owner() const {
void* owner = owner_raw();
return owner != DEFLATER_MARKER ? owner : NULL;
return owner != DEFLATER_MARKER ? owner : nullptr;
}
inline void* ObjectMonitor::owner_raw() const {
@ -73,7 +73,7 @@ inline void* ObjectMonitor::owner_raw() const {
// Returns true if owner field == DEFLATER_MARKER and false otherwise.
// This accessor is called when we really need to know if the owner
// field == DEFLATER_MARKER and any non-NULL value won't do the trick.
// field == DEFLATER_MARKER and any non-null value won't do the trick.
inline bool ObjectMonitor::owner_is_DEFLATER_MARKER() const {
return owner_raw() == DEFLATER_MARKER;
}
@ -100,7 +100,7 @@ inline void ObjectMonitor::release_clear_owner(void* old_value) {
assert(prev == old_value, "unexpected prev owner=" INTPTR_FORMAT
", expected=" INTPTR_FORMAT, p2i(prev), p2i(old_value));
#endif
Atomic::release_store(&_owner, (void*)NULL);
Atomic::release_store(&_owner, (void*)nullptr);
log_trace(monitorinflation, owner)("release_clear_owner(): mid="
INTPTR_FORMAT ", old_value=" INTPTR_FORMAT,
p2i(this), p2i(old_value));

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -36,7 +36,7 @@ void OrderAccess::StubRoutines_fence() {
// nothing in that case but assert if no fence code exists after threads have been created
void (*func)() = CAST_TO_FN_PTR(void (*)(), StubRoutines::fence_entry());
if (func != NULL) {
if (func != nullptr) {
(*func)();
return;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -80,8 +80,8 @@
# include <signal.h>
# include <errno.h>
OSThread* os::_starting_thread = NULL;
address os::_polling_page = NULL;
OSThread* os::_starting_thread = nullptr;
address os::_polling_page = nullptr;
volatile unsigned int os::_rand_seed = 1234567;
int os::_processor_count = 0;
int os::_initial_active_processor_count = 0;
@ -111,7 +111,7 @@ int os::snprintf_checked(char* buf, size_t len, const char* fmt, ...) {
// Fill in buffer with current local time as an ISO-8601 string.
// E.g., YYYY-MM-DDThh:mm:ss.mmm+zzzz.
// Returns buffer, or NULL if it failed.
// Returns buffer, or null if it failed.
char* os::iso8601_time(char* buffer, size_t buffer_length, bool utc) {
const jlong now = javaTimeMillis();
return os::iso8601_time(now, buffer, buffer_length, utc);
@ -119,7 +119,7 @@ char* os::iso8601_time(char* buffer, size_t buffer_length, bool utc) {
// Fill in buffer with an ISO-8601 string corresponding to the given javaTimeMillis value
// E.g., yyyy-mm-ddThh:mm:ss-zzzz.
// Returns buffer, or NULL if it failed.
// Returns buffer, or null if it failed.
// This would mostly be a call to
// strftime(...., "%Y-%m-%d" "T" "%H:%M:%S" "%z", ....)
// except that on Windows the %z behaves badly, so we do it ourselves.
@ -129,13 +129,13 @@ char* os::iso8601_time(jlong milliseconds_since_19700101, char* buffer, size_t b
// Output will be of the form "YYYY-MM-DDThh:mm:ss.mmm+zzzz\0"
// Sanity check the arguments
if (buffer == NULL) {
assert(false, "NULL buffer");
return NULL;
if (buffer == nullptr) {
assert(false, "null buffer");
return nullptr;
}
if (buffer_length < os::iso8601_timestamp_size) {
assert(false, "buffer_length too small");
return NULL;
return nullptr;
}
const int milliseconds_per_microsecond = 1000;
const time_t seconds_since_19700101 =
@ -145,14 +145,14 @@ char* os::iso8601_time(jlong milliseconds_since_19700101, char* buffer, size_t b
// Convert the time value to a tm and timezone variable
struct tm time_struct;
if (utc) {
if (gmtime_pd(&seconds_since_19700101, &time_struct) == NULL) {
if (gmtime_pd(&seconds_since_19700101, &time_struct) == nullptr) {
assert(false, "Failed gmtime_pd");
return NULL;
return nullptr;
}
} else {
if (localtime_pd(&seconds_since_19700101, &time_struct) == NULL) {
if (localtime_pd(&seconds_since_19700101, &time_struct) == nullptr) {
assert(false, "Failed localtime_pd");
return NULL;
return nullptr;
}
}
@ -218,7 +218,7 @@ char* os::iso8601_time(jlong milliseconds_since_19700101, char* buffer, size_t b
zone_min);
if (printed == 0) {
assert(false, "Failed jio_printf");
return NULL;
return nullptr;
}
return buffer;
}
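
Computing the zone offset by hand is the portable part: strftime's %z is unreliable on Windows, so the code derives the +/-hhmm suffix itself. A rough standalone approximation of the same idea (POSIX localtime_r/gmtime_r; DST corner cases around the mktime round-trip are glossed over):

#include <cstdio>
#include <cstdlib>
#include <ctime>

int main() {
  time_t now = std::time(nullptr);
  struct tm local_tm, utc_tm;
  localtime_r(&now, &local_tm);   // POSIX; use localtime_s on Windows
  gmtime_r(&now, &utc_tm);

  // Seconds east of UTC, recovered by round-tripping both
  // broken-down times through mktime().
  long offset = static_cast<long>(std::difftime(std::mktime(&local_tm),
                                                std::mktime(&utc_tm)));
  long zone_hours = offset / 3600;
  long zone_min   = std::labs(offset % 3600) / 60;

  char buf[32];
  std::strftime(buf, sizeof(buf), "%Y-%m-%dT%H:%M:%S", &local_tm);
  std::printf("%s%+03ld%02ld\n", buf, zone_hours, zone_min);  // ...+0100
  return 0;
}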
@ -293,7 +293,7 @@ static bool conc_path_file_and_check(char *buffer, char *printbuffer, size_t pri
static void free_array_of_char_arrays(char** a, size_t n) {
while (n > 0) {
n--;
if (a[n] != NULL) {
if (a[n] != nullptr) {
FREE_C_HEAP_ARRAY(char, a[n]);
}
}
@ -312,21 +312,21 @@ bool os::dll_locate_lib(char *buffer, size_t buflen,
if (pnamelen == 0) {
// If no path given, use current working directory.
const char* p = get_current_directory(buffer, buflen);
if (p != NULL) {
if (p != nullptr) {
const size_t plen = strlen(buffer);
const char lastchar = buffer[plen - 1];
retval = conc_path_file_and_check(buffer, &buffer[plen], buflen - plen,
"", lastchar, fullfname);
}
} else if (strchr(pname, *os::path_separator()) != NULL) {
} else if (strchr(pname, *os::path_separator()) != nullptr) {
// A list of paths. Search for the path that contains the library.
size_t n;
char** pelements = split_path(pname, &n, fullfnamelen);
if (pelements != NULL) {
if (pelements != nullptr) {
for (size_t i = 0; i < n; i++) {
char* path = pelements[i];
// Really shouldn't be NULL, but check can't hurt.
size_t plen = (path == NULL) ? 0 : strlen(path);
// Really shouldn't be null, but check can't hurt.
size_t plen = (path == nullptr) ? 0 : strlen(path);
if (plen == 0) {
continue; // Skip the empty path values.
}
@ -425,7 +425,7 @@ static void signal_thread_entry(JavaThread* thread, TRAPS) {
// Dispatch the signal to java
HandleMark hm(THREAD);
Klass* klass = SystemDictionary::resolve_or_null(vmSymbols::jdk_internal_misc_Signal(), THREAD);
if (klass != NULL) {
if (klass != nullptr) {
JavaValue result(T_VOID);
JavaCallArguments args;
args.push_int(sig);
@ -442,13 +442,13 @@ static void signal_thread_entry(JavaThread* thread, TRAPS) {
// tty is initialized early so we don't expect it to be null, but
// if it is we can't risk doing an initialization that might
// trigger additional out-of-memory conditions
if (tty != NULL) {
if (tty != nullptr) {
char klass_name[256];
char tmp_sig_name[16];
const char* sig_name = "UNKNOWN";
InstanceKlass::cast(PENDING_EXCEPTION->klass())->
name()->as_klass_external_name(klass_name, 256);
if (os::exception_name(sig, tmp_sig_name, 16) != NULL)
if (os::exception_name(sig, tmp_sig_name, 16) != nullptr)
sig_name = tmp_sig_name;
warning("Exception %s occurred dispatching signal %s to handler"
"- the VM may need to be forcibly terminated",
@ -499,10 +499,10 @@ void os::terminate_signal_thread() {
typedef jint (JNICALL *JNI_OnLoad_t)(JavaVM *, void *);
extern struct JavaVM_ main_vm;
static void* _native_java_library = NULL;
static void* _native_java_library = nullptr;
void* os::native_java_library() {
if (_native_java_library == NULL) {
if (_native_java_library == nullptr) {
char buffer[JVM_MAXPATHLEN];
char ebuf[1024];
@ -511,7 +511,7 @@ void* os::native_java_library() {
"java")) {
_native_java_library = dll_load(buffer, ebuf, sizeof(ebuf));
}
if (_native_java_library == NULL) {
if (_native_java_library == nullptr) {
vm_exit_during_initialization("Unable to load native library", ebuf);
}
@ -538,24 +538,24 @@ void* os::native_java_library() {
*/
void* os::find_agent_function(AgentLibrary *agent_lib, bool check_lib,
const char *syms[], size_t syms_len) {
assert(agent_lib != NULL, "sanity check");
assert(agent_lib != nullptr, "sanity check");
const char *lib_name;
void *handle = agent_lib->os_lib();
void *entryName = NULL;
void *entryName = nullptr;
char *agent_function_name;
size_t i;
// If checking, then use the agent name; otherwise test is_static_lib() to
// see how to process this lookup.
lib_name = ((check_lib || agent_lib->is_static_lib()) ? agent_lib->name() : NULL);
lib_name = ((check_lib || agent_lib->is_static_lib()) ? agent_lib->name() : nullptr);
for (i = 0; i < syms_len; i++) {
agent_function_name = build_agent_function_name(syms[i], lib_name, agent_lib->is_absolute_path());
if (agent_function_name == NULL) {
if (agent_function_name == nullptr) {
break;
}
entryName = dll_lookup(handle, agent_function_name);
FREE_C_HEAP_ARRAY(char, agent_function_name);
if (entryName != NULL) {
if (entryName != nullptr) {
break;
}
}
@ -569,8 +569,8 @@ bool os::find_builtin_agent(AgentLibrary *agent_lib, const char *syms[],
void *proc_handle;
void *save_handle;
assert(agent_lib != NULL, "sanity check");
if (agent_lib->name() == NULL) {
assert(agent_lib != nullptr, "sanity check");
if (agent_lib->name() == nullptr) {
return false;
}
proc_handle = get_default_process_handle();
@ -579,7 +579,7 @@ bool os::find_builtin_agent(AgentLibrary *agent_lib, const char *syms[],
// We want to look in this process' symbol table.
agent_lib->set_os_lib(proc_handle);
ret = find_agent_function(agent_lib, true, syms, syms_len);
if (ret != NULL) {
if (ret != nullptr) {
// Found an entry point like Agent_OnLoad_lib_name so we have a static agent
agent_lib->set_valid();
agent_lib->set_static_lib(true);
@ -594,14 +594,14 @@ bool os::find_builtin_agent(AgentLibrary *agent_lib, const char *syms[],
char *os::strdup(const char *str, MEMFLAGS flags) {
size_t size = strlen(str);
char *dup_str = (char *)malloc(size + 1, flags);
if (dup_str == NULL) return NULL;
if (dup_str == nullptr) return nullptr;
strcpy(dup_str, str);
return dup_str;
}
char* os::strdup_check_oom(const char* str, MEMFLAGS flags) {
char* p = os::strdup(str, flags);
if (p == NULL) {
if (p == nullptr) {
vm_exit_out_of_memory(strlen(str) + 1, OOM_MALLOC_ERROR, "os::strdup_check_oom");
}
return p;
@ -644,7 +644,7 @@ void* os::malloc(size_t size, MEMFLAGS flags) {
void* os::malloc(size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {
// Special handling for NMT preinit phase before arguments are parsed
void* rc = NULL;
void* rc = nullptr;
if (NMTPreInit::handle_malloc(&rc, size)) {
// No need to fill with 0 because DumpSharedSpaces doesn't use these
// early allocations.
@ -654,25 +654,25 @@ void* os::malloc(size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {
DEBUG_ONLY(check_crash_protection());
// On malloc(0), implementations of malloc(3) have the choice to return either
// NULL or a unique non-NULL pointer. To unify libc behavior across our platforms
// null or a unique non-null pointer. To unify libc behavior across our platforms
// we chose the latter.
size = MAX2((size_t)1, size);
// For the test flag -XX:MallocMaxTestWords
if (has_reached_max_malloc_test_peak(size)) {
return NULL;
return nullptr;
}
const size_t outer_size = size + MemTracker::overhead_per_malloc();
// Check for overflow.
if (outer_size < size) {
return NULL;
return nullptr;
}
ALLOW_C_FUNCTION(::malloc, void* const outer_ptr = ::malloc(outer_size);)
if (outer_ptr == NULL) {
return NULL;
if (outer_ptr == nullptr) {
return nullptr;
}
void* const inner_ptr = MemTracker::record_malloc((address)outer_ptr, size, memflags, stack);
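The overflow guard above relies on unsigned wraparound: adding the NMT header overhead to a size near SIZE_MAX wraps to a small outer_size, the outer_size < size test trips, and the request is refused rather than under-allocated. For example, size = SIZE_MAX - 8 with 16 bytes of overhead wraps to outer_size = 7.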
@ -694,25 +694,25 @@ void* os::realloc(void *memblock, size_t size, MEMFLAGS flags) {
void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {
// Special handling for NMT preinit phase before arguments are parsed
void* rc = NULL;
void* rc = nullptr;
if (NMTPreInit::handle_realloc(&rc, memblock, size)) {
return rc;
}
if (memblock == NULL) {
if (memblock == nullptr) {
return os::malloc(size, memflags, stack);
}
DEBUG_ONLY(check_crash_protection());
// On realloc(p, 0), implementers of realloc(3) have the choice to return either
// NULL or a unique non-NULL pointer. To unify libc behavior across our platforms
// null or a unique non-null pointer. To unify libc behavior across our platforms
// we chose the latter.
size = MAX2((size_t)1, size);
// For the test flag -XX:MallocMaxTestWords
if (has_reached_max_malloc_test_peak(size)) {
return NULL;
return nullptr;
}
if (MemTracker::enabled()) {
@ -722,7 +722,7 @@ void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCa
// Handle size overflow.
if (new_outer_size < size) {
return NULL;
return nullptr;
}
// Perform integrity checks on and mark the old block as dead *before* calling the real realloc(3) since it
@ -735,7 +735,7 @@ void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCa
// the real realloc
ALLOW_C_FUNCTION(::realloc, void* const new_outer_ptr = ::realloc(header, new_outer_size);)
if (new_outer_ptr == NULL) {
if (new_outer_ptr == nullptr) {
// realloc(3) failed and the block still exists.
// We have however marked it as dead, revert this change.
header->revive();
@ -762,8 +762,8 @@ void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCa
// NMT disabled.
ALLOW_C_FUNCTION(::realloc, rc = ::realloc(memblock, size);)
if (rc == NULL) {
return NULL;
if (rc == nullptr) {
return nullptr;
}
}
@ -780,7 +780,7 @@ void os::free(void *memblock) {
return;
}
if (memblock == NULL) {
if (memblock == nullptr) {
return;
}
@ -860,7 +860,7 @@ void os::start_thread(Thread* thread) {
}
void os::abort(bool dump_core) {
abort(dump_core && CreateCoredumpOnCrash, NULL, NULL);
abort(dump_core && CreateCoredumpOnCrash, nullptr, nullptr);
}
//---------------------------------------------------------------------------
@ -876,7 +876,7 @@ bool os::print_function_and_library_name(outputStream* st,
// (used during error handling; it's a coin toss, really, if on-stack allocation
// is worse than (raw) C-heap allocation in that case).
char* p = buf;
if (p == NULL) {
if (p == nullptr) {
p = (char*)::alloca(O_BUFLEN);
buflen = O_BUFLEN;
}
@ -903,7 +903,7 @@ bool os::print_function_and_library_name(outputStream* st,
// Print function name, optionally demangled
if (demangle && strip_arguments) {
char* args_start = strchr(p, '(');
if (args_start != NULL) {
if (args_start != nullptr) {
*args_start = '\0';
}
}
@ -924,7 +924,7 @@ bool os::print_function_and_library_name(outputStream* st,
// Cut path parts
if (shorten_paths) {
char* p2 = strrchr(p, os::file_separator()[0]);
if (p2 != NULL) {
if (p2 != nullptr) {
p = p2 + 1;
}
}
@ -987,7 +987,7 @@ void os::print_dhm(outputStream* st, const char* startStr, long sec) {
long days = sec/86400;
long hours = (sec/3600) - (days * 24);
long minutes = (sec/60) - (days * 1440) - (hours * 60);
if (startStr == NULL) startStr = "";
if (startStr == nullptr) startStr = "";
st->print_cr("%s %ld days %ld:%02ld hours", startStr, days, hours, minutes);
}
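Worked example: sec = 93784 (one day, two hours, three minutes, four seconds) gives days = 93784/86400 = 1, hours = 93784/3600 - 24 = 2, and minutes = 93784/60 - 1440 - 120 = 3, so the line prints as "1 days 2:03 hours" after the startStr prefix.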
@ -1005,9 +1005,9 @@ void os::print_environment_variables(outputStream* st, const char** env_list) {
if (env_list) {
st->print_cr("Environment Variables:");
for (int i = 0; env_list[i] != NULL; i++) {
for (int i = 0; env_list[i] != nullptr; i++) {
char *envvar = ::getenv(env_list[i]);
if (envvar != NULL) {
if (envvar != nullptr) {
st->print("%s", env_list[i]);
st->print("=");
st->print("%s", envvar);
@ -1070,12 +1070,12 @@ void os::print_date_and_time(outputStream *st, char* buf, size_t buflen) {
char* timestring = ctime(&tloc); // ctime adds newline.
// edit out the newline
char* nl = strchr(timestring, '\n');
if (nl != NULL) {
if (nl != nullptr) {
*nl = '\0';
}
struct tm tz;
if (localtime_pd(&tloc, &tz) != NULL) {
if (localtime_pd(&tloc, &tz) != nullptr) {
wchar_t w_buf[80];
size_t n = ::wcsftime(w_buf, 80, L"%Z", &tz);
if (n > 0) {
@ -1106,7 +1106,7 @@ void os::print_date_and_time(outputStream *st, char* buf, size_t buflen) {
// Check if pointer can be read from (4-byte read access).
// Helps to prove validity of a not-NULL pointer.
// Helps to prove validity of a non-null pointer.
// Returns true in very early stages of VM life when stub is not yet generated.
bool os::is_readable_pointer(const void* p) {
int* const aligned = (int*) align_down((intptr_t)p, 4);
@ -1130,15 +1130,15 @@ bool os::is_readable_range(const void* from, const void* to) {
// The verbose parameter is only set by the debug code in one case
void os::print_location(outputStream* st, intptr_t x, bool verbose) {
address addr = (address)x;
// Handle NULL first, so later checks don't need to protect against it.
if (addr == NULL) {
st->print_cr("0x0 is NULL");
// Handle null first, so later checks don't need to protect against it.
if (addr == nullptr) {
st->print_cr("0x0 is nullptr");
return;
}
// Check if addr points into a code blob.
CodeBlob* b = CodeCache::find_blob(addr);
if (b != NULL) {
if (b != nullptr) {
b->dump_for_addr(addr, st, verbose);
return;
}
@ -1330,7 +1330,7 @@ FILE* os::fopen(const char* path, const char* mode) {
#if !(defined LINUX || defined BSD || defined _WINDOWS)
// assume fcntl FD_CLOEXEC support as a backup solution when 'e' or 'N'
// is not supported as mode in fopen
if (file != NULL) {
if (file != nullptr) {
int fd = fileno(file);
if (fd != -1) {
int fd_flags = fcntl(fd, F_GETFD);
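The truncated fallback continues with the standard fcntl close-on-exec idiom; a hedged sketch follows (set_cloexec is an illustrative helper, not the HotSpot code):

#include <fcntl.h>

// Mark an already-open descriptor close-on-exec, for platforms whose
// fopen() does not understand the 'e' or 'N' mode letters.
static void set_cloexec(int fd) {
  int flags = fcntl(fd, F_GETFD);
  if (flags != -1) {
    (void) fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
  }
}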
@ -1352,7 +1352,7 @@ bool os::set_boot_path(char fileSep, char pathSep) {
// modular image if "modules" jimage exists
char* jimage = format_boot_path("%/lib/" MODULES_IMAGE_NAME, home, home_len, fileSep, pathSep);
if (jimage == NULL) return false;
if (jimage == nullptr) return false;
bool has_jimage = (os::stat(jimage, &st) == 0);
if (has_jimage) {
Arguments::set_boot_class_path(jimage, true);
@ -1363,7 +1363,7 @@ bool os::set_boot_path(char fileSep, char pathSep) {
// check if developer build with exploded modules
char* base_classes = format_boot_path("%/modules/" JAVA_BASE_NAME, home, home_len, fileSep, pathSep);
if (base_classes == NULL) return false;
if (base_classes == nullptr) return false;
if (os::stat(base_classes, &st) == 0) {
Arguments::set_boot_class_path(base_classes, false);
FREE_C_HEAP_ARRAY(char, base_classes);
@ -1376,7 +1376,7 @@ bool os::set_boot_path(char fileSep, char pathSep) {
bool os::file_exists(const char* filename) {
struct stat statbuf;
if (filename == NULL || strlen(filename) == 0) {
if (filename == nullptr || strlen(filename) == 0) {
return false;
}
return os::stat(filename, &statbuf) == 0;
@ -1397,8 +1397,8 @@ bool os::file_exists(const char* filename) {
// c> free up the data.
char** os::split_path(const char* path, size_t* elements, size_t file_name_length) {
*elements = (size_t)0;
if (path == NULL || strlen(path) == 0 || file_name_length == (size_t)NULL) {
return NULL;
if (path == nullptr || strlen(path) == 0 || file_name_length == (size_t)nullptr) {
return nullptr;
}
const char psepchar = *os::path_separator();
char* inpath = NEW_C_HEAP_ARRAY(char, strlen(path) + 1, mtInternal);
@ -1406,7 +1406,7 @@ char** os::split_path(const char* path, size_t* elements, size_t file_name_lengt
size_t count = 1;
char* p = strchr(inpath, psepchar);
// Get a count of elements to allocate memory
while (p != NULL) {
while (p != nullptr) {
count++;
p++;
p = strchr(p, psepchar);
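Worked example of the counting pass: with ':' as separator, "lib:/usr/lib::/opt" contains three separator characters, so count ends at 4 and four element slots are allocated; the empty element produced by the double colon is skipped later by the plen == 0 check shown in the earlier hunk.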
@ -1499,7 +1499,7 @@ void os::pause() {
#if defined(_WINDOWS)
Sleep(100);
#else
(void)::poll(NULL, 0, 100);
(void)::poll(nullptr, 0, 100);
#endif
}
} else {
@ -1737,7 +1737,7 @@ bool os::create_stack_guard_pages(char* addr, size_t bytes) {
char* os::reserve_memory(size_t bytes, bool executable, MEMFLAGS flags) {
char* result = pd_reserve_memory(bytes, executable);
if (result != NULL) {
if (result != nullptr) {
MemTracker::record_virtual_memory_reserve(result, bytes, CALLER_PC, flags);
}
return result;
@ -1745,7 +1745,7 @@ char* os::reserve_memory(size_t bytes, bool executable, MEMFLAGS flags) {
char* os::attempt_reserve_memory_at(char* addr, size_t bytes, bool executable) {
char* result = pd_attempt_reserve_memory_at(addr, bytes, executable);
if (result != NULL) {
if (result != nullptr) {
MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
} else {
log_debug(os)("Attempt to reserve memory at " INTPTR_FORMAT " for "
@ -1862,9 +1862,9 @@ void os::pretouch_memory(void* start, void* end, size_t page_size) {
char* os::map_memory_to_file(size_t bytes, int file_desc) {
// Could have called pd_reserve_memory() followed by replace_existing_mapping_with_file_mapping(),
// but AIX may use SHM in which case it's more trouble to detach the segment and remap memory to the file.
// On all current implementations NULL is interpreted as any available address.
char* result = os::map_memory_to_file(NULL /* addr */, bytes, file_desc);
if (result != NULL) {
// On all current implementations null is interpreted as any available address.
char* result = os::map_memory_to_file(nullptr /* addr */, bytes, file_desc);
if (result != nullptr) {
MemTracker::record_virtual_memory_reserve_and_commit(result, bytes, CALLER_PC);
}
return result;
@ -1872,7 +1872,7 @@ char* os::map_memory_to_file(size_t bytes, int file_desc) {
char* os::attempt_map_memory_to_file_at(char* addr, size_t bytes, int file_desc) {
char* result = pd_attempt_map_memory_to_file_at(addr, bytes, file_desc);
if (result != NULL) {
if (result != nullptr) {
MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC);
}
return result;
@ -1882,7 +1882,7 @@ char* os::map_memory(int fd, const char* file_name, size_t file_offset,
char *addr, size_t bytes, bool read_only,
bool allow_exec, MEMFLAGS flags) {
char* result = pd_map_memory(fd, file_name, file_offset, addr, bytes, read_only, allow_exec);
if (result != NULL) {
if (result != nullptr) {
MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC, flags);
}
return result;
@ -1923,7 +1923,7 @@ char* os::reserve_memory_special(size_t size, size_t alignment, size_t page_size
assert(is_aligned(addr, alignment), "Unaligned request address");
char* result = pd_reserve_memory_special(size, alignment, page_size, addr, executable);
if (result != NULL) {
if (result != nullptr) {
// The memory is committed
MemTracker::record_virtual_memory_reserve_and_commit((address)result, size, CALLER_PC);
}
View File
@ -290,13 +290,13 @@ class os: AllStatic {
// Fill in buffer with an ISO-8601 string corresponding to the given javaTimeMillis value
// E.g., YYYY-MM-DDThh:mm:ss.mmm+zzzz.
// Returns buffer, or NULL if it failed.
// Returns buffer, or null if it failed.
static char* iso8601_time(jlong milliseconds_since_19700101, char* buffer,
size_t buffer_length, bool utc = false);
// Fill in buffer with current local time as an ISO-8601 string.
// E.g., YYYY-MM-DDThh:mm:ss.mmm+zzzz.
// Returns buffer, or NULL if it failed.
// Returns buffer, or null if it failed.
static char* iso8601_time(char* buffer, size_t buffer_length, bool utc = false);
// Interface for detecting multiprocessor system
@ -672,7 +672,7 @@ class os: AllStatic {
// dladdr() for all platforms. Name of the nearest function is copied
// to buf. Distance from its base address is optionally returned as offset.
// If function name is not found, buf[0] is set to '\0' and offset is
// set to -1 (if offset is non-NULL).
// set to -1 (if offset is non-null).
static bool dll_address_to_function_name(address addr, char* buf,
int buflen, int* offset,
bool demangle = true);
@ -680,7 +680,7 @@ class os: AllStatic {
// Locate DLL/DSO. On success, full path of the library is copied to
// buf, and offset is optionally set to be the distance between addr
// and the library's base address. On failure, buf[0] is set to '\0'
// and offset is set to -1 (if offset is non-NULL).
// and offset is set to -1 (if offset is non-null).
static bool dll_address_to_library_name(address addr, char* buf,
int buflen, int* offset);
@ -697,7 +697,7 @@ class os: AllStatic {
// "<address> in <library>+<offset>"
static bool print_function_and_library_name(outputStream* st,
address addr,
char* buf = NULL, int buflen = 0,
char* buf = nullptr, int buflen = 0,
bool shorten_paths = true,
bool demangle = true,
bool strip_arguments = false);
@ -711,7 +711,7 @@ class os: AllStatic {
// Loads .dll/.so and
// in case of error it checks if .dll/.so was built for the
// same architecture as HotSpot is running on
// in case of an error NULL is returned and an error message is stored in ebuf
// in case of an error null is returned and an error message is stored in ebuf
static void* dll_load(const char *name, char *ebuf, int ebuflen);
// lookup symbol in a shared library
@ -816,7 +816,7 @@ class os: AllStatic {
static bool is_first_C_frame(frame *fr);
static frame get_sender_for_C_frame(frame *fr);
// return current frame. pc() and sp() are set to NULL on failure.
// return current frame. pc() and sp() are set to null on failure.
static frame current_frame();
static void print_hex_dump(outputStream* st, address start, address end, int unitsize,
@ -826,7 +826,7 @@ class os: AllStatic {
}
// returns a string to describe the exception/signal;
// returns NULL if exception_code is not an OS exception/signal.
// returns null if exception_code is not an OS exception/signal.
static const char* exception_name(int exception_code, char* buf, size_t buflen);
// Returns the signal number (e.g. 11) for a given signal name (SIGSEGV).
@ -868,10 +868,10 @@ class os: AllStatic {
static void* realloc (void *memblock, size_t size, MEMFLAGS flag, const NativeCallStack& stack);
static void* realloc (void *memblock, size_t size, MEMFLAGS flag);
// handles NULL pointers
// handles null pointers
static void free (void *memblock);
static char* strdup(const char *, MEMFLAGS flags = mtInternal); // Like strdup
// Like strdup, but exit VM when strdup() returns NULL
// Like strdup, but exit VM when strdup() returns null
static char* strdup_check_oom(const char*, MEMFLAGS flags = mtInternal);
// SocketInterface (ex HPI SocketInterface )
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -55,7 +55,7 @@ inline bool os::register_code_area(char *low, char *high) {
#ifndef HAVE_FUNCTION_DESCRIPTORS
inline void* os::resolve_function_descriptor(void* p) {
return NULL;
return nullptr;
}
#endif
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -51,7 +51,7 @@
// immediately.
volatile int ParkEvent::ListLock = 0 ;
ParkEvent * volatile ParkEvent::FreeList = NULL ;
ParkEvent * volatile ParkEvent::FreeList = nullptr ;
ParkEvent * ParkEvent::Allocate (Thread * t) {
ParkEvent * ev ;
@ -64,14 +64,14 @@ ParkEvent * ParkEvent::Allocate (Thread * t) {
Thread::SpinAcquire(&ListLock, "ParkEventFreeListAllocate");
{
ev = FreeList;
if (ev != NULL) {
if (ev != nullptr) {
FreeList = ev->FreeNext;
}
}
Thread::SpinRelease(&ListLock);
if (ev != NULL) {
guarantee (ev->AssociatedWith == NULL, "invariant") ;
if (ev != nullptr) {
guarantee (ev->AssociatedWith == nullptr, "invariant") ;
} else {
// Do this the hard way -- materialize a new ParkEvent.
ev = new ParkEvent () ;
@ -79,14 +79,14 @@ ParkEvent * ParkEvent::Allocate (Thread * t) {
}
ev->reset() ; // courtesy to caller
ev->AssociatedWith = t ; // Associate ev with t
ev->FreeNext = NULL ;
ev->FreeNext = nullptr ;
return ev ;
}
void ParkEvent::Release (ParkEvent * ev) {
if (ev == NULL) return ;
guarantee (ev->FreeNext == NULL , "invariant") ;
ev->AssociatedWith = NULL ;
if (ev == nullptr) return ;
guarantee (ev->FreeNext == nullptr , "invariant") ;
ev->AssociatedWith = nullptr ;
// Note that if we didn't have the TSM/immortal constraint, then
// when reattaching we could trim the list.
Thread::SpinAcquire(&ListLock, "ParkEventFreeListRelease");
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -137,9 +137,9 @@ class ParkEvent : public PlatformEvent {
~ParkEvent() { guarantee (0, "invariant") ; }
ParkEvent() : PlatformEvent() {
AssociatedWith = NULL ;
FreeNext = NULL ;
ListNext = NULL ;
AssociatedWith = nullptr ;
FreeNext = nullptr ;
ListNext = nullptr ;
TState = 0 ;
Notified = 0 ;
}
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,9 +37,9 @@
#include "utilities/exceptions.hpp"
#include "utilities/globalDefinitions.hpp"
PerfDataList* PerfDataManager::_all = NULL;
PerfDataList* PerfDataManager::_sampled = NULL;
PerfDataList* PerfDataManager::_constants = NULL;
PerfDataList* PerfDataManager::_all = nullptr;
PerfDataList* PerfDataManager::_sampled = nullptr;
PerfDataList* PerfDataManager::_constants = nullptr;
volatile bool PerfDataManager::_has_PerfData = 0;
/*
@ -80,7 +80,7 @@ const char* PerfDataManager::_name_spaces[] = {
};
PerfData::PerfData(CounterNS ns, const char* name, Units u, Variability v)
: _name(NULL), _v(v), _u(u), _on_c_heap(false), _valuep(NULL) {
: _name(nullptr), _v(v), _u(u), _on_c_heap(false), _valuep(nullptr) {
const char* prefix = PerfDataManager::ns_to_string(ns);
@ -136,7 +136,7 @@ void PerfData::create_entry(BasicType dtype, size_t dsize, size_t vlen) {
size = ((size + align) & ~align);
char* psmp = PerfMemory::alloc(size);
if (psmp == NULL) {
if (psmp == nullptr) {
// out of PerfMemory memory resources. allocate on the C heap
// to avoid vm termination.
psmp = NEW_C_HEAP_ARRAY(char, size, mtInternal);
@ -194,7 +194,7 @@ PerfLong::PerfLong(CounterNS ns, const char* namep, Units u, Variability v)
PerfLongVariant::PerfLongVariant(CounterNS ns, const char* namep, Units u,
Variability v, jlong* sampled)
: PerfLong(ns, namep, u, v),
_sampled(sampled), _sample_helper(NULL) {
_sampled(sampled), _sample_helper(nullptr) {
sample();
}
@ -202,13 +202,13 @@ PerfLongVariant::PerfLongVariant(CounterNS ns, const char* namep, Units u,
PerfLongVariant::PerfLongVariant(CounterNS ns, const char* namep, Units u,
Variability v, PerfLongSampleHelper* helper)
: PerfLong(ns, namep, u, v),
_sampled(NULL), _sample_helper(helper) {
_sampled(nullptr), _sample_helper(helper) {
sample();
}
void PerfLongVariant::sample() {
if (_sample_helper != NULL) {
if (_sample_helper != nullptr) {
*(jlong*)_valuep = _sample_helper->take_sample();
}
}
@ -223,8 +223,8 @@ PerfByteArray::PerfByteArray(CounterNS ns, const char* namep, Units u,
void PerfString::set_string(const char* s2) {
// copy n bytes of the string, assuring the null string is
// copied if s2 == NULL.
strncpy((char *)_valuep, s2 == NULL ? "" : s2, _length);
// copied if s2 == nullptr.
strncpy((char *)_valuep, s2 == nullptr ? "" : s2, _length);
// assure the string is null terminated when strlen(s2) >= _length
((char*)_valuep)[_length-1] = '\0';
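Worked example: with _length = 4 and s2 = "abcdef", strncpy copies 'a' 'b' 'c' 'd' and stores no terminator; the explicit write then turns the buffer into "abc" plus '\0'. strncpy by itself never guarantees termination once strlen(s2) >= _length, which is exactly the case the second line handles.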
@ -233,13 +233,13 @@ void PerfString::set_string(const char* s2) {
PerfStringConstant::PerfStringConstant(CounterNS ns, const char* namep,
const char* initial_value)
: PerfString(ns, namep, V_Constant,
initial_value == NULL ? 1 :
initial_value == nullptr ? 1 :
MIN2((jint)(strlen((char*)initial_value)+1),
(jint)(PerfMaxStringConstLength+1)),
initial_value) {
if (PrintMiscellaneous && Verbose) {
if (is_valid() && initial_value != NULL &&
if (is_valid() && initial_value != nullptr &&
((jint)strlen(initial_value) > (jint)PerfMaxStringConstLength)) {
warning("Truncating PerfStringConstant: name = %s,"
@ -255,7 +255,7 @@ PerfStringConstant::PerfStringConstant(CounterNS ns, const char* namep,
void PerfDataManager::destroy() {
if (_all == NULL)
if (_all == nullptr)
// destroy already called, or initialization never happened
return;
@ -270,8 +270,8 @@ void PerfDataManager::destroy() {
os::naked_short_sleep(1); // 1ms sleep to let other thread(s) run
log_debug(perf, datacreation)("Total = %d, Sampled = %d, Constants = %d",
_all->length(), _sampled == NULL ? 0 : _sampled->length(),
_constants == NULL ? 0 : _constants->length());
_all->length(), _sampled == nullptr ? 0 : _sampled->length(),
_constants == nullptr ? 0 : _constants->length());
for (int index = 0; index < _all->length(); index++) {
PerfData* p = _all->at(index);
@ -282,9 +282,9 @@ void PerfDataManager::destroy() {
delete(_sampled);
delete(_constants);
_all = NULL;
_sampled = NULL;
_constants = NULL;
_all = nullptr;
_sampled = nullptr;
_constants = nullptr;
}
void PerfDataManager::add_item(PerfData* p, bool sampled) {
@ -292,7 +292,7 @@ void PerfDataManager::add_item(PerfData* p, bool sampled) {
MutexLocker ml(PerfDataManager_lock);
// Default sizes determined using -Xlog:perf+datacreation=debug
if (_all == NULL) {
if (_all == nullptr) {
_all = new PerfDataList(191);
_has_PerfData = true;
}
@ -303,7 +303,7 @@ void PerfDataManager::add_item(PerfData* p, bool sampled) {
_all->append(p);
if (p->variability() == PerfData::V_Constant) {
if (_constants == NULL) {
if (_constants == nullptr) {
_constants = new PerfDataList(51);
}
_constants->append(p);
@ -311,7 +311,7 @@ void PerfDataManager::add_item(PerfData* p, bool sampled) {
}
if (sampled) {
if (_sampled == NULL) {
if (_sampled == nullptr) {
_sampled = new PerfDataList(1);
}
_sampled->append(p);
@ -322,16 +322,16 @@ PerfDataList* PerfDataManager::sampled() {
MutexLocker ml(PerfDataManager_lock);
if (_sampled == NULL)
return NULL;
if (_sampled == nullptr)
return nullptr;
PerfDataList* clone = _sampled->clone();
return clone;
}
char* PerfDataManager::counter_name(const char* ns, const char* name) {
assert(ns != NULL, "ns string required");
assert(name != NULL, "name string required");
assert(ns != nullptr, "ns string required");
assert(name != nullptr, "name string required");
size_t len = strlen(ns) + strlen(name) + 2;
char* result = NEW_RESOURCE_ARRAY(char, len);
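The length accounting: strlen(ns) + strlen(name) + 2 covers the joining '.' plus the trailing '\0'. For instance, ns = "java.threads" and name = "live" gives 12 + 4 + 2 = 18 bytes, exactly enough for "java.threads.live" and its terminator.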
@ -394,7 +394,7 @@ PerfStringVariable* PerfDataManager::create_string_variable(CounterNS ns,
const char* s,
TRAPS) {
if (max_length == 0 && s != NULL) max_length = (int)strlen(s);
if (max_length == 0 && s != nullptr) max_length = (int)strlen(s);
assert(max_length != 0, "PerfStringVariable with length 0");
@ -436,7 +436,7 @@ PerfLongVariable* PerfDataManager::create_long_variable(CounterNS ns,
TRAPS) {
// Sampled counters not supported if UsePerfData is false
if (!UsePerfData) return NULL;
if (!UsePerfData) return nullptr;
PerfLongVariable* p = new PerfLongVariable(ns, name, u, sh);
@ -476,7 +476,7 @@ PerfLongCounter* PerfDataManager::create_long_counter(CounterNS ns,
TRAPS) {
// Sampled counters not supported if UsePerfData is false
if (!UsePerfData) return NULL;
if (!UsePerfData) return nullptr;
PerfLongCounter* p = new PerfLongCounter(ns, name, u, sh);
@ -511,7 +511,7 @@ PerfDataList::~PerfDataList() {
bool PerfDataList::by_name(void* name, PerfData* pd) {
if (pd == NULL)
if (pd == nullptr)
return false;
return strcmp((const char*)name, pd->name()) == 0;
@ -524,14 +524,14 @@ PerfData* PerfDataList::find_by_name(const char* name) {
if (i >= 0 && i <= _set->length())
return _set->at(i);
else
return NULL;
return nullptr;
}
PerfDataList* PerfDataList::clone() {
PerfDataList* copy = new PerfDataList(this);
assert(copy != NULL, "just checking");
assert(copy != nullptr, "just checking");
return copy;
}
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -307,7 +307,7 @@ class PerfData : public CHeapObj<mtInternal> {
// returns a boolean indicating the validity of this object.
// the object is valid if and only if memory in PerfMemory
// region was successfully allocated.
inline bool is_valid() { return _valuep != NULL; }
inline bool is_valid() { return _valuep != nullptr; }
// returns a boolean indicating whether the underlying object
// was allocated in the PerfMemory region or on the C heap.
@ -604,12 +604,12 @@ class PerfDataList : public CHeapObj<mtInternal> {
~PerfDataList();
// return the PerfData item indicated by name,
// or NULL if it doesn't exist.
// or null if it doesn't exist.
PerfData* find_by_name(const char* name);
// return true if a PerfData item with the name specified in the
// argument exists, otherwise return false.
bool contains(const char* name) { return find_by_name(name) != NULL; }
bool contains(const char* name) { return find_by_name(name) != nullptr; }
// return the number of PerfData items in this list
inline int length();
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -43,7 +43,7 @@ inline PerfData* PerfDataList::at(int index) {
}
inline bool PerfDataManager::exists(const char* name) {
if (_all != NULL) {
if (_all != nullptr) {
return _all->contains(name);
} else {
return false;
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -47,12 +47,12 @@ const char PERFDATA_NAME[] = "hsperfdata";
static const size_t PERFDATA_FILENAME_LEN = sizeof(PERFDATA_NAME) +
UINT_CHARS + 1;
char* PerfMemory::_start = NULL;
char* PerfMemory::_end = NULL;
char* PerfMemory::_top = NULL;
char* PerfMemory::_start = nullptr;
char* PerfMemory::_end = nullptr;
char* PerfMemory::_top = nullptr;
size_t PerfMemory::_capacity = 0;
int PerfMemory::_initialized = false;
PerfDataPrologue* PerfMemory::_prologue = NULL;
PerfDataPrologue* PerfMemory::_prologue = nullptr;
bool PerfMemory::_destroyed = false;
void perfMemory_init() {
@ -106,7 +106,7 @@ void PerfMemory::initialize() {
// allocate PerfData memory region
create_memory_region(capacity);
if (_start == NULL) {
if (_start == nullptr) {
// the PerfMemory region could not be created as desired. Rather
// than terminating the JVM, we revert to creating the instrumentation
@ -136,7 +136,7 @@ void PerfMemory::initialize() {
_top = _start + sizeof(PerfDataPrologue);
}
assert(_prologue != NULL, "prologue pointer must be initialized");
assert(_prologue != nullptr, "prologue pointer must be initialized");
#ifdef VM_LITTLE_ENDIAN
_prologue->magic = (jint)0xc0c0feca;
@ -163,7 +163,7 @@ void PerfMemory::destroy() {
if (!is_usable()) return;
if (_start != NULL && _prologue->overflow != 0) {
if (_start != nullptr && _prologue->overflow != 0) {
// This state indicates that the contiguous memory region exists and
// that it wasn't large enough to hold all the counters. In this case,
@ -188,7 +188,7 @@ void PerfMemory::destroy() {
}
}
if (_start != NULL) {
if (_start != nullptr) {
// this state indicates that the contiguous memory region was successfully
// created and that persistent resources may need to be cleaned up. This is
@ -206,7 +206,7 @@ void PerfMemory::destroy() {
//
char* PerfMemory::alloc(size_t size) {
if (!UsePerfData) return NULL;
if (!UsePerfData) return nullptr;
MutexLocker ml(PerfDataMemAlloc_lock);
@ -217,7 +217,7 @@ char* PerfMemory::alloc(size_t size) {
_prologue->overflow += (jint)size;
return NULL;
return nullptr;
}
char* result = _top;
@ -243,9 +243,9 @@ void PerfMemory::mark_updated() {
// Returns the complete path including the file name of performance data file.
// Caller is expected to release the allocated memory.
char* PerfMemory::get_perfdata_file_path() {
char* dest_file = NULL;
char* dest_file = nullptr;
if (PerfDataSaveFile != NULL) {
if (PerfDataSaveFile != nullptr) {
// dest_file_name stores the validated file name if file_name
// contains %p which will be replaced by pid.
dest_file = NEW_C_HEAP_ARRAY(char, JVM_MAXPATHLEN, mtInternal);
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -137,7 +137,7 @@ class PerfMemory : AllStatic {
static bool is_destroyed() { return _destroyed; }
static bool is_usable() { return is_initialized() && !is_destroyed(); }
static bool contains(char* addr) {
return ((_start != NULL) && (addr >= _start) && (addr < _end));
return ((_start != nullptr) && (addr >= _start) && (addr < _end));
}
static void mark_updated();
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -54,14 +54,14 @@
#include "utilities/formatBuffer.hpp"
static void trace_class_resolution(oop mirror) {
if (mirror == NULL || java_lang_Class::is_primitive(mirror)) {
if (mirror == nullptr || java_lang_Class::is_primitive(mirror)) {
return;
}
Klass* to_class = java_lang_Class::as_Klass(mirror);
ResourceMark rm;
int line_number = -1;
const char * source_file = NULL;
Klass* caller = NULL;
const char * source_file = nullptr;
Klass* caller = nullptr;
JavaThread* jthread = JavaThread::current();
if (jthread->has_last_Java_frame()) {
vframeStream vfst(jthread);
@ -75,16 +75,16 @@ static void trace_class_resolution(oop mirror) {
caller = vfst.method()->method_holder();
line_number = vfst.method()->line_number_from_bci(vfst.bci());
Symbol* s = vfst.method()->method_holder()->source_file_name();
if (s != NULL) {
if (s != nullptr) {
source_file = s->as_C_string();
}
}
}
if (caller != NULL) {
if (caller != nullptr) {
const char * from = caller->external_name();
const char * to = to_class->external_name();
// print in a single call to reduce interleaving between threads
if (source_file != NULL) {
if (source_file != nullptr) {
log_debug(class, resolve)("%s %s %s:%d (reflection)", from, to, source_file, line_number);
} else {
log_debug(class, resolve)("%s %s (reflection)", from, to);
@ -95,14 +95,14 @@ static void trace_class_resolution(oop mirror) {
oop Reflection::box(jvalue* value, BasicType type, TRAPS) {
if (type == T_VOID) {
return NULL;
return nullptr;
}
if (is_reference_type(type)) {
// regular objects are not boxed
return cast_to_oop(value->l);
}
oop result = java_lang_boxing_object::create(type, value, CHECK_NULL);
if (result == NULL) {
if (result == nullptr) {
THROW_(vmSymbols::java_lang_IllegalArgumentException(), result);
}
return result;
@ -110,7 +110,7 @@ oop Reflection::box(jvalue* value, BasicType type, TRAPS) {
BasicType Reflection::unbox_for_primitive(oop box, jvalue* value, TRAPS) {
if (box == NULL) {
if (box == nullptr) {
THROW_(vmSymbols::java_lang_IllegalArgumentException(), T_ILLEGAL);
}
return java_lang_boxing_object::get_value(box, value);
@ -274,7 +274,7 @@ void Reflection::array_set(jvalue* value, arrayOop a, int index, BasicType value
if (a->is_objArray()) {
if (value_type == T_OBJECT) {
oop obj = cast_to_oop(value->l);
if (obj != NULL) {
if (obj != nullptr) {
Klass* element_klass = ObjArrayKlass::cast(a->klass())->element_klass();
if (!obj->is_a(element_klass)) {
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "array element type mismatch");
@ -333,7 +333,7 @@ static Klass* basic_type_mirror_to_arrayklass(oop basic_type_mirror, TRAPS) {
}
arrayOop Reflection::reflect_new_array(oop element_mirror, jint length, TRAPS) {
if (element_mirror == NULL) {
if (element_mirror == nullptr) {
THROW_0(vmSymbols::java_lang_NullPointerException());
}
if (length < 0) {
@ -356,7 +356,7 @@ arrayOop Reflection::reflect_new_multi_array(oop element_mirror, typeArrayOop di
assert(dim_array->is_typeArray(), "just checking");
assert(TypeArrayKlass::cast(dim_array->klass())->element_type() == T_INT, "just checking");
if (element_mirror == NULL) {
if (element_mirror == nullptr) {
THROW_0(vmSymbols::java_lang_NullPointerException());
}
@ -443,7 +443,7 @@ Reflection::VerifyClassAccessResults Reflection::verify_class_access(
// Verify that current_class can access new_class. If the classloader_only
// flag is set, we automatically allow any accesses in which current_class
// doesn't have a classloader.
if ((current_class == NULL) ||
if ((current_class == nullptr) ||
(current_class == new_class) ||
is_same_class_package(current_class, new_class)) {
return ACCESS_OK;
@ -492,7 +492,7 @@ Reflection::VerifyClassAccessResults Reflection::verify_class_access(
}
PackageEntry* package_to = new_class->package();
assert(package_to != NULL, "can not obtain new_class' package");
assert(package_to != nullptr, "can not obtain new_class' package");
{
MutexLocker m1(Module_lock);
@ -530,15 +530,15 @@ char* Reflection::verify_class_access_msg(const Klass* current_class,
const InstanceKlass* new_class,
const VerifyClassAccessResults result) {
assert(result != ACCESS_OK, "must be failure result");
char * msg = NULL;
if (result != OTHER_PROBLEM && new_class != NULL && current_class != NULL) {
char * msg = nullptr;
if (result != OTHER_PROBLEM && new_class != nullptr && current_class != nullptr) {
// Find the module entry for current_class, the accessor
ModuleEntry* module_from = current_class->module();
const char * module_from_name = module_from->is_named() ? module_from->name()->as_C_string() : UNNAMED_MODULE;
const char * current_class_name = current_class->external_name();
// Find the module entry for new_class, the accessee
ModuleEntry* module_to = NULL;
ModuleEntry* module_to = nullptr;
module_to = new_class->module();
const char * module_to_name = module_to->is_named() ? module_to->name()->as_C_string() : UNNAMED_MODULE;
const char * new_class_name = new_class->external_name();
@ -555,7 +555,7 @@ char* Reflection::verify_class_access_msg(const Klass* current_class,
module_to_name, module_from_name, module_to_name);
} else {
oop jlm = module_to->module();
assert(jlm != NULL, "Null jlm in module_to ModuleEntry");
assert(jlm != nullptr, "Null jlm in module_to ModuleEntry");
intptr_t identity_hash = jlm->identity_hash();
size_t len = 160 + strlen(current_class_name) + 2*strlen(module_from_name) +
strlen(new_class_name) + 2*sizeof(uintx);
@ -567,7 +567,7 @@ char* Reflection::verify_class_access_msg(const Klass* current_class,
}
} else if (result == TYPE_NOT_EXPORTED) {
assert(new_class->package() != NULL,
assert(new_class->package() != nullptr,
"Unnamed packages are always exported");
const char * package_name =
new_class->package()->name()->as_klass_external_name();
@ -582,7 +582,7 @@ char* Reflection::verify_class_access_msg(const Klass* current_class,
module_to_name, module_to_name, package_name, module_from_name);
} else {
oop jlm = module_from->module();
assert(jlm != NULL, "Null jlm in module_from ModuleEntry");
assert(jlm != nullptr, "Null jlm in module_from ModuleEntry");
intptr_t identity_hash = jlm->identity_hash();
size_t len = 170 + strlen(current_class_name) + strlen(new_class_name) +
2*strlen(module_to_name) + strlen(package_name) + 2*sizeof(uintx);
@ -618,7 +618,7 @@ bool Reflection::verify_member_access(const Klass* current_class,
// class file parsing when we only care about the static type); in that case
// callers should ensure that resolved_class == member_class.
//
if ((current_class == NULL) ||
if ((current_class == nullptr) ||
(current_class == member_class) ||
access.is_public()) {
return true;
@ -748,7 +748,7 @@ static objArrayHandle get_parameter_types(const methodHandle& method,
}
if (!ss.at_return_type()) {
mirrors->obj_at_put(index++, mirror);
} else if (return_type != NULL) {
} else if (return_type != nullptr) {
// Collect return type as well
assert(ss.at_return_type(), "return type should be present");
*return_type = mirror;
@ -781,9 +781,9 @@ oop Reflection::new_method(const methodHandle& method, bool for_constant_pool_ac
Symbol* signature = method->signature();
int parameter_count = ArgumentCount(signature).size();
oop return_type_oop = NULL;
oop return_type_oop = nullptr;
objArrayHandle parameter_types = get_parameter_types(method, parameter_count, &return_type_oop, CHECK_NULL);
if (parameter_types.is_null() || return_type_oop == NULL) return NULL;
if (parameter_types.is_null() || return_type_oop == nullptr) return nullptr;
Handle return_type(THREAD, return_type_oop);
@ -793,7 +793,7 @@ oop Reflection::new_method(const methodHandle& method, bool for_constant_pool_ac
Symbol* method_name = method->name();
oop name_oop = StringTable::intern(method_name, CHECK_NULL);
Handle name = Handle(THREAD, name_oop);
if (name == NULL) return NULL;
if (name == nullptr) return nullptr;
const int modifiers = method->access_flags().as_int() & JVM_RECOGNIZED_METHOD_MODIFIERS;
@ -807,7 +807,7 @@ oop Reflection::new_method(const methodHandle& method, bool for_constant_pool_ac
java_lang_reflect_Method::set_exception_types(mh(), exception_types());
java_lang_reflect_Method::set_modifiers(mh(), modifiers);
java_lang_reflect_Method::set_override(mh(), false);
if (method->generic_signature() != NULL) {
if (method->generic_signature() != nullptr) {
Symbol* gs = method->generic_signature();
Handle sig = java_lang_String::create_from_symbol(gs, CHECK_NULL);
java_lang_reflect_Method::set_signature(mh(), sig());
@ -830,8 +830,8 @@ oop Reflection::new_constructor(const methodHandle& method, TRAPS) {
Symbol* signature = method->signature();
int parameter_count = ArgumentCount(signature).size();
objArrayHandle parameter_types = get_parameter_types(method, parameter_count, NULL, CHECK_NULL);
if (parameter_types.is_null()) return NULL;
objArrayHandle parameter_types = get_parameter_types(method, parameter_count, nullptr, CHECK_NULL);
if (parameter_types.is_null()) return nullptr;
objArrayHandle exception_types = get_exception_types(method, CHECK_NULL);
assert(!exception_types.is_null(), "cannot return null");
@ -846,7 +846,7 @@ oop Reflection::new_constructor(const methodHandle& method, TRAPS) {
java_lang_reflect_Constructor::set_exception_types(ch(), exception_types());
java_lang_reflect_Constructor::set_modifiers(ch(), modifiers);
java_lang_reflect_Constructor::set_override(ch(), false);
if (method->generic_signature() != NULL) {
if (method->generic_signature() != nullptr) {
Symbol* gs = method->generic_signature();
Handle sig = java_lang_String::create_from_symbol(gs, CHECK_NULL);
java_lang_reflect_Constructor::set_signature(ch(), sig());
@ -893,11 +893,11 @@ oop Reflection::new_parameter(Handle method, int index, Symbol* sym,
Handle rh = java_lang_reflect_Parameter::create(CHECK_NULL);
if(NULL != sym) {
if(nullptr != sym) {
Handle name = java_lang_String::create_from_symbol(sym, CHECK_NULL);
java_lang_reflect_Parameter::set_name(rh(), name());
} else {
java_lang_reflect_Parameter::set_name(rh(), NULL);
java_lang_reflect_Parameter::set_name(rh(), nullptr);
}
java_lang_reflect_Parameter::set_modifiers(rh(), flags);
@ -1094,7 +1094,7 @@ static oop invoke(InstanceKlass* klass,
THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "argument type mismatch");
}
} else {
if (arg != NULL) {
if (arg != nullptr) {
Klass* k = java_lang_Class::as_Klass(type_mirror);
if (!arg->is_a(k)) {
THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(),
@ -1153,7 +1153,7 @@ oop Reflection::invoke_method(oop method_mirror, Handle receiver, objArrayHandle
InstanceKlass* klass = InstanceKlass::cast(java_lang_Class::as_Klass(mirror));
Method* m = klass->method_with_idnum(slot);
if (m == NULL) {
if (m == nullptr) {
THROW_MSG_0(vmSymbols::java_lang_InternalError(), "invoke");
}
methodHandle method(THREAD, m);
@ -1170,7 +1170,7 @@ oop Reflection::invoke_constructor(oop constructor_mirror, objArrayHandle args,
InstanceKlass* klass = InstanceKlass::cast(java_lang_Class::as_Klass(mirror));
Method* m = klass->method_with_idnum(slot);
if (m == NULL) {
if (m == nullptr) {
THROW_MSG_0(vmSymbols::java_lang_InternalError(), "invoke");
}
methodHandle method(THREAD, m);
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -47,11 +47,11 @@ KlassStream::KlassStream(InstanceKlass* klass, bool local_only,
bool KlassStream::eos() {
if (index() >= 0) return false;
if (_local_only) return true;
if (!_klass->is_interface() && _klass->super() != NULL) {
if (!_klass->is_interface() && _klass->super() != nullptr) {
// go up superclass chain (not for interfaces)
_klass = _klass->java_super();
// Next for method walks, walk default methods
} else if (_walk_defaults && (_defaults_checked == false) && (_base_klass->default_methods() != NULL)) {
} else if (_walk_defaults && (_defaults_checked == false) && (_base_klass->default_methods() != nullptr)) {
_base_class_search_defaults = true;
_klass = _base_klass;
_defaults_checked = true;
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -143,7 +143,7 @@ class RegisterMap : public StackObj {
void set_walk_cont(bool value) { _walk_cont = value; }
bool in_cont() const { return _chunk() != NULL; } // Whether we are currently on the hstack; if true, frames are relativized
bool in_cont() const { return _chunk() != nullptr; } // Whether we are currently on the hstack; if true, frames are relativized
oop cont() const;
stackChunkHandle stack_chunk() const { return _chunk; }
void set_stack_chunk(stackChunkOop chunk);
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -122,14 +122,14 @@ class ChangeSwitchPad : public ChangeItem {
Relocator::Relocator(const methodHandle& m, RelocatorListener* listener) {
set_method(m);
set_code_length(method()->code_size());
set_code_array(NULL);
set_code_array(nullptr);
// Allocate code array and copy bytecodes
if (!expand_code_array(0)) {
// Should have at least MAX_METHOD_LENGTH available or the verifier
// would have failed.
ShouldNotReachHere();
}
set_compressed_line_number_table(NULL);
set_compressed_line_number_table(nullptr);
set_compressed_line_number_table_size(0);
_listener = listener;
}
@ -173,7 +173,7 @@ methodHandle Relocator::insert_space_at(int bci, int size, u_char inst_buffer[],
bool Relocator::handle_code_changes() {
assert(_changes != NULL, "changes vector must be initialized");
assert(_changes != nullptr, "changes vector must be initialized");
while (!_changes->is_empty()) {
// Inv: everything is aligned.
@ -501,7 +501,7 @@ void Relocator::adjust_stack_map_table(int bci, int delta) {
ClassLoaderData* loader_data = method()->method_holder()->class_loader_data();
Array<u1>* new_data = insert_hole_at(loader_data, frame_offset + 1, 2, data);
if (new_data == NULL) {
if (new_data == nullptr) {
return; // out-of-memory?
}
// Deallocate old data
@ -517,7 +517,7 @@ void Relocator::adjust_stack_map_table(int bci, int delta) {
same_frame_extended::create_at(frame_addr, new_offset_delta);
} else {
same_locals_1_stack_item_extended::create_at(
frame_addr, new_offset_delta, NULL);
frame_addr, new_offset_delta, nullptr);
// the verification_info_type should already be at the right spot
}
}
@ -539,7 +539,7 @@ void Relocator::adjust_stack_map_table(int bci, int delta) {
// Full frame has stack values too
full_frame* ff = frame->as_full_frame();
if (ff != NULL) {
if (ff != nullptr) {
address eol = (address)types;
number_of_types = ff->stack_slots(eol);
types = ff->stack(eol);
@ -574,7 +574,7 @@ bool Relocator::expand_code_array(int delta) {
if (!new_code_array) return false;
// Expanding current array
if (code_array() != NULL) {
if (code_array() != nullptr) {
memcpy(new_code_array, code_array(), code_length());
} else {
// Initial copy. Copy directly from Method*
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -97,7 +97,7 @@ class Relocator : public ResourceObj {
// get the address of in the code_array
inline char* addr_at(int bci) const { return (char*) &code_array()[bci]; }
int instruction_length_at(int bci) { return Bytecodes::length_at(NULL, code_array() + bci); }
int instruction_length_at(int bci) { return Bytecodes::length_at(nullptr, code_array() + bci); }
// Helper methods
int align(int n) const { return (n+3) & ~3; }
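Here (n+3) & ~3 rounds n up to the next multiple of four: align(5) = 8 & ~3 = 8, align(8) = 11 & ~3 = 8, align(9) = 12.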
@ -119,7 +119,7 @@ class Relocator : public ResourceObj {
// Callback support
RelocatorListener *_listener;
void notify(int bci, int delta, int new_code_length) {
if (_listener != NULL)
if (_listener != nullptr)
_listener->relocated(bci, delta, new_code_length);
}
};
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -196,7 +196,7 @@ bool SafepointSynchronize::thread_not_running(ThreadSafepointState *cur_state) {
static void assert_list_is_valid(const ThreadSafepointState* tss_head, int still_running) {
int a = 0;
const ThreadSafepointState *tmp_tss = tss_head;
while (tmp_tss != NULL) {
while (tmp_tss != nullptr) {
++a;
assert(tmp_tss->is_running(), "Illegal initial state");
tmp_tss = tmp_tss->get_next();
@ -228,11 +228,11 @@ int SafepointSynchronize::synchronize_threads(jlong safepoint_limit_time, int no
// Iterate through all threads until it has been determined how to stop them all at a safepoint.
int still_running = nof_threads;
ThreadSafepointState *tss_head = NULL;
ThreadSafepointState *tss_head = nullptr;
ThreadSafepointState **p_prev = &tss_head;
for (; JavaThread *cur = jtiwh.next(); ) {
ThreadSafepointState *cur_tss = cur->safepoint_state();
assert(cur_tss->get_next() == NULL, "Must be NULL");
assert(cur_tss->get_next() == nullptr, "Must be nullptr");
if (thread_not_running(cur_tss)) {
--still_running;
} else {
@ -240,7 +240,7 @@ int SafepointSynchronize::synchronize_threads(jlong safepoint_limit_time, int no
p_prev = cur_tss->next_ptr();
}
}
*p_prev = NULL;
*p_prev = nullptr;
DEBUG_ONLY(assert_list_is_valid(tss_head, still_running);)
@ -248,7 +248,7 @@ int SafepointSynchronize::synchronize_threads(jlong safepoint_limit_time, int no
// If there is no thread still running, we are already done.
if (still_running <= 0) {
assert(tss_head == NULL, "Must be empty");
assert(tss_head == nullptr, "Must be empty");
return 1;
}
@ -263,14 +263,14 @@ int SafepointSynchronize::synchronize_threads(jlong safepoint_limit_time, int no
p_prev = &tss_head;
ThreadSafepointState *cur_tss = tss_head;
while (cur_tss != NULL) {
while (cur_tss != nullptr) {
assert(cur_tss->is_running(), "Illegal initial state");
if (thread_not_running(cur_tss)) {
--still_running;
*p_prev = NULL;
*p_prev = nullptr;
ThreadSafepointState *tmp = cur_tss;
cur_tss = cur_tss->get_next();
tmp->set_next(NULL);
tmp->set_next(nullptr);
} else {
*p_prev = cur_tss;
p_prev = cur_tss->next_ptr();
@ -287,7 +287,7 @@ int SafepointSynchronize::synchronize_threads(jlong safepoint_limit_time, int no
iterations++;
} while (still_running > 0);
assert(tss_head == NULL, "Must be empty");
assert(tss_head == nullptr, "Must be empty");
return iterations;
}
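The still-running list above is threaded through a pointer-to-pointer, p_prev, so entries can be unlinked mid-scan without tracking a separate predecessor node. A generic sketch of that idiom, not the HotSpot code itself:

struct Node { Node* next; bool stopped; };

// Splice out every stopped node in a single pass.
static void prune(Node** head) {
  for (Node** pp = head; *pp != nullptr; ) {
    if ((*pp)->stopped) {
      Node* dead = *pp;
      *pp = dead->next;     // overwrite the link that pointed at 'dead'
      dead->next = nullptr;
    } else {
      pp = &(*pp)->next;    // advance to the next link field
    }
  }
}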
@ -598,10 +598,10 @@ void SafepointSynchronize::do_cleanup_tasks() {
TraceTime timer("safepoint cleanup tasks", TRACETIME_LOG(Info, safepoint, cleanup));
CollectedHeap* heap = Universe::heap();
assert(heap != NULL, "heap not initialized yet?");
assert(heap != nullptr, "heap not initialized yet?");
ParallelCleanupTask cleanup;
WorkerThreads* cleanup_workers = heap->safepoint_workers();
if (cleanup_workers != NULL) {
if (cleanup_workers != nullptr) {
// Parallel cleanup using GC provided thread pool.
cleanup_workers->run_task(&cleanup);
} else {
@ -684,7 +684,7 @@ bool SafepointSynchronize::handshake_safe(JavaThread *thread) {
// Implementation of Safepoint blocking point
void SafepointSynchronize::block(JavaThread *thread) {
assert(thread != NULL, "thread must be set");
assert(thread != nullptr, "thread must be set");
// Threads shouldn't block if they are in the middle of printing, but...
ttyLocker::break_tty_lock_for_safepoint(os::current_thread_id());
@ -805,7 +805,7 @@ void SafepointSynchronize::print_safepoint_timeout() {
ThreadSafepointState::ThreadSafepointState(JavaThread *thread)
: _at_poll_safepoint(false), _thread(thread), _safepoint_safe(false),
_safepoint_id(SafepointSynchronize::InactiveSafepointCounter), _next(NULL) {
_safepoint_id(SafepointSynchronize::InactiveSafepointCounter), _next(nullptr) {
}
void ThreadSafepointState::create(JavaThread *thread) {
@ -816,7 +816,7 @@ void ThreadSafepointState::create(JavaThread *thread) {
void ThreadSafepointState::destroy(JavaThread *thread) {
if (thread->safepoint_state()) {
delete(thread->safepoint_state());
thread->set_safepoint_state(NULL);
thread->set_safepoint_state(nullptr);
}
}
@ -893,7 +893,7 @@ void ThreadSafepointState::handle_polling_page_exception() {
address real_return_addr = self->saved_exception_pc();
CodeBlob *cb = CodeCache::find_blob(real_return_addr);
assert(cb != NULL && cb->is_compiled(), "return address should be in nmethod");
assert(cb != nullptr && cb->is_compiled(), "return address should be in nmethod");
CompiledMethod* nm = (CompiledMethod*)cb;
// Find frame of caller



@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -52,8 +52,8 @@
#include "services/lowMemoryDetector.hpp"
#include "services/threadIdTable.hpp"
DEBUG_ONLY(JavaThread* ServiceThread::_instance = NULL;)
JvmtiDeferredEvent* ServiceThread::_jvmti_event = NULL;
DEBUG_ONLY(JavaThread* ServiceThread::_instance = nullptr;)
JvmtiDeferredEvent* ServiceThread::_jvmti_event = nullptr;
// The service thread has its own static deferred event queue.
// Events can be posted before JVMTI vm_start, so it's too early to call JvmtiThreadState::state_for
// to add this field to the per-JavaThread event queue. TODO: fix this sometime later
@ -151,7 +151,7 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
if (has_jvmti_events) {
_jvmti_event->post();
_jvmti_event = NULL; // reset
_jvmti_event = nullptr; // reset
}
if (!UseNotificationThread) {
@ -203,7 +203,7 @@ void ServiceThread::enqueue_deferred_event(JvmtiDeferredEvent* event) {
// If you enqueue events before the service thread runs, gc
// cannot keep the nmethod alive. This could be restricted to compiled method
// load and unload events, if we wanted to be picky.
assert(_instance != NULL, "cannot enqueue events before the service thread runs");
assert(_instance != nullptr, "cannot enqueue events before the service thread runs");
_jvmti_service_queue.enqueue(*event);
Service_lock->notify_all();
}
@ -212,7 +212,7 @@ void ServiceThread::oops_do_no_frames(OopClosure* f, CodeBlobClosure* cf) {
JavaThread::oops_do_no_frames(f, cf);
// The ServiceThread "owns" the JVMTI Deferred events, scan them here
// to keep them alive until they are processed.
if (_jvmti_event != NULL) {
if (_jvmti_event != nullptr) {
_jvmti_event->oops_do(f, cf);
}
// Requires a lock, because threads can be adding to this queue.
@ -222,8 +222,8 @@ void ServiceThread::oops_do_no_frames(OopClosure* f, CodeBlobClosure* cf) {
void ServiceThread::nmethods_do(CodeBlobClosure* cf) {
JavaThread::nmethods_do(cf);
if (cf != NULL) {
if (_jvmti_event != NULL) {
if (cf != nullptr) {
if (_jvmti_event != nullptr) {
_jvmti_event->nmethods_do(cf);
}
// Requires a lock, because threads can be adding to this queue.
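// The two walkers above report the pending event's oops and nmethods to GC
// closures so queued work cannot be collected out from under the thread. A
// rough sketch of the reporting pattern, using hypothetical Oop/OopClosure
// stand-ins for the VM types:
struct Oop;
struct OopClosure { virtual void do_oop(Oop** p) = 0; virtual ~OopClosure() = default; };

struct PendingWork {
  Oop* _payload = nullptr;
  void oops_do(OopClosure* f) {
    if (_payload != nullptr) {
      f->do_oop(&_payload);  // GC may read and also update the slot (moving collectors)
    }
  }
};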


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -559,7 +559,7 @@ address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* curr
#if INCLUDE_JVMCI
// JVMCI's ExceptionHandlerStub expects the thread local exception PC to be clear
// and other exception handler continuations do not read it
current->set_exception_pc(NULL);
current->set_exception_pc(nullptr);
#endif // INCLUDE_JVMCI
if (Continuation::is_return_barrier_entry(return_address)) {
@ -568,8 +568,8 @@ address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* curr
// The fastest case first
CodeBlob* blob = CodeCache::find_blob(return_address);
CompiledMethod* nm = (blob != NULL) ? blob->as_compiled_method_or_null() : NULL;
if (nm != NULL) {
CompiledMethod* nm = (blob != nullptr) ? blob->as_compiled_method_or_null() : nullptr;
if (nm != nullptr) {
// Set flag if return address is a method handle call site.
current->set_is_method_handle_return(nm->is_method_handle_return(return_address));
// native nmethods don't have exception handlers
@ -603,7 +603,7 @@ address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* curr
// JavaCallWrapper::~JavaCallWrapper
return StubRoutines::catch_exception_entry();
}
if (blob != NULL && blob->is_upcall_stub()) {
if (blob != nullptr && blob->is_upcall_stub()) {
return ((UpcallStub*)blob)->exception_handler();
}
// Interpreted code
@ -613,8 +613,8 @@ address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* curr
return Interpreter::rethrow_exception_entry();
}
guarantee(blob == NULL || !blob->is_runtime_stub(), "caller should have skipped stub");
guarantee(!VtableStubs::contains(return_address), "NULL exceptions in vtables should have been handled already!");
guarantee(blob == nullptr || !blob->is_runtime_stub(), "caller should have skipped stub");
guarantee(!VtableStubs::contains(return_address), "null exceptions in vtables should have been handled already!");
#ifndef PRODUCT
{ ResourceMark rm;
@ -626,7 +626,7 @@ address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* curr
#endif // PRODUCT
ShouldNotReachHere();
return NULL;
return nullptr;
}
@ -641,7 +641,7 @@ address SharedRuntime::get_poll_stub(address pc) {
CodeBlob *cb = CodeCache::find_blob(pc);
// Should be an nmethod
guarantee(cb != NULL && cb->is_compiled(), "safepoint polling: pc must refer to an nmethod");
guarantee(cb != nullptr && cb->is_compiled(), "safepoint polling: pc must refer to an nmethod");
// Look up the relocation information
assert(((CompiledMethod*)cb)->is_at_poll_or_poll_return(pc),
@ -658,15 +658,15 @@ address SharedRuntime::get_poll_stub(address pc) {
bool at_poll_return = ((CompiledMethod*)cb)->is_at_poll_return(pc);
bool has_wide_vectors = ((CompiledMethod*)cb)->has_wide_vectors();
if (at_poll_return) {
assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
"polling page return stub not created yet");
stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
} else if (has_wide_vectors) {
assert(SharedRuntime::polling_page_vectors_safepoint_handler_blob() != NULL,
assert(SharedRuntime::polling_page_vectors_safepoint_handler_blob() != nullptr,
"polling page vectors safepoint stub not created yet");
stub = SharedRuntime::polling_page_vectors_safepoint_handler_blob()->entry_point();
} else {
assert(SharedRuntime::polling_page_safepoint_handler_blob() != NULL,
assert(SharedRuntime::polling_page_safepoint_handler_blob() != nullptr,
"polling page safepoint stub not created yet");
stub = SharedRuntime::polling_page_safepoint_handler_blob()->entry_point();
}
@ -691,13 +691,13 @@ void SharedRuntime::throw_and_post_jvmti_exception(JavaThread* current, Handle h
methodHandle method = methodHandle(current, vfst.method());
int bci = vfst.bci();
MethodData* trap_mdo = method->method_data();
if (trap_mdo != NULL) {
if (trap_mdo != nullptr) {
// Set exception_seen if the exceptional bytecode is an invoke
Bytecode_invoke call = Bytecode_invoke_check(method, bci);
if (call.is_valid()) {
ResourceMark rm(current);
ProfileData* pdata = trap_mdo->allocate_bci_to_data(bci, NULL);
if (pdata != NULL && pdata->is_BitData()) {
ProfileData* pdata = trap_mdo->allocate_bci_to_data(bci, nullptr);
if (pdata != nullptr && pdata->is_BitData()) {
BitData* bit_data = (BitData*) pdata;
bit_data->set_exception_seen();
}
@ -735,7 +735,7 @@ JRT_END
// for given exception
address SharedRuntime::compute_compiled_exc_handler(CompiledMethod* cm, address ret_pc, Handle& exception,
bool force_unwind, bool top_frame_only, bool& recursive_exception_occurred) {
assert(cm != NULL, "must exist");
assert(cm != nullptr, "must exist");
ResourceMark rm;
#if INCLUDE_JVMCI
@ -744,7 +744,7 @@ address SharedRuntime::compute_compiled_exc_handler(CompiledMethod* cm, address
int catch_pco = ret_pc - cm->code_begin();
ExceptionHandlerTable table(cm);
HandlerTableEntry *t = table.entry_for(catch_pco, -1, 0);
if (t != NULL) {
if (t != nullptr) {
return cm->code_begin() + t->pco();
} else {
return Deoptimization::deoptimize_for_missing_exception_handler(cm);
@ -791,12 +791,12 @@ address SharedRuntime::compute_compiled_exc_handler(CompiledMethod* cm, address
}
if (!top_frame_only && handler_bci < 0 && !skip_scope_increment) {
sd = sd->sender();
if (sd != NULL) {
if (sd != nullptr) {
bci = sd->bci();
}
++scope_depth;
}
} while (recursive_exception || (!top_frame_only && handler_bci < 0 && sd != NULL));
} while (recursive_exception || (!top_frame_only && handler_bci < 0 && sd != nullptr));
}
// found handling method => lookup exception handler
@ -804,7 +804,7 @@ address SharedRuntime::compute_compiled_exc_handler(CompiledMethod* cm, address
ExceptionHandlerTable table(nm);
HandlerTableEntry *t = table.entry_for(catch_pco, handler_bci, scope_depth);
if (t == NULL && (nm->is_compiled_by_c1() || handler_bci != -1)) {
if (t == nullptr && (nm->is_compiled_by_c1() || handler_bci != -1)) {
// Allow abbreviated catch tables. The idea is to allow a method
// to materialize its exceptions without committing to the exact
// routing of exceptions. In particular this is needed for adding
@ -815,13 +815,13 @@ address SharedRuntime::compute_compiled_exc_handler(CompiledMethod* cm, address
}
#ifdef COMPILER1
if (t == NULL && nm->is_compiled_by_c1()) {
assert(nm->unwind_handler_begin() != NULL, "");
if (t == nullptr && nm->is_compiled_by_c1()) {
assert(nm->unwind_handler_begin() != nullptr, "");
return nm->unwind_handler_begin();
}
#endif
if (t == NULL) {
if (t == nullptr) {
ttyLocker ttyl;
tty->print_cr("MISSING EXCEPTION HANDLER for pc " INTPTR_FORMAT " and handler bci %d, catch_pco: %d", p2i(ret_pc), handler_bci, catch_pco);
tty->print_cr(" Exception:");
@ -832,7 +832,7 @@ address SharedRuntime::compute_compiled_exc_handler(CompiledMethod* cm, address
nm->print();
nm->print_code();
guarantee(false, "missing exception handler");
return NULL;
return nullptr;
}
return nm->code_begin() + t->pco();
@ -853,13 +853,13 @@ JRT_ENTRY(void, SharedRuntime::throw_ArithmeticException(JavaThread* current))
JRT_END
JRT_ENTRY(void, SharedRuntime::throw_NullPointerException(JavaThread* current))
throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException(), NULL);
throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException(), nullptr);
JRT_END
JRT_ENTRY(void, SharedRuntime::throw_NullPointerException_at_call(JavaThread* current))
// This entry point is effectively only used for NullPointerExceptions which occur at inline
// cache sites (when the callee activation is not yet set up) so we are at a call site
throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException(), NULL);
throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException(), nullptr);
JRT_END
JRT_ENTRY(void, SharedRuntime::throw_StackOverflowError(JavaThread* current))
@ -897,7 +897,7 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* current,
address pc,
ImplicitExceptionKind exception_kind)
{
address target_pc = NULL;
address target_pc = nullptr;
if (Interpreter::contains(pc)) {
switch (exception_kind) {
@ -919,7 +919,7 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* current,
// deoptimization blob and uncommon trap blob bang the stack
// in a debug VM to verify the correctness of the compiled
// method stack banging.
assert(current->deopt_mark() == NULL, "no stack overflow from deopt blob/uncommon trap");
assert(current->deopt_mark() == nullptr, "no stack overflow from deopt blob/uncommon trap");
Events::log_exception(current, "StackOverflowError at " INTPTR_FORMAT, p2i(pc));
return StubRoutines::throw_StackOverflowError_entry();
}
@ -932,8 +932,8 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* current,
// caller-saved registers, as these entry points do.
VtableStub* vt_stub = VtableStubs::stub_containing(pc);
// If vt_stub is NULL, then return NULL to signal handler to report the SEGV error.
if (vt_stub == NULL) return NULL;
// If vt_stub is null, then return null to signal handler to report the SEGV error.
if (vt_stub == nullptr) return nullptr;
if (vt_stub->is_abstract_method_error(pc)) {
assert(!vt_stub->is_vtable_stub(), "should never see AbstractMethodErrors from vtable-type VtableStubs");
@ -952,8 +952,8 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* current,
} else {
CodeBlob* cb = CodeCache::find_blob(pc);
// If code blob is NULL, then return NULL to signal handler to report the SEGV error.
if (cb == NULL) return NULL;
// If code blob is null, then return null to signal handler to report the SEGV error.
if (cb == nullptr) return nullptr;
// Exception happened in CodeCache. Must be either:
// 1. Inline-cache check in C2I handler blob,
@ -964,7 +964,7 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* current,
bool is_in_blob = cb->is_adapter_blob() || cb->is_method_handles_adapter_blob();
if (!is_in_blob) {
// Allow normal crash reporting to handle this
return NULL;
return nullptr;
}
Events::log_exception(current, "NullPointerException in code blob at " INTPTR_FORMAT, p2i(pc));
// There is no handler here, so we will simply unwind.
@ -992,7 +992,7 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* current,
_implicit_null_throws++;
#endif
target_pc = cm->continuation_for_implicit_null_exception(pc);
// If there's an unexpected fault, target_pc might be NULL,
// If there's an unexpected fault, target_pc might be null,
// in which case we want to fall through into the normal
// error handling code.
}
@ -1003,12 +1003,12 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* current,
case IMPLICIT_DIVIDE_BY_ZERO: {
CompiledMethod* cm = CodeCache::find_compiled(pc);
guarantee(cm != NULL, "must have containing compiled method for implicit division-by-zero exceptions");
guarantee(cm != nullptr, "must have containing compiled method for implicit division-by-zero exceptions");
#ifndef PRODUCT
_implicit_div0_throws++;
#endif
target_pc = cm->continuation_for_implicit_div0_exception(pc);
// If there's an unexpected fault, target_pc might be NULL,
// If there's an unexpected fault, target_pc might be null,
// in which case we want to fall through into the normal
// error handling code.
break; // fall through
@ -1036,7 +1036,7 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* current,
}
ShouldNotReachHere();
return NULL;
return nullptr;
}
@ -1076,14 +1076,14 @@ JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* current,
JRT_END
jlong SharedRuntime::get_java_tid(JavaThread* thread) {
assert(thread != NULL, "No thread");
if (thread == NULL) {
assert(thread != nullptr, "No thread");
if (thread == nullptr) {
return 0;
}
guarantee(Thread::current() != thread || thread->is_oop_safe(),
"current cannot touch oops after its GC barrier is detached.");
oop obj = thread->threadObj();
return (obj == NULL) ? 0 : java_lang_Thread::thread_id(obj);
return (obj == nullptr) ? 0 : java_lang_Thread::thread_id(obj);
}
/**
@ -1163,7 +1163,7 @@ Method* SharedRuntime::extract_attached_method(vframeStream& vfst) {
CompiledICLocker ic_locker(caller);
return caller->attached_method_before_pc(pc);
}
return NULL;
return nullptr;
}
// Finds receiver, CallInfo (i.e. receiver method), and calling bytecode
@ -1245,7 +1245,7 @@ Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Cod
if (attached_method.is_null()) {
Method* callee = bytecode.static_target(CHECK_NH);
if (callee == NULL) {
if (callee == nullptr) {
THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
}
}
@ -1274,7 +1274,7 @@ Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Cod
if (has_receiver) {
assert(receiver.not_null(), "should have thrown exception");
Klass* receiver_klass = receiver->klass();
Klass* rk = NULL;
Klass* rk = nullptr;
if (attached_method.not_null()) {
// In case there's resolved method attached, use its holder during the check.
rk = attached_method->method_holder();
@ -1368,16 +1368,16 @@ bool SharedRuntime::resolve_sub_helper_internal(methodHandle callee_method, cons
// we are done patching the code.
CompiledMethod* callee = callee_method->code();
if (callee != NULL) {
if (callee != nullptr) {
assert(callee->is_compiled(), "must be nmethod for patching");
}
if (callee != NULL && !callee->is_in_use()) {
if (callee != nullptr && !callee->is_in_use()) {
// Patch call site to C2I adapter if callee nmethod is deoptimized or unloaded.
callee = NULL;
callee = nullptr;
}
#ifdef ASSERT
address dest_entry_point = callee == NULL ? 0 : callee->entry_point(); // used below
address dest_entry_point = callee == nullptr ? 0 : callee->entry_point(); // used below
#endif
bool is_nmethod = caller_nm->is_nmethod();
@ -1385,7 +1385,7 @@ bool SharedRuntime::resolve_sub_helper_internal(methodHandle callee_method, cons
if (is_virtual) {
assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check");
bool static_bound = call_info.resolved_method()->can_be_statically_bound();
Klass* klass = invoke_code == Bytecodes::_invokehandle ? NULL : receiver->klass();
Klass* klass = invoke_code == Bytecodes::_invokehandle ? nullptr : receiver->klass();
CompiledIC::compute_monomorphic_entry(callee_method, klass,
is_optimized, static_bound, is_nmethod, virtual_call_info,
CHECK_false);
@ -1407,13 +1407,13 @@ bool SharedRuntime::resolve_sub_helper_internal(methodHandle callee_method, cons
// which may happen when multiple alive nmethods (tiered compilation)
// are supported.
if (!callee_method->is_old() &&
(callee == NULL || (callee->is_in_use() && callee_method->code() == callee))) {
(callee == nullptr || (callee->is_in_use() && callee_method->code() == callee))) {
NoSafepointVerifier nsv;
#ifdef ASSERT
// We must not try to patch to jump to an already unloaded method.
if (dest_entry_point != 0) {
CodeBlob* cb = CodeCache::find_blob(dest_entry_point);
assert((cb != NULL) && cb->is_compiled() && (((CompiledMethod*)cb) == callee),
assert((cb != nullptr) && cb->is_compiled() && (((CompiledMethod*)cb) == callee),
"should not call unloaded nmethod");
}
#endif
@ -1428,7 +1428,7 @@ bool SharedRuntime::resolve_sub_helper_internal(methodHandle callee_method, cons
if (VM_Version::supports_fast_class_init_checks() &&
invoke_code == Bytecodes::_invokestatic &&
callee_method->needs_clinit_barrier() &&
callee != NULL && callee->is_compiled_by_jvmci()) {
callee != nullptr && callee->is_compiled_by_jvmci()) {
return true; // skip patching for JVMCI
}
CompiledStaticCall* ssc = caller_nm->compiledStaticCall_before(caller_frame.pc());
@ -1454,12 +1454,12 @@ methodHandle SharedRuntime::resolve_sub_helper(bool is_virtual, bool is_optimize
frame caller_frame = current->last_frame().sender(&cbl_map);
CodeBlob* caller_cb = caller_frame.cb();
guarantee(caller_cb != NULL && caller_cb->is_compiled(), "must be called from compiled method");
guarantee(caller_cb != nullptr && caller_cb->is_compiled(), "must be called from compiled method");
CompiledMethod* caller_nm = caller_cb->as_compiled_method_or_null();
// determine call info & receiver
// note: a) receiver is NULL for static calls
// b) an exception is thrown if receiver is NULL for non-static calls
// note: a) receiver is null for static calls
// b) an exception is thrown if receiver is null for non-static calls
CallInfo call_info;
Bytecodes::Code invoke_code = Bytecodes::_illegal;
Handle receiver = find_callee_info(invoke_code, call_info, CHECK_(methodHandle()));
@ -1560,7 +1560,7 @@ JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread*
current->set_vm_result_2(callee_method());
JRT_BLOCK_END
// return compiled code entry point after potential safepoints
assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
assert(callee_method->verified_code_entry() != nullptr, " Jump to zero!");
return callee_method->verified_code_entry();
JRT_END
@ -1588,9 +1588,9 @@ JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current)
caller_frame.is_entry_frame() ||
caller_frame.is_upcall_stub_frame()) {
Method* callee = current->callee_target();
guarantee(callee != NULL && callee->is_method(), "bad handshake");
guarantee(callee != nullptr && callee->is_method(), "bad handshake");
current->set_vm_result_2(callee);
current->set_callee_target(NULL);
current->set_callee_target(nullptr);
if (caller_frame.is_entry_frame() && VM_Version::supports_fast_class_init_checks()) {
// Bypass class initialization checks in c2i when caller is in native.
// JNI calls to static methods don't have class initialization checks.
@ -1614,7 +1614,7 @@ JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current)
current->set_vm_result_2(callee_method());
JRT_BLOCK_END
// return compiled code entry point after potential safepoints
assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
assert(callee_method->verified_code_entry() != nullptr, " Jump to zero!");
return callee_method->verified_code_entry();
JRT_END
@ -1644,7 +1644,7 @@ JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread*
methodHandle callee(current, invoke.static_target(current));
if (!callee.is_null()) {
oop recv = callerFrame.retrieve_receiver(&reg_map);
Klass *recv_klass = (recv != NULL) ? recv->klass() : NULL;
Klass *recv_klass = (recv != nullptr) ? recv->klass() : nullptr;
res = StubRoutines::forward_exception_entry();
LinkResolver::throw_abstract_method_error(callee, recv_klass, CHECK_(res));
}
@ -1669,7 +1669,7 @@ JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* curren
frame stub_frame = current->last_frame();
assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
frame caller = stub_frame.sender(&reg_map);
enter_special = caller.cb() != NULL && caller.cb()->is_compiled()
enter_special = caller.cb() != nullptr && caller.cb()->is_compiled()
&& caller.cb()->as_compiled_method()->method()->is_continuation_enter_intrinsic();
}
JRT_BLOCK_END
@ -1686,7 +1686,7 @@ JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* curren
}
// return compiled code entry point after potential safepoints
assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
assert(callee_method->verified_code_entry() != nullptr, " Jump to zero!");
return callee_method->verified_code_entry();
JRT_END
@ -1699,7 +1699,7 @@ JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread* curre
current->set_vm_result_2(callee_method());
JRT_BLOCK_END
// return compiled code entry point after potential safepoints
assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
assert(callee_method->verified_code_entry() != nullptr, " Jump to zero!");
return callee_method->verified_code_entry();
JRT_END
@ -1713,7 +1713,7 @@ JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread* c
current->set_vm_result_2(callee_method());
JRT_BLOCK_END
// return compiled code entry point after potential safepoints
assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
assert(callee_method->verified_code_entry() != nullptr, " Jump to zero!");
return callee_method->verified_code_entry();
JRT_END
@ -1739,7 +1739,7 @@ bool SharedRuntime::handle_ic_miss_helper_internal(Handle receiver, CompiledMeth
should_be_mono = true;
} else if (inline_cache->is_icholder_call()) {
CompiledICHolder* ic_oop = inline_cache->cached_icholder();
if (ic_oop != NULL) {
if (ic_oop != nullptr) {
if (!ic_oop->is_loader_alive()) {
// Deferred IC cleaning due to concurrent class unloading
if (!inline_cache->set_to_clean()) {
@ -1750,7 +1750,7 @@ bool SharedRuntime::handle_ic_miss_helper_internal(Handle receiver, CompiledMeth
// This isn't a real miss. We must have seen that compiled code
// is now available and we want the call site converted to a
// monomorphic compiled call site.
// We can't assert for callee_method->code() != NULL because it
// We can't assert for callee_method->code() != nullptr because it
// could have been deoptimized in the meantime
if (TraceCallFixup) {
ResourceMark rm(THREAD);
@ -1803,7 +1803,7 @@ methodHandle SharedRuntime::handle_ic_miss_helper(TRAPS) {
CallInfo call_info;
Bytecodes::Code bc;
// receiver is NULL for static calls. An exception is thrown for NULL
// receiver is null for static calls. An exception is thrown for null
// receivers for non-static calls
Handle receiver = find_callee_info(bc, call_info, CHECK_(methodHandle()));
// Compiler1 can produce virtual call sites that can actually be statically bound
@ -1954,7 +1954,7 @@ methodHandle SharedRuntime::reresolve_call_site(TRAPS) {
// we jump to it the target gets deoptimized. Similar to 1
// we will wind up in the interpreter (thru a c2i with c2).
//
address call_addr = NULL;
address call_addr = nullptr;
{
// Get call instruction under lock because another thread may be
// busy patching it.
@ -1965,7 +1965,7 @@ methodHandle SharedRuntime::reresolve_call_site(TRAPS) {
// Check relocations for the matching call to 1) avoid false positives,
// and 2) determine the type.
if (call_addr != NULL) {
if (call_addr != nullptr) {
// On x86 the logic for finding a call instruction is blindly checking for a call opcode 5
// bytes back in the instruction stream so we must also check for reloc info.
RelocIterator iter(caller_nm, call_addr, call_addr+1);
@ -2061,7 +2061,7 @@ bool SharedRuntime::should_fixup_call_destination(address destination, address e
if (destination != entry_point) {
CodeBlob* callee = CodeCache::find_blob(destination);
// callee == cb seems weird. It means calling interpreter thru stub.
if (callee != NULL && (callee == cb || callee->is_adapter_blob())) {
if (callee != nullptr && (callee == cb || callee->is_adapter_blob())) {
// static call or optimized virtual
if (TraceCallFixup) {
tty->print("fixup callsite at " INTPTR_FORMAT " to compiled code for", p2i(caller_pc));
@ -2113,12 +2113,12 @@ JRT_LEAF(void, SharedRuntime::fixup_callers_callsite(Method* method, address cal
NoSafepointVerifier nsv;
CompiledMethod* callee = moop->code();
if (callee == NULL) {
if (callee == nullptr) {
return;
}
CodeBlob* cb = CodeCache::find_blob(caller_pc);
if (cb == NULL || !cb->is_compiled() || callee->is_unloading()) {
if (cb == nullptr || !cb->is_compiled() || callee->is_unloading()) {
return;
}
@ -2135,13 +2135,13 @@ JRT_LEAF(void, SharedRuntime::fixup_callers_callsite(Method* method, address cal
// There is a benign race here. We could be attempting to patch to a compiled
// entry point at the same time the callee is being deoptimized. If that is
// the case then entry_point may in fact point to a c2i and we'd patch the
// call site with the same old data. clear_code will set code() to NULL
// at the end of it. If we happen to see that NULL then we can skip trying
// call site with the same old data. clear_code will set code() to null
// at the end of it. If we happen to see that null then we can skip trying
// to patch. If we hit the window where the callee has a c2i in the
// from_compiled_entry and the NULL isn't present yet then we lose the race
// from_compiled_entry and the null isn't present yet then we lose the race
// and patch the code with the same old data. Such is life.
if (moop->code() == NULL) return;
if (moop->code() == nullptr) return;
if (nm->is_in_use()) {
// Expect to find a native call there (unless it was no-inline cache vtable dispatch)
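// The comment above describes a benign race: between reading the callee's
// entry point and patching, deoptimization may clear code() to null, so the
// code rechecks and simply skips. A simplified sketch of that recheck-then-bail
// shape, with hypothetical types (the real patching is far more careful):
struct Callee { void* volatile code; };

static bool try_patch(Callee* callee, void** call_site) {
  void* entry = callee->code;
  if (entry == nullptr) return false;          // nothing compiled yet
  // ... a racing deoptimization may clear callee->code here ...
  if (callee->code == nullptr) return false;   // saw the clear: skip patching
  *call_site = entry;                          // patching stale-but-valid data is tolerated
  return true;
}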
@ -2194,7 +2194,7 @@ JRT_ENTRY(void, SharedRuntime::slow_arraycopy_C(oopDesc* src, jint src_pos,
_slow_array_copy_ctr++;
#endif
// Check if we have null pointers
if (src == NULL || dest == NULL) {
if (src == nullptr || dest == nullptr) {
THROW(vmSymbols::java_lang_NullPointerException());
}
// Do the copy. The casts to arrayOop are necessary to the copy_array API,
@ -2220,8 +2220,8 @@ char* SharedRuntime::generate_class_cast_message(
Bytecode_checkcast cc(vfst.method(), vfst.method()->bcp_from(vfst.bci()));
constantPoolHandle cpool(thread, vfst.method()->constants());
Klass* target_klass = ConstantPool::klass_at_if_loaded(cpool, cc.index());
Symbol* target_klass_name = NULL;
if (target_klass == NULL) {
Symbol* target_klass_name = nullptr;
if (target_klass == nullptr) {
// This klass should be resolved, but just in case, get the name in the klass slot.
target_klass_name = cpool->klass_name_at(cc.index());
}
@ -2235,8 +2235,8 @@ char* SharedRuntime::generate_class_cast_message(
Klass* caster_klass, Klass* target_klass, Symbol* target_klass_name) {
const char* caster_name = caster_klass->external_name();
assert(target_klass != NULL || target_klass_name != NULL, "one must be provided");
const char* target_name = target_klass == NULL ? target_klass_name->as_klass_external_name() :
assert(target_klass != nullptr || target_klass_name != nullptr, "one must be provided");
const char* target_name = target_klass == nullptr ? target_klass_name->as_klass_external_name() :
target_klass->external_name();
size_t msglen = strlen(caster_name) + strlen("class ") + strlen(" cannot be cast to class ") + strlen(target_name) + 1;
@ -2244,19 +2244,19 @@ char* SharedRuntime::generate_class_cast_message(
const char* caster_klass_description = "";
const char* target_klass_description = "";
const char* klass_separator = "";
if (target_klass != NULL && caster_klass->module() == target_klass->module()) {
if (target_klass != nullptr && caster_klass->module() == target_klass->module()) {
caster_klass_description = caster_klass->joint_in_module_of_loader(target_klass);
} else {
caster_klass_description = caster_klass->class_in_module_of_loader();
target_klass_description = (target_klass != NULL) ? target_klass->class_in_module_of_loader() : "";
klass_separator = (target_klass != NULL) ? "; " : "";
target_klass_description = (target_klass != nullptr) ? target_klass->class_in_module_of_loader() : "";
klass_separator = (target_klass != nullptr) ? "; " : "";
}
// add 3 for parentheses and preceding space
msglen += strlen(caster_klass_description) + strlen(target_klass_description) + strlen(klass_separator) + 3;
char* message = NEW_RESOURCE_ARRAY_RETURN_NULL(char, msglen);
if (message == NULL) {
if (message == nullptr) {
// Shouldn't happen, but don't cause even more problems if it does
message = const_cast<char*>(caster_klass->external_name());
} else {
@ -2326,7 +2326,7 @@ JRT_END
void SharedRuntime::print_statistics() {
ttyLocker ttyl;
if (xtty != NULL) xtty->head("statistics type='SharedRuntime'");
if (xtty != nullptr) xtty->head("statistics type='SharedRuntime'");
SharedRuntime::print_ic_miss_histogram();
@ -2363,7 +2363,7 @@ void SharedRuntime::print_statistics() {
AdapterHandlerLibrary::print_statistics();
if (xtty != NULL) xtty->tail("statistics");
if (xtty != nullptr) xtty->tail("statistics");
}
inline double percent(int64_t x, int64_t y) {
@ -2382,8 +2382,8 @@ class MethodArityHistogram {
static int _max_size; // max. arg size seen
static void add_method_to_histogram(nmethod* nm) {
Method* method = (nm == NULL) ? NULL : nm->method();
if (method != NULL) {
Method* method = (nm == nullptr) ? nullptr : nm->method();
if (method != nullptr) {
ArgumentCount args(method->signature());
int arity = args.size() + (method->is_static() ? 0 : 1);
int argsize = method->size_of_parameters();
@ -2717,14 +2717,14 @@ static void print_table_statistics() {
// ---------------------------------------------------------------------------
// Implementation of AdapterHandlerLibrary
AdapterHandlerEntry* AdapterHandlerLibrary::_abstract_method_handler = NULL;
AdapterHandlerEntry* AdapterHandlerLibrary::_no_arg_handler = NULL;
AdapterHandlerEntry* AdapterHandlerLibrary::_int_arg_handler = NULL;
AdapterHandlerEntry* AdapterHandlerLibrary::_obj_arg_handler = NULL;
AdapterHandlerEntry* AdapterHandlerLibrary::_obj_int_arg_handler = NULL;
AdapterHandlerEntry* AdapterHandlerLibrary::_obj_obj_arg_handler = NULL;
AdapterHandlerEntry* AdapterHandlerLibrary::_abstract_method_handler = nullptr;
AdapterHandlerEntry* AdapterHandlerLibrary::_no_arg_handler = nullptr;
AdapterHandlerEntry* AdapterHandlerLibrary::_int_arg_handler = nullptr;
AdapterHandlerEntry* AdapterHandlerLibrary::_obj_arg_handler = nullptr;
AdapterHandlerEntry* AdapterHandlerLibrary::_obj_int_arg_handler = nullptr;
AdapterHandlerEntry* AdapterHandlerLibrary::_obj_obj_arg_handler = nullptr;
const int AdapterHandlerLibrary_size = 16*K;
BufferBlob* AdapterHandlerLibrary::_buffer = NULL;
BufferBlob* AdapterHandlerLibrary::_buffer = nullptr;
BufferBlob* AdapterHandlerLibrary::buffer_blob() {
return _buffer;
@ -2751,11 +2751,11 @@ static void post_adapter_creation(const AdapterBlob* new_adapter,
void AdapterHandlerLibrary::initialize() {
ResourceMark rm;
AdapterBlob* no_arg_blob = NULL;
AdapterBlob* int_arg_blob = NULL;
AdapterBlob* obj_arg_blob = NULL;
AdapterBlob* obj_int_arg_blob = NULL;
AdapterBlob* obj_obj_arg_blob = NULL;
AdapterBlob* no_arg_blob = nullptr;
AdapterBlob* int_arg_blob = nullptr;
AdapterBlob* obj_arg_blob = nullptr;
AdapterBlob* obj_int_arg_blob = nullptr;
AdapterBlob* obj_obj_arg_blob = nullptr;
{
MutexLocker mu(AdapterHandlerLibrary_lock);
@ -2765,12 +2765,12 @@ void AdapterHandlerLibrary::initialize() {
// Pass wrong_method_abstract for the c2i transitions to return
// AbstractMethodError for invalid invocations.
address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub();
_abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(0, NULL),
_abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(0, nullptr),
StubRoutines::throw_AbstractMethodError_entry(),
wrong_method_abstract, wrong_method_abstract);
_buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
_no_arg_handler = create_adapter(no_arg_blob, 0, NULL, true);
_no_arg_handler = create_adapter(no_arg_blob, 0, nullptr, true);
BasicType obj_args[] = { T_OBJECT };
_obj_arg_handler = create_adapter(obj_arg_blob, 1, obj_args, true);
@ -2784,11 +2784,11 @@ void AdapterHandlerLibrary::initialize() {
BasicType obj_obj_args[] = { T_OBJECT, T_OBJECT };
_obj_obj_arg_handler = create_adapter(obj_obj_arg_blob, 2, obj_obj_args, true);
assert(no_arg_blob != NULL &&
obj_arg_blob != NULL &&
int_arg_blob != NULL &&
obj_int_arg_blob != NULL &&
obj_obj_arg_blob != NULL, "Initial adapters must be properly created");
assert(no_arg_blob != nullptr &&
obj_arg_blob != nullptr &&
int_arg_blob != nullptr &&
obj_int_arg_blob != nullptr &&
obj_obj_arg_blob != nullptr, "Initial adapters must be properly created");
}
// Outside of the lock
@ -2845,7 +2845,7 @@ AdapterHandlerEntry* AdapterHandlerLibrary::get_simple_adapter(const methodHandl
return _obj_int_arg_handler;
}
}
return NULL;
return nullptr;
}
class AdapterSignatureIterator : public SignatureIterator {
@ -2898,12 +2898,12 @@ AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& meth
// Fast-path for trivial adapters
AdapterHandlerEntry* entry = get_simple_adapter(method);
if (entry != NULL) {
if (entry != nullptr) {
return entry;
}
ResourceMark rm;
AdapterBlob* new_adapter = NULL;
AdapterBlob* new_adapter = nullptr;
// Fill in the signature array, for the calling-convention call.
int total_args_passed = method->size_of_parameters(); // All args on stack
@ -2918,12 +2918,12 @@ AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& meth
// Lookup method signature's fingerprint
entry = lookup(total_args_passed, sig_bt);
if (entry != NULL) {
if (entry != nullptr) {
#ifdef ASSERT
if (VerifyAdapterSharing) {
AdapterBlob* comparison_blob = NULL;
AdapterBlob* comparison_blob = nullptr;
AdapterHandlerEntry* comparison_entry = create_adapter(comparison_blob, total_args_passed, sig_bt, false);
assert(comparison_blob == NULL, "no blob should be created when creating an adapter for comparison");
assert(comparison_blob == nullptr, "no blob should be created when creating an adapter for comparison");
assert(comparison_entry->compare_code(entry), "code must match");
// Release the one just created and return the original
delete comparison_entry;
@ -2936,7 +2936,7 @@ AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& meth
}
// Outside of the lock
if (new_adapter != NULL) {
if (new_adapter != nullptr) {
post_adapter_creation(new_adapter, entry);
}
return entry;
@ -2951,7 +2951,7 @@ AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(AdapterBlob*& new_ada
// VerifyAdapterCalls and VerifyAdapterSharing can fail if we re-use code that generated
// prior to StubRoutines::code2() being set. Checks refer to checks generated in an I2C
// stub that ensure that an I2C stub is called from an interpreter frame.
bool contains_all_checks = StubRoutines::code2() != NULL;
bool contains_all_checks = StubRoutines::code2() != nullptr;
VMRegPair stack_regs[16];
VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
@ -2985,11 +2985,11 @@ AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(AdapterBlob*& new_ada
new_adapter = AdapterBlob::create(&buffer);
NOT_PRODUCT(int insts_size = buffer.insts_size());
if (new_adapter == NULL) {
if (new_adapter == nullptr) {
// CodeCache is full, disable compilation
// Ought to log this but compile log is only per compile thread
// and we're some nondescript Java thread.
return NULL;
return nullptr;
}
entry->relocate(new_adapter->content_begin());
#ifndef PRODUCT
@ -3003,7 +3003,7 @@ AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(AdapterBlob*& new_ada
tty->print_cr("c2i argument handler starts at " INTPTR_FORMAT, p2i(entry->get_c2i_entry()));
if (Verbose || PrintStubCode) {
address first_pc = entry->base_address();
if (first_pc != NULL) {
if (first_pc != nullptr) {
Disassembler::decode(first_pc, first_pc + insts_size, tty
NOT_PRODUCT(COMMA &new_adapter->asm_remarks()));
tty->cr();
@ -3023,24 +3023,24 @@ AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(AdapterBlob*& new_ada
address AdapterHandlerEntry::base_address() {
address base = _i2c_entry;
if (base == NULL) base = _c2i_entry;
assert(base <= _c2i_entry || _c2i_entry == NULL, "");
assert(base <= _c2i_unverified_entry || _c2i_unverified_entry == NULL, "");
assert(base <= _c2i_no_clinit_check_entry || _c2i_no_clinit_check_entry == NULL, "");
if (base == nullptr) base = _c2i_entry;
assert(base <= _c2i_entry || _c2i_entry == nullptr, "");
assert(base <= _c2i_unverified_entry || _c2i_unverified_entry == nullptr, "");
assert(base <= _c2i_no_clinit_check_entry || _c2i_no_clinit_check_entry == nullptr, "");
return base;
}
void AdapterHandlerEntry::relocate(address new_base) {
address old_base = base_address();
assert(old_base != NULL, "");
assert(old_base != nullptr, "");
ptrdiff_t delta = new_base - old_base;
if (_i2c_entry != NULL)
if (_i2c_entry != nullptr)
_i2c_entry += delta;
if (_c2i_entry != NULL)
if (_c2i_entry != nullptr)
_c2i_entry += delta;
if (_c2i_unverified_entry != NULL)
if (_c2i_unverified_entry != nullptr)
_c2i_unverified_entry += delta;
if (_c2i_no_clinit_check_entry != NULL)
if (_c2i_no_clinit_check_entry != nullptr)
_c2i_no_clinit_check_entry += delta;
assert(base_address() == new_base, "");
}
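// relocate() above shifts every non-null entry point by the distance between
// the old and new code base. The same delta trick in a standalone sketch
// (hypothetical Entry with two entry points):
#include <cstddef>

struct Entry {
  unsigned char* _i2c = nullptr;
  unsigned char* _c2i = nullptr;

  void relocate(unsigned char* old_base, unsigned char* new_base) {
    std::ptrdiff_t delta = new_base - old_base;
    if (_i2c != nullptr) _i2c += delta;   // null entries must stay null
    if (_c2i != nullptr) _c2i += delta;
  }
};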
@ -3066,7 +3066,7 @@ void AdapterHandlerEntry::save_code(unsigned char* buffer, int length) {
bool AdapterHandlerEntry::compare_code(AdapterHandlerEntry* other) {
assert(_saved_code != NULL && other->_saved_code != NULL, "code not saved");
assert(_saved_code != nullptr && other->_saved_code != nullptr, "code not saved");
if (other->_saved_code_length != _saved_code_length) {
return false;
@ -3085,7 +3085,7 @@ bool AdapterHandlerEntry::compare_code(AdapterHandlerEntry* other) {
*/
void AdapterHandlerLibrary::create_native_wrapper(const methodHandle& method) {
ResourceMark rm;
nmethod* nm = NULL;
nmethod* nm = nullptr;
// Check if memory should be freed before allocation
CodeCache::gc_on_allocation();
@ -3098,7 +3098,7 @@ void AdapterHandlerLibrary::create_native_wrapper(const methodHandle& method) {
// Perform the work while holding the lock, but perform any printing outside the lock
MutexLocker mu(AdapterHandlerLibrary_lock);
// See if somebody beat us to it
if (method->code() != NULL) {
if (method->code() != nullptr) {
return;
}
@ -3108,7 +3108,7 @@ void AdapterHandlerLibrary::create_native_wrapper(const methodHandle& method) {
ResourceMark rm;
BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
if (buf != NULL) {
if (buf != nullptr) {
CodeBuffer buffer(buf);
if (method->is_continuation_enter_intrinsic()) {
@ -3149,7 +3149,7 @@ void AdapterHandlerLibrary::create_native_wrapper(const methodHandle& method) {
// Generate the compiled-to-native wrapper code
nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);
if (nm != NULL) {
if (nm != nullptr) {
{
MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
if (nm->make_in_use()) {
@ -3168,7 +3168,7 @@ void AdapterHandlerLibrary::create_native_wrapper(const methodHandle& method) {
// Install the generated code.
if (nm != NULL) {
if (nm != nullptr) {
const char *msg = method->is_static() ? "(static)" : "";
CompileTask::print_ul(nm, msg);
if (PrintCompilation) {
@ -3286,7 +3286,7 @@ JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *current) )
for (BasicObjectLock *kptr = fr.interpreter_frame_monitor_end();
kptr < fr.interpreter_frame_monitor_begin();
kptr = fr.next_monitor_in_interpreter_frame(kptr) ) {
if (kptr->obj() != NULL) active_monitor_count++;
if (kptr->obj() != nullptr) active_monitor_count++;
}
// QQQ we could place number of active monitors in the array so that compiled code
@ -3310,7 +3310,7 @@ JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *current) )
for (BasicObjectLock *kptr2 = fr.interpreter_frame_monitor_end();
kptr2 < fr.interpreter_frame_monitor_begin();
kptr2 = fr.next_monitor_in_interpreter_frame(kptr2) ) {
if (kptr2->obj() != NULL) { // Avoid 'holes' in the monitor array
if (kptr2->obj() != nullptr) { // Avoid 'holes' in the monitor array
BasicLock *lock = kptr2->lock();
// Inflate so the object's header no longer refers to the BasicLock.
if (lock->displaced_header().is_unlocked()) {
@ -3372,16 +3372,16 @@ void AdapterHandlerLibrary::print_handler_on(outputStream* st, const CodeBlob* b
void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
st->print("AHE@" INTPTR_FORMAT ": %s", p2i(this), fingerprint()->as_string());
if (get_i2c_entry() != NULL) {
if (get_i2c_entry() != nullptr) {
st->print(" i2c: " INTPTR_FORMAT, p2i(get_i2c_entry()));
}
if (get_c2i_entry() != NULL) {
if (get_c2i_entry() != nullptr) {
st->print(" c2i: " INTPTR_FORMAT, p2i(get_c2i_entry()));
}
if (get_c2i_unverified_entry() != NULL) {
if (get_c2i_unverified_entry() != nullptr) {
st->print(" c2iUV: " INTPTR_FORMAT, p2i(get_c2i_unverified_entry()));
}
if (get_c2i_no_clinit_check_entry() != NULL) {
if (get_c2i_no_clinit_check_entry() != nullptr) {
st->print(" c2iNCI: " INTPTR_FORMAT, p2i(get_c2i_no_clinit_check_entry()));
}
st->cr();
@ -3405,7 +3405,7 @@ JRT_END
frame SharedRuntime::look_for_reserved_stack_annotated_method(JavaThread* current, frame fr) {
ResourceMark rm(current);
frame activation;
CompiledMethod* nm = NULL;
CompiledMethod* nm = nullptr;
int count = 1;
assert(fr.is_java_frame(), "Must start on Java frame");
@ -3419,23 +3419,23 @@ frame SharedRuntime::look_for_reserved_stack_annotated_method(JavaThread* curren
continue;
}
Method* method = NULL;
Method* method = nullptr;
bool found = false;
if (fr.is_interpreted_frame()) {
method = fr.interpreter_frame_method();
if (method != NULL && method->has_reserved_stack_access()) {
if (method != nullptr && method->has_reserved_stack_access()) {
found = true;
}
} else {
CodeBlob* cb = fr.cb();
if (cb != NULL && cb->is_compiled()) {
if (cb != nullptr && cb->is_compiled()) {
nm = cb->as_compiled_method();
method = nm->method();
// scope_desc_near() must be used, instead of scope_desc_at() because on
// SPARC, the pcDesc can be on the delay slot after the call instruction.
for (ScopeDesc *sd = nm->scope_desc_near(fr.pc()); sd != NULL; sd = sd->sender()) {
for (ScopeDesc *sd = nm->scope_desc_near(fr.pc()); sd != nullptr; sd = sd->sender()) {
method = sd->method();
if (method != NULL && method->has_reserved_stack_access()) {
if (method != nullptr && method->has_reserved_stack_access()) {
found = true;
}
}
@ -3463,7 +3463,7 @@ void SharedRuntime::on_slowpath_allocation_exit(JavaThread* current) {
// GC may take any compensating steps.
oop new_obj = current->vm_result();
if (new_obj == NULL) return;
if (new_obj == nullptr) return;
BarrierSet *bs = BarrierSet::barrier_set();
bs->on_slowpath_allocation_exit(current, new_obj);


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -216,17 +216,17 @@ class SharedRuntime: AllStatic {
static address get_poll_stub(address pc);
static address get_ic_miss_stub() {
assert(_ic_miss_blob!= NULL, "oops");
assert(_ic_miss_blob!= nullptr, "oops");
return _ic_miss_blob->entry_point();
}
static address get_handle_wrong_method_stub() {
assert(_wrong_method_blob!= NULL, "oops");
assert(_wrong_method_blob!= nullptr, "oops");
return _wrong_method_blob->entry_point();
}
static address get_handle_wrong_method_abstract_stub() {
assert(_wrong_method_abstract_blob!= NULL, "oops");
assert(_wrong_method_abstract_blob!= nullptr, "oops");
return _wrong_method_abstract_blob->entry_point();
}
@ -236,15 +236,15 @@ class SharedRuntime: AllStatic {
#endif // COMPILER2
static address get_resolve_opt_virtual_call_stub() {
assert(_resolve_opt_virtual_call_blob != NULL, "oops");
assert(_resolve_opt_virtual_call_blob != nullptr, "oops");
return _resolve_opt_virtual_call_blob->entry_point();
}
static address get_resolve_virtual_call_stub() {
assert(_resolve_virtual_call_blob != NULL, "oops");
assert(_resolve_virtual_call_blob != nullptr, "oops");
return _resolve_virtual_call_blob->entry_point();
}
static address get_resolve_static_call_stub() {
assert(_resolve_static_call_blob != NULL, "oops");
assert(_resolve_static_call_blob != nullptr, "oops");
return _resolve_static_call_blob->entry_point();
}
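// Each accessor above follows one pattern: assert that VM startup created the
// blob, then hand out its entry point. Condensed into a standalone sketch with
// a hypothetical Blob type:
#include <cassert>

struct Blob {
  unsigned char* _entry = nullptr;
  unsigned char* entry_point() const { return _entry; }
};

static Blob* _resolve_static_call_stub_blob = nullptr;  // created during startup

static unsigned char* get_resolve_static_call_stub_sketch() {
  assert(_resolve_static_call_stub_blob != nullptr && "stub not created yet");
  return _resolve_static_call_stub_blob->entry_point();
}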
@ -264,7 +264,7 @@ class SharedRuntime: AllStatic {
// Helper routine for full-speed JVMTI exception throwing support
static void throw_and_post_jvmti_exception(JavaThread* current, Handle h_exception);
static void throw_and_post_jvmti_exception(JavaThread* current, Symbol* name, const char *message = NULL);
static void throw_and_post_jvmti_exception(JavaThread* current, Symbol* name, const char *message = nullptr);
// RedefineClasses() tracing support for obsolete method entry
static int rc_trace_method_entry(JavaThread* thread, Method* m);
@ -315,7 +315,7 @@ class SharedRuntime: AllStatic {
// The caller (or one of its callers) must use a ResourceMark
// in order to correctly free the result.
//
static char* generate_class_cast_message(Klass* caster_klass, Klass* target_klass, Symbol* target_klass_name = NULL);
static char* generate_class_cast_message(Klass* caster_klass, Klass* target_klass, Symbol* target_klass_name = nullptr);
// Resolves a call site- may patch in the destination of the call into the
// compiled code.
@ -379,7 +379,7 @@ class SharedRuntime: AllStatic {
// Some architectures require that an argument must be passed in a register
// AND in a stack slot. These architectures provide a second VMRegPair array
// to be filled by the c_calling_convention method. On other architectures,
// NULL is being passed as the second VMRegPair array, so arguments are either
// null is being passed as the second VMRegPair array, so arguments are either
// passed in a register OR in a stack slot.
static int c_calling_convention(const BasicType *sig_bt, VMRegPair *regs, VMRegPair *regs2,
int total_args_passed);
@ -686,7 +686,7 @@ class AdapterHandlerLibrary: public AllStatic {
address i2c_entry,
address c2i_entry,
address c2i_unverified_entry,
address c2i_no_clinit_check_entry = NULL);
address c2i_no_clinit_check_entry = nullptr);
static void create_native_wrapper(const methodHandle& method);
static AdapterHandlerEntry* get_adapter(const methodHandle& method);


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -142,7 +142,7 @@ static int compute_num_stack_arg_slots(Symbol* signature, int sizeargs, bool is_
void Fingerprinter::compute_fingerprint_and_return_type(bool static_flag) {
// See if we fingerprinted this method already
if (_method != NULL) {
if (_method != nullptr) {
assert(!static_flag, "must not be passed by caller");
static_flag = _method->is_static();
_fingerprint = _method->constMethod()->fingerprint();
@ -189,7 +189,7 @@ void Fingerprinter::compute_fingerprint_and_return_type(bool static_flag) {
#endif
// Detect overflow. (We counted _param_size correctly.)
if (_method == NULL && _param_size > fp_max_size_of_parameters) {
if (_method == nullptr && _param_size > fp_max_size_of_parameters) {
// We did a one-pass computation of argument size, return type,
// and fingerprint.
_fingerprint = overflow_fingerprint();
@ -206,7 +206,7 @@ void Fingerprinter::compute_fingerprint_and_return_type(bool static_flag) {
_fingerprint = _accumulator;
// Cache the result on the method itself:
if (_method != NULL) {
if (_method != nullptr) {
_method->constMethod()->set_fingerprint(_fingerprint);
}
}
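// compute_fingerprint_and_return_type() runs the one-pass signature scan once
// and caches the result on the method. The compute-once shape in a standalone
// sketch, with an assumed reserved "uncomputed" marker value:
#include <cstdint>

const uint64_t kUncomputed = 0;  // assumed never produced as a real fingerprint

struct MethodSketch {
  uint64_t _cached = kUncomputed;

  uint64_t fingerprint() {
    if (_cached == kUncomputed) {
      _cached = scan();          // single-threaded sketch; the VM has its own guarantees
    }
    return _cached;
  }
  uint64_t scan() { return 0x9E3779B97F4A7C15ULL; }  // stand-in for the real scan
};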
@ -304,22 +304,22 @@ SignatureStream::SignatureStream(const Symbol* signature,
_array_prefix = 0; // just for definiteness
// assigning java/lang/Object to _previous_name means we can
// avoid a number of NULL checks in the parser
// avoid a number of null checks in the parser
_previous_name = vmSymbols::java_lang_Object();
_names = NULL;
_names = nullptr;
next();
}
SignatureStream::~SignatureStream() {
if (_previous_name == vmSymbols::java_lang_Object()) {
// no names were created
assert(_names == NULL, "_names unexpectedly created");
assert(_names == nullptr, "_names unexpectedly created");
return;
}
// decrement refcount for names created during signature parsing
_previous_name->decrement_refcount();
if (_names != NULL) {
if (_names != nullptr) {
for (int i = 0; i < _names->length(); i++) {
_names->at(i)->decrement_refcount();
}
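// The destructor above leans on a sentinel: _previous_name starts as the
// permanent java/lang/Object symbol, so "still the sentinel" means no symbol
// was ever created and no null checks are needed while parsing. The idiom in
// a standalone sketch (hypothetical Sym type):
struct Sym { int refcount; };
static Sym sentinel{1};                  // permanent, shared, never freed

struct StreamSketch {
  Sym* _previous = &sentinel;            // never null, so the parser skips null checks

  ~StreamSketch() {
    if (_previous == &sentinel) return;  // untouched: nothing to release
    if (--_previous->refcount == 0) delete _previous;
  }
};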
@ -334,7 +334,7 @@ inline int SignatureStream::scan_type(BasicType type) {
switch (type) {
case T_OBJECT:
tem = (const u1*) memchr(&base[end], JVM_SIGNATURE_ENDCLASS, limit - end);
return (tem == NULL ? limit : tem + 1 - base);
return (tem == nullptr ? limit : tem + 1 - base);
case T_ARRAY:
while ((end < limit) && ((char)base[end] == JVM_SIGNATURE_ARRAY)) { end++; }
@ -346,7 +346,7 @@ inline int SignatureStream::scan_type(BasicType type) {
_array_prefix = end - _end; // number of '[' chars just skipped
if (Signature::has_envelope(base[end])) {
tem = (const u1 *) memchr(&base[end], JVM_SIGNATURE_ENDCLASS, limit - end);
return (tem == NULL ? limit : tem + 1 - base);
return (tem == nullptr ? limit : tem + 1 - base);
}
// Skipping over a single character for a primitive type.
assert(is_java_primitive(decode_signature_char(base[end])), "only primitives expected");
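// scan_type() above finds the end of a class-type descriptor by searching for
// the ';' terminator with memchr, falling back to the limit when it is absent.
// A standalone sketch over a raw descriptor string:
#include <cstdio>
#include <cstring>

// Length of the leading "L...;" token, or 0 if the terminator is missing.
static size_t class_token_length(const char* sig, size_t len) {
  const void* semi = std::memchr(sig, ';', len);
  return (semi == nullptr) ? 0 : (size_t)((const char*)semi - sig) + 1;
}

int main() {
  const char* sig = "Ljava/lang/String;I";  // a String, then an int
  std::printf("%zu\n", class_token_length(sig, std::strlen(sig)));  // prints 18
  return 0;
}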
@ -489,7 +489,7 @@ Symbol* SignatureStream::find_symbol() {
// Only allocate the GrowableArray for the _names buffer if more than
// one name is being processed in the signature.
if (!_previous_name->is_permanent()) {
if (_names == NULL) {
if (_names == nullptr) {
_names = new GrowableArray<Symbol*>(10);
}
_names->push(_previous_name);
@ -501,12 +501,12 @@ Symbol* SignatureStream::find_symbol() {
Klass* SignatureStream::as_klass(Handle class_loader, Handle protection_domain,
FailureMode failure_mode, TRAPS) {
if (!is_reference()) {
return NULL;
return nullptr;
}
Symbol* name = as_symbol();
Klass* k = NULL;
Klass* k = nullptr;
if (failure_mode == ReturnNull) {
// Note: SD::resolve_or_null returns NULL for most failure modes,
// Note: SD::resolve_or_null returns null for most failure modes,
// but not all. Circularity errors, invalid PDs, etc., throw.
k = SystemDictionary::resolve_or_null(name, class_loader, protection_domain, CHECK_NULL);
} else if (failure_mode == CachedOrNull) {
@ -534,8 +534,8 @@ oop SignatureStream::as_java_mirror(Handle class_loader, Handle protection_domai
return Universe::java_mirror(type());
}
Klass* klass = as_klass(class_loader, protection_domain, failure_mode, CHECK_NULL);
if (klass == NULL) {
return NULL;
if (klass == nullptr) {
return nullptr;
}
return klass->java_mirror();
}
@ -553,13 +553,13 @@ ResolvingSignatureStream::ResolvingSignatureStream(Symbol* signature,
: SignatureStream(signature, is_method),
_class_loader(class_loader), _protection_domain(protection_domain)
{
initialize_load_origin(NULL);
initialize_load_origin(nullptr);
}
ResolvingSignatureStream::ResolvingSignatureStream(Symbol* signature, Klass* load_origin, bool is_method)
: SignatureStream(signature, is_method)
{
assert(load_origin != NULL, "");
assert(load_origin != nullptr, "");
initialize_load_origin(load_origin);
}
@ -570,7 +570,7 @@ ResolvingSignatureStream::ResolvingSignatureStream(const Method* method)
}
void ResolvingSignatureStream::cache_handles() {
assert(_load_origin != NULL, "");
assert(_load_origin != nullptr, "");
JavaThread* current = JavaThread::current();
_class_loader = Handle(current, _load_origin->class_loader());
_protection_domain = Handle(current, _load_origin->protection_domain());
@ -600,7 +600,7 @@ bool SignatureVerifier::is_valid_method_signature(Symbol* sig) {
const char* method_sig = (const char*)sig->bytes();
ssize_t len = sig->utf8_length();
ssize_t index = 0;
if (method_sig != NULL && len > 1 && method_sig[index] == JVM_SIGNATURE_FUNC) {
if (method_sig != nullptr && len > 1 && method_sig[index] == JVM_SIGNATURE_FUNC) {
++index;
while (index < len && method_sig[index] != JVM_SIGNATURE_ENDFUNC) {
ssize_t res = is_valid_type(&method_sig[index], len - index);
@ -622,7 +622,7 @@ bool SignatureVerifier::is_valid_method_signature(Symbol* sig) {
bool SignatureVerifier::is_valid_type_signature(Symbol* sig) {
const char* type_sig = (const char*)sig->bytes();
ssize_t len = sig->utf8_length();
return (type_sig != NULL && len >= 1 &&
return (type_sig != nullptr && len >= 1 &&
(is_valid_type(type_sig, len) == len));
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -359,7 +359,7 @@ class Fingerprinter: public SignatureIterator {
}
Fingerprinter(Symbol* signature, bool is_static)
: SignatureIterator(signature),
_method(NULL) {
_method(nullptr) {
compute_fingerprint_and_return_type(is_static);
}
};
@ -575,7 +575,7 @@ class ResolvingSignatureStream : public SignatureStream {
void initialize_load_origin(Klass* load_origin) {
_load_origin = load_origin;
_handles_cached = (load_origin == NULL);
_handles_cached = (load_origin == nullptr);
}
void need_handles() {
if (!_handles_cached) {


@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -75,7 +75,7 @@ public:
inline bool is_stub() const;
inline bool is_compiled() const;
CodeBlob* cb() const { return _cb; }
const ImmutableOopMap* oopmap() const { if (_oopmap == NULL) get_oopmap(); return _oopmap; }
const ImmutableOopMap* oopmap() const { if (_oopmap == nullptr) get_oopmap(); return _oopmap; }
inline int frame_size() const;
inline int stack_argsize() const;
inline int num_oops() const;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -58,7 +58,7 @@ static oop oop_from_oop_location(stackChunkOop chunk, void* addr) {
// When compressed oops is enabled, an oop location may
// contain narrow oop values - we deal with that here
if (chunk != NULL && chunk->has_bitmap()) {
if (chunk != nullptr && chunk->has_bitmap()) {
// Transformed stack chunk with narrow oops
return chunk->load_oop((narrowOop*)addr);
}
@ -67,14 +67,14 @@ static oop oop_from_oop_location(stackChunkOop chunk, void* addr) {
if (CompressedOops::is_base(*(void**)addr)) {
// Compiled code may produce decoded oop = narrow_oop_base
// when a narrow oop implicit null check is used.
// The narrow_oop_base could be NULL or be the address
// of the page below heap. Use NULL value for both cases.
// The narrow_oop_base could be null or be the address
// of the page below heap. Use null value for both cases.
return nullptr;
}
#endif
}
if (chunk != NULL) {
if (chunk != nullptr) {
// Load oop from chunk
return chunk->load_oop((oop*)addr);
}
@ -99,7 +99,7 @@ static oop oop_from_narrowOop_location(stackChunkOop chunk, void* addr, bool is_
narrow_addr = (narrowOop*)addr;
}
if (chunk != NULL) {
if (chunk != nullptr) {
// Load oop from chunk
return chunk->load_oop(narrow_addr);
}
@ -111,7 +111,7 @@ static oop oop_from_narrowOop_location(stackChunkOop chunk, void* addr, bool is_
StackValue* StackValue::create_stack_value_from_oop_location(stackChunkOop chunk, void* addr) {
oop val = oop_from_oop_location(chunk, addr);
assert(oopDesc::is_oop_or_null(val), "bad oop found at " INTPTR_FORMAT " in_cont: %d compressed: %d",
p2i(addr), chunk != NULL, chunk != NULL && chunk->has_bitmap() && UseCompressedOops);
p2i(addr), chunk != nullptr, chunk != nullptr && chunk->has_bitmap() && UseCompressedOops);
Handle h(Thread::current(), val); // Wrap a handle around the oop
return new StackValue(h);
}
@ -119,7 +119,7 @@ StackValue* StackValue::create_stack_value_from_oop_location(stackChunkOop chunk
StackValue* StackValue::create_stack_value_from_narrowOop_location(stackChunkOop chunk, void* addr, bool is_register) {
oop val = oop_from_narrowOop_location(chunk, addr, is_register);
assert(oopDesc::is_oop_or_null(val), "bad oop found at " INTPTR_FORMAT " in_cont: %d compressed: %d",
p2i(addr), chunk != NULL, chunk != NULL && chunk->has_bitmap() && UseCompressedOops);
p2i(addr), chunk != nullptr, chunk != nullptr && chunk->has_bitmap() && UseCompressedOops);
Handle h(Thread::current(), val); // Wrap a handle around the oop
return new StackValue(h);
}
@ -241,11 +241,11 @@ template address StackValue::stack_value_address(const frame* fr, const SmallReg
template<typename RegisterMapT>
address StackValue::stack_value_address(const frame* fr, const RegisterMapT* reg_map, ScopeValue* sv) {
if (!sv->is_location()) {
return NULL;
return nullptr;
}
Location loc = ((LocationValue *)sv)->location();
if (loc.type() == Location::invalid) {
return NULL;
return nullptr;
}
if (!reg_map->in_cont()) {
@ -256,7 +256,7 @@ address StackValue::stack_value_address(const frame* fr, const RegisterMapT* reg
// before any extension by its callee (due to Compiler1 linkage on SPARC), must be used.
: ((address)fr->unextended_sp()) + loc.stack_offset();
assert(value_addr == NULL || reg_map->thread() == NULL || reg_map->thread()->is_in_usable_stack(value_addr), INTPTR_FORMAT, p2i(value_addr));
assert(value_addr == nullptr || reg_map->thread() == nullptr || reg_map->thread()->is_in_usable_stack(value_addr), INTPTR_FORMAT, p2i(value_addr));
return value_addr;
}
@ -264,7 +264,7 @@ address StackValue::stack_value_address(const frame* fr, const RegisterMapT* reg
? reg_map->as_RegisterMap()->stack_chunk()->reg_to_location(*fr, reg_map->as_RegisterMap(), VMRegImpl::as_VMReg(loc.register_number()))
: reg_map->as_RegisterMap()->stack_chunk()->usp_offset_to_location(*fr, loc.stack_offset());
assert(value_addr == NULL || Continuation::is_in_usable_stack(value_addr, reg_map->as_RegisterMap()) || (reg_map->thread() != NULL && reg_map->thread()->is_in_usable_stack(value_addr)), INTPTR_FORMAT, p2i(value_addr));
assert(value_addr == nullptr || Continuation::is_in_usable_stack(value_addr, reg_map->as_RegisterMap()) || (reg_map->thread() != nullptr && reg_map->thread()->is_in_usable_stack(value_addr)), INTPTR_FORMAT, p2i(value_addr));
return value_addr;
}
@ -292,10 +292,10 @@ void StackValue::print_on(outputStream* st) const {
break;
case T_OBJECT:
if (_handle_value() != NULL) {
if (_handle_value() != nullptr) {
_handle_value()->print_value_on(st);
} else {
st->print("NULL");
st->print("null");
}
st->print(" <" INTPTR_FORMAT ">", p2i(_handle_value()));
break;
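
For context on the narrow-oop branches above: a compressed pointer widens to base + (narrow << shift), so the narrow value 0 decodes to the heap base itself, which is why a decoded oop equal to the base is folded back to null. A rough sketch under assumed names (this is not HotSpot's CompressedOops API):

#include <cstdint>

struct CompressedHeap {
  uintptr_t base;   // start of the compressed-pointer range
  unsigned  shift;  // log2 of object alignment, e.g. 3 for 8-byte alignment

  // Widen a 32-bit narrow value to a full pointer.
  void* decode(uint32_t narrow) const {
    return reinterpret_cast<void*>(base + (uintptr_t(narrow) << shift));
  }

  // A decoded pointer equal to the base came from narrow value 0,
  // i.e. a null reference.
  bool is_base(const void* p) const {
    return reinterpret_cast<uintptr_t>(p) == base;
  }
};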

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -161,9 +161,9 @@ void StackWatermarkFramesIterator::next() {
StackWatermark::StackWatermark(JavaThread* jt, StackWatermarkKind kind, uint32_t epoch) :
_state(StackWatermarkState::create(epoch, true /* is_done */)),
_watermark(0),
_next(NULL),
_next(nullptr),
_jt(jt),
_iterator(NULL),
_iterator(nullptr),
_lock(Mutex::stackwatermark, "StackWatermark_lock"),
_kind(kind),
_linked_watermarks() {
@ -216,7 +216,7 @@ void StackWatermark::start_processing_impl(void* context) {
_iterator->process_one(context);
_iterator->process_one(context);
} else {
_iterator = NULL;
_iterator = nullptr;
}
update_watermark();
}
@ -228,7 +228,7 @@ void StackWatermark::yield_processing() {
void StackWatermark::update_watermark() {
assert(_lock.owned_by_self(), "invariant");
if (_iterator != NULL && _iterator->has_next()) {
if (_iterator != nullptr && _iterator->has_next()) {
assert(_iterator->callee() != 0, "sanity");
Atomic::release_store(&_watermark, _iterator->callee());
Atomic::release_store(&_state, StackWatermarkState::create(epoch_id(), false /* is_done */)); // release watermark w.r.t. epoch
@ -243,9 +243,9 @@ void StackWatermark::update_watermark() {
void StackWatermark::process_one() {
MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
if (!processing_started()) {
start_processing_impl(NULL /* context */);
start_processing_impl(nullptr /* context */);
} else if (!processing_completed()) {
_iterator->process_one(NULL /* context */);
_iterator->process_one(nullptr /* context */);
update_watermark();
}
}
@ -299,7 +299,7 @@ void StackWatermark::process_linked_watermarks() {
// Finish processing all linked stack watermarks
for (StackWatermark* watermark : _linked_watermarks) {
watermark->finish_processing(NULL /* context */);
watermark->finish_processing(nullptr /* context */);
}
}
@ -316,7 +316,7 @@ void StackWatermark::start_processing() {
if (!processing_started_acquire()) {
MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
if (!processing_started()) {
start_processing_impl(NULL /* context */);
start_processing_impl(nullptr /* context */);
}
}
}
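
The store ordering in update_watermark() -- release-store the watermark, then release-store the state word -- is a classic publication pattern: a reader that acquire-loads the state and observes the new epoch is guaranteed to also observe the matching watermark. A self-contained sketch with std::atomic (simplified types; HotSpot uses its own Atomic wrappers):

#include <atomic>
#include <cstdint>

struct WatermarkSlot {
  std::atomic<uintptr_t> watermark{0};
  std::atomic<uint32_t>  state{0};

  void publish(uintptr_t mark, uint32_t new_state) {
    watermark.store(mark, std::memory_order_release);
    state.store(new_state, std::memory_order_release);  // publishes the pair
  }

  // Returns true and fills *out only if the expected state is visible.
  bool poll(uint32_t expected, uintptr_t* out) {
    if (state.load(std::memory_order_acquire) != expected) {
      return false;
    }
    *out = watermark.load(std::memory_order_relaxed);  // ordered by the acquire
    return true;
  }
};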

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,11 +37,11 @@
#include "utilities/vmError.hpp"
StackWatermarks::StackWatermarks() :
_head(NULL) {}
_head(nullptr) {}
StackWatermarks::~StackWatermarks() {
StackWatermark* current = _head;
while (current != NULL) {
while (current != nullptr) {
StackWatermark* next = current->next();
delete current;
current = next;
@ -81,7 +81,7 @@ static void verify_processing_context() {
void StackWatermarkSet::before_unwind(JavaThread* jt) {
verify_processing_context();
assert(jt->has_last_Java_frame(), "must have a Java frame");
for (StackWatermark* current = head(jt); current != NULL; current = current->next()) {
for (StackWatermark* current = head(jt); current != nullptr; current = current->next()) {
current->before_unwind();
}
SafepointMechanism::update_poll_values(jt);
@ -90,7 +90,7 @@ void StackWatermarkSet::before_unwind(JavaThread* jt) {
void StackWatermarkSet::after_unwind(JavaThread* jt) {
verify_processing_context();
assert(jt->has_last_Java_frame(), "must have a Java frame");
for (StackWatermark* current = head(jt); current != NULL; current = current->next()) {
for (StackWatermark* current = head(jt); current != nullptr; current = current->next()) {
current->after_unwind();
}
SafepointMechanism::update_poll_values(jt);
@ -102,7 +102,7 @@ void StackWatermarkSet::on_iteration(JavaThread* jt, const frame& fr) {
return;
}
verify_processing_context();
for (StackWatermark* current = head(jt); current != NULL; current = current->next()) {
for (StackWatermark* current = head(jt); current != nullptr; current = current->next()) {
current->on_iteration(fr);
}
// We don't call SafepointMechanism::update_poll_values here, because the thread
@ -111,7 +111,7 @@ void StackWatermarkSet::on_iteration(JavaThread* jt, const frame& fr) {
void StackWatermarkSet::on_safepoint(JavaThread* jt) {
StackWatermark* watermark = get(jt, StackWatermarkKind::gc);
if (watermark != NULL) {
if (watermark != nullptr) {
watermark->on_safepoint();
}
}
@ -120,7 +120,7 @@ void StackWatermarkSet::start_processing(JavaThread* jt, StackWatermarkKind kind
verify_processing_context();
assert(!jt->is_terminated(), "Poll after termination is a bug");
StackWatermark* watermark = get(jt, kind);
if (watermark != NULL) {
if (watermark != nullptr) {
watermark->start_processing();
}
// We don't call SafepointMechanism::update_poll_values here, because the thread
@ -129,7 +129,7 @@ void StackWatermarkSet::start_processing(JavaThread* jt, StackWatermarkKind kind
}
bool StackWatermarkSet::processing_started(JavaThread* jt) {
for (StackWatermark* current = head(jt); current != NULL; current = current->next()) {
for (StackWatermark* current = head(jt); current != nullptr; current = current->next()) {
if (!current->processing_started()) {
return false;
}
@ -139,7 +139,7 @@ bool StackWatermarkSet::processing_started(JavaThread* jt) {
void StackWatermarkSet::finish_processing(JavaThread* jt, void* context, StackWatermarkKind kind) {
StackWatermark* watermark = get(jt, kind);
if (watermark != NULL) {
if (watermark != nullptr) {
watermark->finish_processing(context);
}
// We don't call SafepointMechanism::update_poll_values here, because the thread
@ -149,7 +149,7 @@ void StackWatermarkSet::finish_processing(JavaThread* jt, void* context, StackWa
uintptr_t StackWatermarkSet::lowest_watermark(JavaThread* jt) {
uintptr_t max_watermark = uintptr_t(0) - 1;
uintptr_t watermark = max_watermark;
for (StackWatermark* current = head(jt); current != NULL; current = current->next()) {
for (StackWatermark* current = head(jt); current != nullptr; current = current->next()) {
watermark = MIN2(watermark, current->watermark());
}
if (watermark == max_watermark) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,12 +30,12 @@
#include "runtime/stackWatermark.hpp"
inline StackWatermark* StackWatermarkSet::get(JavaThread* jt, StackWatermarkKind kind) {
for (StackWatermark* stack_watermark = head(jt); stack_watermark != NULL; stack_watermark = stack_watermark->next()) {
for (StackWatermark* stack_watermark = head(jt); stack_watermark != nullptr; stack_watermark = stack_watermark->next()) {
if (stack_watermark->kind() == kind) {
return stack_watermark;
}
}
return NULL;
return nullptr;
}
template <typename T>
@ -44,7 +44,7 @@ inline T* StackWatermarkSet::get(JavaThread* jt, StackWatermarkKind kind) {
}
inline bool StackWatermarkSet::has_watermark(JavaThread* jt, StackWatermarkKind kind) {
return get(jt, kind) != NULL;
return get(jt, kind) != nullptr;
}
#endif // SHARE_RUNTIME_STACKWATERMARKSET_INLINE_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -50,8 +50,8 @@ class StatSamplerTask : public PeriodicTask {
//----------------------------------------------------------
// Implementation of StatSampler
StatSamplerTask* StatSampler::_task = NULL;
PerfDataList* StatSampler::_sampled = NULL;
StatSamplerTask* StatSampler::_task = nullptr;
PerfDataList* StatSampler::_sampled = nullptr;
/*
* the initialize method is called from the engage() method
@ -107,7 +107,7 @@ void StatSampler::disengage() {
// remove StatSamplerTask
_task->disenroll();
delete _task;
_task = NULL;
_task = nullptr;
// force a final sample
sample_data(_sampled);
@ -123,9 +123,9 @@ void StatSampler::destroy() {
if (!UsePerfData) return;
if (_sampled != NULL) {
if (_sampled != nullptr) {
delete(_sampled);
_sampled = NULL;
_sampled = nullptr;
}
}
@ -135,7 +135,7 @@ void StatSampler::destroy() {
*/
void StatSampler::sample_data(PerfDataList* list) {
assert(list != NULL, "null list unexpected");
assert(list != nullptr, "null list unexpected");
for (int index = 0; index < list->length(); index++) {
PerfData* item = list->at(index);
@ -161,14 +161,14 @@ void StatSampler::collect_sample() {
//
// if (PerfDataManager::count() > previous) {
// // get a new copy of the sampled list
// if (_sampled != NULL) {
// if (_sampled != nullptr) {
// delete(_sampled);
// _sampled = NULL;
// _sampled = nullptr;
// }
// _sampled = PerfDataManager::sampled();
// }
assert(_sampled != NULL, "list not initialized");
assert(_sampled != nullptr, "list not initialized");
sample_data(_sampled);
}
@ -196,7 +196,7 @@ void StatSampler::assert_system_property(const char* name, const char* value, TR
CHECK);
oop value_oop = result.get_oop();
assert(value_oop != NULL, "property must have a value");
assert(value_oop != nullptr, "property must have a value");
// convert Java String to utf8 string
char* system_value = java_lang_String::as_utf8_string(value_oop);
@ -211,9 +211,9 @@ void StatSampler::assert_system_property(const char* name, const char* value, TR
*/
void StatSampler::add_property_constant(CounterNS name_space, const char* name, const char* value, TRAPS) {
// the property must exist
assert(value != NULL, "property name should have a value: %s", name);
assert(value != nullptr, "property name should have a value: %s", name);
assert_system_property(name, value, CHECK);
if (value != NULL) {
if (value != nullptr) {
// create the property counter
PerfDataManager::create_string_constant(name_space, name, value, CHECK);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -61,7 +61,7 @@ class StatSampler : AllStatic {
static void engage();
static void disengage();
static bool is_active() { return _task != NULL; }
static bool is_active() { return _task != nullptr; }
static void initialize();
static void destroy();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,12 +35,12 @@
// Implementation of StubCodeDesc
StubCodeDesc* StubCodeDesc::_list = NULL;
StubCodeDesc* StubCodeDesc::_list = nullptr;
bool StubCodeDesc::_frozen = false;
StubCodeDesc* StubCodeDesc::desc_for(address pc) {
StubCodeDesc* p = _list;
while (p != NULL && !p->contains(pc)) {
while (p != nullptr && !p->contains(pc)) {
p = p->_next;
}
return p;
@ -71,7 +71,7 @@ StubCodeGenerator::~StubCodeGenerator() {
#ifndef PRODUCT
CodeBuffer* cbuf = _masm->code();
CodeBlob* blob = CodeCache::find_blob(cbuf->insts()->start());
if (blob != NULL) {
if (blob != nullptr) {
blob->use_remarks(cbuf->asm_remarks());
blob->use_strings(cbuf->dbg_strings());
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -53,7 +53,7 @@ class StubCodeDesc: public CHeapObj<mtCode> {
void set_begin(address begin) {
assert(begin >= _begin, "begin may not decrease");
assert(_end == NULL || begin <= _end, "begin & end not properly ordered");
assert(_end == nullptr || begin <= _end, "begin & end not properly ordered");
_begin = begin;
}
@ -68,11 +68,11 @@ class StubCodeDesc: public CHeapObj<mtCode> {
static StubCodeDesc* first() { return _list; }
static StubCodeDesc* next(StubCodeDesc* desc) { return desc->_next; }
static StubCodeDesc* desc_for(address pc); // returns the code descriptor for the code containing pc or NULL
static StubCodeDesc* desc_for(address pc); // returns the code descriptor for the code containing pc or null
StubCodeDesc(const char* group, const char* name, address begin, address end = NULL) {
StubCodeDesc(const char* group, const char* name, address begin, address end = nullptr) {
assert(!_frozen, "no modifications allowed");
assert(name != NULL, "no name specified");
assert(name != nullptr, "no name specified");
_next = _list;
_group = group;
_name = name;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,37 +40,37 @@
#include "opto/runtime.hpp"
#endif
UnsafeCopyMemory* UnsafeCopyMemory::_table = NULL;
UnsafeCopyMemory* UnsafeCopyMemory::_table = nullptr;
int UnsafeCopyMemory::_table_length = 0;
int UnsafeCopyMemory::_table_max_length = 0;
address UnsafeCopyMemory::_common_exit_stub_pc = NULL;
address UnsafeCopyMemory::_common_exit_stub_pc = nullptr;
// Implementation of StubRoutines - for a description
// of how to extend it, see the header file.
// Class Variables
BufferBlob* StubRoutines::_code1 = NULL;
BufferBlob* StubRoutines::_code2 = NULL;
BufferBlob* StubRoutines::_code3 = NULL;
BufferBlob* StubRoutines::_code1 = nullptr;
BufferBlob* StubRoutines::_code2 = nullptr;
BufferBlob* StubRoutines::_code3 = nullptr;
address StubRoutines::_call_stub_return_address = NULL;
address StubRoutines::_call_stub_entry = NULL;
address StubRoutines::_call_stub_return_address = nullptr;
address StubRoutines::_call_stub_entry = nullptr;
address StubRoutines::_catch_exception_entry = NULL;
address StubRoutines::_forward_exception_entry = NULL;
address StubRoutines::_throw_AbstractMethodError_entry = NULL;
address StubRoutines::_throw_IncompatibleClassChangeError_entry = NULL;
address StubRoutines::_throw_NullPointerException_at_call_entry = NULL;
address StubRoutines::_throw_StackOverflowError_entry = NULL;
address StubRoutines::_throw_delayed_StackOverflowError_entry = NULL;
address StubRoutines::_catch_exception_entry = nullptr;
address StubRoutines::_forward_exception_entry = nullptr;
address StubRoutines::_throw_AbstractMethodError_entry = nullptr;
address StubRoutines::_throw_IncompatibleClassChangeError_entry = nullptr;
address StubRoutines::_throw_NullPointerException_at_call_entry = nullptr;
address StubRoutines::_throw_StackOverflowError_entry = nullptr;
address StubRoutines::_throw_delayed_StackOverflowError_entry = nullptr;
jint StubRoutines::_verify_oop_count = 0;
address StubRoutines::_verify_oop_subroutine_entry = NULL;
address StubRoutines::_atomic_xchg_entry = NULL;
address StubRoutines::_atomic_cmpxchg_entry = NULL;
address StubRoutines::_atomic_cmpxchg_long_entry = NULL;
address StubRoutines::_atomic_add_entry = NULL;
address StubRoutines::_fence_entry = NULL;
address StubRoutines::_verify_oop_subroutine_entry = nullptr;
address StubRoutines::_atomic_xchg_entry = nullptr;
address StubRoutines::_atomic_cmpxchg_entry = nullptr;
address StubRoutines::_atomic_cmpxchg_long_entry = nullptr;
address StubRoutines::_atomic_add_entry = nullptr;
address StubRoutines::_fence_entry = nullptr;
// Compiled code entry points default values
// The default functions don't have separate disjoint versions.
@ -100,13 +100,13 @@ address StubRoutines::_arrayof_jlong_disjoint_arraycopy = CAST_FROM_FN_PTR(addr
address StubRoutines::_arrayof_oop_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_oop_copy);
address StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_oop_copy_uninit);
address StubRoutines::_data_cache_writeback = NULL;
address StubRoutines::_data_cache_writeback_sync = NULL;
address StubRoutines::_data_cache_writeback = nullptr;
address StubRoutines::_data_cache_writeback_sync = nullptr;
address StubRoutines::_checkcast_arraycopy = NULL;
address StubRoutines::_checkcast_arraycopy_uninit = NULL;
address StubRoutines::_unsafe_arraycopy = NULL;
address StubRoutines::_generic_arraycopy = NULL;
address StubRoutines::_checkcast_arraycopy = nullptr;
address StubRoutines::_checkcast_arraycopy_uninit = nullptr;
address StubRoutines::_unsafe_arraycopy = nullptr;
address StubRoutines::_generic_arraycopy = nullptr;
address StubRoutines::_jbyte_fill;
address StubRoutines::_jshort_fill;
@ -115,68 +115,68 @@ address StubRoutines::_arrayof_jbyte_fill;
address StubRoutines::_arrayof_jshort_fill;
address StubRoutines::_arrayof_jint_fill;
address StubRoutines::_aescrypt_encryptBlock = NULL;
address StubRoutines::_aescrypt_decryptBlock = NULL;
address StubRoutines::_cipherBlockChaining_encryptAESCrypt = NULL;
address StubRoutines::_cipherBlockChaining_decryptAESCrypt = NULL;
address StubRoutines::_electronicCodeBook_encryptAESCrypt = NULL;
address StubRoutines::_electronicCodeBook_decryptAESCrypt = NULL;
address StubRoutines::_counterMode_AESCrypt = NULL;
address StubRoutines::_galoisCounterMode_AESCrypt = NULL;
address StubRoutines::_ghash_processBlocks = NULL;
address StubRoutines::_chacha20Block = NULL;
address StubRoutines::_base64_encodeBlock = NULL;
address StubRoutines::_base64_decodeBlock = NULL;
address StubRoutines::_poly1305_processBlocks = NULL;
address StubRoutines::_aescrypt_encryptBlock = nullptr;
address StubRoutines::_aescrypt_decryptBlock = nullptr;
address StubRoutines::_cipherBlockChaining_encryptAESCrypt = nullptr;
address StubRoutines::_cipherBlockChaining_decryptAESCrypt = nullptr;
address StubRoutines::_electronicCodeBook_encryptAESCrypt = nullptr;
address StubRoutines::_electronicCodeBook_decryptAESCrypt = nullptr;
address StubRoutines::_counterMode_AESCrypt = nullptr;
address StubRoutines::_galoisCounterMode_AESCrypt = nullptr;
address StubRoutines::_ghash_processBlocks = nullptr;
address StubRoutines::_chacha20Block = nullptr;
address StubRoutines::_base64_encodeBlock = nullptr;
address StubRoutines::_base64_decodeBlock = nullptr;
address StubRoutines::_poly1305_processBlocks = nullptr;
address StubRoutines::_md5_implCompress = NULL;
address StubRoutines::_md5_implCompressMB = NULL;
address StubRoutines::_sha1_implCompress = NULL;
address StubRoutines::_sha1_implCompressMB = NULL;
address StubRoutines::_sha256_implCompress = NULL;
address StubRoutines::_sha256_implCompressMB = NULL;
address StubRoutines::_sha512_implCompress = NULL;
address StubRoutines::_sha512_implCompressMB = NULL;
address StubRoutines::_sha3_implCompress = NULL;
address StubRoutines::_sha3_implCompressMB = NULL;
address StubRoutines::_md5_implCompress = nullptr;
address StubRoutines::_md5_implCompressMB = nullptr;
address StubRoutines::_sha1_implCompress = nullptr;
address StubRoutines::_sha1_implCompressMB = nullptr;
address StubRoutines::_sha256_implCompress = nullptr;
address StubRoutines::_sha256_implCompressMB = nullptr;
address StubRoutines::_sha512_implCompress = nullptr;
address StubRoutines::_sha512_implCompressMB = nullptr;
address StubRoutines::_sha3_implCompress = nullptr;
address StubRoutines::_sha3_implCompressMB = nullptr;
address StubRoutines::_updateBytesCRC32 = NULL;
address StubRoutines::_crc_table_adr = NULL;
address StubRoutines::_updateBytesCRC32 = nullptr;
address StubRoutines::_crc_table_adr = nullptr;
address StubRoutines::_crc32c_table_addr = NULL;
address StubRoutines::_updateBytesCRC32C = NULL;
address StubRoutines::_updateBytesAdler32 = NULL;
address StubRoutines::_crc32c_table_addr = nullptr;
address StubRoutines::_updateBytesCRC32C = nullptr;
address StubRoutines::_updateBytesAdler32 = nullptr;
address StubRoutines::_multiplyToLen = NULL;
address StubRoutines::_squareToLen = NULL;
address StubRoutines::_mulAdd = NULL;
address StubRoutines::_montgomeryMultiply = NULL;
address StubRoutines::_montgomerySquare = NULL;
address StubRoutines::_bigIntegerRightShiftWorker = NULL;
address StubRoutines::_bigIntegerLeftShiftWorker = NULL;
address StubRoutines::_multiplyToLen = nullptr;
address StubRoutines::_squareToLen = nullptr;
address StubRoutines::_mulAdd = nullptr;
address StubRoutines::_montgomeryMultiply = nullptr;
address StubRoutines::_montgomerySquare = nullptr;
address StubRoutines::_bigIntegerRightShiftWorker = nullptr;
address StubRoutines::_bigIntegerLeftShiftWorker = nullptr;
address StubRoutines::_vectorizedMismatch = NULL;
address StubRoutines::_vectorizedMismatch = nullptr;
address StubRoutines::_dexp = NULL;
address StubRoutines::_dlog = NULL;
address StubRoutines::_dlog10 = NULL;
address StubRoutines::_dpow = NULL;
address StubRoutines::_dsin = NULL;
address StubRoutines::_dcos = NULL;
address StubRoutines::_dlibm_sin_cos_huge = NULL;
address StubRoutines::_dlibm_reduce_pi04l = NULL;
address StubRoutines::_dlibm_tan_cot_huge = NULL;
address StubRoutines::_dtan = NULL;
address StubRoutines::_dexp = nullptr;
address StubRoutines::_dlog = nullptr;
address StubRoutines::_dlog10 = nullptr;
address StubRoutines::_dpow = nullptr;
address StubRoutines::_dsin = nullptr;
address StubRoutines::_dcos = nullptr;
address StubRoutines::_dlibm_sin_cos_huge = nullptr;
address StubRoutines::_dlibm_reduce_pi04l = nullptr;
address StubRoutines::_dlibm_tan_cot_huge = nullptr;
address StubRoutines::_dtan = nullptr;
address StubRoutines::_vector_f_math[VectorSupport::NUM_VEC_SIZES][VectorSupport::NUM_SVML_OP] = {{NULL}, {NULL}};
address StubRoutines::_vector_d_math[VectorSupport::NUM_VEC_SIZES][VectorSupport::NUM_SVML_OP] = {{NULL}, {NULL}};
address StubRoutines::_vector_f_math[VectorSupport::NUM_VEC_SIZES][VectorSupport::NUM_SVML_OP] = {{nullptr}, {nullptr}};
address StubRoutines::_vector_d_math[VectorSupport::NUM_VEC_SIZES][VectorSupport::NUM_SVML_OP] = {{nullptr}, {nullptr}};
address StubRoutines::_cont_thaw = NULL;
address StubRoutines::_cont_returnBarrier = NULL;
address StubRoutines::_cont_returnBarrierExc = NULL;
address StubRoutines::_cont_thaw = nullptr;
address StubRoutines::_cont_returnBarrier = nullptr;
address StubRoutines::_cont_returnBarrierExc = nullptr;
JFR_ONLY(RuntimeStub* StubRoutines::_jfr_write_checkpoint_stub = NULL;)
JFR_ONLY(address StubRoutines::_jfr_write_checkpoint = NULL;)
JFR_ONLY(RuntimeStub* StubRoutines::_jfr_write_checkpoint_stub = nullptr;)
JFR_ONLY(address StubRoutines::_jfr_write_checkpoint = nullptr;)
// Initialization
//
@ -208,18 +208,18 @@ address UnsafeCopyMemory::page_error_continue_pc(address pc) {
return entry->error_exit_pc();
}
}
return NULL;
return nullptr;
}
void StubRoutines::initialize1() {
if (_code1 == NULL) {
if (_code1 == nullptr) {
ResourceMark rm;
TraceTime timer("StubRoutines generation 1", TRACETIME_LOG(Info, startuptime));
// Add extra space for large CodeEntryAlignment
int max_aligned_stubs = 10;
int size = code_size1 + CodeEntryAlignment * max_aligned_stubs;
_code1 = BufferBlob::create("StubRoutines (1)", size);
if (_code1 == NULL) {
if (_code1 == nullptr) {
vm_exit_out_of_memory(code_size1, OOM_MALLOC_ERROR, "CodeCache: no room for StubRoutines (1)");
}
CodeBuffer buffer(_code1);
@ -268,11 +268,11 @@ static void test_arraycopy_func(address func, int alignment) {
#endif // ASSERT
void StubRoutines::initializeContinuationStubs() {
if (_code3 == NULL) {
if (_code3 == nullptr) {
ResourceMark rm;
TraceTime timer("StubRoutines generation 3", TRACETIME_LOG(Info, startuptime));
_code3 = BufferBlob::create("StubRoutines (3)", code_size2);
if (_code3 == NULL) {
if (_code3 == nullptr) {
vm_exit_out_of_memory(code_size2, OOM_MALLOC_ERROR, "CodeCache: no room for StubRoutines (3)");
}
CodeBuffer buffer(_code3);
@ -284,14 +284,14 @@ void StubRoutines::initializeContinuationStubs() {
}
void StubRoutines::initialize2() {
if (_code2 == NULL) {
if (_code2 == nullptr) {
ResourceMark rm;
TraceTime timer("StubRoutines generation 2", TRACETIME_LOG(Info, startuptime));
// Add extra space for large CodeEntryAlignment
int max_aligned_stubs = 100;
int size = code_size2 + CodeEntryAlignment * max_aligned_stubs;
_code2 = BufferBlob::create("StubRoutines (2)", size);
if (_code2 == NULL) {
if (_code2 == nullptr) {
vm_exit_out_of_memory(code_size2, OOM_MALLOC_ERROR, "CodeCache: no room for StubRoutines (2)");
}
CodeBuffer buffer(_code2);
@ -320,7 +320,7 @@ void StubRoutines::initialize2() {
#undef TEST_ARRAYCOPY
#define TEST_FILL(type) \
if (_##type##_fill != NULL) { \
if (_##type##_fill != nullptr) { \
union { \
double d; \
type body[96]; \
@ -507,11 +507,11 @@ address StubRoutines::select_fill_function(BasicType t, bool aligned, const char
case T_ADDRESS:
case T_VOID:
// Currently unsupported
return NULL;
return nullptr;
default:
ShouldNotReachHere();
return NULL;
return nullptr;
}
#undef RETURN_STUB
@ -584,7 +584,7 @@ StubRoutines::select_arraycopy_function(BasicType t, bool aligned, bool disjoint
}
default:
ShouldNotReachHere();
return NULL;
return nullptr;
}
#undef RETURN_STUB
@ -593,21 +593,21 @@ StubRoutines::select_arraycopy_function(BasicType t, bool aligned, bool disjoint
UnsafeCopyMemoryMark::UnsafeCopyMemoryMark(StubCodeGenerator* cgen, bool add_entry, bool continue_at_scope_end, address error_exit_pc) {
_cgen = cgen;
_ucm_entry = NULL;
_ucm_entry = nullptr;
if (add_entry) {
address err_exit_pc = NULL;
address err_exit_pc = nullptr;
if (!continue_at_scope_end) {
err_exit_pc = error_exit_pc != NULL ? error_exit_pc : UnsafeCopyMemory::common_exit_stub_pc();
err_exit_pc = error_exit_pc != nullptr ? error_exit_pc : UnsafeCopyMemory::common_exit_stub_pc();
}
assert(err_exit_pc != NULL || continue_at_scope_end, "error exit not set");
_ucm_entry = UnsafeCopyMemory::add_to_table(_cgen->assembler()->pc(), NULL, err_exit_pc);
assert(err_exit_pc != nullptr || continue_at_scope_end, "error exit not set");
_ucm_entry = UnsafeCopyMemory::add_to_table(_cgen->assembler()->pc(), nullptr, err_exit_pc);
}
}
UnsafeCopyMemoryMark::~UnsafeCopyMemoryMark() {
if (_ucm_entry != NULL) {
if (_ucm_entry != nullptr) {
_ucm_entry->set_end_pc(_cgen->assembler()->pc());
if (_ucm_entry->error_exit_pc() == NULL) {
if (_ucm_entry->error_exit_pc() == nullptr) {
_ucm_entry->set_error_exit_pc(_cgen->assembler()->pc());
}
}
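
initialize1(), initialize2() and initializeContinuationStubs() above all share the same nullptr-guarded, generate-once shape. A compressed sketch with placeholder types (the real code allocates a BufferBlob in the code cache and calls vm_exit_out_of_memory on failure; VM startup runs this single-threaded, so no locking is needed):

#include <cstdio>
#include <cstdlib>
#include <new>

struct StubBlob { /* generated stub code would live here */ };

static StubBlob* _code1 = nullptr;

void initialize1() {
  if (_code1 != nullptr) {
    return;  // stubs were already generated on an earlier call
  }
  _code1 = new (std::nothrow) StubBlob();
  if (_code1 == nullptr) {
    std::fprintf(stderr, "CodeCache: no room for StubRoutines (1)\n");
    std::exit(1);
  }
  // ... emit the stage-1 stubs into the blob ...
}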

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -85,7 +85,7 @@ class UnsafeCopyMemory : public CHeapObj<mtCode> {
static UnsafeCopyMemory* _table;
static int _table_length;
static int _table_max_length;
UnsafeCopyMemory() : _start_pc(NULL), _end_pc(NULL), _error_exit_pc(NULL) {}
UnsafeCopyMemory() : _start_pc(nullptr), _end_pc(nullptr), _error_exit_pc(nullptr) {}
void set_start_pc(address pc) { _start_pc = pc; }
void set_end_pc(address pc) { _end_pc = pc; }
void set_error_exit_pc(address pc) { _error_exit_pc = pc; }
@ -117,7 +117,7 @@ class UnsafeCopyMemoryMark : public StackObj {
UnsafeCopyMemory* _ucm_entry;
StubCodeGenerator* _cgen;
public:
UnsafeCopyMemoryMark(StubCodeGenerator* cgen, bool add_entry, bool continue_at_scope_end, address error_exit_pc = NULL);
UnsafeCopyMemoryMark(StubCodeGenerator* cgen, bool add_entry, bool continue_at_scope_end, address error_exit_pc = nullptr);
~UnsafeCopyMemoryMark();
};
@ -270,8 +270,8 @@ class StubRoutines: AllStatic {
static bool contains(address addr) {
return
(_code1 != NULL && _code1->blob_contains(addr)) ||
(_code2 != NULL && _code2->blob_contains(addr)) ;
(_code1 != nullptr && _code1->blob_contains(addr)) ||
(_code2 != nullptr && _code2->blob_contains(addr)) ;
}
static RuntimeBlob* code1() { return _code1; }

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -126,11 +126,11 @@ size_t MonitorList::unlink_deflated(Thread* current, LogStream* ls,
elapsedTimer* timer_p,
GrowableArray<ObjectMonitor*>* unlinked_list) {
size_t unlinked_count = 0;
ObjectMonitor* prev = NULL;
ObjectMonitor* prev = nullptr;
ObjectMonitor* head = Atomic::load_acquire(&_head);
ObjectMonitor* m = head;
// The in-use list head can be NULL during the final audit.
while (m != NULL) {
// The in-use list head can be null during the final audit.
while (m != nullptr) {
if (m->is_being_async_deflated()) {
// Find next live ObjectMonitor.
ObjectMonitor* next = m;
@ -143,8 +143,8 @@ size_t MonitorList::unlink_deflated(Thread* current, LogStream* ls,
// Reached the max so bail out on the gathering loop.
break;
}
} while (next != NULL && next->is_being_async_deflated());
if (prev == NULL) {
} while (next != nullptr && next->is_being_async_deflated());
if (prev == nullptr) {
ObjectMonitor* prev_head = Atomic::cmpxchg(&_head, head, next);
if (prev_head != head) {
// Find new prev ObjectMonitor that just got inserted.
@ -201,11 +201,11 @@ ObjectMonitor* MonitorList::Iterator::next() {
// TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly.
#define DTRACE_MONITOR_PROBE_COMMON(obj, thread) \
char* bytes = NULL; \
char* bytes = nullptr; \
int len = 0; \
jlong jtid = SharedRuntime::get_java_tid(thread); \
Symbol* klassname = obj->klass()->name(); \
if (klassname != NULL) { \
if (klassname != nullptr) { \
bytes = (char*)klassname->bytes(); \
len = klassname->utf8_length(); \
}
@ -308,7 +308,7 @@ static uintx _no_progress_cnt = 0;
bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) {
assert(current->thread_state() == _thread_in_Java, "invariant");
NoSafepointVerifier nsv;
if (obj == NULL) return false; // slow-path for invalid obj
if (obj == nullptr) return false; // slow-path for invalid obj
const markWord mark = obj->mark();
if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) {
@ -322,7 +322,7 @@ bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool al
assert(mon->object() == oop(obj), "invariant");
if (mon->owner() != current) return false; // slow-path for IMS exception
if (mon->first_waiter() != NULL) {
if (mon->first_waiter() != nullptr) {
// We have one or more waiters. Since this is an inflated monitor
// that we own, we can transfer one or more threads from the waitset
// to the entrylist here and now, avoiding the slow-path.
@ -335,7 +335,7 @@ bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool al
do {
mon->INotify(current);
++free_count;
} while (mon->first_waiter() != NULL && all);
} while (mon->first_waiter() != nullptr && all);
OM_PERFDATA_OP(Notifications, inc(free_count));
}
return true;
@ -356,7 +356,7 @@ bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
BasicLock * lock) {
assert(current->thread_state() == _thread_in_Java, "invariant");
NoSafepointVerifier nsv;
if (obj == NULL) return false; // Need to throw NPE
if (obj == nullptr) return false; // Need to throw NPE
if (obj->klass()->is_value_based()) {
return false;
@ -369,7 +369,7 @@ bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
// An async deflation or GC can race us before we manage to make
// the ObjectMonitor busy by setting the owner below. If we detect
// that race we just bail out to the slow-path here.
if (m->object_peek() == NULL) {
if (m->object_peek() == nullptr) {
return false;
}
JavaThread* const owner = static_cast<JavaThread*>(m->owner_raw());
@ -387,7 +387,7 @@ bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
// This Java Monitor is inflated so obj's header will never be
// displaced to this thread's BasicLock. Make the displaced header
// non-NULL so this BasicLock is not seen as recursive nor as
// non-null so this BasicLock is not seen as recursive nor as
// being locked. We do this unconditionally so that this thread's
// BasicLock cannot be mis-interpreted by any stack walkers. For
// performance reasons, stack walkers generally first check for
@ -396,7 +396,7 @@ bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
// and last are the inflated Java Monitor (ObjectMonitor) checks.
lock->set_displaced_header(markWord::unused_mark());
if (owner == NULL && m->try_set_owner_from(NULL, current) == NULL) {
if (owner == nullptr && m->try_set_owner_from(nullptr, current) == nullptr) {
assert(m->_recursions == 0, "invariant");
current->inc_held_monitor_count();
return true;
@ -432,7 +432,7 @@ void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread
current->print_active_stack_on(&ss);
char* base = (char*)strstr(ss.base(), "at");
char* newline = (char*)strchr(ss.base(), '\n');
if (newline != NULL) {
if (newline != nullptr) {
*newline = '\0';
}
fatal("Synchronizing on object " INTPTR_FORMAT " of klass %s %s", p2i(obj()), obj->klass()->external_name(), base);
@ -496,7 +496,7 @@ void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current)
current->is_lock_owned((address)mark.locker())) {
assert(lock != mark.locker(), "must not re-lock the same lock");
assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
lock->set_displaced_header(markWord::from_pointer(NULL));
lock->set_displaced_header(markWord::from_pointer(nullptr));
return;
}
@ -528,7 +528,7 @@ void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current)
markWord dhw = lock->displaced_header();
if (dhw.value() == 0) {
// If the displaced header is NULL, then this exit matches up with
// If the displaced header is null, then this exit matches up with
// a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
if (mark != markWord::INFLATING()) {
@ -658,13 +658,13 @@ ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) {
_thread->check_for_valid_safepoint_state();
_obj = obj;
if (_obj() != NULL) {
if (_obj() != nullptr) {
ObjectSynchronizer::enter(_obj, &_lock, _thread);
}
}
ObjectLocker::~ObjectLocker() {
if (_obj() != NULL) {
if (_obj() != nullptr) {
ObjectSynchronizer::exit(_obj(), &_lock, _thread);
}
}
@ -859,7 +859,7 @@ static inline intptr_t get_next_hash(Thread* current, oop obj) {
intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) {
while (true) {
ObjectMonitor* monitor = NULL;
ObjectMonitor* monitor = nullptr;
markWord temp, test;
intptr_t hash;
markWord mark = read_stable_mark(obj);
@ -993,7 +993,7 @@ bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current,
JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
oop obj = h_obj();
address owner = NULL;
address owner = nullptr;
markWord mark = read_stable_mark(obj);
@ -1007,12 +1007,12 @@ JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_ob
// The first stage of async deflation does not affect any field
// used by this comparison so the ObjectMonitor* is usable here.
ObjectMonitor* monitor = mark.monitor();
assert(monitor != NULL, "monitor should be non-null");
assert(monitor != nullptr, "monitor should be non-null");
owner = (address) monitor->owner();
}
if (owner != NULL) {
// owning_thread_from_monitor_owner() may also return NULL here
if (owner != nullptr) {
// owning_thread_from_monitor_owner() may also return null here
return Threads::owning_thread_from_monitor_owner(t_list, owner);
}
@ -1021,7 +1021,7 @@ JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_ob
// locked by another thread when reaching here.
// assert(mark.is_neutral(), "sanity check");
return NULL;
return nullptr;
}
// Visitors ...
@ -1040,7 +1040,7 @@ void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure, JavaThread* t
// is set to a stack lock address in the target thread.
continue;
}
if (!mid->is_being_async_deflated() && mid->object_peek() != NULL) {
if (!mid->is_being_async_deflated() && mid->object_peek() != nullptr) {
// Only process with closure if the object is set.
// monitors_iterate() is only called at a safepoint or when the
@ -1065,7 +1065,7 @@ void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure,
ObjectMonitor* mid = *iter.next();
// Owner set to a stack lock address in thread should never be seen here:
assert(mid->owner() == thread, "must be");
if (!mid->is_being_async_deflated() && mid->object_peek() != NULL) {
if (!mid->is_being_async_deflated() && mid->object_peek() != nullptr) {
// Only process with closure if the object is set.
// monitors_iterate() is only called at a safepoint or when the
@ -1181,7 +1181,7 @@ jlong ObjectSynchronizer::time_since_last_async_deflation_ms() {
static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
const oop obj,
ObjectSynchronizer::InflateCause cause) {
assert(event != NULL, "invariant");
assert(event != nullptr, "invariant");
event->set_monitorClass(obj->klass());
event->set_address((uintptr_t)(void*)obj);
event->set_cause((u1)cause);
@ -1298,8 +1298,8 @@ ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
// with this thread we could simply set m->_owner = current.
// Note that a thread can inflate an object
// that it has stack-locked -- as might happen in wait() -- directly
// with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
m->set_owner_from(NULL, mark.locker());
// with CAS. That is, we can avoid the xchg-nullptr .... ST idiom.
m->set_owner_from(nullptr, mark.locker());
// TODO-FIXME: assert BasicLock->dhw != 0.
// Must preserve store ordering. The monitor state must
@ -1333,7 +1333,7 @@ ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
// pre-locked ObjectMonitor pointer into the object header. A successful
// CAS inflates the object *and* confers ownership to the inflating thread.
// In the current implementation we use a 2-step mechanism where we CAS()
// to inflate and then CAS() again to try to swing _owner from NULL to current.
// to inflate and then CAS() again to try to swing _owner from null to current.
// An inflateTry() method that we could call from enter() would be useful.
// Catch if the object's header is not neutral (not locked and
@ -1345,7 +1345,7 @@ ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
delete m;
m = NULL;
m = nullptr;
continue;
// interference - the markword changed - just retry.
// The state-transitions are one-way, so there's no chance of
@ -1380,7 +1380,7 @@ void ObjectSynchronizer::chk_for_block_req(JavaThread* current, const char* op_n
}
// A safepoint/handshake has started.
if (ls != NULL) {
if (ls != nullptr) {
timer_p->stop();
ls->print_cr("pausing %s: %s=" SIZE_FORMAT ", in_use_list stats: ceiling="
SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT,
@ -1393,7 +1393,7 @@ void ObjectSynchronizer::chk_for_block_req(JavaThread* current, const char* op_n
ThreadBlockInVM tbivm(current);
}
if (ls != NULL) {
if (ls != nullptr) {
ls->print_cr("resuming %s: in_use_list stats: ceiling=" SIZE_FORMAT
", count=" SIZE_FORMAT ", max=" SIZE_FORMAT, op_name,
in_use_list_ceiling(), _in_use_list.count(), _in_use_list.max());
@ -1471,7 +1471,7 @@ size_t ObjectSynchronizer::deflate_idle_monitors(ObjectMonitorsHashtable* table)
LogStreamHandle(Debug, monitorinflation) lsh_debug;
LogStreamHandle(Info, monitorinflation) lsh_info;
LogStream* ls = NULL;
LogStream* ls = nullptr;
if (log_is_enabled(Debug, monitorinflation)) {
ls = &lsh_debug;
} else if (log_is_enabled(Info, monitorinflation)) {
@ -1479,7 +1479,7 @@ size_t ObjectSynchronizer::deflate_idle_monitors(ObjectMonitorsHashtable* table)
}
elapsedTimer timer;
if (ls != NULL) {
if (ls != nullptr) {
ls->print_cr("begin deflating: in_use_list stats: ceiling=" SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT,
in_use_list_ceiling(), _in_use_list.count(), _in_use_list.max());
timer.start();
@ -1500,7 +1500,7 @@ size_t ObjectSynchronizer::deflate_idle_monitors(ObjectMonitorsHashtable* table)
GrowableArray<ObjectMonitor*> delete_list((int)deflated_count);
unlinked_count = _in_use_list.unlink_deflated(current, ls, &timer, &delete_list);
if (current->is_Java_thread()) {
if (ls != NULL) {
if (ls != nullptr) {
timer.stop();
ls->print_cr("before handshaking: unlinked_count=" SIZE_FORMAT
", in_use_list stats: ceiling=" SIZE_FORMAT ", count="
@ -1514,7 +1514,7 @@ size_t ObjectSynchronizer::deflate_idle_monitors(ObjectMonitorsHashtable* table)
HandshakeForDeflation hfd_hc;
Handshake::execute(&hfd_hc);
if (ls != NULL) {
if (ls != nullptr) {
ls->print_cr("after handshaking: in_use_list stats: ceiling="
SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT,
in_use_list_ceiling(), _in_use_list.count(), _in_use_list.max());
@ -1537,7 +1537,7 @@ size_t ObjectSynchronizer::deflate_idle_monitors(ObjectMonitorsHashtable* table)
assert(unlinked_count == deleted_count, "must be");
}
if (ls != NULL) {
if (ls != nullptr) {
timer.stop();
if (deflated_count != 0 || unlinked_count != 0 || log_is_enabled(Debug, monitorinflation)) {
ls->print_cr("deflated_count=" SIZE_FORMAT ", {unlinked,deleted}_count=" SIZE_FORMAT " monitors in %3.7f secs",
@ -1681,7 +1681,7 @@ void ObjectSynchronizer::audit_and_print_stats(bool on_exit) {
LogStreamHandle(Debug, monitorinflation) lsh_debug;
LogStreamHandle(Info, monitorinflation) lsh_info;
LogStreamHandle(Trace, monitorinflation) lsh_trace;
LogStream* ls = NULL;
LogStream* ls = nullptr;
if (log_is_enabled(Trace, monitorinflation)) {
ls = &lsh_trace;
} else if (log_is_enabled(Debug, monitorinflation)) {
@ -1689,7 +1689,7 @@ void ObjectSynchronizer::audit_and_print_stats(bool on_exit) {
} else if (log_is_enabled(Info, monitorinflation)) {
ls = &lsh_info;
}
assert(ls != NULL, "sanity check");
assert(ls != nullptr, "sanity check");
int error_cnt = 0;
@ -1760,11 +1760,11 @@ void ObjectSynchronizer::chk_in_use_entry(ObjectMonitor* n, outputStream* out,
}
if (n->header().value() == 0) {
out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor must "
"have non-NULL _header field.", p2i(n));
"have non-null _header field.", p2i(n));
*error_cnt_p = *error_cnt_p + 1;
}
const oop obj = n->object_peek();
if (obj != NULL) {
if (obj != nullptr) {
const markWord mark = obj->mark();
if (!mark.has_monitor()) {
out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor's "
@ -1802,8 +1802,8 @@ void ObjectSynchronizer::log_in_use_monitor_details(outputStream* out) {
const markWord mark = mid->header();
ResourceMark rm;
out->print(INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT " %s", p2i(mid),
mid->is_busy(), mark.hash() != 0, mid->owner() != NULL,
p2i(obj), obj == NULL ? "" : obj->klass()->external_name());
mid->is_busy(), mark.hash() != 0, mid->owner() != nullptr,
p2i(obj), obj == nullptr ? "" : obj->klass()->external_name());
if (mid->is_busy()) {
out->print(" (%s)", mid->is_busy_to_string(&ss));
ss.reset();
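
The inflate() comments above describe the 2-step protocol: CAS a monitor pointer into the object header, then CAS _owner from null to the inflating thread. A heavily simplified sketch with std::atomic (assumed types; it ignores markWord encoding, stack-locking, deflation and hashing):

#include <atomic>

struct Monitor {
  std::atomic<void*> owner{nullptr};
};

struct ObjectHeader {
  std::atomic<Monitor*> monitor{nullptr};
};

// Install a monitor (step 1) and try to claim it (step 2). Losing either
// race is benign: the loser adopts the winner's monitor.
Monitor* inflate_and_try_own(ObjectHeader& hdr, void* self) {
  Monitor* fresh = new Monitor();
  Monitor* expected = nullptr;
  if (!hdr.monitor.compare_exchange_strong(expected, fresh)) {
    delete fresh;      // another thread inflated first
    fresh = expected;  // compare_exchange stored the current value here
  }
  void* no_owner = nullptr;
  fresh->owner.compare_exchange_strong(no_owner, self);  // may legitimately fail
  return fresh;
}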

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -109,7 +109,7 @@ class MonitorList::Iterator {
public:
Iterator(ObjectMonitor* head) : _current(head) {}
bool has_next() const { return _current != NULL; }
bool has_next() const { return _current != nullptr; }
ObjectMonitor* next();
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -86,7 +86,7 @@ void PeriodicTask::enroll() {
// not already own the PeriodicTask_lock. Otherwise, we don't try to
// enter it again because VM internal Mutexes do not support recursion.
//
MutexLocker ml(PeriodicTask_lock->owned_by_self() ? NULL : PeriodicTask_lock);
MutexLocker ml(PeriodicTask_lock->owned_by_self() ? nullptr : PeriodicTask_lock);
if (_num_tasks == PeriodicTask::max_tasks) {
fatal("Overflow in PeriodicTask table");
@ -95,7 +95,7 @@ void PeriodicTask::enroll() {
}
WatcherThread* thread = WatcherThread::watcher_thread();
if (thread != NULL) {
if (thread != nullptr) {
thread->unpark();
} else {
WatcherThread::start();
@ -108,7 +108,7 @@ void PeriodicTask::disenroll() {
// not already own the PeriodicTask_lock. Otherwise, we don't try to
// enter it again because VM internal Mutexes do not support recursion.
//
MutexLocker ml(PeriodicTask_lock->owned_by_self() ? NULL : PeriodicTask_lock);
MutexLocker ml(PeriodicTask_lock->owned_by_self() ? nullptr : PeriodicTask_lock);
int index;
for(index = 0; index < _num_tasks && _tasks[index] != this; index++)
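
The ternary handed to MutexLocker above is the standard workaround for non-recursive VM mutexes: when the current thread already owns the lock, pass nullptr so the guard acquires nothing instead of self-deadlocking. The same idiom sketched with standard-library types (MaybeLocker is a stand-in, not HotSpot's MutexLocker):

#include <mutex>

class MaybeLocker {
  std::mutex* _m;
 public:
  explicit MaybeLocker(std::mutex* m) : _m(m) {
    if (_m != nullptr) _m->lock();  // nullptr means: already owned, skip
  }
  ~MaybeLocker() {
    if (_m != nullptr) _m->unlock();
  }
  MaybeLocker(const MaybeLocker&) = delete;
  MaybeLocker& operator=(const MaybeLocker&) = delete;
};

void enroll(std::mutex& task_lock, bool already_owned) {
  MaybeLocker ml(already_owned ? nullptr : &task_lock);
  // ... mutate the task table under the lock ...
}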

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -54,7 +54,7 @@
#ifndef USE_LIBRARY_BASED_TLS_ONLY
// Current thread is maintained as a thread-local variable
THREAD_LOCAL Thread* Thread::_thr_current = NULL;
THREAD_LOCAL Thread* Thread::_thr_current = nullptr;
#endif
// ======= Thread ========
@ -70,31 +70,31 @@ void Thread::operator delete(void* p) {
// Base class for all threads: VMThread, WatcherThread, ConcurrentMarkSweepThread,
// JavaThread
DEBUG_ONLY(Thread* Thread::_starting_thread = NULL;)
DEBUG_ONLY(Thread* Thread::_starting_thread = nullptr;)
Thread::Thread() {
DEBUG_ONLY(_run_state = PRE_CALL_RUN;)
// stack and get_thread
set_stack_base(NULL);
set_stack_base(nullptr);
set_stack_size(0);
set_lgrp_id(-1);
DEBUG_ONLY(clear_suspendible_thread();)
// allocated data structures
set_osthread(NULL);
set_osthread(nullptr);
set_resource_area(new (mtThread)ResourceArea());
DEBUG_ONLY(_current_resource_mark = NULL;)
set_handle_area(new (mtThread) HandleArea(NULL));
DEBUG_ONLY(_current_resource_mark = nullptr;)
set_handle_area(new (mtThread) HandleArea(nullptr));
set_metadata_handles(new (mtClass) GrowableArray<Metadata*>(30, mtClass));
set_last_handle_mark(NULL);
DEBUG_ONLY(_missed_ic_stub_refill_verifier = NULL);
set_last_handle_mark(nullptr);
DEBUG_ONLY(_missed_ic_stub_refill_verifier = nullptr);
// Initial value of zero ==> never claimed.
_threads_do_token = 0;
_threads_hazard_ptr = NULL;
_threads_list_ptr = NULL;
_threads_hazard_ptr = nullptr;
_threads_list_ptr = nullptr;
_nested_threads_hazard_ptr_cnt = 0;
_rcu_counter = 0;
@ -102,11 +102,11 @@ Thread::Thread() {
new HandleMark(this);
// plain initialization
debug_only(_owned_locks = NULL;)
debug_only(_owned_locks = nullptr;)
NOT_PRODUCT(_skip_gcalot = false;)
_jvmti_env_iteration_count = 0;
set_allocated_bytes(0);
_current_pending_raw_monitor = NULL;
_current_pending_raw_monitor = nullptr;
// thread-specific hashCode stream generator state - Marsaglia shift-xor form
_hashStateX = os::random();
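
The four _hashState words seeded here feed a Marsaglia shift-xor (xorshift) generator used for identity hash codes. A sketch of that generator family (the 11/19/8 shift triple is the common published variant and is an assumption about the exact constants):

#include <cstdint>

struct XorShift128 {
  uint32_t x, y, z, w;  // seed so that the state is not all zero

  uint32_t next() {
    uint32_t t = x ^ (x << 11);
    x = y; y = z; z = w;
    w = w ^ (w >> 19) ^ (t ^ (t >> 8));
    return w;
  }
};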
@ -134,14 +134,14 @@ Thread::Thread() {
// BarrierSet::on_thread_create() for this thread is therefore deferred
// to BarrierSet::set_barrier_set().
BarrierSet* const barrier_set = BarrierSet::barrier_set();
if (barrier_set != NULL) {
if (barrier_set != nullptr) {
barrier_set->on_thread_create(this);
} else {
// Only the main thread should be created before the barrier set
// and that happens just before Thread::current is set. No other thread
// can attach as the VM is not created yet, so they can't execute this code.
// If the main thread creates other threads before the barrier set that is an error.
assert(Thread::current_or_null() == NULL, "creating thread before barrier set");
assert(Thread::current_or_null() == nullptr, "creating thread before barrier set");
}
MACOS_AARCH64_ONLY(DEBUG_ONLY(_wx_init = false));
@ -155,10 +155,10 @@ void Thread::initialize_tlab() {
void Thread::initialize_thread_current() {
#ifndef USE_LIBRARY_BASED_TLS_ONLY
assert(_thr_current == NULL, "Thread::current already initialized");
assert(_thr_current == nullptr, "Thread::current already initialized");
_thr_current = this;
#endif
assert(ThreadLocalStorage::thread() == NULL, "ThreadLocalStorage::thread already initialized");
assert(ThreadLocalStorage::thread() == nullptr, "ThreadLocalStorage::thread already initialized");
ThreadLocalStorage::set_thread(this);
assert(Thread::current() == ThreadLocalStorage::thread(), "TLS mismatch!");
}
@ -166,9 +166,9 @@ void Thread::initialize_thread_current() {
void Thread::clear_thread_current() {
assert(Thread::current() == ThreadLocalStorage::thread(), "TLS mismatch!");
#ifndef USE_LIBRARY_BASED_TLS_ONLY
_thr_current = NULL;
_thr_current = nullptr;
#endif
ThreadLocalStorage::set_thread(NULL);
ThreadLocalStorage::set_thread(nullptr);
}
void Thread::record_stack_base_and_size() {
@ -199,7 +199,7 @@ void Thread::call_run() {
// At this point, Thread object should be fully initialized and
// Thread::current() should be set.
assert(Thread::current_or_null() != NULL, "current thread is unset");
assert(Thread::current_or_null() != nullptr, "current thread is unset");
assert(Thread::current_or_null() == this, "current thread is wrong");
// Perform common initialization actions
@ -226,7 +226,7 @@ void Thread::call_run() {
// Perform common tear-down actions
assert(Thread::current_or_null() != NULL, "current thread is unset");
assert(Thread::current_or_null() != nullptr, "current thread is unset");
assert(Thread::current_or_null() == this, "current thread is wrong");
// Perform <ChildClass> tear-down actions
@ -239,7 +239,7 @@ void Thread::call_run() {
// asynchronously with respect to its termination - that is what _run_state can
// be used to check.
assert(Thread::current_or_null() == NULL, "current thread still present");
assert(Thread::current_or_null() == nullptr, "current thread still present");
}
Thread::~Thread() {
@ -254,7 +254,7 @@ Thread::~Thread() {
// Notify the barrier set that a thread is being destroyed. Note that a barrier
// set might not be available if we encountered errors during bootstrapping.
BarrierSet* const barrier_set = BarrierSet::barrier_set();
if (barrier_set != NULL) {
if (barrier_set != nullptr) {
barrier_set->on_thread_destroy(this);
}
@ -262,19 +262,19 @@ Thread::~Thread() {
delete resource_area();
// Since the handle marks use the handle area, we have to deallocate the root
// handle mark before deallocating the thread's handle area.
assert(last_handle_mark() != NULL, "check we have an element");
assert(last_handle_mark() != nullptr, "check we have an element");
delete last_handle_mark();
assert(last_handle_mark() == NULL, "check we have reached the end");
assert(last_handle_mark() == nullptr, "check we have reached the end");
ParkEvent::Release(_ParkEvent);
// Set to NULL as a termination indicator for has_terminated().
Atomic::store(&_ParkEvent, (ParkEvent*)NULL);
// Set to null as a termination indicator for has_terminated().
Atomic::store(&_ParkEvent, (ParkEvent*)nullptr);
delete handle_area();
delete metadata_handles();
// osthread() can be NULL, if creation of thread failed.
if (osthread() != NULL) os::free_thread(osthread());
// osthread() can be nullptr if creation of the thread failed.
if (osthread() != nullptr) os::free_thread(osthread());
// Clear Thread::current if thread is deleting itself and it has not
// already been done. This must be done before the memory is deallocated.
@ -315,7 +315,7 @@ bool Thread::is_JavaThread_protected(const JavaThread* target) {
// If the target hasn't been started yet then it is trivially
// "protected". We assume the caller is the thread that will do
// the starting.
if (target->osthread() == NULL || target->osthread()->get_state() <= INITIALIZED) {
if (target->osthread() == nullptr || target->osthread()->get_state() <= INITIALIZED) {
return true;
}
@ -357,7 +357,7 @@ bool Thread::is_JavaThread_protected_by_TLH(const JavaThread* target) {
// Check the ThreadsLists associated with the calling thread (if any)
// to see if one of them protects the target JavaThread:
for (SafeThreadsListPtr* stlp = current_thread->_threads_list_ptr;
stlp != NULL; stlp = stlp->previous()) {
stlp != nullptr; stlp = stlp->previous()) {
if (stlp->list()->includes(target)) {
// The target JavaThread is protected by this ThreadsList:
return true;
@ -417,17 +417,17 @@ public:
Thread* self = Thread::current();
if (self->is_Named_thread()) {
_cur_thr = (NamedThread *)self;
assert(_cur_thr->processed_thread() == NULL, "nesting not supported");
assert(_cur_thr->processed_thread() == nullptr, "nesting not supported");
_cur_thr->set_processed_thread(thread);
} else {
_cur_thr = NULL;
_cur_thr = nullptr;
}
}
~RememberProcessedThread() {
if (_cur_thr) {
assert(_cur_thr->processed_thread() != NULL, "nesting not supported");
_cur_thr->set_processed_thread(NULL);
assert(_cur_thr->processed_thread() != nullptr, "nesting not supported");
_cur_thr->set_processed_thread(nullptr);
}
}
};
@ -441,7 +441,7 @@ void Thread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
void Thread::metadata_handles_do(void f(Metadata*)) {
// Only walk the Handles in Thread.
if (metadata_handles() != NULL) {
if (metadata_handles() != nullptr) {
for (int i = 0; i< metadata_handles()->length(); i++) {
f(metadata_handles()->at(i));
}
@ -450,7 +450,7 @@ void Thread::metadata_handles_do(void f(Metadata*)) {
void Thread::print_on(outputStream* st, bool print_extended_info) const {
// get_priority assumes osthread initialized
if (osthread() != NULL) {
if (osthread() != nullptr) {
int os_prio;
if (os::get_native_priority(this, &os_prio) == OS_OK) {
st->print("os_prio=%d ", os_prio);
@ -491,7 +491,7 @@ void Thread::print_on_error(outputStream* st, char* buf, int buflen) const {
st->print("%s \"%s\"", type_name(), name());
OSThread* os_thr = osthread();
if (os_thr != NULL) {
if (os_thr != nullptr) {
if (os_thr->get_state() != ZOMBIE) {
st->print(" [stack: " PTR_FORMAT "," PTR_FORMAT "]",
p2i(stack_end()), p2i(stack_base()));
@ -515,7 +515,7 @@ void Thread::print_value_on(outputStream* st) const {
#ifdef ASSERT
void Thread::print_owned_locks_on(outputStream* st) const {
Mutex* cur = _owned_locks;
if (cur == NULL) {
if (cur == nullptr) {
st->print(" (no locks) ");
} else {
st->print_cr(" Locks owned:");
@ -537,7 +537,7 @@ bool Thread::is_lock_owned(address adr) const {
}
bool Thread::set_as_starting_thread() {
assert(_starting_thread == NULL, "already initialized: "
assert(_starting_thread == nullptr, "already initialized: "
"_starting_thread=" INTPTR_FORMAT, p2i(_starting_thread));
// NOTE: this must be called inside the main thread.
DEBUG_ONLY(_starting_thread = this;)


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -345,11 +345,11 @@ class Thread: public ThreadShadow {
// and logging.
virtual const char* type_name() const { return "Thread"; }
// Returns the current thread (ASSERTS if NULL)
// Returns the current thread (ASSERTS if nullptr)
static inline Thread* current();
// Returns the current thread, or NULL if not attached
// Returns the current thread, or null if not attached
static inline Thread* current_or_null();
// Returns the current thread, or NULL if not attached, and is
// Returns the current thread, or null if not attached, and is
// safe for use from signal-handlers
static inline Thread* current_or_null_safe();
@ -435,7 +435,7 @@ class Thread: public ThreadShadow {
// GC support
// Apply "f->do_oop" to all root oops in "this".
// Used by JavaThread::oops_do.
// Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames
// Apply "cf->do_code_blob" (if !nullptr) to all code blobs active in frames
virtual void oops_do_no_frames(OopClosure* f, CodeBlobClosure* cf);
virtual void oops_do_frames(OopClosure* f, CodeBlobClosure* cf) {}
void oops_do(OopClosure* f, CodeBlobClosure* cf);
@ -535,7 +535,7 @@ protected:
public:
// Stack overflow support
address stack_base() const { assert(_stack_base != NULL,"Sanity check"); return _stack_base; }
address stack_base() const { assert(_stack_base != nullptr,"Sanity check"); return _stack_base; }
void set_stack_base(address base) { _stack_base = base; }
size_t stack_size() const { return _stack_size; }
void set_stack_size(size_t size) { _stack_size = size; }
@ -569,7 +569,7 @@ protected:
void print_owned_locks_on(outputStream* st) const;
void print_owned_locks() const { print_owned_locks_on(tty); }
Mutex* owned_locks() const { return _owned_locks; }
bool owns_locks() const { return owned_locks() != NULL; }
bool owns_locks() const { return owned_locks() != nullptr; }
// Deadlock detection
ResourceMark* current_resource_mark() { return _current_resource_mark; }
@ -605,9 +605,9 @@ protected:
// and ObjectSynchronizer::read_stable_mark
// Termination indicator used by the signal handler.
// _ParkEvent is just a convenient field we can NULL out after setting the JavaThread termination state
// _ParkEvent is just a convenient field we can null out after setting the JavaThread termination state
// (which can't itself be read from the signal handler if a signal hits during the Thread destructor).
bool has_terminated() { return Atomic::load(&_ParkEvent) == NULL; };
bool has_terminated() { return Atomic::load(&_ParkEvent) == nullptr; };
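The termination indicator is nothing more than an atomically nulled pointer; a minimal sketch, assuming std::atomic in place of HotSpot's Atomic and a dummy ParkEvent type:

  #include <atomic>

  struct ParkEvent {};   // placeholder; the real type does not matter to the idiom

  class Worker {
    std::atomic<ParkEvent*> _park_event{new ParkEvent()};
   public:
    // Async-signal-safe check: a single atomic load, no locks.
    bool has_terminated() const {
      return _park_event.load(std::memory_order_acquire) == nullptr;
    }
    // Called once at teardown; nulling the field doubles as the flag.
    void mark_terminated() {
      delete _park_event.exchange(nullptr, std::memory_order_acq_rel);
    }
  };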
jint _hashStateW; // Marsaglia Shift-XOR thread-local RNG
jint _hashStateX; // thread-specific hashCode generator state
@ -636,7 +636,7 @@ protected:
// Inline implementation of Thread::current()
inline Thread* Thread::current() {
Thread* current = current_or_null();
assert(current != NULL, "Thread::current() called on detached thread");
assert(current != nullptr, "Thread::current() called on detached thread");
return current;
}
@ -647,7 +647,7 @@ inline Thread* Thread::current_or_null() {
if (ThreadLocalStorage::is_initialized()) {
return ThreadLocalStorage::thread();
}
return NULL;
return nullptr;
#endif
}
@ -655,7 +655,7 @@ inline Thread* Thread::current_or_null_safe() {
if (ThreadLocalStorage::is_initialized()) {
return ThreadLocalStorage::thread();
}
return NULL;
return nullptr;
}
#endif // SHARE_RUNTIME_THREAD_HPP


@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -127,7 +127,7 @@ volatile uint ThreadsSMRSupport::_tlh_time_max = 0;
// isn't available everywhere (or is it?).
volatile uint ThreadsSMRSupport::_tlh_times = 0;
ThreadsList* ThreadsSMRSupport::_to_delete_list = NULL;
ThreadsList* ThreadsSMRSupport::_to_delete_list = nullptr;
// # of parallel ThreadsLists on the to-delete list.
// Impl note: Hard to imagine > 64K ThreadsLists needing to be deleted so
@ -209,7 +209,7 @@ class ThreadScanHashtable : public CHeapObj<mtThread> {
bool has_entry(void *pointer) {
int *val_ptr = _ptrs->get(pointer);
return val_ptr != NULL && *val_ptr == 1;
return val_ptr != nullptr && *val_ptr == 1;
}
void add_entry(void *pointer) {
@ -253,16 +253,16 @@ class ScanHazardPtrGatherProtectedThreadsClosure : public ThreadClosure {
virtual void do_thread(Thread *thread) {
assert_locked_or_safepoint(Threads_lock);
if (thread == NULL) return;
if (thread == nullptr) return;
// This code races with ThreadsSMRSupport::acquire_stable_list() which
// is lock-free so we have to handle some special situations.
//
ThreadsList *current_list = NULL;
ThreadsList *current_list = nullptr;
while (true) {
current_list = thread->get_threads_hazard_ptr();
// No hazard ptr so nothing more to do.
if (current_list == NULL) {
if (current_list == nullptr) {
return;
}
@ -277,7 +277,7 @@ class ScanHazardPtrGatherProtectedThreadsClosure : public ThreadClosure {
// thread will retry the attempt to publish a stable hazard ptr.
// If we lose the race, then we retry our attempt to look at the
// hazard ptr.
if (thread->cmpxchg_threads_hazard_ptr(NULL, current_list) == current_list) return;
if (thread->cmpxchg_threads_hazard_ptr(nullptr, current_list) == current_list) return;
}
assert(ThreadsList::is_valid(current_list), "current_list="
@ -308,9 +308,9 @@ class ScanHazardPtrGatherThreadsListClosure : public ThreadClosure {
virtual void do_thread(Thread* thread) {
assert_locked_or_safepoint(Threads_lock);
if (thread == NULL) return;
if (thread == nullptr) return;
ThreadsList *hazard_ptr = thread->get_threads_hazard_ptr();
if (hazard_ptr == NULL) return;
if (hazard_ptr == nullptr) return;
#ifdef ASSERT
if (!Thread::is_hazard_ptr_tagged(hazard_ptr)) {
// We only validate hazard_ptrs that are not tagged since a tagged
@ -344,9 +344,9 @@ class ScanHazardPtrPrintMatchingThreadsClosure : public ThreadClosure {
virtual void do_thread(Thread *thread) {
assert_locked_or_safepoint(Threads_lock);
if (thread == NULL) return;
if (thread == nullptr) return;
ThreadsList *current_list = thread->get_threads_hazard_ptr();
if (current_list == NULL) {
if (current_list == nullptr) {
return;
}
// If the hazard ptr is unverified, then ignore it.
@ -358,7 +358,7 @@ class ScanHazardPtrPrintMatchingThreadsClosure : public ThreadClosure {
// the hazard ptr is protecting all the JavaThreads on that
// ThreadsList, but we only care about matching a specific JavaThread.
JavaThreadIterator jti(current_list);
for (JavaThread *p = jti.first(); p != NULL; p = jti.next()) {
for (JavaThread *p = jti.first(); p != nullptr; p = jti.next()) {
if (p == _thread) {
log_debug(thread, smr)("tid=" UINTX_FORMAT ": ThreadsSMRSupport::smr_delete: thread1=" INTPTR_FORMAT " has a hazard pointer for thread2=" INTPTR_FORMAT, os::current_thread_id(), p2i(thread), p2i(_thread));
break;
@ -376,9 +376,9 @@ class ValidateHazardPtrsClosure : public ThreadClosure {
virtual void do_thread(Thread* thread) {
assert_locked_or_safepoint(Threads_lock);
if (thread == NULL) return;
if (thread == nullptr) return;
ThreadsList *hazard_ptr = thread->get_threads_hazard_ptr();
if (hazard_ptr == NULL) return;
if (hazard_ptr == nullptr) return;
// If the hazard ptr is unverified, then ignore it since it could
// be deleted at any time now.
if (Thread::is_hazard_ptr_tagged(hazard_ptr)) return;
@ -412,12 +412,12 @@ class VerifyHazardPtrThreadClosure : public ThreadClosure {
// Acquire a stable ThreadsList.
//
void SafeThreadsListPtr::acquire_stable_list() {
assert(_thread != NULL, "sanity check");
assert(_thread != nullptr, "sanity check");
_needs_release = true;
_previous = _thread->_threads_list_ptr;
_thread->_threads_list_ptr = this;
if (_thread->get_threads_hazard_ptr() == NULL && _previous == NULL) {
if (_thread->get_threads_hazard_ptr() == nullptr && _previous == nullptr) {
// The typical case is first.
acquire_stable_list_fast_path();
return;
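The fast path relies on the classic hazard-pointer handshake: publish, then revalidate, retrying if a scanner retracted the slot. A standalone sketch under simplified assumptions (one global list, a single hazard slot, std::atomic instead of HotSpot's Atomic):

  #include <atomic>

  struct ThreadsList {};

  std::atomic<ThreadsList*> g_java_thread_list{nullptr};  // the current list
  std::atomic<ThreadsList*> g_hazard_ptr{nullptr};        // this reader's slot

  ThreadsList* acquire_stable_list() {
    for (;;) {
      ThreadsList* list = g_java_thread_list.load();
      g_hazard_ptr.store(list);                 // publish the hazard pointer
      if (g_java_thread_list.load() == list) {  // revalidate: still current?
        return list;  // protected until the slot is cleared by this reader
      }
      g_hazard_ptr.store(nullptr);              // stale; retract and retry
    }
  }

  // Scanner side: cmpxchg a known-stale slot back to nullptr, forcing the
  // racing reader through another iteration of the loop above.
  bool retract_stale(ThreadsList* stale) {
    return g_hazard_ptr.compare_exchange_strong(stale, nullptr);
  }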
@ -430,8 +430,8 @@ void SafeThreadsListPtr::acquire_stable_list() {
// Fast path way to acquire a stable ThreadsList.
//
void SafeThreadsListPtr::acquire_stable_list_fast_path() {
assert(_thread != NULL, "sanity check");
assert(_thread->get_threads_hazard_ptr() == NULL, "sanity check");
assert(_thread != nullptr, "sanity check");
assert(_thread->get_threads_hazard_ptr() == nullptr, "sanity check");
ThreadsList* threads;
@ -483,7 +483,7 @@ void SafeThreadsListPtr::acquire_stable_list_fast_path() {
// reference counting.
//
void SafeThreadsListPtr::acquire_stable_list_nested_path() {
assert(_thread != NULL, "sanity check");
assert(_thread != nullptr, "sanity check");
// The thread already has a hazard ptr (ThreadsList ref) so we need
// to create a nested ThreadsListHandle with the current ThreadsList
@ -504,7 +504,7 @@ void SafeThreadsListPtr::acquire_stable_list_nested_path() {
}
// Clear the hazard ptr so we can go through the fast path below and
// acquire a nested stable ThreadsList.
_thread->set_threads_hazard_ptr(NULL);
_thread->set_threads_hazard_ptr(nullptr);
if (EnableThreadSMRStatistics && _thread->nested_threads_hazard_ptr_cnt() > ThreadsSMRSupport::_nested_thread_list_max) {
ThreadsSMRSupport::_nested_thread_list_max = _thread->nested_threads_hazard_ptr_cnt();
@ -520,15 +520,15 @@ void SafeThreadsListPtr::acquire_stable_list_nested_path() {
// Release a stable ThreadsList.
//
void SafeThreadsListPtr::release_stable_list() {
assert(_thread != NULL, "sanity check");
assert(_thread != nullptr, "sanity check");
assert(_thread->_threads_list_ptr == this, "sanity check");
_thread->_threads_list_ptr = _previous;
// We're releasing either a leaf or nested ThreadsListHandle. In either
// case, we set this thread's hazard ptr back to NULL and we do it before
// case, we set this thread's hazard ptr back to null and we do it before
// _nested_handle_cnt is decremented below.
_thread->set_threads_hazard_ptr(NULL);
if (_previous != NULL) {
_thread->set_threads_hazard_ptr(nullptr);
if (_previous != nullptr) {
// The ThreadsListHandle being released is a nested ThreadsListHandle.
if (EnableThreadSMRStatistics) {
_thread->dec_nested_threads_hazard_ptr_cnt();
@ -559,7 +559,7 @@ void SafeThreadsListPtr::release_stable_list() {
// An exiting thread might be waiting in smr_delete(); we need to
// check with delete_lock to be sure.
ThreadsSMRSupport::release_stable_list_wake_up(_has_ref_count);
assert(_previous == NULL || ThreadsList::is_valid(_previous->_list),
assert(_previous == nullptr || ThreadsList::is_valid(_previous->_list),
"_previous->_list=" INTPTR_FORMAT
" is not valid after calling release_stable_list_wake_up!",
p2i(_previous->_list));
@ -571,7 +571,7 @@ void SafeThreadsListPtr::release_stable_list() {
// the Thread-SMR protocol.
void SafeThreadsListPtr::verify_hazard_ptr_scanned() {
#ifdef ASSERT
assert(_list != NULL, "_list must not be NULL");
assert(_list != nullptr, "_list must not be null");
if (ThreadsSMRSupport::is_bootstrap_list(_list)) {
// We are early in VM bootstrapping so nothing to do here.
@ -620,16 +620,16 @@ void SafeThreadsListPtr::verify_hazard_ptr_scanned() {
// Shared singleton data for all ThreadsList(0) instances.
// Used by _bootstrap_list to avoid static init time heap allocation.
// No real entries, just the final NULL terminator.
// No real entries, just the final nullptr terminator.
static JavaThread* const empty_threads_list_data[1] = {};
// Result has 'entries + 1' elements, with the last being the NULL terminator.
// Result has 'entries + 1' elements, with the last being the null terminator.
static JavaThread* const* make_threads_list_data(int entries) {
if (entries == 0) {
return empty_threads_list_data;
}
JavaThread** data = NEW_C_HEAP_ARRAY(JavaThread*, entries + 1, mtThread);
data[entries] = NULL; // Make sure the final entry is NULL.
data[entries] = nullptr; // Make sure the final entry is null.
return data;
}
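The same "entries + 1 slots, last one is the terminator" layout, sketched standalone assuming plain new[] in place of NEW_C_HEAP_ARRAY and int* elements in place of JavaThread*:

  static int* const* make_list_data(int entries) {
    static int* const empty[1] = {};        // shared terminator-only list
    if (entries == 0) return empty;
    int** data = new int*[entries + 1]();   // value-initialize every slot
    data[entries] = nullptr;                // the final entry is the terminator
    return data;                            // caller fills data[0..entries-1]
  }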
@ -659,7 +659,7 @@ void ThreadsList::Iterator::assert_same_list(Iterator i) const {
ThreadsList::ThreadsList(int entries) :
_magic(THREADS_LIST_MAGIC),
_length(entries),
_next_list(NULL),
_next_list(nullptr),
_threads(make_threads_list_data(entries)),
_nested_handle_cnt(0)
{}
@ -693,7 +693,7 @@ void ThreadsList::dec_nested_handle_cnt() {
}
int ThreadsList::find_index_of_JavaThread(JavaThread *target) {
if (target == NULL) {
if (target == nullptr) {
return -1;
}
for (uint i = 0; i < length(); i++) {
@ -707,7 +707,7 @@ int ThreadsList::find_index_of_JavaThread(JavaThread *target) {
JavaThread* ThreadsList::find_JavaThread_from_java_tid(jlong java_tid) const {
ThreadIdTable::lazy_initialize(this);
JavaThread* thread = ThreadIdTable::find_thread_by_tid(java_tid);
if (thread == NULL) {
if (thread == nullptr) {
// If the thread is not found in the table find it
// with a linear search and add to the table.
for (uint i = 0; i < length(); i++) {
@ -715,7 +715,7 @@ JavaThread* ThreadsList::find_JavaThread_from_java_tid(jlong java_tid) const {
oop tobj = thread->threadObj();
// Ignore the thread if it hasn't run yet, has exited
// or is starting to exit.
if (tobj != NULL && java_tid == java_lang_Thread::thread_id(tobj)) {
if (tobj != nullptr && java_tid == java_lang_Thread::thread_id(tobj)) {
MutexLocker ml(Threads_lock);
// Must be inside the lock to ensure that we don't add a thread to the table
// that has just passed the removal point in Threads::remove().
@ -728,7 +728,7 @@ JavaThread* ThreadsList::find_JavaThread_from_java_tid(jlong java_tid) const {
} else if (!thread->is_exiting()) {
return thread;
}
return NULL;
return nullptr;
}
void ThreadsList::inc_nested_handle_cnt() {
@ -736,7 +736,7 @@ void ThreadsList::inc_nested_handle_cnt() {
}
bool ThreadsList::includes(const JavaThread * const p) const {
if (p == NULL) {
if (p == nullptr) {
return false;
}
for (uint i = 0; i < length(); i++) {
@ -790,7 +790,7 @@ ThreadsListHandle::~ThreadsListHandle() {
// associated ThreadsList. This ThreadsListHandle "protects" the
// returned JavaThread *.
//
// If thread_oop_p is not NULL, then the caller wants to use the oop
// If thread_oop_p is not null, then the caller wants to use the oop
// after this call so the oop is returned. On success, *jt_pp is set
// to the converted JavaThread * and true is returned. On error,
// returns false.
@ -798,25 +798,25 @@ ThreadsListHandle::~ThreadsListHandle() {
bool ThreadsListHandle::cv_internal_thread_to_JavaThread(jobject jthread,
JavaThread ** jt_pp,
oop * thread_oop_p) {
assert(this->list() != NULL, "must have a ThreadsList");
assert(jt_pp != NULL, "must have a return JavaThread pointer");
assert(this->list() != nullptr, "must have a ThreadsList");
assert(jt_pp != nullptr, "must have a return JavaThread pointer");
// thread_oop_p is optional so no assert()
// The JVM_* interfaces don't allow a NULL thread parameter; JVM/TI
// allows a NULL thread parameter to signify "current thread" which
// The JVM_* interfaces don't allow a null thread parameter; JVM/TI
// allows a null thread parameter to signify "current thread" which
// allows us to avoid calling cv_external_thread_to_JavaThread().
// The JVM_* interfaces have no such leeway.
oop thread_oop = JNIHandles::resolve_non_null(jthread);
// Looks like an oop at this point.
if (thread_oop_p != NULL) {
if (thread_oop_p != nullptr) {
// Return the oop to the caller; the caller may still want
// the oop even if this function returns false.
*thread_oop_p = thread_oop;
}
JavaThread *java_thread = java_lang_Thread::thread(thread_oop);
if (java_thread == NULL) {
if (java_thread == nullptr) {
// The java.lang.Thread does not contain a JavaThread * so it has
// not yet run or it has died.
return false;
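The conversion contract (required out-pointer asserted, optional out-pointer filled even on failure, boolean success as the result) reduces to a small sketch; Obj, Native, and lookup_native below are hypothetical stand-ins, not HotSpot types:

  #include <cassert>

  struct Obj {};
  struct Native {};

  // Hypothetical lookup; stands in for java_lang_Thread::thread(oop).
  static Native* lookup_native(Obj*) { return nullptr; }

  static bool convert(Obj* obj, Native** native_out, Obj** obj_out) {
    assert(native_out != nullptr && "must have a return pointer");
    // obj_out is optional, so no assert.
    if (obj_out != nullptr) {
      *obj_out = obj;               // the caller may still want this on failure
    }
    Native* n = lookup_native(obj);
    if (n == nullptr) {
      return false;                 // target has not run yet or has died
    }
    *native_out = n;
    return true;
  }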
@ -903,14 +903,14 @@ void ThreadsSMRSupport::free_list(ThreadsList* threads) {
// Walk through the linked list of pending freeable ThreadsLists
// and free the ones that are not referenced from hazard ptrs.
ThreadsList* current = _to_delete_list;
ThreadsList* prev = NULL;
ThreadsList* next = NULL;
ThreadsList* prev = nullptr;
ThreadsList* next = nullptr;
bool threads_is_freed = false;
while (current != NULL) {
while (current != nullptr) {
next = current->next_list();
if (!scan_table->has_entry((void*)current) && current->_nested_handle_cnt == 0) {
// This ThreadsList is not referenced by a hazard ptr.
if (prev != NULL) {
if (prev != nullptr) {
prev->set_next_list(next);
}
if (_to_delete_list == current) {
@ -960,7 +960,7 @@ bool ThreadsSMRSupport::is_a_protected_JavaThread(JavaThread *thread) {
// and include the ones that are currently in use by a nested
// ThreadsListHandle in the search set.
ThreadsList* current = _to_delete_list;
while (current != NULL) {
while (current != nullptr) {
if (current->_nested_handle_cnt != 0) {
// 'current' is in use by a nested ThreadsListHandle so the hazard
// ptr is protecting all the JavaThreads on that ThreadsList.
@ -1075,7 +1075,7 @@ void ThreadsSMRSupport::wait_until_not_protected(JavaThread *thread) {
ScanHazardPtrPrintMatchingThreadsClosure scan_cl(thread);
threads_do(&scan_cl);
ThreadsList* current = _to_delete_list;
while (current != NULL) {
while (current != nullptr) {
if (current->_nested_handle_cnt != 0 && current->includes(thread)) {
log_debug(thread, smr)("tid=" UINTX_FORMAT ": ThreadsSMRSupport::wait_until_not_protected: found nested hazard pointer to thread=" INTPTR_FORMAT, os::current_thread_id(), p2i(thread));
}
@ -1143,10 +1143,10 @@ void ThreadsSMRSupport::log_statistics() {
// Print SMR info for a thread to a given output stream.
void ThreadsSMRSupport::print_info_on(const Thread* thread, outputStream* st) {
ThreadsList* hazard_ptr = thread->get_threads_hazard_ptr();
if (hazard_ptr != NULL) {
if (hazard_ptr != nullptr) {
st->print(" _threads_hazard_ptr=" INTPTR_FORMAT, p2i(hazard_ptr));
}
if (EnableThreadSMRStatistics && thread->_threads_list_ptr != NULL) {
if (EnableThreadSMRStatistics && thread->_threads_list_ptr != nullptr) {
// The count is only interesting if we have a _threads_list_ptr.
st->print(", _nested_threads_hazard_ptr_cnt=%u", thread->_nested_threads_hazard_ptr_cnt);
}
@ -1154,11 +1154,11 @@ void ThreadsSMRSupport::print_info_on(const Thread* thread, outputStream* st) {
// It is only safe to walk the list if we're at a safepoint or the
// calling thread is walking its own list.
SafeThreadsListPtr* current = thread->_threads_list_ptr;
if (current != NULL) {
if (current != nullptr) {
// Skip the top nesting level as it is always printed above.
current = current->previous();
}
while (current != NULL) {
while (current != nullptr) {
current->print_on(st);
current = current->previous();
}
@ -1175,7 +1175,7 @@ void ThreadsSMRSupport::print_info_on(outputStream* st) {
needs_unlock = true;
}
ThreadsList* saved_threads_list = NULL;
ThreadsList* saved_threads_list = nullptr;
{
ThreadsListHandle tlh; // make the current ThreadsList safe for reporting
saved_threads_list = tlh.list(); // save for later comparison
@ -1187,7 +1187,7 @@ void ThreadsSMRSupport::print_info_on(outputStream* st) {
st->print_cr("}");
}
if (_to_delete_list != NULL) {
if (_to_delete_list != nullptr) {
if (Threads_lock->owned_by_self()) {
// Only safe if we have the Threads_lock.
st->print_cr("_to_delete_list=" INTPTR_FORMAT ", length=%u, elements={",
@ -1195,7 +1195,7 @@ void ThreadsSMRSupport::print_info_on(outputStream* st) {
print_info_elements_on(st, _to_delete_list);
st->print_cr("}");
for (ThreadsList *t_list = _to_delete_list->next_list();
t_list != NULL; t_list = t_list->next_list()) {
t_list != nullptr; t_list = t_list->next_list()) {
st->print("next-> " INTPTR_FORMAT ", length=%u, elements={",
p2i(t_list), t_list->length());
print_info_elements_on(st, t_list);
@ -1254,7 +1254,7 @@ void ThreadsSMRSupport::print_info_on(outputStream* st) {
void ThreadsSMRSupport::print_info_elements_on(outputStream* st, ThreadsList* t_list) {
uint cnt = 0;
JavaThreadIterator jti(t_list);
for (JavaThread *jt = jti.first(); jt != NULL; jt = jti.next()) {
for (JavaThread *jt = jti.first(); jt != nullptr; jt = jti.next()) {
st->print(INTPTR_FORMAT, p2i(jt));
if (cnt < t_list->length() - 1) {
// Separate with comma or comma-space except for the last one.


@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -52,8 +52,8 @@ class ThreadsList;
// jobject jthread = ...;
// :
// ThreadsListHandle tlh;
// JavaThread* jt = NULL;
// bool is_alive = tlh.cv_internal_thread_to_JavaThread(jthread, &jt, NULL);
// JavaThread* jt = nullptr;
// bool is_alive = tlh.cv_internal_thread_to_JavaThread(jthread, &jt, nullptr);
// if (is_alive) {
// : // do stuff with 'jt'...
// }
@ -61,9 +61,9 @@ class ThreadsList;
// JVM/TI jthread example:
// jthread thread = ...;
// :
// JavaThread* jt = NULL;
// JavaThread* jt = nullptr;
// ThreadsListHandle tlh;
// jvmtiError err = JvmtiExport::cv_external_thread_to_JavaThread(tlh.list(), thread, &jt, NULL);
// jvmtiError err = JvmtiExport::cv_external_thread_to_JavaThread(tlh.list(), thread, &jt, nullptr);
// if (err != JVMTI_ERROR_NONE) {
// return err;
// }
@ -72,7 +72,7 @@ class ThreadsList;
// JVM/TI oop example (this one should be very rare):
// oop thread_obj = ...;
// :
// JavaThread *jt = NULL;
// JavaThread *jt = nullptr;
// ThreadsListHandle tlh;
// jvmtiError err = JvmtiExport::cv_oop_to_JavaThread(tlh.list(), thread_obj, &jt);
// if (err != JVMTI_ERROR_NONE) {
@ -260,9 +260,9 @@ class SafeThreadsListPtr {
public:
// Constructor that attaches the list onto a thread.
SafeThreadsListPtr(Thread *thread, bool acquire) :
_previous(NULL),
_previous(nullptr),
_thread(thread),
_list(NULL),
_list(nullptr),
_has_ref_count(false),
_needs_release(false)
{
@ -337,7 +337,7 @@ public:
// specified ThreadsList using the following style:
//
// JavaThreadIterator jti(t_list);
// for (JavaThread *jt = jti.first(); jt != NULL; jt = jti.next()) {
// for (JavaThread *jt = jti.first(); jt != nullptr; jt = jti.next()) {
// ...
// }
//
@ -347,7 +347,7 @@ class JavaThreadIterator : public StackObj {
public:
JavaThreadIterator(ThreadsList *list) : _list(list), _index(0) {
assert(list != NULL, "ThreadsList must not be NULL.");
assert(list != nullptr, "ThreadsList must not be null.");
}
JavaThread *first() {
@ -361,7 +361,7 @@ public:
JavaThread *next() {
if (++_index >= length()) {
return NULL;
return nullptr;
}
return _list->thread_at(_index);
}
@ -392,7 +392,7 @@ public:
JavaThread *next() {
if (_index >= length()) {
return NULL;
return nullptr;
}
return _tlh.list()->thread_at(_index++);
}
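The first()/next() protocol above works for any index-based snapshot; a standalone sketch assuming a std::vector snapshot instead of a ThreadsList:

  #include <cstddef>
  #include <vector>

  class IntIterator {
    const std::vector<int>* _list;
    std::size_t _index;
   public:
    explicit IntIterator(const std::vector<int>* list) : _list(list), _index(0) {}
    const int* first() {
      _index = 0;
      return _list->empty() ? nullptr : &(*_list)[0];
    }
    const int* next() {
      if (++_index >= _list->size()) return nullptr;
      return &(*_list)[_index];
    }
  };

  // for (const int* p = it.first(); p != nullptr; p = it.next()) { ... }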


@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -140,7 +140,7 @@ inline ThreadsList* ThreadsSMRSupport::get_java_thread_list() {
}
inline bool ThreadsSMRSupport::is_a_protected_JavaThread_with_lock(JavaThread *thread) {
MutexLocker ml(Threads_lock->owned_by_self() ? NULL : Threads_lock);
MutexLocker ml(Threads_lock->owned_by_self() ? nullptr : Threads_lock);
return is_a_protected_JavaThread(thread);
}
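The owned_by_self() ? nullptr : Threads_lock trick makes the locker a no-op when the caller already holds the lock. The same shape in standalone C++, assuming std::mutex and a caller-supplied flag:

  #include <mutex>

  class MaybeMutexLocker {
    std::mutex* _m;   // nullptr means "caller already holds the lock; do nothing"
   public:
    explicit MaybeMutexLocker(std::mutex* m) : _m(m) { if (_m != nullptr) _m->lock(); }
    ~MaybeMutexLocker() { if (_m != nullptr) _m->unlock(); }
  };

  // Usage mirrors the inline function above:
  //   MaybeMutexLocker ml(already_owned ? nullptr : &g_threads_lock);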


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -176,18 +176,18 @@ static void create_initial_thread(Handle thread_group, JavaThread* thread,
static const char* get_java_version_info(InstanceKlass* ik,
Symbol* field_name) {
fieldDescriptor fd;
bool found = ik != NULL &&
bool found = ik != nullptr &&
ik->find_local_field(field_name,
vmSymbols::string_signature(), &fd);
if (found) {
oop name_oop = ik->java_mirror()->obj_field(fd.offset());
if (name_oop == NULL) {
return NULL;
if (name_oop == nullptr) {
return nullptr;
}
const char* name = java_lang_String::as_utf8_string(name_oop);
return name;
} else {
return NULL;
return nullptr;
}
}
@ -217,7 +217,7 @@ bool Threads::_vm_complete = false;
// The Java library method itself may be changed independently from the VM.
static void call_postVMInitHook(TRAPS) {
Klass* klass = SystemDictionary::resolve_or_null(vmSymbols::jdk_internal_vm_PostVMInitHook(), THREAD);
if (klass != NULL) {
if (klass != nullptr) {
JavaValue result(T_VOID);
JavaCalls::call_static(&result, klass, vmSymbols::run_method_name(),
vmSymbols::void_method_signature(),
@ -519,7 +519,7 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
JavaThread::_jvmci_old_thread_counters = NEW_C_HEAP_ARRAY(jlong, JVMCICounterSize, mtJVMCI);
memset(JavaThread::_jvmci_old_thread_counters, 0, sizeof(jlong) * JVMCICounterSize);
} else {
JavaThread::_jvmci_old_thread_counters = NULL;
JavaThread::_jvmci_old_thread_counters = nullptr;
}
#endif // INCLUDE_JVMCI
@ -817,8 +817,8 @@ extern "C" {
static OnLoadEntry_t lookup_on_load(AgentLibrary* agent,
const char *on_load_symbols[],
size_t num_symbol_entries) {
OnLoadEntry_t on_load_entry = NULL;
void *library = NULL;
OnLoadEntry_t on_load_entry = nullptr;
void *library = nullptr;
if (!agent->valid()) {
char buffer[JVM_MAXPATHLEN];
@ -831,13 +831,13 @@ static OnLoadEntry_t lookup_on_load(AgentLibrary* agent,
library = agent->os_lib();
} else if (agent->is_absolute_path()) {
library = os::dll_load(name, ebuf, sizeof ebuf);
if (library == NULL) {
if (library == nullptr) {
const char *sub_msg = " in absolute path, with error: ";
size_t len = strlen(msg) + strlen(name) + strlen(sub_msg) + strlen(ebuf) + 1;
char *buf = NEW_C_HEAP_ARRAY(char, len, mtThread);
jio_snprintf(buf, len, "%s%s%s%s", msg, name, sub_msg, ebuf);
// If we can't find the agent, exit.
vm_exit_during_initialization(buf, NULL);
vm_exit_during_initialization(buf, nullptr);
FREE_C_HEAP_ARRAY(char, buf);
}
} else {
@ -846,11 +846,11 @@ static OnLoadEntry_t lookup_on_load(AgentLibrary* agent,
name)) {
library = os::dll_load(buffer, ebuf, sizeof ebuf);
}
if (library == NULL) { // Try the library path directory.
if (library == nullptr) { // Try the library path directory.
if (os::dll_build_name(buffer, sizeof(buffer), name)) {
library = os::dll_load(buffer, ebuf, sizeof ebuf);
}
if (library == NULL) {
if (library == nullptr) {
const char *sub_msg = " on the library path, with error: ";
const char *sub_msg2 = "\nModule java.instrument may be missing from runtime image.";
@ -863,7 +863,7 @@ static OnLoadEntry_t lookup_on_load(AgentLibrary* agent,
jio_snprintf(buf, len, "%s%s%s%s%s", msg, name, sub_msg, ebuf, sub_msg2);
}
// If we can't find the agent, exit.
vm_exit_during_initialization(buf, NULL);
vm_exit_during_initialization(buf, nullptr);
FREE_C_HEAP_ARRAY(char, buf);
}
}
@ -901,15 +901,15 @@ void Threads::convert_vm_init_libraries_to_agents() {
AgentLibrary* agent;
AgentLibrary* next;
for (agent = Arguments::libraries(); agent != NULL; agent = next) {
for (agent = Arguments::libraries(); agent != nullptr; agent = next) {
next = agent->next(); // cache the next agent now as this agent may get moved off this list
OnLoadEntry_t on_load_entry = lookup_jvm_on_load(agent);
// If there is an JVM_OnLoad function it will get called later,
// otherwise see if there is an Agent_OnLoad
if (on_load_entry == NULL) {
if (on_load_entry == nullptr) {
on_load_entry = lookup_agent_on_load(agent);
if (on_load_entry != NULL) {
if (on_load_entry != nullptr) {
// switch it to the agent list -- so that Agent_OnLoad will be called,
// JVM_OnLoad won't be attempted and Agent_OnUnload will
Arguments::convert_library_to_agent(agent);
@ -929,7 +929,7 @@ void Threads::create_vm_init_agents() {
JvmtiExport::enter_onload_phase();
for (agent = Arguments::agents(); agent != NULL; agent = agent->next()) {
for (agent = Arguments::agents(); agent != nullptr; agent = agent->next()) {
// CDS dumping does not support native JVMTI agent.
// CDS dumping supports Java agent if the AllowArchivingWithJavaAgent diagnostic option is specified.
if (Arguments::is_dumping_archive()) {
@ -943,9 +943,9 @@ void Threads::create_vm_init_agents() {
OnLoadEntry_t on_load_entry = lookup_agent_on_load(agent);
if (on_load_entry != NULL) {
if (on_load_entry != nullptr) {
// Invoke the Agent_OnLoad function
jint err = (*on_load_entry)(&main_vm, agent->options(), NULL);
jint err = (*on_load_entry)(&main_vm, agent->options(), nullptr);
if (err != JNI_OK) {
vm_exit_during_initialization("agent library failed to init", agent->name());
}
@ -966,7 +966,7 @@ void Threads::shutdown_vm_agents() {
const char *on_unload_symbols[] = AGENT_ONUNLOAD_SYMBOLS;
size_t num_symbol_entries = ARRAY_SIZE(on_unload_symbols);
extern struct JavaVM_ main_vm;
for (AgentLibrary* agent = Arguments::agents(); agent != NULL; agent = agent->next()) {
for (AgentLibrary* agent = Arguments::agents(); agent != nullptr; agent = agent->next()) {
// Find the Agent_OnUnload function.
Agent_OnUnload_t unload_entry = CAST_TO_FN_PTR(Agent_OnUnload_t,
@ -976,7 +976,7 @@ void Threads::shutdown_vm_agents() {
num_symbol_entries));
// Invoke the Agent_OnUnload function
if (unload_entry != NULL) {
if (unload_entry != nullptr) {
JavaThread* thread = JavaThread::current();
ThreadToNativeFromVM ttn(thread);
HandleMark hm(thread);
@ -991,15 +991,15 @@ void Threads::create_vm_init_libraries() {
extern struct JavaVM_ main_vm;
AgentLibrary* agent;
for (agent = Arguments::libraries(); agent != NULL; agent = agent->next()) {
for (agent = Arguments::libraries(); agent != nullptr; agent = agent->next()) {
OnLoadEntry_t on_load_entry = lookup_jvm_on_load(agent);
if (on_load_entry != NULL) {
if (on_load_entry != nullptr) {
// Invoke the JVM_OnLoad function
JavaThread* thread = JavaThread::current();
ThreadToNativeFromVM ttn(thread);
HandleMark hm(thread);
jint err = (*on_load_entry)(&main_vm, agent->options(), NULL);
jint err = (*on_load_entry)(&main_vm, agent->options(), nullptr);
if (err != JNI_OK) {
vm_exit_during_initialization("-Xrun library failed to init", agent->name());
}
@ -1373,8 +1373,8 @@ GrowableArray<JavaThread*>* Threads::get_pending_threads(ThreadsList * t_list,
JavaThread *Threads::owning_thread_from_monitor_owner(ThreadsList * t_list,
address owner) {
// NULL owner means not locked so we can skip the search
if (owner == NULL) return NULL;
// null owner means not locked so we can skip the search
if (owner == nullptr) return nullptr;
for (JavaThread* p : *t_list) {
// first, see if owner is the address of a Java thread
@ -1384,13 +1384,13 @@ JavaThread *Threads::owning_thread_from_monitor_owner(ThreadsList * t_list,
// Cannot assert on lack of success here since this function may be
// used by code that is trying to report useful problem information
// like deadlock detection.
if (UseHeavyMonitors) return NULL;
if (UseHeavyMonitors) return nullptr;
// If we didn't find a matching Java thread and we didn't force use of
// heavyweight monitors, then the owner is the stack address of the
// Lock Word in the owning Java thread's stack.
//
JavaThread* the_owner = NULL;
JavaThread* the_owner = nullptr;
for (JavaThread* q : *t_list) {
if (q->is_lock_owned(owner)) {
the_owner = q;
@ -1416,7 +1416,7 @@ public:
_st(st) {}
virtual void do_thread(Thread* thread) {
if (thread != NULL) {
if (thread != nullptr) {
thread->print_on(_st);
_st->cr();
}
@ -1479,7 +1479,7 @@ void Threads::print_on(outputStream* st, bool print_stacks,
void Threads::print_on_error(Thread* this_thread, outputStream* st, Thread* current, char* buf,
int buflen, bool* found_current) {
if (this_thread != NULL) {
if (this_thread != nullptr) {
bool is_current = (current == this_thread);
*found_current = *found_current || is_current;
st->print("%s", is_current ? "=>" : " ");
@ -1528,7 +1528,7 @@ void Threads::print_on_error(outputStream* st, Thread* current, char* buf,
print_on_error(WatcherThread::watcher_thread(), st, current, buf, buflen, &found_current);
print_on_error(AsyncLogWriter::instance(), st, current, buf, buflen, &found_current);
if (Universe::heap() != NULL) {
if (Universe::heap() != nullptr) {
PrintOnErrorClosure print_closure(st, current, buf, buflen, &found_current);
Universe::heap()->gc_threads_do(&print_closure);
}
@ -1555,15 +1555,15 @@ void Threads::print_threads_compiling(outputStream* st, char* buf, int buflen, b
if (thread->is_Compiler_thread()) {
CompilerThread* ct = (CompilerThread*) thread;
// Keep task in local variable for NULL check.
// ct->_task might be set to NULL by concurring compiler thread
// Keep the task in a local variable for the null check.
// ct->_task might be set to null by a concurrent compiler thread
// because it completed the compilation. The task is never freed,
// though, just returned to a free list.
CompileTask* task = ct->task();
if (task != NULL) {
if (task != nullptr) {
thread->print_name_on_error(st, buf, buflen);
st->print(" ");
task->print(st, NULL, short_form, true);
task->print(st, nullptr, short_form, true);
}
}
}
@ -1574,5 +1574,5 @@ void Threads::verify() {
p->verify();
}
VMThread* thread = VMThread::vm_thread();
if (thread != NULL) thread->verify();
if (thread != nullptr) thread->verify();
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,10 +31,10 @@ TraceTime::TraceTime(const char* title,
_active = doit;
_verbose = true;
_title = title;
_print = NULL;
_print = nullptr;
if (_active) {
_accum = NULL;
_accum = nullptr;
_t.start();
}
}
@ -46,7 +46,7 @@ TraceTime::TraceTime(const char* title,
_active = doit;
_verbose = verbose;
_title = title;
_print = NULL;
_print = nullptr;
if (_active) {
_accum = accumulator;
@ -56,13 +56,13 @@ TraceTime::TraceTime(const char* title,
TraceTime::TraceTime(const char* title,
TraceTimerLogPrintFunc ttlpf) {
_active = ttlpf!= NULL;
_active = ttlpf!= nullptr;
_verbose = true;
_title = title;
_print = ttlpf;
if (_active) {
_accum = NULL;
_accum = nullptr;
_t.start();
}
}
@ -72,7 +72,7 @@ TraceTime::~TraceTime() {
return;
}
_t.stop();
if (_accum != NULL) {
if (_accum != nullptr) {
_accum->add(_t);
}
if (!_verbose) {

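TraceTime is a scoped timer: start in the constructor, stop and report in the destructor. A minimal sketch of the pattern, assuming std::chrono and printf in place of HotSpot's elapsedTimer and tty:

  #include <chrono>
  #include <cstdio>

  class ScopedTrace {
    const char* _title;
    std::chrono::steady_clock::time_point _start;
    bool _active;
   public:
    explicit ScopedTrace(const char* title, bool doit = true)
        : _title(title), _start(std::chrono::steady_clock::now()), _active(doit) {}
    ~ScopedTrace() {
      if (!_active) return;
      auto us = std::chrono::duration_cast<std::chrono::microseconds>(
          std::chrono::steady_clock::now() - _start).count();
      std::printf("[%s, %lld us]\n", _title, static_cast<long long>(us));
    }
  };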

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -42,7 +42,7 @@ typedef void (*TraceTimerLogPrintFunc)(const char*, ...);
// We need to explicitly take the address of LogImpl<>::write<> and static_cast
// because MSVC does not comply with two-phase lookup for templates.
#define TRACETIME_LOG(TT_LEVEL, ...) \
log_is_enabled(TT_LEVEL, __VA_ARGS__) ? static_cast<TraceTimerLogPrintFunc>(&LogImpl<LOG_TAGS(__VA_ARGS__)>::write<LogLevel::TT_LEVEL>) : (TraceTimerLogPrintFunc)NULL
log_is_enabled(TT_LEVEL, __VA_ARGS__) ? static_cast<TraceTimerLogPrintFunc>(&LogImpl<LOG_TAGS(__VA_ARGS__)>::write<LogLevel::TT_LEVEL>) : (TraceTimerLogPrintFunc)nullptr
class TraceTime: public StackObj {
private:

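The macro yields either a concrete function pointer or a typed nullptr; the same selection without macros, using a hypothetical variadic log_write in place of LogImpl<>::write<>:

  #include <cstdarg>
  #include <cstdio>

  typedef void (*PrintFunc)(const char*, ...);

  static void log_write(const char* fmt, ...) {
    va_list ap;
    va_start(ap, fmt);
    std::vprintf(fmt, ap);
    va_end(ap);
  }

  // Mirrors TRACETIME_LOG: a usable function pointer when enabled, else nullptr.
  static PrintFunc pick_printer(bool enabled) {
    return enabled ? &log_write : (PrintFunc)nullptr;
  }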

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -55,7 +55,7 @@ void UnhandledOops::dump_oops(UnhandledOops *list) {
// For debugging unhandled oop detector _in the debugger_
// You don't want to turn it on in compiled code here.
static Thread* unhandled_oop_print = NULL;
static Thread* unhandled_oop_print = nullptr;
void UnhandledOops::register_unhandled_oop(oop* op) {
if (!_thread->is_in_live_stack((address)op)) {


@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -55,7 +55,7 @@ class UnhandledOopEntry : public CHeapObj<mtThread> {
bool _ok_for_gc;
public:
oop* oop_ptr() { return _oop_ptr; }
UnhandledOopEntry() : _oop_ptr(NULL), _ok_for_gc(false) {}
UnhandledOopEntry() : _oop_ptr(nullptr), _ok_for_gc(false) {}
UnhandledOopEntry(oop* op) :
_oop_ptr(op), _ok_for_gc(false) {}
};


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -58,7 +58,7 @@
vframe::vframe(const frame* fr, const RegisterMap* reg_map, JavaThread* thread)
: _reg_map(reg_map), _thread(thread),
_chunk(Thread::current(), reg_map->stack_chunk()()) {
assert(fr != NULL, "must have frame");
assert(fr != nullptr, "must have frame");
_fr = *fr;
}
@ -70,7 +70,7 @@ vframe* vframe::new_vframe(const frame* f, const RegisterMap* reg_map, JavaThrea
// Compiled frame
CodeBlob* cb = f->cb();
if (cb != NULL) {
if (cb != nullptr) {
if (cb->is_compiled()) {
CompiledMethod* nm = (CompiledMethod*)cb;
return new compiledVFrame(f, reg_map, thread, nm);
@ -96,10 +96,10 @@ vframe* vframe::new_vframe(const frame* f, const RegisterMap* reg_map, JavaThrea
vframe* vframe::sender() const {
RegisterMap temp_map = *register_map();
assert(is_top(), "just checking");
if (_fr.is_empty()) return NULL;
if (_fr.is_entry_frame() && _fr.is_first_frame()) return NULL;
if (_fr.is_empty()) return nullptr;
if (_fr.is_entry_frame() && _fr.is_first_frame()) return nullptr;
frame s = _fr.real_sender(&temp_map);
if (s.is_first_frame()) return NULL;
if (s.is_first_frame()) return nullptr;
return vframe::new_vframe(&s, &temp_map, thread());
}
@ -109,13 +109,13 @@ bool vframe::is_vthread_entry() const {
javaVFrame* vframe::java_sender() const {
vframe* f = sender();
while (f != NULL) {
while (f != nullptr) {
if (f->is_vthread_entry()) break;
if (f->is_java_frame() && !javaVFrame::cast(f)->method()->is_continuation_enter_intrinsic())
return javaVFrame::cast(f);
f = f->sender();
}
return NULL;
return nullptr;
}
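The sender walk is the usual "skip frames until the predicate matches" loop; reduced to a sketch assuming a singly linked Frame with an is_java flag:

  struct Frame {
    Frame* sender;
    bool is_java;
  };

  // Returns the nearest Java frame above 'f', or nullptr if none remains.
  static Frame* java_sender(const Frame* f) {
    for (Frame* s = f->sender; s != nullptr; s = s->sender) {
      if (s->is_java) return s;
    }
    return nullptr;
  }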
// ------------- javaVFrame --------------
@ -133,18 +133,18 @@ GrowableArray<MonitorInfo*>* javaVFrame::locked_monitors() {
// at a safepoint or the calling thread is operating on itself so
// it cannot exit the ObjectMonitor so it remains busy.
ObjectMonitor *waiting_monitor = thread()->current_waiting_monitor();
ObjectMonitor *pending_monitor = NULL;
if (waiting_monitor == NULL) {
ObjectMonitor *pending_monitor = nullptr;
if (waiting_monitor == nullptr) {
pending_monitor = thread()->current_pending_monitor();
}
oop pending_obj = (pending_monitor != NULL ? pending_monitor->object() : (oop) NULL);
oop waiting_obj = (waiting_monitor != NULL ? waiting_monitor->object() : (oop) NULL);
oop pending_obj = (pending_monitor != nullptr ? pending_monitor->object() : (oop) nullptr);
oop waiting_obj = (waiting_monitor != nullptr ? waiting_monitor->object() : (oop) nullptr);
for (int index = (mons->length()-1); index >= 0; index--) {
MonitorInfo* monitor = mons->at(index);
if (monitor->eliminated() && is_compiled_frame()) continue; // skip eliminated monitor
oop obj = monitor->owner();
if (obj == NULL) continue; // skip unowned monitor
if (obj == nullptr) continue; // skip unowned monitor
//
// Skip the monitor that the thread is blocked to enter or waiting on
//
@ -200,7 +200,7 @@ void javaVFrame::print_lock_info_on(outputStream* st, int frame_count) {
} else {
st->print_cr("\t- %s <no object reference available>", wait_state);
}
} else if (thread()->current_park_blocker() != NULL) {
} else if (thread()->current_park_blocker() != nullptr) {
oop obj = thread()->current_park_blocker();
Klass* k = obj->klass();
st->print_cr("\t- %s <" INTPTR_FORMAT "> (a %s)", "parking to wait for ", p2i(obj), k->external_name());
@ -208,7 +208,7 @@ void javaVFrame::print_lock_info_on(outputStream* st, int frame_count) {
else if (thread()->osthread()->get_state() == CONDVAR_WAIT) {
// We are waiting on the native class initialization monitor.
InstanceKlass* k = thread()->class_to_be_initialized();
if (k != NULL) {
if (k != nullptr) {
st->print_cr("\t- waiting on the Class initialization monitor for %s", k->external_name());
}
}
@ -227,13 +227,13 @@ void javaVFrame::print_lock_info_on(outputStream* st, int frame_count) {
st->print_cr("\t- eliminated <owner is scalar replaced> (a %s)", k->external_name());
} else {
Handle obj(current, monitor->owner());
if (obj() != NULL) {
if (obj() != nullptr) {
print_locked_object_class_name(st, obj, "eliminated");
}
}
continue;
}
if (monitor->owner() != NULL) {
if (monitor->owner() != nullptr) {
// the monitor is associated with an object, i.e., it is locked
const char *lock_state = "locked"; // assume we have the monitor locked
@ -266,18 +266,18 @@ void javaVFrame::print_lock_info_on(outputStream* st, int frame_count) {
// ------------- interpretedVFrame --------------
u_char* interpretedVFrame::bcp() const {
return stack_chunk() == NULL ? fr().interpreter_frame_bcp() : stack_chunk()->interpreter_frame_bcp(fr());
return stack_chunk() == nullptr ? fr().interpreter_frame_bcp() : stack_chunk()->interpreter_frame_bcp(fr());
}
intptr_t* interpretedVFrame::locals_addr_at(int offset) const {
assert(stack_chunk() == NULL, "Not supported for heap frames"); // unsupported for now because seems to be unused
assert(stack_chunk() == nullptr, "Not supported for heap frames"); // unsupported for now because seems to be unused
assert(fr().is_interpreted_frame(), "frame should be an interpreted frame");
return fr().interpreter_frame_local_at(offset);
}
GrowableArray<MonitorInfo*>* interpretedVFrame::monitors() const {
GrowableArray<MonitorInfo*>* result = new GrowableArray<MonitorInfo*>(5);
if (stack_chunk() == NULL) { // no monitors in continuations
if (stack_chunk() == nullptr) { // no monitors in continuations
for (BasicObjectLock* current = (fr().previous_monitor_in_interpreter_frame(fr().interpreter_frame_monitor_begin()));
current >= fr().interpreter_frame_monitor_end();
current = fr().previous_monitor_in_interpreter_frame(current)) {
@ -292,7 +292,7 @@ int interpretedVFrame::bci() const {
}
Method* interpretedVFrame::method() const {
return stack_chunk() == NULL ? fr().interpreter_frame_method() : stack_chunk()->interpreter_frame_method(fr());
return stack_chunk() == nullptr ? fr().interpreter_frame_method() : stack_chunk()->interpreter_frame_method(fr());
}
static StackValue* create_stack_value_from_oop_map(const InterpreterOopMap& oop_mask,
@ -307,11 +307,11 @@ static StackValue* create_stack_value_from_oop_map(const InterpreterOopMap& oop_
return StackValue::create_stack_value_from_oop_location(chunk, (void*)addr);
}
// value (integer) "v"
return new StackValue(addr != NULL ? *addr : 0);
return new StackValue(addr != nullptr ? *addr : 0);
}
static bool is_in_expression_stack(const frame& fr, const intptr_t* const addr) {
assert(addr != NULL, "invariant");
assert(addr != nullptr, "invariant");
// Ensure to be 'inside' the expression stack (i.e., addr >= sp for Intel).
// In case of exceptions, the expression stack is invalid and the sp
@ -329,20 +329,20 @@ static void stack_locals(StackValueCollection* result,
const frame& fr,
const stackChunkOop chunk) {
assert(result != NULL, "invariant");
assert(result != nullptr, "invariant");
for (int i = 0; i < length; ++i) {
const intptr_t* addr;
if (chunk == NULL) {
if (chunk == nullptr) {
addr = fr.interpreter_frame_local_at(i);
assert(addr >= fr.sp(), "must be inside the frame");
} else {
addr = chunk->interpreter_frame_local_at(fr, i);
}
assert(addr != NULL, "invariant");
assert(addr != nullptr, "invariant");
StackValue* const sv = create_stack_value_from_oop_map(oop_mask, i, addr, chunk);
assert(sv != NULL, "sanity check");
assert(sv != nullptr, "sanity check");
result->add(sv);
}
@ -355,16 +355,16 @@ static void stack_expressions(StackValueCollection* result,
const frame& fr,
const stackChunkOop chunk) {
assert(result != NULL, "invariant");
assert(result != nullptr, "invariant");
for (int i = 0; i < length; ++i) {
const intptr_t* addr;
if (chunk == NULL) {
if (chunk == nullptr) {
addr = fr.interpreter_frame_expression_stack_at(i);
assert(addr != NULL, "invariant");
assert(addr != nullptr, "invariant");
if (!is_in_expression_stack(fr, addr)) {
// Need to ensure no bogus escapes.
addr = NULL;
addr = nullptr;
}
} else {
addr = chunk->interpreter_frame_expression_stack_at(fr, i);
@ -374,7 +374,7 @@ static void stack_expressions(StackValueCollection* result,
i + max_locals,
addr,
chunk);
assert(sv != NULL, "sanity check");
assert(sv != nullptr, "sanity check");
result->add(sv);
}
@ -433,7 +433,7 @@ StackValueCollection* interpretedVFrame::stack_data(bool expressions) const {
}
void interpretedVFrame::set_locals(StackValueCollection* values) const {
if (values == NULL || values->size() == 0) return;
if (values == nullptr || values->size() == 0) return;
// If the method is native, max_locals is not telling the truth.
// max_locals then equals the size of the parameters.
@ -449,7 +449,7 @@ void interpretedVFrame::set_locals(StackValueCollection* values) const {
// Depending on oop/int put it in the right package
const StackValue* const sv = values->at(i);
assert(sv != NULL, "sanity check");
assert(sv != nullptr, "sanity check");
if (sv->type() == T_OBJECT) {
*(oop *) addr = (sv->get_obj())();
} else { // integer
@ -593,7 +593,7 @@ void vframeStreamCommon::skip_prefixed_method_and_wrappers() {
}
javaVFrame* vframeStreamCommon::asJavaVFrame() {
javaVFrame* result = NULL;
javaVFrame* result = nullptr;
// FIXME, need to re-do JDK-8271140 and check is_native_frame?
if (_mode == compiled_mode && _frame.is_compiled_frame()) {
assert(_frame.is_compiled_frame() || _frame.is_native_frame(), "expected compiled Java frame");
@ -619,7 +619,7 @@ javaVFrame* vframeStreamCommon::asJavaVFrame() {
#ifndef PRODUCT
void vframe::print() {
if (WizardMode) _fr.print_value_on(tty,NULL);
if (WizardMode) _fr.print_value_on(tty,nullptr);
}
void vframe::print_value() const {
@ -670,7 +670,7 @@ void javaVFrame::print() {
if (monitor->owner_is_scalar_replaced()) {
Klass* k = java_lang_Class::as_Klass(monitor->owner_klass());
tty->print("( is scalar replaced %s)", k->external_name());
} else if (monitor->owner() == NULL) {
} else if (monitor->owner() == nullptr) {
tty->print("( null )");
} else {
monitor->owner()->print_value();
@ -701,14 +701,14 @@ void javaVFrame::print_value() const {
if (!m->is_native()) {
Symbol* source_name = k->source_file_name();
int line_number = m->line_number_from_bci(bci());
if (source_name != NULL && (line_number != -1)) {
if (source_name != nullptr && (line_number != -1)) {
tty->print("(%s:%d)", source_name->as_C_string(), line_number);
}
} else {
tty->print("(Native Method)");
}
// Check frame size and print warning if it looks suspiciously large
if (fr().sp() != NULL) {
if (fr().sp() != nullptr) {
RegisterMap map = *register_map();
uint size = fr().frame_size();
#ifdef _LP64
@ -734,7 +734,7 @@ void javaVFrame::print_activation(int index) const {
// ------------- externalVFrame --------------
void externalVFrame::print() {
_fr.print_value_on(tty,NULL);
_fr.print_value_on(tty,nullptr);
}
void externalVFrame::print_value() const {

Some files were not shown because too many files have changed in this diff.