8300245: Replace NULL with nullptr in share/jfr/

Reviewed-by: mgronlun, coleenp
Johan Sjölen 2023-05-10 12:35:21 +00:00
parent 4251b56214
commit cc396895e5
125 changed files with 2065 additions and 2069 deletions
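
The change itself is mechanical: every use of the NULL macro under share/jfr/ is replaced with the C++11 nullptr keyword, with no intended change in behavior. As a rough illustration of why HotSpot prefers nullptr (this sketch is not part of the commit, and log_value is a made-up function): nullptr has its own type, std::nullptr_t, so it binds only to pointer parameters, while NULL is an integer-like macro whose behavior in overload resolution depends on how the platform defines it.

#include <cstddef>   // NULL; nullptr itself is a keyword and needs no header
#include <iostream>

// Hypothetical overload pair, not taken from the JDK sources.
static void log_value(void* p) { std::cout << "pointer: " << p << '\n'; }
static void log_value(int v)   { std::cout << "integer: " << v << '\n'; }

int main() {
  int* buffer = nullptr;       // same meaning as 'int* buffer = NULL;'
  if (buffer == nullptr) {     // comparisons read exactly as they did with NULL
    log_value(nullptr);        // unambiguously selects the pointer overload
  }
  // log_value(NULL);          // ambiguous or silently picks the integer overload,
  //                           // depending on how the platform defines NULL
  return 0;
}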


@ -58,7 +58,7 @@ bool register_jfr_dcmds() {
static bool is_disabled(outputStream* output) {
if (Jfr::is_disabled()) {
if (output != NULL) {
if (output != nullptr) {
output->print_cr("Flight Recorder is disabled.\n");
}
return true;
@ -93,14 +93,14 @@ static bool invalid_state(outputStream* out, TRAPS) {
}
static void handle_pending_exception(outputStream* output, bool startup, oop throwable) {
assert(throwable != NULL, "invariant");
assert(throwable != nullptr, "invariant");
oop msg = java_lang_Throwable::message(throwable);
if (msg == NULL) {
if (msg == nullptr) {
return;
}
char* text = java_lang_String::as_utf8_string(msg);
if (text != NULL) {
if (text != nullptr) {
if (startup) {
log_error(jfr,startup)("%s", text);
} else {
@ -111,12 +111,12 @@ static void handle_pending_exception(outputStream* output, bool startup, oop thr
static void print_message(outputStream* output, oop content, TRAPS) {
objArrayOop lines = objArrayOop(content);
assert(lines != NULL, "invariant");
assert(lines != nullptr, "invariant");
assert(lines->is_array(), "must be array");
const int length = lines->length();
for (int i = 0; i < length; ++i) {
const char* text = JfrJavaSupport::c_str(lines->obj_at(i), THREAD);
if (text == NULL) {
if (text == nullptr) {
// An oome has been thrown and is pending.
break;
}
@ -127,12 +127,12 @@ static void print_message(outputStream* output, oop content, TRAPS) {
static void log(oop content, TRAPS) {
LogMessage(jfr,startup) msg;
objArrayOop lines = objArrayOop(content);
assert(lines != NULL, "invariant");
assert(lines != nullptr, "invariant");
assert(lines->is_array(), "must be array");
const int length = lines->length();
for (int i = 0; i < length; ++i) {
const char* text = JfrJavaSupport::c_str(lines->obj_at(i), THREAD);
if (text == NULL) {
if (text == nullptr) {
// An oome has been thrown and is pending.
break;
}
@ -145,7 +145,7 @@ static void handle_dcmd_result(outputStream* output,
const DCmdSource source,
TRAPS) {
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
assert(output != NULL, "invariant");
assert(output != nullptr, "invariant");
ResourceMark rm(THREAD);
const bool startup = DCmd_Source_Internal == source;
if (HAS_PENDING_EXCEPTION) {
@ -177,16 +177,16 @@ static void handle_dcmd_result(outputStream* output,
}
static oop construct_dcmd_instance(JfrJavaArguments* args, TRAPS) {
assert(args != NULL, "invariant");
assert(args != nullptr, "invariant");
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
assert(args->klass() != NULL, "invariant");
assert(args->klass() != nullptr, "invariant");
args->set_name("<init>");
args->set_signature("()V");
JfrJavaSupport::new_object(args, CHECK_NULL);
return args->result()->get_oop();
}
JfrDCmd::JfrDCmd(outputStream* output, bool heap, int num_arguments) : DCmd(output, heap), _args(NULL), _num_arguments(num_arguments), _delimiter('\0') {}
JfrDCmd::JfrDCmd(outputStream* output, bool heap, int num_arguments) : DCmd(output, heap), _args(nullptr), _num_arguments(num_arguments), _delimiter('\0') {}
void JfrDCmd::invoke(JfrJavaArguments& method, TRAPS) const {
JavaValue constructor_result(T_OBJECT);
@ -221,7 +221,7 @@ void JfrDCmd::execute(DCmdSource source, TRAPS) {
JavaValue result(T_OBJECT);
JfrJavaArguments execute(&result, javaClass(), "execute", signature, CHECK);
jstring argument = JfrJavaSupport::new_string(_args, CHECK);
jstring s = NULL;
jstring s = nullptr;
if (source == DCmd_Source_Internal) {
s = JfrJavaSupport::new_string("internal", CHECK);
}
@ -248,11 +248,11 @@ void JfrDCmd::print_help(const char* name) const {
}
static void initialize_dummy_descriptors(GrowableArray<DCmdArgumentInfo*>* array) {
assert(array != NULL, "invariant");
DCmdArgumentInfo * const dummy = new DCmdArgumentInfo(NULL,
NULL,
NULL,
NULL,
assert(array != nullptr, "invariant");
DCmdArgumentInfo * const dummy = new DCmdArgumentInfo(nullptr,
nullptr,
nullptr,
nullptr,
false,
true, // a DcmdFramework "option"
false);
@ -263,7 +263,7 @@ static void initialize_dummy_descriptors(GrowableArray<DCmdArgumentInfo*>* array
// Since the DcmdFramework does not support dynamically allocated strings,
// we keep them in a thread local arena. The arena is reset between invocations.
static THREAD_LOCAL Arena* dcmd_arena = NULL;
static THREAD_LOCAL Arena* dcmd_arena = nullptr;
static void prepare_dcmd_string_arena(JavaThread* jt) {
dcmd_arena = JfrThreadLocal::dcmd_arena(jt);
@ -272,17 +272,17 @@ static void prepare_dcmd_string_arena(JavaThread* jt) {
}
static char* dcmd_arena_allocate(size_t size) {
assert(dcmd_arena != NULL, "invariant");
assert(dcmd_arena != nullptr, "invariant");
return (char*)dcmd_arena->Amalloc(size);
}
static const char* get_as_dcmd_arena_string(oop string) {
char* str = NULL;
char* str = nullptr;
const typeArrayOop value = java_lang_String::value(string);
if (value != NULL) {
if (value != nullptr) {
const size_t length = static_cast<size_t>(java_lang_String::utf8_length(string, value)) + 1;
str = dcmd_arena_allocate(length);
assert(str != NULL, "invariant");
assert(str != nullptr, "invariant");
java_lang_String::as_utf8_string(string, value, str, static_cast<int>(length));
}
return str;
@ -297,7 +297,7 @@ static const char* read_string_field(oop argument, const char* field_name, TRAPS
args.set_receiver(argument);
JfrJavaSupport::get_field(&args, THREAD);
const oop string_oop = result.get_oop();
return string_oop != NULL ? get_as_dcmd_arena_string(string_oop) : NULL;
return string_oop != nullptr ? get_as_dcmd_arena_string(string_oop) : nullptr;
}
static bool read_boolean_field(oop argument, const char* field_name, TRAPS) {
@ -342,14 +342,14 @@ GrowableArray<DCmdArgumentInfo*>* JfrDCmd::argument_info_array() const {
return array;
}
objArrayOop arguments = objArrayOop(result.get_oop());
assert(arguments != NULL, "invariant");
assert(arguments != nullptr, "invariant");
assert(arguments->is_array(), "must be array");
const int num_arguments = arguments->length();
assert(num_arguments == _num_arguments, "invariant");
prepare_dcmd_string_arena(thread);
for (int i = 0; i < num_arguments; ++i) {
DCmdArgumentInfo* const dai = create_info(arguments->obj_at(i), thread);
assert(dai != NULL, "invariant");
assert(dai != nullptr, "invariant");
array->append(dai);
}
return array;
@ -366,8 +366,8 @@ GrowableArray<const char*>* JfrDCmd::argument_name_array() const {
JfrConfigureFlightRecorderDCmd::JfrConfigureFlightRecorderDCmd(outputStream* output,
bool heap) : DCmdWithParser(output, heap),
_repository_path("repositorypath", "Path to repository,.e.g \\\"My Repository\\\"", "STRING", false, NULL),
_dump_path("dumppath", "Path to dump, e.g. \\\"My Dump path\\\"", "STRING", false, NULL),
_repository_path("repositorypath", "Path to repository,.e.g \\\"My Repository\\\"", "STRING", false, nullptr),
_dump_path("dumppath", "Path to dump, e.g. \\\"My Dump path\\\"", "STRING", false, nullptr),
_stack_depth("stackdepth", "Stack depth", "JULONG", false, "64"),
_global_buffer_count("globalbuffercount", "Number of global buffers,", "JULONG", false, "20"),
_global_buffer_size("globalbuffersize", "Size of a global buffers,", "MEMORY SIZE", false, "512k"),
@ -469,22 +469,22 @@ void JfrConfigureFlightRecorderDCmd::execute(DCmdSource source, TRAPS) {
Handle h_dcmd_instance(THREAD, dcmd);
assert(h_dcmd_instance.not_null(), "invariant");
jstring repository_path = NULL;
if (_repository_path.is_set() && _repository_path.value() != NULL) {
jstring repository_path = nullptr;
if (_repository_path.is_set() && _repository_path.value() != nullptr) {
repository_path = JfrJavaSupport::new_string(_repository_path.value(), CHECK);
}
jstring dump_path = NULL;
if (_dump_path.is_set() && _dump_path.value() != NULL) {
jstring dump_path = nullptr;
if (_dump_path.is_set() && _dump_path.value() != nullptr) {
dump_path = JfrJavaSupport::new_string(_dump_path.value(), CHECK);
}
jobject stack_depth = NULL;
jobject global_buffer_count = NULL;
jobject global_buffer_size = NULL;
jobject thread_buffer_size = NULL;
jobject max_chunk_size = NULL;
jobject memory_size = NULL;
jobject stack_depth = nullptr;
jobject global_buffer_count = nullptr;
jobject global_buffer_size = nullptr;
jobject thread_buffer_size = nullptr;
jobject max_chunk_size = nullptr;
jobject memory_size = nullptr;
jobject preserve_repository = nullptr;
if (!JfrRecorder::is_created()) {


@ -59,7 +59,7 @@ class JfrStartFlightRecordingDCmd : public JfrDCmd {
return "Medium: Depending on the settings for a recording, the impact can range from low to high.";
}
static const JavaPermission permission() {
JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", NULL};
JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", nullptr};
return p;
}
virtual const char* javaClass() const {
@ -84,7 +84,7 @@ class JfrDumpFlightRecordingDCmd : public JfrDCmd {
return "Low";
}
static const JavaPermission permission() {
JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", NULL};
JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", nullptr};
return p;
}
virtual const char* javaClass() const {
@ -109,7 +109,7 @@ class JfrCheckFlightRecordingDCmd : public JfrDCmd {
return "Low";
}
static const JavaPermission permission() {
JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", NULL};
JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", nullptr};
return p;
}
virtual const char* javaClass() const {
@ -134,7 +134,7 @@ class JfrStopFlightRecordingDCmd : public JfrDCmd {
return "Low";
}
static const JavaPermission permission() {
JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", NULL};
JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", nullptr};
return p;
}
virtual const char* javaClass() const {
@ -175,7 +175,7 @@ class JfrConfigureFlightRecorderDCmd : public DCmdWithParser {
return "Low";
}
static const JavaPermission permission() {
JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", NULL};
JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", nullptr};
return p;
}
static int num_arguments() { return 10; }


@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -191,7 +191,7 @@ static int skip_annotation_value(const address, int, int); // fwd decl
// Skip an annotation. Return >=limit if there is any problem.
static int next_annotation_index(const address buffer, int limit, int index) {
assert(buffer != NULL, "invariant");
assert(buffer != nullptr, "invariant");
index += 2; // skip atype
if ((index += 2) >= limit) {
return limit;
@ -206,7 +206,7 @@ static int next_annotation_index(const address buffer, int limit, int index) {
// Skip an annotation value. Return >=limit if there is any problem.
static int skip_annotation_value(const address buffer, int limit, int index) {
assert(buffer != NULL, "invariant");
assert(buffer != nullptr, "invariant");
// value := switch (tag:u1) {
// case B, C, I, S, Z, D, F, J, c: con:u2;
// case e: e_class:u2 e_name:u2;
@ -280,7 +280,7 @@ class AnnotationElementIterator : public StackObj {
_limit(limit),
_current(element_name_offset),
_next(element_name_offset) {
assert(_buffer != NULL, "invariant");
assert(_buffer != nullptr, "invariant");
assert(_next == element_name_offset, "invariant");
assert(_current == element_name_offset, "invariant");
}
@ -332,11 +332,11 @@ class AnnotationIterator : public StackObj {
public:
AnnotationIterator(const InstanceKlass* ik, AnnotationArray* ar) : _ik(ik),
_limit(ar != NULL ? ar->length() : 0),
_buffer(_limit > 2 ? ar->adr_at(2) : NULL),
_limit(ar != nullptr ? ar->length() : 0),
_buffer(_limit > 2 ? ar->adr_at(2) : nullptr),
_current(0),
_next(0) {
if (_buffer != NULL) {
if (_buffer != nullptr) {
_limit -= 2; // subtract sizeof(u2) number of annotations field
}
}
@ -358,7 +358,7 @@ class AnnotationIterator : public StackObj {
return AnnotationElementIterator(_ik, _buffer + _current, _next - _current);
}
const Symbol* type() const {
assert(_buffer != NULL, "invariant");
assert(_buffer != nullptr, "invariant");
assert(_current < _limit, "invariant");
return _ik->constants()->symbol_at(JfrBigEndian::read<u2>(_buffer + _current));
}
@ -366,9 +366,9 @@ class AnnotationIterator : public StackObj {
static const char value_name[] = "value";
static bool has_annotation(const InstanceKlass* ik, const Symbol* annotation_type, bool& value) {
assert(annotation_type != NULL, "invariant");
assert(annotation_type != nullptr, "invariant");
AnnotationArray* class_annotations = ik->class_annotations();
if (class_annotations == NULL) {
if (class_annotations == nullptr) {
return false;
}
@ -379,7 +379,7 @@ static bool has_annotation(const InstanceKlass* ik, const Symbol* annotation_typ
// target annotation found
static const Symbol* value_symbol =
SymbolTable::probe(value_name, sizeof value_name - 1);
assert(value_symbol != NULL, "invariant");
assert(value_symbol != nullptr, "invariant");
const AnnotationElementIterator element_iterator = annotation_iterator.elements();
while (element_iterator.has_next()) {
element_iterator.move_to_next();
@ -399,14 +399,14 @@ static bool has_annotation(const InstanceKlass* ik, const Symbol* annotation_typ
// Searching moves upwards in the klass hierarchy in order to support
// inherited annotations in addition to the ability to override.
static bool annotation_value(const InstanceKlass* ik, const Symbol* annotation_type, bool& value) {
assert(ik != NULL, "invariant");
assert(annotation_type != NULL, "invariant");
assert(ik != nullptr, "invariant");
assert(annotation_type != nullptr, "invariant");
assert(JdkJfrEvent::is_a(ik), "invariant");
if (has_annotation(ik, annotation_type, value)) {
return true;
}
InstanceKlass* const super = InstanceKlass::cast(ik->super());
return super != NULL && JdkJfrEvent::is_a(super) ? annotation_value(super, annotation_type, value) : false;
return super != nullptr && JdkJfrEvent::is_a(super) ? annotation_value(super, annotation_type, value) : false;
}
static const char jdk_jfr_module_name[] = "jdk.jfr";
@ -416,30 +416,30 @@ static bool java_base_can_read_jdk_jfr() {
if (can_read) {
return true;
}
static Symbol* jdk_jfr_module_symbol = NULL;
if (jdk_jfr_module_symbol == NULL) {
static Symbol* jdk_jfr_module_symbol = nullptr;
if (jdk_jfr_module_symbol == nullptr) {
jdk_jfr_module_symbol = SymbolTable::probe(jdk_jfr_module_name, sizeof jdk_jfr_module_name - 1);
if (jdk_jfr_module_symbol == NULL) {
if (jdk_jfr_module_symbol == nullptr) {
return false;
}
}
assert(jdk_jfr_module_symbol != NULL, "invariant");
assert(jdk_jfr_module_symbol != nullptr, "invariant");
ModuleEntryTable* const table = Modules::get_module_entry_table(Handle());
assert(table != NULL, "invariant");
assert(table != nullptr, "invariant");
const ModuleEntry* const java_base_module = table->javabase_moduleEntry();
if (java_base_module == NULL) {
if (java_base_module == nullptr) {
return false;
}
assert(java_base_module != NULL, "invariant");
assert(java_base_module != nullptr, "invariant");
ModuleEntry* jdk_jfr_module;
{
MutexLocker ml(Module_lock);
jdk_jfr_module = table->lookup_only(jdk_jfr_module_symbol);
if (jdk_jfr_module == NULL) {
if (jdk_jfr_module == nullptr) {
return false;
}
}
assert(jdk_jfr_module != NULL, "invariant");
assert(jdk_jfr_module != nullptr, "invariant");
if (java_base_module->can_read(jdk_jfr_module)) {
can_read = true;
}
@ -452,18 +452,18 @@ static const char registered_constant[] = "Ljdk/jfr/Registered;";
// Searching moves upwards in the klass hierarchy in order to support
// inherited annotations in addition to the ability to override.
static bool should_register_klass(const InstanceKlass* ik, bool& untypedEventHandler) {
assert(ik != NULL, "invariant");
assert(ik != nullptr, "invariant");
assert(JdkJfrEvent::is_a(ik), "invariant");
assert(!untypedEventHandler, "invariant");
static const Symbol* registered_symbol = NULL;
if (registered_symbol == NULL) {
static const Symbol* registered_symbol = nullptr;
if (registered_symbol == nullptr) {
registered_symbol = SymbolTable::probe(registered_constant, sizeof registered_constant - 1);
if (registered_symbol == NULL) {
if (registered_symbol == nullptr) {
untypedEventHandler = true;
return false;
}
}
assert(registered_symbol != NULL, "invariant");
assert(registered_symbol != nullptr, "invariant");
bool value = false; // to be set by annotation_value
untypedEventHandler = !(annotation_value(ik, registered_symbol, value) || java_base_can_read_jdk_jfr());
return value;
@ -473,14 +473,14 @@ static bool should_register_klass(const InstanceKlass* ik, bool& untypedEventHan
* Map an utf8 constant back to its CONSTANT_UTF8_INFO
*/
static u2 utf8_info_index(const InstanceKlass* ik, const Symbol* const target, TRAPS) {
assert(target != NULL, "invariant");
assert(target != nullptr, "invariant");
const ConstantPool* cp = ik->constants();
const int cp_len = cp->length();
for (u2 index = 1; index < cp_len; ++index) {
const constantTag tag = cp->tag_at(index);
if (tag.is_utf8()) {
const Symbol* const utf8_sym = cp->symbol_at(index);
assert(utf8_sym != NULL, "invariant");
assert(utf8_sym != nullptr, "invariant");
if (utf8_sym == target) {
return index;
}
@ -497,7 +497,7 @@ static bool is_index_within_range(u2 index, u2 orig_cp_len, u2 new_cp_entries_le
#endif
static u2 add_utf8_info(JfrBigEndianWriter& writer, const char* utf8_constant, u2 orig_cp_len, u2& new_cp_entries_len) {
assert(utf8_constant != NULL, "invariant");
assert(utf8_constant != nullptr, "invariant");
writer.write<u1>(JVM_CONSTANT_Utf8);
writer.write_utf8_u2_len(utf8_constant);
assert(writer.is_valid(), "invariant");
@ -540,7 +540,7 @@ static u2 add_flr_register_method_constants(JfrBigEndianWriter& writer,
u2 orig_cp_len,
u2& number_of_new_constants,
TRAPS) {
assert(utf8_indexes != NULL, "invariant");
assert(utf8_indexes != nullptr, "invariant");
return add_method_ref_info(writer,
utf8_indexes[UTF8_OPT_FlightRecorder],
utf8_indexes[UTF8_OPT_register],
@ -573,7 +573,7 @@ static jlong add_field_info(JfrBigEndianWriter& writer, u2 name_index, u2 desc_i
}
static u2 add_field_infos(JfrBigEndianWriter& writer, const u2* utf8_indexes, bool untypedEventConfiguration) {
assert(utf8_indexes != NULL, "invariant");
assert(utf8_indexes != nullptr, "invariant");
add_field_info(writer,
utf8_indexes[UTF8_REQ_eventConfiguration],
untypedEventConfiguration ? utf8_indexes[UTF8_OPT_LjavaLangObject] : utf8_indexes[UTF8_OPT_eventConfiguration_FIELD_DESC],
@ -648,7 +648,7 @@ static jlong add_method_info(JfrBigEndianWriter& writer,
* Stream should come in at the start position.
*/
static u2 position_stream_after_cp(const ClassFileStream* stream) {
assert(stream != NULL, "invariant");
assert(stream != nullptr, "invariant");
assert(stream->current_offset() == 0, "invariant");
stream->skip_u4_fast(2); // 8 bytes skipped
const u2 cp_len = stream->get_u2_fast();
@ -715,7 +715,7 @@ static u2 position_stream_after_cp(const ClassFileStream* stream) {
* Stream should come in positioned just before fields_count
*/
static u2 position_stream_after_fields(const ClassFileStream* stream) {
assert(stream != NULL, "invariant");
assert(stream != nullptr, "invariant");
assert(stream->current_offset() > 0, "invariant");
// fields len
const u2 orig_fields_len = stream->get_u2_fast();
@ -745,9 +745,9 @@ static u2 position_stream_after_methods(JfrBigEndianWriter& writer,
bool register_klass,
const Method* clinit_method,
u4& orig_method_len_offset) {
assert(stream != NULL, "invariant");
assert(stream != nullptr, "invariant");
assert(stream->current_offset() > 0, "invariant");
assert(utf8_indexes != NULL, "invariant");
assert(utf8_indexes != nullptr, "invariant");
// We will come back to this location when we
// know how many methods there will be.
writer.reserve(sizeof(u2));
@ -766,7 +766,7 @@ static u2 position_stream_after_methods(JfrBigEndianWriter& writer,
const u4 attrib_len = stream->get_u4_fast();
stream->skip_u1_fast(attrib_len);
}
if (clinit_method != NULL && name_index == clinit_method->name_index()) {
if (clinit_method != nullptr && name_index == clinit_method->name_index()) {
// The method just parsed is an existing <clinit> method.
// If the class has the @Registered(false) annotation, i.e. marking a class
// for opting out from automatic registration, then we do not need to do anything.
@ -790,7 +790,7 @@ static u2 position_stream_after_methods(JfrBigEndianWriter& writer,
}
static u2 add_method_infos(JfrBigEndianWriter& writer, const u2* utf8_indexes) {
assert(utf8_indexes != NULL, "invariant");
assert(utf8_indexes != nullptr, "invariant");
add_method_info(writer,
utf8_indexes[UTF8_REQ_begin],
utf8_indexes[UTF8_REQ_EMPTY_VOID_METHOD_DESC],
@ -838,13 +838,13 @@ static u2 add_method_infos(JfrBigEndianWriter& writer, const u2* utf8_indexes) {
}
static void adjust_exception_table(JfrBigEndianWriter& writer, u2 bci_adjustment_offset, const Method* method, TRAPS) {
const u2 ex_table_length = method != NULL ? (u2)method->exception_table_length() : 0;
const u2 ex_table_length = method != nullptr ? (u2)method->exception_table_length() : 0;
writer.write<u2>(ex_table_length); // Exception table length
if (ex_table_length > 0) {
assert(method != NULL, "invariant");
assert(method != nullptr, "invariant");
const ExceptionTableElement* const ex_elements = method->exception_table_start();
for (int i = 0; i < ex_table_length; ++i) {
assert(ex_elements != NULL, "invariant");
assert(ex_elements != nullptr, "invariant");
writer.write<u2>(ex_elements[i].start_pc + bci_adjustment_offset);
writer.write<u2>(ex_elements[i].end_pc + bci_adjustment_offset);
writer.write<u2>(ex_elements[i].handler_pc + bci_adjustment_offset);
@ -872,8 +872,8 @@ static void adjust_stack_map(JfrBigEndianWriter& writer,
const u2* utf8_indexes,
u2 bci_adjustment_offset,
TRAPS) {
assert(stack_map != NULL, "invariant");
assert(utf8_indexes != NULL, "invariant");
assert(stack_map != nullptr, "invariant");
assert(utf8_indexes != nullptr, "invariant");
writer.write<u2>(utf8_indexes[UTF8_OPT_StackMapTable]);
const jlong stack_map_attrib_len_offset = writer.current_offset();
writer.reserve(sizeof(u4));
@ -920,8 +920,8 @@ static void adjust_line_number_table(JfrBigEndianWriter& writer,
u4 bci_adjustement_offset,
const Method* method,
TRAPS) {
assert(utf8_indexes != NULL, "invariant");
assert(method != NULL, "invariant");
assert(utf8_indexes != nullptr, "invariant");
assert(method != nullptr, "invariant");
assert(method->has_linenumber_table(), "invariant");
writer.write(utf8_indexes[UTF8_OPT_LineNumberTable]);
const jlong lnt_attributes_length_offset = writer.current_offset();
@ -950,8 +950,8 @@ static u2 adjust_local_variable_table(JfrBigEndianWriter& writer,
u2 bci_adjustment_offset,
const Method* method,
TRAPS) {
assert(utf8_indexes != NULL, "invariant");
assert(method != NULL, "invariant");
assert(utf8_indexes != nullptr, "invariant");
assert(method != nullptr, "invariant");
assert(method->has_localvariable_table(), "invariant");
writer.write<u2>(utf8_indexes[UTF8_OPT_LocalVariableTable]);
const jlong lvt_attributes_length_offset = writer.current_offset();
@ -959,7 +959,7 @@ static u2 adjust_local_variable_table(JfrBigEndianWriter& writer,
const int lvt_len = method->localvariable_table_length();
writer.write<u2>((u2)lvt_len);
const LocalVariableTableElement* table = method->localvariable_table_start();
assert(table != NULL, "invariant");
assert(table != nullptr, "invariant");
u2 num_lvtt_entries = 0;
for (int i = 0; i < lvt_len; ++i) {
writer.write<u2>(table[i].start_bci + bci_adjustment_offset);
@ -990,7 +990,7 @@ static void adjust_local_variable_type_table(JfrBigEndianWriter& writer,
writer.reserve(sizeof(u4));
writer.write<u2>(num_lvtt_entries);
const LocalVariableTableElement* table = method->localvariable_table_start();
assert(table != NULL, "invariant");
assert(table != nullptr, "invariant");
const int lvt_len = method->localvariable_table_length();
for (int i = 0; i < lvt_len; ++i) {
if (table[i].signature_cp_index > 0) {
@ -1013,23 +1013,23 @@ static void adjust_code_attributes(JfrBigEndianWriter& writer,
const Method* clinit_method,
TRAPS) {
// "Code" attributes
assert(utf8_indexes != NULL, "invariant");
assert(utf8_indexes != nullptr, "invariant");
const jlong code_attributes_offset = writer.current_offset();
writer.reserve(sizeof(u2));
u2 number_of_code_attributes = 0;
if (clinit_method != NULL) {
if (clinit_method != nullptr) {
Array<u1>* stack_map = clinit_method->stackmap_data();
if (stack_map != NULL) {
if (stack_map != nullptr) {
++number_of_code_attributes;
adjust_stack_map(writer, stack_map, utf8_indexes, bci_adjustment_offset, THREAD);
assert(writer.is_valid(), "invariant");
}
if (clinit_method != NULL && clinit_method->has_linenumber_table()) {
if (clinit_method != nullptr && clinit_method->has_linenumber_table()) {
++number_of_code_attributes;
adjust_line_number_table(writer, utf8_indexes, bci_adjustment_offset, clinit_method, THREAD);
assert(writer.is_valid(), "invariant");
}
if (clinit_method != NULL && clinit_method->has_localvariable_table()) {
if (clinit_method != nullptr && clinit_method->has_localvariable_table()) {
++number_of_code_attributes;
const u2 num_of_lvtt_entries = adjust_local_variable_table(writer, utf8_indexes, bci_adjustment_offset, clinit_method, THREAD);
assert(writer.is_valid(), "invariant");
@ -1053,7 +1053,7 @@ static jlong insert_clinit_method(const InstanceKlass* ik,
const u2 register_method_ref_index,
const Method* clinit_method,
TRAPS) {
assert(utf8_indexes != NULL, "invariant");
assert(utf8_indexes != nullptr, "invariant");
// The injected code length is always this value.
// This is to ensure that padding can be done
// where needed and to simplify size calculations.
@ -1061,10 +1061,10 @@ static jlong insert_clinit_method(const InstanceKlass* ik,
const u2 name_index = utf8_indexes[UTF8_OPT_clinit];
assert(name_index != invalid_cp_index, "invariant");
const u2 desc_index = utf8_indexes[UTF8_REQ_EMPTY_VOID_METHOD_DESC];
const u2 max_stack = MAX2(clinit_method != NULL ? clinit_method->verifier_max_stack() : 1, 1);
const u2 max_locals = MAX2(clinit_method != NULL ? clinit_method->max_locals() : 0, 0);
const u2 orig_bytecodes_length = clinit_method != NULL ? (u2)clinit_method->code_size() : 0;
const address orig_bytecodes = clinit_method != NULL ? clinit_method->code_base() : NULL;
const u2 max_stack = MAX2(clinit_method != nullptr ? clinit_method->verifier_max_stack() : 1, 1);
const u2 max_locals = MAX2(clinit_method != nullptr ? clinit_method->max_locals() : 0, 0);
const u2 orig_bytecodes_length = clinit_method != nullptr ? (u2)clinit_method->code_size() : 0;
const address orig_bytecodes = clinit_method != nullptr ? clinit_method->code_base() : nullptr;
const u2 new_code_length = injected_code_length + orig_bytecodes_length;
DEBUG_ONLY(const jlong start_offset = writer.current_offset();)
writer.write<u2>(JVM_ACC_STATIC); // flags
@ -1090,7 +1090,7 @@ static jlong insert_clinit_method(const InstanceKlass* ik,
writer.write<u1>((u1)Bytecodes::_invokestatic);
// invoke "FlightRecorder.register(Ljava/lang/Class;")
writer.write<u2>(register_method_ref_index);
if (clinit_method == NULL) {
if (clinit_method == nullptr) {
writer.write<u1>((u1)Bytecodes::_nop);
writer.write<u1>((u1)Bytecodes::_return);
} else {
@ -1118,30 +1118,30 @@ static jlong insert_clinit_method(const InstanceKlass* ik,
return writer.current_offset();
}
static Symbol* begin = NULL;
static Symbol* end = NULL;
static Symbol* commit = NULL;
static Symbol* isEnabled = NULL;
static Symbol* shouldCommit = NULL;
static Symbol* void_method_sig = NULL;
static Symbol* boolean_method_sig = NULL;
static Symbol* begin = nullptr;
static Symbol* end = nullptr;
static Symbol* commit = nullptr;
static Symbol* isEnabled = nullptr;
static Symbol* shouldCommit = nullptr;
static Symbol* void_method_sig = nullptr;
static Symbol* boolean_method_sig = nullptr;
static void initialize_symbols() {
if (begin == NULL) {
if (begin == nullptr) {
begin = SymbolTable::probe("begin", 5);
assert(begin != NULL, "invariant");
assert(begin != nullptr, "invariant");
end = SymbolTable::probe("end", 3);
assert(end != NULL, "invariant");
assert(end != nullptr, "invariant");
commit = SymbolTable::probe("commit", 6);
assert(commit != NULL, "invariant");
assert(commit != nullptr, "invariant");
isEnabled = SymbolTable::probe("isEnabled", 9);
assert(isEnabled != NULL, "invariant");
assert(isEnabled != nullptr, "invariant");
shouldCommit = SymbolTable::probe("shouldCommit", 12);
assert(shouldCommit != NULL, "invariant");
assert(shouldCommit != nullptr, "invariant");
void_method_sig = SymbolTable::probe("()V", 3);
assert(void_method_sig != NULL, "invariant");
assert(void_method_sig != nullptr, "invariant");
boolean_method_sig = SymbolTable::probe("()Z", 3);
assert(boolean_method_sig != NULL, "invariant");
assert(boolean_method_sig != nullptr, "invariant");
}
}
@ -1151,14 +1151,14 @@ static ClassFileStream* schema_extend_event_klass_bytes(const InstanceKlass* ik,
initialize_symbols();
static const u2 public_final_flag_mask = JVM_ACC_PUBLIC | JVM_ACC_FINAL;
const ClassFileStream* const orig_stream = parser.clone_stream();
assert(orig_stream != NULL, "invariant");
assert(orig_stream != nullptr, "invariant");
const int orig_stream_length = orig_stream->length();
// allocate an identically sized buffer
u1* const new_buffer = NEW_RESOURCE_ARRAY_IN_THREAD_RETURN_NULL(THREAD, u1, orig_stream_length);
if (new_buffer == NULL) {
return NULL;
if (new_buffer == nullptr) {
return nullptr;
}
assert(new_buffer != NULL, "invariant");
assert(new_buffer != nullptr, "invariant");
// memcpy the entire [B
memcpy(new_buffer, orig_stream->buffer(), orig_stream_length);
const u2 orig_cp_len = position_stream_after_cp(orig_stream);
@ -1198,7 +1198,7 @@ static ClassFileStream* schema_extend_event_klass_bytes(const InstanceKlass* ik,
orig_stream->skip_u1_fast(attrib_len);
}
}
return new ClassFileStream(new_buffer, orig_stream_length, NULL, ClassFileStream::verify);
return new ClassFileStream(new_buffer, orig_stream_length, nullptr, ClassFileStream::verify);
}
// Attempt to locate an existing UTF8_INFO mapping the utf8_constant.
@ -1209,7 +1209,7 @@ static u2 find_or_add_utf8_info(JfrBigEndianWriter& writer,
u2 orig_cp_len,
u2& added_cp_entries,
TRAPS) {
assert(utf8_constant != NULL, "invariant");
assert(utf8_constant != nullptr, "invariant");
TempNewSymbol utf8_sym = SymbolTable::new_symbol(utf8_constant);
// lookup existing
const int utf8_orig_idx = utf8_info_index(ik, utf8_sym, THREAD);
@ -1240,7 +1240,7 @@ static u2 resolve_utf8_indexes(JfrBigEndianWriter& writer,
bool register_klass,
bool untypedEventConfiguration,
TRAPS) {
assert(utf8_indexes != NULL, "invariant");
assert(utf8_indexes != nullptr, "invariant");
u2 added_cp_entries = 0;
// resolve all required symbols
for (u2 index = 0; index < NOF_UTF8_REQ_SYMBOLS; ++index) {
@ -1270,21 +1270,21 @@ static u2 resolve_utf8_indexes(JfrBigEndianWriter& writer,
utf8_indexes[UTF8_OPT_CLASS_VOID_METHOD_DESC] = invalid_cp_index;
}
if (clinit_method != NULL && clinit_method->has_stackmap_table()) {
if (clinit_method != nullptr && clinit_method->has_stackmap_table()) {
utf8_indexes[UTF8_OPT_StackMapTable] =
find_or_add_utf8_info(writer, ik, utf8_constants[UTF8_OPT_StackMapTable], orig_cp_len, added_cp_entries, THREAD);
} else {
utf8_indexes[UTF8_OPT_StackMapTable] = invalid_cp_index;
}
if (clinit_method != NULL && clinit_method->has_linenumber_table()) {
if (clinit_method != nullptr && clinit_method->has_linenumber_table()) {
utf8_indexes[UTF8_OPT_LineNumberTable] =
find_or_add_utf8_info(writer, ik, utf8_constants[UTF8_OPT_LineNumberTable], orig_cp_len, added_cp_entries, THREAD);
} else {
utf8_indexes[UTF8_OPT_LineNumberTable] = invalid_cp_index;
}
if (clinit_method != NULL && clinit_method->has_localvariable_table()) {
if (clinit_method != nullptr && clinit_method->has_localvariable_table()) {
utf8_indexes[UTF8_OPT_LocalVariableTable] =
find_or_add_utf8_info(writer, ik, utf8_constants[UTF8_OPT_LocalVariableTable], orig_cp_len, added_cp_entries, THREAD);
utf8_indexes[UTF8_OPT_LocalVariableTypeTable] =
@ -1301,7 +1301,7 @@ static u1* schema_extend_event_subklass_bytes(const InstanceKlass* ik,
const ClassFileParser& parser,
jint& size_of_new_bytes,
TRAPS) {
assert(ik != NULL, "invariant");
assert(ik != nullptr, "invariant");
// If the class already has a clinit method
// we need to take that into account
const Method* clinit_method = ik->class_initializer();
@ -1317,12 +1317,12 @@ static u1* schema_extend_event_subklass_bytes(const InstanceKlass* ik,
// to be used in building up a modified class [B.
const jint new_buffer_size = extra_stream_bytes + orig_stream_size;
u1* const new_buffer = NEW_RESOURCE_ARRAY_IN_THREAD_RETURN_NULL(THREAD, u1, new_buffer_size);
if (new_buffer == NULL) {
if (new_buffer == nullptr) {
log_error(jfr, system) ("Thread local allocation (native) for " SIZE_FORMAT
" bytes failed in JfrEventClassTransformer::on_klass_creation", static_cast<size_t>(new_buffer_size));
return NULL;
return nullptr;
}
assert(new_buffer != NULL, "invariant");
assert(new_buffer != nullptr, "invariant");
// [B wrapped in a big endian writer
JfrBigEndianWriter writer(new_buffer, new_buffer_size);
assert(writer.current_offset() == 0, "invariant");
@ -1406,7 +1406,7 @@ static u1* schema_extend_event_subklass_bytes(const InstanceKlass* ik,
if (register_klass) {
insert_clinit_method(ik, parser, writer, orig_cp_len, utf8_indexes, flr_register_method_ref_index, clinit_method, THREAD);
}
number_of_new_methods += clinit_method != NULL ? 0 : register_klass ? 1 : 0;
number_of_new_methods += clinit_method != nullptr ? 0 : register_klass ? 1 : 0;
// Update classfile methods_count
writer.write_at_offset<u2>(orig_methods_len + number_of_new_methods, new_method_len_offset);
assert(writer.is_valid(), "invariant");
@ -1423,18 +1423,18 @@ static bool should_force_instrumentation() {
}
static void log_pending_exception(oop throwable) {
assert(throwable != NULL, "invariant");
assert(throwable != nullptr, "invariant");
oop msg = java_lang_Throwable::message(throwable);
if (msg != NULL) {
if (msg != nullptr) {
char* text = java_lang_String::as_utf8_string(msg);
if (text != NULL) {
if (text != nullptr) {
log_error(jfr, system) ("%s", text);
}
}
}
static bool has_pending_exception(TRAPS) {
assert(THREAD != NULL, "invariant");
assert(THREAD != nullptr, "invariant");
if (HAS_PENDING_EXCEPTION) {
log_pending_exception(PENDING_EXCEPTION);
CLEAR_PENDING_EXCEPTION;
@ -1444,17 +1444,17 @@ static bool has_pending_exception(TRAPS) {
}
static bool has_local_method_implementation(const InstanceKlass* ik, const Symbol* name, const Symbol* signature) {
assert(ik != NULL, "invariant");
assert(name != NULL, "invariant");
assert(signature != NULL, "invariant");
return NULL != ik->find_local_method(name, signature, Klass::OverpassLookupMode::skip, Klass::StaticLookupMode::find,
assert(ik != nullptr, "invariant");
assert(name != nullptr, "invariant");
assert(signature != nullptr, "invariant");
return nullptr != ik->find_local_method(name, signature, Klass::OverpassLookupMode::skip, Klass::StaticLookupMode::find,
Klass::PrivateLookupMode::find);
}
// If for a subklass, on initial class load, an implementation exist for any of the final methods declared in Event,
// then constraints are considered breached.
static bool invalid_preconditions_for_subklass_on_initial_load(const InstanceKlass* ik) {
assert(ik != NULL, "invariant");
assert(ik != nullptr, "invariant");
return has_local_method_implementation(ik, begin, void_method_sig) ||
has_local_method_implementation(ik, end, void_method_sig) ||
has_local_method_implementation(ik, commit, void_method_sig) ||
@ -1470,19 +1470,19 @@ static ClassFileStream* schema_extend_event_subklass_bytes(const InstanceKlass*
// Remove the tag denoting this as a jdk.jfr.Event subklass. No instrumentation, hence no events can be written.
// The class is allowed to load as-is, but it is classified as outside of the jfr system.
JdkJfrEvent::remove(ik);
return NULL;
return nullptr;
}
jint size_of_new_bytes = 0;
const u1* new_bytes = schema_extend_event_subklass_bytes(ik, parser, size_of_new_bytes, THREAD);
if (new_bytes == NULL) {
return NULL;
if (new_bytes == nullptr) {
return nullptr;
}
assert(new_bytes != NULL, "invariant");
assert(new_bytes != nullptr, "invariant");
assert(size_of_new_bytes > 0, "invariant");
const bool force_instrumentation = should_force_instrumentation();
if (Jfr::is_recording() || force_instrumentation) {
jint size_of_instrumented_bytes = 0;
unsigned char* instrumented_bytes = NULL;
unsigned char* instrumented_bytes = nullptr;
const jclass super = static_cast<jclass>(JfrJavaSupport::local_jni_handle(ik->super()->java_mirror(), THREAD));
const jboolean boot_class_loader = ik->class_loader_data()->is_boot_class_loader_data();
JfrUpcalls::new_bytes_eager_instrumentation(JfrTraceId::load_raw(ik),
@ -1496,15 +1496,15 @@ static ClassFileStream* schema_extend_event_subklass_bytes(const InstanceKlass*
THREAD);
JfrJavaSupport::destroy_local_jni_handle(super);
if (has_pending_exception(THREAD)) {
return NULL;
return nullptr;
}
assert(instrumented_bytes != NULL, "invariant");
assert(instrumented_bytes != nullptr, "invariant");
assert(size_of_instrumented_bytes > 0, "invariant");
new_bytes = instrumented_bytes;
size_of_new_bytes = size_of_instrumented_bytes;
is_instrumented = true;
}
return new ClassFileStream(new_bytes, size_of_new_bytes, NULL, ClassFileStream::verify);
return new ClassFileStream(new_bytes, size_of_new_bytes, nullptr, ClassFileStream::verify);
}
static bool _force_instrumentation = false;
@ -1518,14 +1518,14 @@ bool JfrEventClassTransformer::is_force_instrumentation() {
}
static ClassFileStream* retransform_bytes(const Klass* existing_klass, const ClassFileParser& parser, bool& is_instrumented, TRAPS) {
assert(existing_klass != NULL, "invariant");
assert(existing_klass != nullptr, "invariant");
assert(!is_instrumented, "invariant");
assert(JdkJfrEvent::is_a(existing_klass) || JdkJfrEvent::is_host(existing_klass), "invariant");
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
jint size_of_new_bytes = 0;
unsigned char* new_bytes = NULL;
unsigned char* new_bytes = nullptr;
const ClassFileStream* const stream = parser.clone_stream();
assert(stream != NULL, "invariant");
assert(stream != nullptr, "invariant");
const jclass clazz = static_cast<jclass>(JfrJavaSupport::local_jni_handle(existing_klass->java_mirror(), THREAD));
JfrUpcalls::on_retransform(JfrTraceId::load_raw(existing_klass),
clazz,
@ -1536,19 +1536,19 @@ static ClassFileStream* retransform_bytes(const Klass* existing_klass, const Cla
THREAD);
JfrJavaSupport::destroy_local_jni_handle(clazz);
if (has_pending_exception(THREAD)) {
return NULL;
return nullptr;
}
assert(new_bytes != NULL, "invariant");
assert(new_bytes != nullptr, "invariant");
assert(size_of_new_bytes > 0, "invariant");
is_instrumented = true;
return new ClassFileStream(new_bytes, size_of_new_bytes, NULL, ClassFileStream::verify);
return new ClassFileStream(new_bytes, size_of_new_bytes, nullptr, ClassFileStream::verify);
}
// On initial class load.
static void cache_class_file_data(InstanceKlass* new_ik, const ClassFileStream* new_stream, const JavaThread* thread) {
assert(new_ik != NULL, "invariant");
assert(new_stream != NULL, "invariant");
assert(thread != NULL, "invariant");
assert(new_ik != nullptr, "invariant");
assert(new_stream != nullptr, "invariant");
assert(thread != nullptr, "invariant");
assert(!thread->has_pending_exception(), "invariant");
if (!JfrOptionSet::allow_retransforms()) {
return;
@ -1556,7 +1556,7 @@ static void cache_class_file_data(InstanceKlass* new_ik, const ClassFileStream*
const jint stream_len = new_stream->length();
JvmtiCachedClassFileData* p =
(JvmtiCachedClassFileData*)NEW_C_HEAP_ARRAY_RETURN_NULL(u1, offset_of(JvmtiCachedClassFileData, data) + stream_len, mtInternal);
if (p == NULL) {
if (p == nullptr) {
log_error(jfr, system)("Allocation using C_HEAP_ARRAY for " SIZE_FORMAT " bytes failed in JfrEventClassTransformer::cache_class_file_data",
static_cast<size_t>(offset_of(JvmtiCachedClassFileData, data) + stream_len));
return;
@ -1568,12 +1568,12 @@ static void cache_class_file_data(InstanceKlass* new_ik, const ClassFileStream*
// On redefine / retransform, in case an agent modified the class, the original bytes are cached onto the scratch klass.
static void transfer_cached_class_file_data(InstanceKlass* ik, InstanceKlass* new_ik, const ClassFileParser& parser, JavaThread* thread) {
assert(ik != NULL, "invariant");
assert(new_ik != NULL, "invariant");
assert(ik != nullptr, "invariant");
assert(new_ik != nullptr, "invariant");
JvmtiCachedClassFileData* const p = ik->get_cached_class_file();
if (p != NULL) {
if (p != nullptr) {
new_ik->set_cached_class_file(p);
ik->set_cached_class_file(NULL);
ik->set_cached_class_file(nullptr);
return;
}
// No cached classfile indicates that no agent modified the klass.
@ -1583,9 +1583,9 @@ static void transfer_cached_class_file_data(InstanceKlass* ik, InstanceKlass* ne
}
static void rewrite_klass_pointer(InstanceKlass*& ik, InstanceKlass* new_ik, ClassFileParser& parser, const JavaThread* thread) {
assert(ik != NULL, "invariant");
assert(new_ik != NULL, "invariant");
assert(thread != NULL, "invariant");
assert(ik != nullptr, "invariant");
assert(new_ik != nullptr, "invariant");
assert(thread != nullptr, "invariant");
assert(IS_EVENT_OR_HOST_KLASS(new_ik), "invariant");
assert(TRACE_ID(ik) == TRACE_ID(new_ik), "invariant");
assert(!thread->has_pending_exception(), "invariant");
@ -1597,14 +1597,14 @@ static void rewrite_klass_pointer(InstanceKlass*& ik, InstanceKlass* new_ik, Cla
// If code size is 1, it is 0xb1, i.e. the return instruction.
static inline bool is_commit_method_instrumented(const Method* m) {
assert(m != NULL, "invariant");
assert(m != nullptr, "invariant");
assert(m->name() == commit, "invariant");
assert(m->constMethod()->code_size() > 0, "invariant");
return m->constMethod()->code_size() > 1;
}
static bool bless_static_commit_method(const Array<Method*>* methods) {
assert(methods != NULL, "invariant");
assert(methods != nullptr, "invariant");
for (int i = 0; i < methods->length(); ++i) {
const Method* const m = methods->at(i);
// Method is of the form "static void UserEvent::commit(...)" and instrumented
@ -1617,7 +1617,7 @@ static bool bless_static_commit_method(const Array<Method*>* methods) {
}
static void bless_instance_commit_method(const Array<Method*>* methods) {
assert(methods != NULL, "invariant");
assert(methods != nullptr, "invariant");
for (int i = 0; i < methods->length(); ++i) {
const Method* const m = methods->at(i);
// Method is of the form "void UserEvent:commit()" and instrumented
@ -1634,10 +1634,10 @@ static void bless_instance_commit_method(const Array<Method*>* methods) {
// It is primarily the class file schema extended instance 'commit()V' method.
// Jdk events can also define a static commit method with an arbitrary signature.
static void bless_commit_method(const InstanceKlass* new_ik) {
assert(new_ik != NULL, "invariant");
assert(new_ik != nullptr, "invariant");
assert(JdkJfrEvent::is_subklass(new_ik), "invariant");
const Array<Method*>* const methods = new_ik->methods();
if (new_ik->class_loader() == NULL) {
if (new_ik->class_loader() == nullptr) {
// JDK events are allowed an additional commit method that is static.
// Search precedence must therefore inspect static methods first.
if (bless_static_commit_method(methods)) {
@ -1648,45 +1648,45 @@ static void bless_commit_method(const InstanceKlass* new_ik) {
}
static void copy_traceid(const InstanceKlass* ik, const InstanceKlass* new_ik) {
assert(ik != NULL, "invariant");
assert(new_ik != NULL, "invariant");
assert(ik != nullptr, "invariant");
assert(new_ik != nullptr, "invariant");
new_ik->set_trace_id(ik->trace_id());
assert(TRACE_ID(ik) == TRACE_ID(new_ik), "invariant");
}
static const Klass* klass_being_redefined(const InstanceKlass* ik, JvmtiThreadState* state) {
assert(ik != NULL, "invariant");
assert(state != NULL, "invariant");
assert(ik != nullptr, "invariant");
assert(state != nullptr, "invariant");
const GrowableArray<Klass*>* const redef_klasses = state->get_classes_being_redefined();
if (redef_klasses == NULL || redef_klasses->is_empty()) {
return NULL;
if (redef_klasses == nullptr || redef_klasses->is_empty()) {
return nullptr;
}
for (int i = 0; i < redef_klasses->length(); ++i) {
const Klass* const existing_klass = redef_klasses->at(i);
assert(existing_klass != NULL, "invariant");
assert(existing_klass != nullptr, "invariant");
if (ik->name() == existing_klass->name() && ik->class_loader_data() == existing_klass->class_loader_data()) {
// 'ik' is a scratch klass. Return the klass being redefined.
return existing_klass;
}
}
return NULL;
return nullptr;
}
// Redefining / retransforming?
static const Klass* find_existing_klass(const InstanceKlass* ik, JavaThread* thread) {
assert(ik != NULL, "invariant");
assert(thread != NULL, "invariant");
assert(ik != nullptr, "invariant");
assert(thread != nullptr, "invariant");
JvmtiThreadState* const state = thread->jvmti_thread_state();
return state != NULL ? klass_being_redefined(ik, state) : NULL;
return state != nullptr ? klass_being_redefined(ik, state) : nullptr;
}
static InstanceKlass* create_new_instance_klass(InstanceKlass* ik, ClassFileStream* stream, TRAPS) {
assert(stream != NULL, "invariant");
assert(stream != nullptr, "invariant");
ResourceMark rm(THREAD);
ClassLoaderData* const cld = ik->class_loader_data();
Handle pd(THREAD, ik->protection_domain());
Symbol* const class_name = ik->name();
const char* const klass_name = class_name != NULL ? class_name->as_C_string() : "";
const char* const klass_name = class_name != nullptr ? class_name->as_C_string() : "";
ClassLoadInfo cl_info(pd);
ClassFileParser new_parser(stream,
class_name,
@ -1697,30 +1697,30 @@ static InstanceKlass* create_new_instance_klass(InstanceKlass* ik, ClassFileStre
if (HAS_PENDING_EXCEPTION) {
log_pending_exception(PENDING_EXCEPTION);
CLEAR_PENDING_EXCEPTION;
return NULL;
return nullptr;
}
const ClassInstanceInfo* cl_inst_info = cl_info.class_hidden_info_ptr();
InstanceKlass* const new_ik = new_parser.create_instance_klass(false, *cl_inst_info, THREAD);
if (HAS_PENDING_EXCEPTION) {
log_pending_exception(PENDING_EXCEPTION);
CLEAR_PENDING_EXCEPTION;
return NULL;
return nullptr;
}
assert(new_ik != NULL, "invariant");
assert(new_ik->name() != NULL, "invariant");
assert(new_ik != nullptr, "invariant");
assert(new_ik->name() != nullptr, "invariant");
assert(strncmp(ik->name()->as_C_string(), new_ik->name()->as_C_string(), strlen(ik->name()->as_C_string())) == 0, "invariant");
return new_ik;
}
static InstanceKlass* create_instance_klass(InstanceKlass*& ik, ClassFileStream* stream, bool is_initial_load, JavaThread* thread) {
if (stream == NULL) {
if (stream == nullptr) {
if (is_initial_load) {
log_error(jfr, system)("JfrEventClassTransformer: unable to create ClassFileStream for %s", ik->external_name());
}
return NULL;
return nullptr;
}
InstanceKlass* const new_ik = create_new_instance_klass(ik, stream, thread);
if (new_ik == NULL) {
if (new_ik == nullptr) {
if (is_initial_load) {
log_error(jfr, system)("JfrEventClassTransformer: unable to create InstanceKlass for %s", ik->external_name());
}
@ -1731,20 +1731,20 @@ static InstanceKlass* create_instance_klass(InstanceKlass*& ik, ClassFileStream*
static void transform(InstanceKlass*& ik, ClassFileParser& parser, JavaThread* thread) {
assert(IS_EVENT_OR_HOST_KLASS(ik), "invariant");
bool is_instrumented = false;
ClassFileStream* stream = NULL;
ClassFileStream* stream = nullptr;
const Klass* const existing_klass = find_existing_klass(ik, thread);
if (existing_klass != NULL) {
if (existing_klass != nullptr) {
// There is already a klass defined, implying we are redefining / retransforming.
stream = retransform_bytes(existing_klass, parser, is_instrumented, thread);
} else {
// No existing klass, implying this is the initial load.
stream = JdkJfrEvent::is(ik) ? schema_extend_event_klass_bytes(ik, parser, thread) : schema_extend_event_subklass_bytes(ik, parser, is_instrumented, thread);
}
InstanceKlass* const new_ik = create_instance_klass(ik, stream, existing_klass == NULL, thread);
if (new_ik == NULL) {
InstanceKlass* const new_ik = create_instance_klass(ik, stream, existing_klass == nullptr, thread);
if (new_ik == nullptr) {
return;
}
if (existing_klass != NULL) {
if (existing_klass != nullptr) {
transfer_cached_class_file_data(ik, new_ik, parser, thread);
} else {
cache_class_file_data(new_ik, stream, thread);
@ -1762,7 +1762,7 @@ static void transform(InstanceKlass*& ik, ClassFileParser& parser, JavaThread* t
// instance of the passed in InstanceKlass. The original 'ik' will be set onto the passed parser,
// for destruction when the parser goes out of scope.
void JfrEventClassTransformer::on_klass_creation(InstanceKlass*& ik, ClassFileParser& parser, TRAPS) {
assert(ik != NULL, "invariant");
assert(ik != nullptr, "invariant");
assert(IS_EVENT_OR_HOST_KLASS(ik), "invariant");
if (ik->is_abstract() && !JdkJfrEvent::is(ik)) {
assert(JdkJfrEvent::is_subklass(ik), "invariant");
@ -1775,7 +1775,7 @@ void JfrEventClassTransformer::on_klass_creation(InstanceKlass*& ik, ClassFilePa
}
static bool is_static_commit_method_blessed(const Array<Method*>* methods) {
assert(methods != NULL, "invariant");
assert(methods != nullptr, "invariant");
for (int i = 0; i < methods->length(); ++i) {
const Method* const m = methods->at(i);
// Must be of form: static void UserEvent::commit(...)
@ -1787,7 +1787,7 @@ static bool is_static_commit_method_blessed(const Array<Method*>* methods) {
}
static bool is_instance_commit_method_blessed(const Array<Method*>* methods) {
assert(methods != NULL, "invariant");
assert(methods != nullptr, "invariant");
for (int i = 0; i < methods->length(); ++i) {
const Method* const m = methods->at(i);
// Must be of form: void UserEvent::commit()
@ -1799,10 +1799,10 @@ static bool is_instance_commit_method_blessed(const Array<Method*>* methods) {
}
bool JfrEventClassTransformer::is_instrumented(const InstanceKlass* ik) {
assert(ik != NULL, "invariant");
assert(ik != nullptr, "invariant");
assert(JdkJfrEvent::is_subklass(ik), "invariant");
const Array<Method*>* const methods = ik->methods();
if (ik->class_loader() == NULL) {
if (ik->class_loader() == nullptr) {
// JDK events are allowed an additional commit method that is static.
// Search precedence must therefore inspect static methods first.
if (is_static_commit_method_blessed(methods)) {


@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,17 +40,17 @@
#include "utilities/exceptions.hpp"
static const size_t ERROR_MSG_BUFFER_SIZE = 256;
static JfrJvmtiAgent* agent = NULL;
static jvmtiEnv* jfr_jvmti_env = NULL;
static JfrJvmtiAgent* agent = nullptr;
static jvmtiEnv* jfr_jvmti_env = nullptr;
static void check_jvmti_error(jvmtiEnv* jvmti, jvmtiError errnum, const char* str) {
if (errnum != JVMTI_ERROR_NONE) {
char* errnum_str = NULL;
char* errnum_str = nullptr;
jvmti->GetErrorName(errnum, &errnum_str);
log_error(jfr, system)("ERROR: JfrJvmtiAgent: " INT32_FORMAT " (%s): %s\n",
errnum,
NULL == errnum_str ? "Unknown" : errnum_str,
NULL == str ? "" : str);
nullptr == errnum_str ? "Unknown" : errnum_str,
nullptr == str ? "" : str);
}
}
@ -58,14 +58,14 @@ static bool set_event_notification_mode(jvmtiEventMode mode,
jvmtiEvent event,
jthread event_thread,
...) {
assert(jfr_jvmti_env != NULL, "invariant");
assert(jfr_jvmti_env != nullptr, "invariant");
const jvmtiError jvmti_ret_code = jfr_jvmti_env->SetEventNotificationMode(mode, event, event_thread);
check_jvmti_error(jfr_jvmti_env, jvmti_ret_code, "SetEventNotificationMode");
return jvmti_ret_code == JVMTI_ERROR_NONE;
}
static bool update_class_file_load_hook_event(jvmtiEventMode mode) {
return set_event_notification_mode(mode, JVMTI_EVENT_CLASS_FILE_LOAD_HOOK, NULL);
return set_event_notification_mode(mode, JVMTI_EVENT_CLASS_FILE_LOAD_HOOK, nullptr);
}
// jvmti event callbacks require C linkage
@ -79,7 +79,7 @@ extern "C" void JNICALL jfr_on_class_file_load_hook(jvmtiEnv *jvmti_env,
const unsigned char* class_data,
jint* new_class_data_len,
unsigned char** new_class_data) {
if (class_being_redefined == NULL) {
if (class_being_redefined == nullptr) {
return;
}
JavaThread* jt = JavaThread::thread_from_jni_environment(jni_env);
@ -100,7 +100,7 @@ static jclass* create_classes_array(jint classes_count, TRAPS) {
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(THREAD));
ThreadInVMfromNative tvmfn(THREAD);
jclass* const classes = NEW_RESOURCE_ARRAY_IN_THREAD_RETURN_NULL(THREAD, jclass, classes_count);
if (NULL == classes) {
if (nullptr == classes) {
char error_buffer[ERROR_MSG_BUFFER_SIZE];
jio_snprintf(error_buffer, ERROR_MSG_BUFFER_SIZE,
"Thread local allocation (native) of " SIZE_FORMAT " bytes failed "
@ -119,7 +119,7 @@ static void log_and_throw(jvmtiError error, TRAPS) {
const char base_error_msg[] = "JfrJvmtiAgent::retransformClasses failed: ";
size_t length = sizeof base_error_msg; // includes terminating null
const char* const jvmti_error_name = JvmtiUtil::error_name(error);
assert(jvmti_error_name != NULL, "invariant");
assert(jvmti_error_name != nullptr, "invariant");
length += strlen(jvmti_error_name);
char* error_msg = NEW_RESOURCE_ARRAY(char, length);
jio_snprintf(error_msg, length, "%s%s", base_error_msg, jvmti_error_name);
@ -132,7 +132,7 @@ static void log_and_throw(jvmtiError error, TRAPS) {
}
static void check_exception_and_log(JNIEnv* env, TRAPS) {
assert(env != NULL, "invariant");
assert(env != nullptr, "invariant");
if (env->ExceptionOccurred()) {
// array index out of bound
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(THREAD));
@ -147,8 +147,8 @@ static bool is_valid_jvmti_phase() {
}
void JfrJvmtiAgent::retransform_classes(JNIEnv* env, jobjectArray classes_array, TRAPS) {
assert(env != NULL, "invariant");
assert(classes_array != NULL, "invariant");
assert(env != nullptr, "invariant");
assert(classes_array != nullptr, "invariant");
assert(is_valid_jvmti_phase(), "invariant");
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(THREAD));
const jint classes_count = env->GetArrayLength(classes_array);
@ -157,7 +157,7 @@ void JfrJvmtiAgent::retransform_classes(JNIEnv* env, jobjectArray classes_array,
}
ResourceMark rm(THREAD);
jclass* const classes = create_classes_array(classes_count, CHECK);
assert(classes != NULL, "invariant");
assert(classes != nullptr, "invariant");
for (jint i = 0; i < classes_count; i++) {
jclass clz = (jclass)env->GetObjectArrayElement(classes_array, i);
check_exception_and_log(env, THREAD);
@ -182,7 +182,7 @@ void JfrJvmtiAgent::retransform_classes(JNIEnv* env, jobjectArray classes_array,
}
static bool register_callbacks(JavaThread* jt) {
assert(jfr_jvmti_env != NULL, "invariant");
assert(jfr_jvmti_env != nullptr, "invariant");
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(jt));
jvmtiEventCallbacks callbacks;
/* Set callbacks */
@ -193,7 +193,7 @@ static bool register_callbacks(JavaThread* jt) {
}
static bool register_capabilities(JavaThread* jt) {
assert(jfr_jvmti_env != NULL, "invariant");
assert(jfr_jvmti_env != nullptr, "invariant");
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(jt));
jvmtiCapabilities capabilities;
/* Add JVMTI capabilities */
@ -206,7 +206,7 @@ static bool register_capabilities(JavaThread* jt) {
}
static jint create_jvmti_env(JavaThread* jt) {
assert(jfr_jvmti_env == NULL, "invariant");
assert(jfr_jvmti_env == nullptr, "invariant");
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(jt));
extern struct JavaVM_ main_vm;
JavaVM* vm = &main_vm;
@ -214,7 +214,7 @@ static jint create_jvmti_env(JavaThread* jt) {
}
static bool unregister_callbacks(JavaThread* jt) {
assert(jfr_jvmti_env != NULL, "invariant");
assert(jfr_jvmti_env != nullptr, "invariant");
jvmtiEventCallbacks callbacks;
/* Set empty callbacks */
memset(&callbacks, 0, sizeof(callbacks));
@ -228,24 +228,24 @@ JfrJvmtiAgent::JfrJvmtiAgent() {}
JfrJvmtiAgent::~JfrJvmtiAgent() {
JavaThread* jt = JavaThread::current();
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(jt));
if (jfr_jvmti_env != NULL) {
if (jfr_jvmti_env != nullptr) {
ThreadToNativeFromVM transition(jt);
update_class_file_load_hook_event(JVMTI_DISABLE);
unregister_callbacks(jt);
jfr_jvmti_env->DisposeEnvironment();
jfr_jvmti_env = NULL;
jfr_jvmti_env = nullptr;
}
}
static bool initialize(JavaThread* jt) {
assert(jt != NULL, "invariant");
assert(jt != nullptr, "invariant");
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(jt));
ThreadToNativeFromVM transition(jt);
if (create_jvmti_env(jt) != JNI_OK) {
assert(jfr_jvmti_env == NULL, "invariant");
assert(jfr_jvmti_env == nullptr, "invariant");
return false;
}
assert(jfr_jvmti_env != NULL, "invariant");
assert(jfr_jvmti_env != nullptr, "invariant");
if (!register_capabilities(jt)) {
return false;
}
@ -265,27 +265,27 @@ static void log_and_throw_illegal_state_exception(TRAPS) {
}
bool JfrJvmtiAgent::create() {
assert(agent == NULL, "invariant");
assert(agent == nullptr, "invariant");
JavaThread* const jt = JavaThread::current();
if (!is_valid_jvmti_phase()) {
log_and_throw_illegal_state_exception(jt);
return false;
}
agent = new JfrJvmtiAgent();
if (agent == NULL) {
if (agent == nullptr) {
return false;
}
if (!initialize(jt)) {
delete agent;
agent = NULL;
agent = nullptr;
return false;
}
return true;
}
void JfrJvmtiAgent::destroy() {
if (agent != NULL) {
if (agent != nullptr) {
delete agent;
agent = NULL;
agent = nullptr;
}
}
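
For context on the replacement itself: NULL is an integer constant (0 or 0L), so it takes part in integral conversions and overload resolution as an integer, whereas nullptr has its own type, std::nullptr_t, which converts only to pointer types. A minimal standalone sketch of the difference, using standard C++11 only and hypothetical function names (an illustration, not part of the patch):

#include <cstddef>
#include <iostream>

// Two overloads: one taking an integer, one taking a pointer.
static void probe(int)         { std::cout << "int overload\n"; }
static void probe(const char*) { std::cout << "pointer overload\n"; }

int main() {
  probe(0);        // a literal 0 is an int: selects the int overload
  // probe(NULL);  // NULL expands to an integer constant, so this call is
                   // either the int overload or an ambiguity error,
                   // depending on how the platform defines NULL
  probe(nullptr);  // std::nullptr_t converts only to pointer types: always
                   // the pointer overload, which is what a null check intends
  return 0;
}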

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -39,12 +39,12 @@ static bool is_large_value(const JavaValue& value) {
#endif // ASSERT
static Symbol* resolve(const char* str) {
assert(str != NULL, "invariant");
assert(str != nullptr, "invariant");
return SymbolTable::new_symbol(str);
}
static Klass* resolve(Symbol* k_sym, TRAPS) {
assert(k_sym != NULL, "invariant");
assert(k_sym != nullptr, "invariant");
return SystemDictionary::resolve_or_fail(k_sym, true, THREAD);
}
@ -54,7 +54,6 @@ JfrJavaArguments::Parameters::Parameters() : _storage_index(0), _java_stack_slot
}
void JfrJavaArguments::Parameters::push(const JavaValue& value) {
assert(_storage != NULL, "invariant");
assert(!is_large_value(value), "invariant");
assert(_storage_index < SIZE, "invariant");
_storage[_storage_index++] = value;
@ -62,7 +61,6 @@ void JfrJavaArguments::Parameters::push(const JavaValue& value) {
}
void JfrJavaArguments::Parameters::push_large(const JavaValue& value) {
assert(_storage != NULL, "invariant");
assert(is_large_value(value), "invariant");
assert(_storage_index < SIZE, "invariant");
_storage[_storage_index++] = value;
@ -70,8 +68,7 @@ void JfrJavaArguments::Parameters::push_large(const JavaValue& value) {
}
void JfrJavaArguments::Parameters::set_receiver(const oop receiver) {
assert(_storage != NULL, "invariant");
assert(receiver != NULL, "invariant");
assert(receiver != nullptr, "invariant");
JavaValue value(T_OBJECT);
value.set_oop(receiver);
_storage[0] = value;
@ -88,7 +85,6 @@ oop JfrJavaArguments::Parameters::receiver() const {
}
bool JfrJavaArguments::Parameters::has_receiver() const {
assert(_storage != NULL, "invariant");
assert(_storage_index >= 1, "invariant");
assert(_java_stack_slots >= 1, "invariant");
return _storage[0].get_type() == T_OBJECT;
@ -184,92 +180,92 @@ void JfrJavaArguments::Parameters::copy(JavaCallArguments& args, TRAPS) const {
}
}
JfrJavaArguments::JfrJavaArguments(JavaValue* result) : _result(result), _klass(NULL), _name(NULL), _signature(NULL), _array_length(-1) {
assert(result != NULL, "invariant");
JfrJavaArguments::JfrJavaArguments(JavaValue* result) : _result(result), _klass(nullptr), _name(nullptr), _signature(nullptr), _array_length(-1) {
assert(result != nullptr, "invariant");
}
JfrJavaArguments::JfrJavaArguments(JavaValue* result, const char* klass_name, const char* name, const char* signature, TRAPS) :
_result(result),
_klass(NULL),
_name(NULL),
_signature(NULL),
_klass(nullptr),
_name(nullptr),
_signature(nullptr),
_array_length(-1) {
assert(result != NULL, "invariant");
if (klass_name != NULL) {
assert(result != nullptr, "invariant");
if (klass_name != nullptr) {
set_klass(klass_name, CHECK);
}
if (name != NULL) {
if (name != nullptr) {
set_name(name);
}
if (signature != NULL) {
if (signature != nullptr) {
set_signature(signature);
}
}
JfrJavaArguments::JfrJavaArguments(JavaValue* result, const Klass* klass, const Symbol* name, const Symbol* signature) : _result(result),
_klass(NULL),
_name(NULL),
_signature(NULL),
_klass(nullptr),
_name(nullptr),
_signature(nullptr),
_array_length(-1) {
assert(result != NULL, "invariant");
if (klass != NULL) {
assert(result != nullptr, "invariant");
if (klass != nullptr) {
set_klass(klass);
}
if (name != NULL) {
if (name != nullptr) {
set_name(name);
}
if (signature != NULL) {
if (signature != nullptr) {
set_signature(signature);
}
}
Klass* JfrJavaArguments::klass() const {
assert(_klass != NULL, "invariant");
assert(_klass != nullptr, "invariant");
return const_cast<Klass*>(_klass);
}
void JfrJavaArguments::set_klass(const char* klass_name, TRAPS) {
assert(klass_name != NULL, "invariant");
assert(klass_name != nullptr, "invariant");
Symbol* const k_sym = resolve(klass_name);
assert(k_sym != NULL, "invariant");
assert(k_sym != nullptr, "invariant");
const Klass* const klass = resolve(k_sym, CHECK);
set_klass(klass);
}
void JfrJavaArguments::set_klass(const Klass* klass) {
assert(klass != NULL, "invariant");
assert(klass != nullptr, "invariant");
_klass = klass;
}
Symbol* JfrJavaArguments::name() const {
assert(_name != NULL, "invariant");
assert(_name != nullptr, "invariant");
return const_cast<Symbol*>(_name);
}
void JfrJavaArguments::set_name(const char* name) {
assert(name != NULL, "invariant");
assert(name != nullptr, "invariant");
const Symbol* const sym = resolve(name);
set_name(sym);
}
void JfrJavaArguments::set_name(const Symbol* name) {
assert(name != NULL, "invariant");
assert(name != nullptr, "invariant");
_name = name;
}
Symbol* JfrJavaArguments::signature() const {
assert(_signature != NULL, "invariant");
assert(_signature != nullptr, "invariant");
return const_cast<Symbol*>(_signature);
}
void JfrJavaArguments::set_signature(const char* signature) {
assert(signature != NULL, "invariant");
assert(signature != nullptr, "invariant");
const Symbol* const sym = resolve(signature);
set_signature(sym);
}
void JfrJavaArguments::set_signature(const Symbol* signature) {
assert(signature != NULL, "invariant");
assert(signature != nullptr, "invariant");
_signature = signature;
}
@ -283,7 +279,7 @@ void JfrJavaArguments::set_array_length(int length) {
}
JavaValue* JfrJavaArguments::result() const {
assert(_result != NULL, "invariant");
assert(_result != nullptr, "invariant");
return const_cast<JavaValue*>(_result);
}
@ -348,7 +344,7 @@ void JfrJavaArguments::copy(JavaCallArguments& args, TRAPS) {
}
void JfrJavaCall::call_static(JfrJavaArguments* args, TRAPS) {
assert(args != NULL, "invariant");
assert(args != nullptr, "invariant");
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
ResourceMark rm(THREAD);
HandleMark hm(THREAD);
@ -358,7 +354,7 @@ void JfrJavaCall::call_static(JfrJavaArguments* args, TRAPS) {
}
void JfrJavaCall::call_special(JfrJavaArguments* args, TRAPS) {
assert(args != NULL, "invariant");
assert(args != nullptr, "invariant");
assert(args->has_receiver(), "invariant");
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
ResourceMark rm(THREAD);
@ -369,7 +365,7 @@ void JfrJavaCall::call_special(JfrJavaArguments* args, TRAPS) {
}
void JfrJavaCall::call_virtual(JfrJavaArguments* args, TRAPS) {
assert(args != NULL, "invariant");
assert(args != nullptr, "invariant");
assert(args->has_receiver(), "invariant");
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
ResourceMark rm(THREAD);

View File

@ -56,7 +56,7 @@
#ifdef ASSERT
static void check_java_thread_state(JavaThread* t, JavaThreadState state) {
assert(t != NULL, "invariant");
assert(t != nullptr, "invariant");
assert(t->is_Java_thread(), "invariant");
assert(t->thread_state() == state, "invariant");
}
@ -86,7 +86,7 @@ jobject JfrJavaSupport::local_jni_handle(const oop obj, JavaThread* t) {
jobject JfrJavaSupport::local_jni_handle(const jobject handle, JavaThread* t) {
DEBUG_ONLY(check_java_thread_in_vm(t));
const oop obj = JNIHandles::resolve(handle);
return obj == NULL ? NULL : local_jni_handle(obj, t);
return obj == nullptr ? nullptr : local_jni_handle(obj, t);
}
void JfrJavaSupport::destroy_local_jni_handle(jobject handle) {
@ -101,7 +101,7 @@ jobject JfrJavaSupport::global_jni_handle(const oop obj, JavaThread* t) {
jobject JfrJavaSupport::global_jni_handle(const jobject handle, JavaThread* t) {
const oop obj = JNIHandles::resolve(handle);
return obj == NULL ? NULL : global_jni_handle(obj, t);
return obj == nullptr ? nullptr : global_jni_handle(obj, t);
}
void JfrJavaSupport::destroy_global_jni_handle(jobject handle) {
@ -116,7 +116,7 @@ jweak JfrJavaSupport::global_weak_jni_handle(const oop obj, JavaThread* t) {
jweak JfrJavaSupport::global_weak_jni_handle(const jobject handle, JavaThread* t) {
const oop obj = JNIHandles::resolve(handle);
return obj == NULL ? NULL : global_weak_jni_handle(obj, t);
return obj == nullptr ? nullptr : global_weak_jni_handle(obj, t);
}
void JfrJavaSupport::destroy_global_weak_jni_handle(jweak handle) {
@ -147,7 +147,7 @@ void JfrJavaSupport::call_virtual(JfrJavaArguments* args, TRAPS) {
}
void JfrJavaSupport::notify_all(jobject object, TRAPS) {
assert(object != NULL, "invariant");
assert(object != nullptr, "invariant");
DEBUG_ONLY(check_java_thread_in_vm(THREAD));
HandleMark hm(THREAD);
Handle h_obj(THREAD, resolve_non_null(object));
@ -162,9 +162,9 @@ void JfrJavaSupport::notify_all(jobject object, TRAPS) {
* Object construction
*/
static void object_construction(JfrJavaArguments* args, JavaValue* result, InstanceKlass* klass, TRAPS) {
assert(args != NULL, "invariant");
assert(result != NULL, "invariant");
assert(klass != NULL, "invariant");
assert(args != nullptr, "invariant");
assert(result != nullptr, "invariant");
assert(klass != nullptr, "invariant");
assert(klass->is_initialized(), "invariant");
HandleMark hm(THREAD);
@ -179,9 +179,9 @@ static void object_construction(JfrJavaArguments* args, JavaValue* result, Insta
}
static void array_construction(JfrJavaArguments* args, JavaValue* result, InstanceKlass* klass, int array_length, TRAPS) {
assert(args != NULL, "invariant");
assert(result != NULL, "invariant");
assert(klass != NULL, "invariant");
assert(args != nullptr, "invariant");
assert(result != nullptr, "invariant");
assert(klass != nullptr, "invariant");
assert(klass->is_initialized(), "invariant");
Klass* const ak = klass->array_klass(THREAD);
@ -192,8 +192,8 @@ static void array_construction(JfrJavaArguments* args, JavaValue* result, Instan
}
static void create_object(JfrJavaArguments* args, JavaValue* result, TRAPS) {
assert(args != NULL, "invariant");
assert(result != NULL, "invariant");
assert(args != nullptr, "invariant");
assert(result != nullptr, "invariant");
assert(result->get_type() == T_OBJECT, "invariant");
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
@ -210,10 +210,10 @@ static void create_object(JfrJavaArguments* args, JavaValue* result, TRAPS) {
}
static void handle_result(JavaValue* result, bool global_ref, JavaThread* t) {
assert(result != NULL, "invariant");
assert(result != nullptr, "invariant");
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(t));
const oop result_oop = result->get_oop();
if (result_oop == NULL) {
if (result_oop == nullptr) {
return;
}
result->set_jobject(global_ref ?
@ -222,31 +222,31 @@ static void handle_result(JavaValue* result, bool global_ref, JavaThread* t) {
}
void JfrJavaSupport::new_object(JfrJavaArguments* args, TRAPS) {
assert(args != NULL, "invariant");
assert(args != nullptr, "invariant");
DEBUG_ONLY(check_java_thread_in_vm(THREAD));
create_object(args, args->result(), THREAD);
}
void JfrJavaSupport::new_object_local_ref(JfrJavaArguments* args, TRAPS) {
assert(args != NULL, "invariant");
assert(args != nullptr, "invariant");
DEBUG_ONLY(check_java_thread_in_vm(THREAD));
JavaValue* const result = args->result();
assert(result != NULL, "invariant");
assert(result != nullptr, "invariant");
create_object(args, result, CHECK);
handle_result(result, false, THREAD);
}
void JfrJavaSupport::new_object_global_ref(JfrJavaArguments* args, TRAPS) {
assert(args != NULL, "invariant");
assert(args != nullptr, "invariant");
DEBUG_ONLY(check_java_thread_in_vm(THREAD));
JavaValue* const result = args->result();
assert(result != NULL, "invariant");
assert(result != nullptr, "invariant");
create_object(args, result, CHECK);
handle_result(result, true, THREAD);
}
jstring JfrJavaSupport::new_string(const char* c_str, TRAPS) {
assert(c_str != NULL, "invariant");
assert(c_str != nullptr, "invariant");
DEBUG_ONLY(check_java_thread_in_vm(THREAD));
const oop result = java_lang_String::create_oop_from_str(c_str, THREAD);
return (jstring)local_jni_handle(result, THREAD);
@ -289,7 +289,7 @@ jobject JfrJavaSupport::new_java_lang_Long(jlong value, TRAPS) {
}
void JfrJavaSupport::set_array_element(jobjectArray arr, jobject element, int index, JavaThread* t) {
assert(arr != NULL, "invariant");
assert(arr != nullptr, "invariant");
DEBUG_ONLY(check_java_thread_in_vm(t));
HandleMark hm(t);
objArrayHandle a(t, (objArrayOop)resolve_non_null(arr));
@ -301,38 +301,38 @@ void JfrJavaSupport::set_array_element(jobjectArray arr, jobject element, int in
*/
static void write_int_field(const Handle& h_oop, fieldDescriptor* fd, jint value) {
assert(h_oop.not_null(), "invariant");
assert(fd != NULL, "invariant");
assert(fd != nullptr, "invariant");
h_oop->int_field_put(fd->offset(), value);
}
static void write_float_field(const Handle& h_oop, fieldDescriptor* fd, jfloat value) {
assert(h_oop.not_null(), "invariant");
assert(fd != NULL, "invariant");
assert(fd != nullptr, "invariant");
h_oop->float_field_put(fd->offset(), value);
}
static void write_double_field(const Handle& h_oop, fieldDescriptor* fd, jdouble value) {
assert(h_oop.not_null(), "invariant");
assert(fd != NULL, "invariant");
assert(fd != nullptr, "invariant");
h_oop->double_field_put(fd->offset(), value);
}
static void write_long_field(const Handle& h_oop, fieldDescriptor* fd, jlong value) {
assert(h_oop.not_null(), "invariant");
assert(fd != NULL, "invariant");
assert(fd != nullptr, "invariant");
h_oop->long_field_put(fd->offset(), value);
}
static void write_oop_field(const Handle& h_oop, fieldDescriptor* fd, const oop value) {
assert(h_oop.not_null(), "invariant");
assert(fd != NULL, "invariant");
assert(fd != nullptr, "invariant");
h_oop->obj_field_put(fd->offset(), value);
}
static void write_specialized_field(JfrJavaArguments* args, const Handle& h_oop, fieldDescriptor* fd, bool static_field) {
assert(args != NULL, "invariant");
assert(args != nullptr, "invariant");
assert(h_oop.not_null(), "invariant");
assert(fd != NULL, "invariant");
assert(fd != nullptr, "invariant");
assert(fd->offset() > 0, "invariant");
assert(args->length() >= 1, "invariant");
@ -367,9 +367,9 @@ static void write_specialized_field(JfrJavaArguments* args, const Handle& h_oop,
}
static void read_specialized_field(JavaValue* result, const Handle& h_oop, fieldDescriptor* fd) {
assert(result != NULL, "invariant");
assert(result != nullptr, "invariant");
assert(h_oop.not_null(), "invariant");
assert(fd != NULL, "invariant");
assert(fd != nullptr, "invariant");
assert(fd->offset() > 0, "invariant");
switch(fd->field_type()) {
@ -402,18 +402,18 @@ static bool find_field(const InstanceKlass* ik,
fieldDescriptor* fd,
bool is_static = false,
bool allow_super = false) {
assert(ik != NULL, "invariant");
assert(ik != nullptr, "invariant");
if (allow_super || is_static) {
return ik->find_field(name_symbol, signature_symbol, is_static, fd) != NULL;
return ik->find_field(name_symbol, signature_symbol, is_static, fd) != nullptr;
}
return ik->find_local_field(name_symbol, signature_symbol, fd);
}
static void lookup_field(JfrJavaArguments* args, const InstanceKlass* ik, fieldDescriptor* fd, bool static_field) {
assert(args != NULL, "invariant");
assert(ik != NULL, "invariant");
assert(args != nullptr, "invariant");
assert(ik != nullptr, "invariant");
assert(ik->is_initialized(), "invariant");
assert(fd != NULL, "invariant");
assert(fd != nullptr, "invariant");
find_field(ik, args->name(), args->signature(), fd, static_field, true);
}
@ -431,8 +431,8 @@ static void read_field(JfrJavaArguments* args, JavaValue* result, Thread* thread
}
static void read_field(JfrJavaArguments* args, JavaValue* result, TRAPS) {
assert(args != NULL, "invariant");
assert(result != NULL, "invariant");
assert(args != nullptr, "invariant");
assert(result != nullptr, "invariant");
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
result->set_oop(nullptr); // Initialize result in case klass initialize throws.
InstanceKlass* const klass = static_cast<InstanceKlass*>(args->klass());
@ -441,7 +441,7 @@ static void read_field(JfrJavaArguments* args, JavaValue* result, TRAPS) {
}
static void write_field(JfrJavaArguments* args, TRAPS) {
assert(args != NULL, "invariant");
assert(args != nullptr, "invariant");
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
InstanceKlass* const klass = static_cast<InstanceKlass*>(args->klass());
@ -458,20 +458,20 @@ static void write_field(JfrJavaArguments* args, TRAPS) {
}
void JfrJavaSupport::set_field(JfrJavaArguments* args, TRAPS) {
assert(args != NULL, "invariant");
assert(args != nullptr, "invariant");
write_field(args, THREAD);
}
void JfrJavaSupport::get_field(JfrJavaArguments* args, TRAPS) {
assert(args != NULL, "invariant");
assert(args != nullptr, "invariant");
read_field(args, args->result(), THREAD);
}
static void get_field_ref(JfrJavaArguments* args, bool local_ref, TRAPS) {
assert(args != NULL, "invariant");
assert(args != nullptr, "invariant");
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
JavaValue* const result = args->result();
assert(result != NULL, "invariant");
assert(result != nullptr, "invariant");
assert(result->get_type() == T_OBJECT, "invariant");
read_field(args, result, CHECK);
const oop obj = result->get_oop();
@ -498,7 +498,7 @@ void JfrJavaSupport::get_field_global_ref(JfrJavaArguments* args, TRAPS) {
*/
Klass* JfrJavaSupport::klass(const jobject handle) {
const oop obj = resolve_non_null(handle);
assert(obj != NULL, "invariant");
assert(obj != nullptr, "invariant");
return obj->klass();
}
@ -508,13 +508,13 @@ static char* allocate_string(bool c_heap, int length, Thread* thread) {
}
const char* JfrJavaSupport::c_str(oop string, Thread* thread, bool c_heap /* false */) {
char* str = NULL;
char* str = nullptr;
const typeArrayOop value = java_lang_String::value(string);
if (value != NULL) {
if (value != nullptr) {
const int length = java_lang_String::utf8_length(string, value);
str = allocate_string(c_heap, length + 1, thread);
if (str == NULL) {
return NULL;
if (str == nullptr) {
return nullptr;
}
java_lang_String::as_utf8_string(string, value, str, length + 1);
}
@ -522,14 +522,14 @@ const char* JfrJavaSupport::c_str(oop string, Thread* thread, bool c_heap /* fal
}
const char* JfrJavaSupport::c_str(jstring string, Thread* thread, bool c_heap /* false */) {
return string != NULL ? c_str(resolve_non_null(string), thread, c_heap) : NULL;
return string != nullptr ? c_str(resolve_non_null(string), thread, c_heap) : nullptr;
}
/*
* Exceptions and errors
*/
static void create_and_throw(Symbol* name, const char* message, TRAPS) {
assert(name != NULL, "invariant");
assert(name != nullptr, "invariant");
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
assert(!HAS_PENDING_EXCEPTION, "invariant");
THROW_MSG(name, message);
@ -608,7 +608,7 @@ void JfrJavaSupport::set_cause(jthrowable throwable, JavaThread* t) {
void JfrJavaSupport::uncaught_exception(jthrowable throwable, JavaThread* t) {
DEBUG_ONLY(check_java_thread_in_vm(t));
assert(throwable != NULL, "invariant");
assert(throwable != nullptr, "invariant");
set_cause(throwable, t);
}
@ -643,7 +643,7 @@ static bool is_jdk_jfr_module_in_readability_graph() {
}
static void print_module_resolution_error(outputStream* stream) {
assert(stream != NULL, "invariant");
assert(stream != nullptr, "invariant");
stream->print_cr("Module %s not found.", JDK_JFR_MODULE_NAME);
stream->print_cr("Flight Recorder can not be enabled.");
}
@ -654,7 +654,7 @@ bool JfrJavaSupport::is_jdk_jfr_module_available() {
bool JfrJavaSupport::is_jdk_jfr_module_available(outputStream* stream, TRAPS) {
if (!JfrJavaSupport::is_jdk_jfr_module_available()) {
if (stream != NULL) {
if (stream != nullptr) {
print_module_resolution_error(stream);
}
return false;
@ -664,9 +664,10 @@ bool JfrJavaSupport::is_jdk_jfr_module_available(outputStream* stream, TRAPS) {
typedef JfrOopTraceId<ThreadIdAccess> AccessThreadTraceId;
static JavaThread* get_native(ThreadsListHandle& tlh, jobject thread) {
JavaThread* native_thread = NULL;
(void)tlh.cv_internal_thread_to_JavaThread(thread, &native_thread, NULL);
JavaThread* native_thread = nullptr;
(void)tlh.cv_internal_thread_to_JavaThread(thread, &native_thread, nullptr);
return native_thread;
}
@ -777,7 +778,7 @@ bool JfrJavaSupport::is_excluded(Thread* thread) {
static const Klass* get_configuration_field_descriptor(const Handle& h_mirror, fieldDescriptor* descriptor, TRAPS) {
assert(h_mirror.not_null(), "invariant");
assert(descriptor != NULL, "invariant");
assert(descriptor != nullptr, "invariant");
Klass* const k = java_lang_Class::as_Klass(h_mirror());
assert(k->is_instance_klass(), "invariant");
InstanceKlass* const ik = InstanceKlass::cast(k);
@ -789,7 +790,7 @@ static const Klass* get_configuration_field_descriptor(const Handle& h_mirror, f
vmSymbols::jdk_jfr_internal_event_EventConfiguration_signature(),
true,
descriptor);
return typed_field_holder != NULL ? typed_field_holder : ik->find_field(vmSymbols::eventConfiguration_name(),
return typed_field_holder != nullptr ? typed_field_holder : ik->find_field(vmSymbols::eventConfiguration_name(),
vmSymbols::object_signature(), // untyped
true,
descriptor);
@ -802,13 +803,13 @@ jobject JfrJavaSupport::get_configuration(jobject clazz, TRAPS) {
assert(h_mirror.not_null(), "invariant");
fieldDescriptor configuration_field_descriptor;
const Klass* const field_holder = get_configuration_field_descriptor(h_mirror, &configuration_field_descriptor, THREAD);
if (field_holder == NULL) {
if (field_holder == nullptr) {
// The only reason should be that klass initialization failed.
return NULL;
return nullptr;
}
assert(java_lang_Class::as_Klass(h_mirror()) == field_holder, "invariant");
oop configuration_oop = h_mirror->obj_field(configuration_field_descriptor.offset());
return configuration_oop != NULL ? JfrJavaSupport::local_jni_handle(configuration_oop, THREAD) : NULL;
return configuration_oop != nullptr ? JfrJavaSupport::local_jni_handle(configuration_oop, THREAD) : nullptr;
}
bool JfrJavaSupport::set_configuration(jobject clazz, jobject configuration, TRAPS) {
@ -818,13 +819,13 @@ bool JfrJavaSupport::set_configuration(jobject clazz, jobject configuration, TRA
assert(h_mirror.not_null(), "invariant");
fieldDescriptor configuration_field_descriptor;
const Klass* const field_holder = get_configuration_field_descriptor(h_mirror, &configuration_field_descriptor, THREAD);
if (field_holder == NULL) {
if (field_holder == nullptr) {
// The only reason should be that klass initialization failed.
return false;
}
assert(java_lang_Class::as_Klass(h_mirror()) == field_holder, "invariant");
const oop configuration_oop = JNIHandles::resolve(configuration);
assert(configuration_oop != NULL, "invariant");
assert(configuration_oop != nullptr, "invariant");
h_mirror->obj_field_put(configuration_field_descriptor.offset(), configuration_oop);
return true;
}
@ -837,7 +838,7 @@ bool JfrJavaSupport::is_instrumented(jobject clazz, TRAPS) {
}
bool JfrJavaSupport::on_thread_start(Thread* t) {
assert(t != NULL, "invariant");
assert(t != nullptr, "invariant");
assert(Thread::current() == t, "invariant");
if (!t->is_Java_thread()) {
return true;
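
The JfrJavaSupport handle-resolution functions earlier in this file (local, global, and weak JNI handles) all rely on the same guard: resolve the handle, and if the resolved oop is null, return null rather than wrapping it. Below is a simplified stand-alone sketch of that null-propagation idiom, with hypothetical types and helpers standing in for JNIHandles::resolve and the oop-based overloads (an illustration only, not JFR code):

#include <iostream>

// Hypothetical stand-ins: a handle that may resolve to null, and a wrapper
// that must never be given null.
struct Object { int id; };

static Object* resolve(Object* handle) { return handle; } // may yield null
static Object* wrap(Object* obj)       { return obj; }    // requires non-null

// The guard used by the handle functions: null in, null out.
static Object* local_handle(Object* handle) {
  Object* const obj = resolve(handle);
  return obj == nullptr ? nullptr : wrap(obj);
}

int main() {
  Object o{7};
  std::cout << (local_handle(&o) != nullptr) << "\n";      // 1: wrapped
  std::cout << (local_handle(nullptr) != nullptr) << "\n"; // 0: propagated
  return 0;
}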

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -94,7 +94,7 @@ NO_TRANSITION(jstring, jfr_get_pid(JNIEnv* env, jobject jvm))
char pid_buf[32] = { 0 };
jio_snprintf(pid_buf, sizeof(pid_buf), "%d", os::current_process_id());
jstring pid_string = env->NewStringUTF(pid_buf);
return pid_string; // exception pending if NULL
return pid_string; // exception pending if null
NO_TRANSITION_END
NO_TRANSITION(jlong, jfr_elapsed_frequency(JNIEnv* env, jobject jvm))
@ -183,7 +183,7 @@ NO_TRANSITION(jboolean, jfr_should_rotate_disk(JNIEnv* env, jobject jvm))
NO_TRANSITION_END
NO_TRANSITION(jlong, jfr_get_type_id_from_string(JNIEnv * env, jobject jvm, jstring type))
const char* type_name = env->GetStringUTFChars(type, NULL);
const char* type_name = env->GetStringUTFChars(type, nullptr);
jlong id = JfrType::name_to_id(type_name);
env->ReleaseStringUTFChars(type, type_name);
return id;
@ -315,10 +315,10 @@ JVM_ENTRY_NO_ENV(void, jfr_set_repository_location(JNIEnv* env, jobject repo, js
JVM_END
NO_TRANSITION(void, jfr_set_dump_path(JNIEnv* env, jobject jvm, jstring dumppath))
if (dumppath == NULL) {
JfrEmergencyDump::set_dump_path(NULL);
if (dumppath == nullptr) {
JfrEmergencyDump::set_dump_path(nullptr);
} else {
const char* dump_path = env->GetStringUTFChars(dumppath, NULL);
const char* dump_path = env->GetStringUTFChars(dumppath, nullptr);
JfrEmergencyDump::set_dump_path(dump_path);
env->ReleaseStringUTFChars(dumppath, dump_path);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,9 +31,9 @@
#include "utilities/exceptions.hpp"
JfrJniMethodRegistration::JfrJniMethodRegistration(JNIEnv* env) {
assert(env != NULL, "invariant");
assert(env != nullptr, "invariant");
jclass jfr_clz = env->FindClass("jdk/jfr/internal/JVM");
if (jfr_clz != NULL) {
if (jfr_clz != nullptr) {
JNINativeMethod method[] = {
(char*)"beginRecording", (char*)"()V", (void*)jfr_begin_recording,
(char*)"isRecording", (char*)"()Z", (void*)jfr_is_recording,
@ -101,7 +101,7 @@ JfrJniMethodRegistration::JfrJniMethodRegistration(JNIEnv* env) {
const size_t method_array_length = sizeof(method) / sizeof(JNINativeMethod);
if (env->RegisterNatives(jfr_clz, method, (jint)method_array_length) != JNI_OK) {
JavaThread* jt = JavaThread::thread_from_jni_environment(env);
assert(jt != NULL, "invariant");
assert(jt != nullptr, "invariant");
assert(jt->thread_state() == _thread_in_native, "invariant");
ThreadInVMfromNative transition(jt);
log_error(jfr, system)("RegisterNatives for JVM class failed!");

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -41,13 +41,13 @@
#include "runtime/os.hpp"
#include "utilities/exceptions.hpp"
static Symbol* jvm_upcalls_class_sym = NULL;
static Symbol* on_retransform_method_sym = NULL;
static Symbol* on_retransform_signature_sym = NULL;
static Symbol* bytes_for_eager_instrumentation_sym = NULL;
static Symbol* bytes_for_eager_instrumentation_sig_sym = NULL;
static Symbol* unhide_internal_types_sym = NULL;
static Symbol* unhide_internal_types_sig_sym = NULL;
static Symbol* jvm_upcalls_class_sym = nullptr;
static Symbol* on_retransform_method_sym = nullptr;
static Symbol* on_retransform_signature_sym = nullptr;
static Symbol* bytes_for_eager_instrumentation_sym = nullptr;
static Symbol* bytes_for_eager_instrumentation_sig_sym = nullptr;
static Symbol* unhide_internal_types_sym = nullptr;
static Symbol* unhide_internal_types_sig_sym = nullptr;
static bool initialize(TRAPS) {
static bool initialized = false;
@ -60,7 +60,7 @@ static bool initialize(TRAPS) {
bytes_for_eager_instrumentation_sig_sym = SymbolTable::new_permanent_symbol("(JZZLjava/lang/Class;[B)[B");
unhide_internal_types_sym = SymbolTable::new_permanent_symbol("unhideInternalTypes");
unhide_internal_types_sig_sym = SymbolTable::new_permanent_symbol("()V");
initialized = unhide_internal_types_sig_sym != NULL;
initialized = unhide_internal_types_sig_sym != nullptr;
}
return initialized;
}
@ -77,7 +77,7 @@ static const typeArrayOop invoke(jlong trace_id,
TRAPS) {
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
const Klass* klass = SystemDictionary::resolve_or_fail(jvm_upcalls_class_sym, true, CHECK_NULL);
assert(klass != NULL, "invariant");
assert(klass != nullptr, "invariant");
typeArrayOop old_byte_array = oopFactory::new_byteArray(class_data_len, CHECK_NULL);
memcpy(old_byte_array->byte_at_addr(0), class_data, class_data_len);
JavaValue result(T_OBJECT);
@ -91,11 +91,11 @@ static const typeArrayOop invoke(jlong trace_id,
if (HAS_PENDING_EXCEPTION) {
ResourceMark rm(THREAD);
log_error(jfr, system)("JfrUpcall failed for %s", method_sym->as_C_string());
return NULL;
return nullptr;
}
// The result should be a [B
const oop res = result.get_oop();
assert(res != NULL, "invariant");
assert(res != nullptr, "invariant");
assert(res->is_typeArray(), "invariant");
assert(TypeArrayKlass::cast(res->klass())->element_type() == T_BYTE, "invariant");
const typeArrayOop new_byte_array = typeArrayOop(res);
@ -120,10 +120,10 @@ void JfrUpcalls::on_retransform(jlong trace_id,
unsigned char** new_class_data,
TRAPS) {
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
assert(class_being_redefined != NULL, "invariant");
assert(class_data != NULL, "invariant");
assert(new_class_data_len != NULL, "invariant");
assert(new_class_data != NULL, "invariant");
assert(class_being_redefined != nullptr, "invariant");
assert(class_data != nullptr, "invariant");
assert(new_class_data_len != nullptr, "invariant");
assert(new_class_data != nullptr, "invariant");
if (!JdkJfrEvent::is_visible(class_being_redefined)) {
return;
}
@ -139,13 +139,13 @@ void JfrUpcalls::on_retransform(jlong trace_id,
on_retransform_signature_sym,
new_bytes_length,
CHECK);
assert(new_byte_array != NULL, "invariant");
assert(new_byte_array != nullptr, "invariant");
assert(new_bytes_length > 0, "invariant");
unsigned char* const new_bytes = NEW_RESOURCE_ARRAY_IN_THREAD_RETURN_NULL(THREAD, unsigned char, new_bytes_length);
if (new_bytes == NULL) {
if (new_bytes == nullptr) {
log_error_and_throw_oom(new_bytes_length, THREAD); // unwinds
}
assert(new_bytes != NULL, "invariant");
assert(new_bytes != nullptr, "invariant");
memcpy(new_bytes, new_byte_array->byte_at_addr(0), (size_t)new_bytes_length);
*new_class_data_len = new_bytes_length;
*new_class_data = new_bytes;
@ -161,10 +161,10 @@ void JfrUpcalls::new_bytes_eager_instrumentation(jlong trace_id,
unsigned char** new_class_data,
TRAPS) {
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
assert(super != NULL, "invariant");
assert(class_data != NULL, "invariant");
assert(new_class_data_len != NULL, "invariant");
assert(new_class_data != NULL, "invariant");
assert(super != nullptr, "invariant");
assert(class_data != nullptr, "invariant");
assert(new_class_data_len != nullptr, "invariant");
assert(new_class_data != nullptr, "invariant");
jint new_bytes_length = 0;
initialize(THREAD);
const typeArrayOop new_byte_array = invoke(trace_id,
@ -177,13 +177,13 @@ void JfrUpcalls::new_bytes_eager_instrumentation(jlong trace_id,
bytes_for_eager_instrumentation_sig_sym,
new_bytes_length,
CHECK);
assert(new_byte_array != NULL, "invariant");
assert(new_byte_array != nullptr, "invariant");
assert(new_bytes_length > 0, "invariant");
unsigned char* const new_bytes = NEW_RESOURCE_ARRAY_IN_THREAD_RETURN_NULL(THREAD, unsigned char, new_bytes_length);
if (new_bytes == NULL) {
if (new_bytes == nullptr) {
log_error_and_throw_oom(new_bytes_length, THREAD); // this unwinds
}
assert(new_bytes != NULL, "invariant");
assert(new_bytes != nullptr, "invariant");
memcpy(new_bytes, new_byte_array->byte_at_addr(0), (size_t)new_bytes_length);
*new_class_data_len = new_bytes_length;
*new_class_data = new_bytes;
@ -193,7 +193,7 @@ bool JfrUpcalls::unhide_internal_types(TRAPS) {
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
JavaValue result(T_VOID);
const Klass* klass = SystemDictionary::resolve_or_fail(jvm_upcalls_class_sym, true, CHECK_false);
assert(klass != NULL, "invariant");
assert(klass != nullptr, "invariant");
JfrJavaArguments args(&result, klass, unhide_internal_types_sym, unhide_internal_types_sig_sym);
JfrJavaSupport::call_static(&args, THREAD);
if (HAS_PENDING_EXCEPTION) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -41,7 +41,7 @@ BFSClosure::BFSClosure(EdgeQueue* edge_queue, EdgeStore* edge_store, JFRBitSet*
_edge_queue(edge_queue),
_edge_store(edge_store),
_mark_bits(mark_bits),
_current_parent(NULL),
_current_parent(nullptr),
_current_frontier_level(0),
_next_frontier_idx(0),
_prev_frontier_idx(0),
@ -106,7 +106,7 @@ void BFSClosure::process() {
void BFSClosure::process_root_set() {
for (size_t idx = _edge_queue->bottom(); idx < _edge_queue->top(); ++idx) {
const Edge* edge = _edge_queue->element_at(idx);
assert(edge->parent() == NULL, "invariant");
assert(edge->parent() == nullptr, "invariant");
process(edge->reference(), edge->pointee());
}
}
@ -123,7 +123,7 @@ void BFSClosure::closure_impl(UnifiedOopRef reference, const oop pointee) {
}
if (_use_dfs) {
assert(_current_parent != NULL, "invariant");
assert(_current_parent != nullptr, "invariant");
DFSClosure::find_leaks_from_edge(_edge_store, _mark_bits, _current_parent);
return;
}
@ -136,7 +136,7 @@ void BFSClosure::closure_impl(UnifiedOopRef reference, const oop pointee) {
}
// if we are processing the initial root set, don't add to queue
if (_current_parent != NULL) {
if (_current_parent != nullptr) {
_edge_queue->add(_current_parent, reference);
}
@ -147,10 +147,10 @@ void BFSClosure::closure_impl(UnifiedOopRef reference, const oop pointee) {
}
void BFSClosure::add_chain(UnifiedOopRef reference, const oop pointee) {
assert(pointee != NULL, "invariant");
assert(pointee != nullptr, "invariant");
assert(pointee->mark().is_marked(), "invariant");
Edge leak_edge(_current_parent, reference);
_edge_store->put_chain(&leak_edge, _current_parent == NULL ? 1 : _current_frontier_level + 2);
_edge_store->put_chain(&leak_edge, _current_parent == nullptr ? 1 : _current_frontier_level + 2);
}
void BFSClosure::dfs_fallback() {
@ -159,7 +159,7 @@ void BFSClosure::dfs_fallback() {
_dfs_fallback_idx = _edge_queue->bottom();
while (!_edge_queue->is_empty()) {
const Edge* edge = _edge_queue->remove();
if (edge->pointee() != NULL) {
if (edge->pointee() != nullptr) {
DFSClosure::find_leaks_from_edge(_edge_store, _mark_bits, edge);
}
}
@ -203,34 +203,34 @@ bool BFSClosure::is_complete() const {
}
void BFSClosure::iterate(const Edge* parent) {
assert(parent != NULL, "invariant");
assert(parent != nullptr, "invariant");
const oop pointee = parent->pointee();
assert(pointee != NULL, "invariant");
assert(pointee != nullptr, "invariant");
_current_parent = parent;
pointee->oop_iterate(this);
}
void BFSClosure::do_oop(oop* ref) {
assert(ref != NULL, "invariant");
assert(ref != nullptr, "invariant");
assert(is_aligned(ref, HeapWordSize), "invariant");
const oop pointee = HeapAccess<AS_NO_KEEPALIVE>::oop_load(ref);
if (pointee != NULL) {
if (pointee != nullptr) {
closure_impl(UnifiedOopRef::encode_in_heap(ref), pointee);
}
}
void BFSClosure::do_oop(narrowOop* ref) {
assert(ref != NULL, "invariant");
assert(ref != nullptr, "invariant");
assert(is_aligned(ref, sizeof(narrowOop)), "invariant");
const oop pointee = HeapAccess<AS_NO_KEEPALIVE>::oop_load(ref);
if (pointee != NULL) {
if (pointee != nullptr) {
closure_impl(UnifiedOopRef::encode_in_heap(ref), pointee);
}
}
void BFSClosure::do_root(UnifiedOopRef ref) {
assert(ref.dereference() != NULL, "pointee must not be null");
assert(ref.dereference() != nullptr, "pointee must not be null");
if (!_edge_queue->is_full()) {
_edge_queue->add(NULL, ref);
_edge_queue->add(nullptr, ref);
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -42,9 +42,9 @@ UnifiedOopRef DFSClosure::_reference_stack[max_dfs_depth];
void DFSClosure::find_leaks_from_edge(EdgeStore* edge_store,
JFRBitSet* mark_bits,
const Edge* start_edge) {
assert(edge_store != NULL, "invariant");
assert(mark_bits != NULL," invariant");
assert(start_edge != NULL, "invariant");
assert(edge_store != nullptr, "invariant");
assert(mark_bits != nullptr," invariant");
assert(start_edge != nullptr, "invariant");
// Depth-first search, starting from a BFS edge
DFSClosure dfs(edge_store, mark_bits, start_edge);
@ -53,11 +53,11 @@ void DFSClosure::find_leaks_from_edge(EdgeStore* edge_store,
void DFSClosure::find_leaks_from_root_set(EdgeStore* edge_store,
JFRBitSet* mark_bits) {
assert(edge_store != NULL, "invariant");
assert(mark_bits != NULL, "invariant");
assert(edge_store != nullptr, "invariant");
assert(mark_bits != nullptr, "invariant");
// Mark root set, to avoid going sideways
DFSClosure dfs(edge_store, mark_bits, NULL);
DFSClosure dfs(edge_store, mark_bits, nullptr);
dfs._max_depth = 1;
RootSetClosure<DFSClosure> rs(&dfs);
rs.process();
@ -74,7 +74,7 @@ DFSClosure::DFSClosure(EdgeStore* edge_store, JFRBitSet* mark_bits, const Edge*
}
void DFSClosure::closure_impl(UnifiedOopRef reference, const oop pointee) {
assert(pointee != NULL, "invariant");
assert(pointee != nullptr, "invariant");
assert(!reference.is_null(), "invariant");
if (GranularTimer::is_finished()) {
@ -123,28 +123,28 @@ void DFSClosure::add_chain() {
assert(array_length == idx + 1, "invariant");
// aggregate from breadth-first search
if (_start_edge != NULL) {
if (_start_edge != nullptr) {
chain[idx++] = *_start_edge;
} else {
chain[idx - 1] = Edge(NULL, chain[idx - 1].reference());
chain[idx - 1] = Edge(nullptr, chain[idx - 1].reference());
}
_edge_store->put_chain(chain, idx + (_start_edge != NULL ? _start_edge->distance_to_root() : 0));
_edge_store->put_chain(chain, idx + (_start_edge != nullptr ? _start_edge->distance_to_root() : 0));
}
void DFSClosure::do_oop(oop* ref) {
assert(ref != NULL, "invariant");
assert(ref != nullptr, "invariant");
assert(is_aligned(ref, HeapWordSize), "invariant");
const oop pointee = HeapAccess<AS_NO_KEEPALIVE>::oop_load(ref);
if (pointee != NULL) {
if (pointee != nullptr) {
closure_impl(UnifiedOopRef::encode_in_heap(ref), pointee);
}
}
void DFSClosure::do_oop(narrowOop* ref) {
assert(ref != NULL, "invariant");
assert(ref != nullptr, "invariant");
assert(is_aligned(ref, sizeof(narrowOop)), "invariant");
const oop pointee = HeapAccess<AS_NO_KEEPALIVE>::oop_load(ref);
if (pointee != NULL) {
if (pointee != nullptr) {
closure_impl(UnifiedOopRef::encode_in_heap(ref), pointee);
}
}
@ -152,6 +152,6 @@ void DFSClosure::do_oop(narrowOop* ref) {
void DFSClosure::do_root(UnifiedOopRef ref) {
assert(!ref.is_null(), "invariant");
const oop pointee = ref.dereference();
assert(pointee != NULL, "invariant");
assert(pointee != nullptr, "invariant");
closure_impl(ref, pointee);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,13 +34,13 @@ const oop Edge::pointee() const {
}
const oop Edge::reference_owner() const {
return is_root() ? (oop)NULL : _parent->pointee();
return is_root() ? (oop)nullptr : _parent->pointee();
}
size_t Edge::distance_to_root() const {
size_t depth = 0;
const Edge* current = _parent;
while (current != NULL) {
while (current != nullptr) {
depth++;
current = current->parent();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -43,7 +43,7 @@ class Edge {
return _parent;
}
bool is_root() const {
return _parent == NULL;
return _parent == nullptr;
}
const oop pointee() const;
const oop reference_owner() const;
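
In the Edge class above, root-ness is encoded purely as a null parent: is_root() compares _parent against nullptr, and distance_to_root() (changed further up) walks the parent chain until it reaches that null. A reduced, hypothetical sketch of the same encoding, leaving out the UnifiedOopRef payload (an illustration only, not the actual JFR Edge):

#include <cstddef>
#include <iostream>

// Simplified sketch mirroring Edge: the root of a reference chain is the
// element whose parent pointer is null.
struct ChainEdge {
  const ChainEdge* parent;                  // nullptr means this edge is a root
  bool is_root() const { return parent == nullptr; }
  size_t distance_to_root() const {         // hops from here up to the root
    size_t depth = 0;
    for (const ChainEdge* cur = parent; cur != nullptr; cur = cur->parent) {
      ++depth;
    }
    return depth;
  }
};

int main() {
  ChainEdge root{nullptr};
  ChainEdge mid{&root};
  ChainEdge leaf{&mid};
  std::cout << root.is_root() << " " << leaf.distance_to_root() << "\n"; // prints "1 2"
  return 0;
}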

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,7 +28,7 @@
#include "jfr/recorder/storage/jfrVirtualMemory.hpp"
EdgeQueue::EdgeQueue(size_t reservation_size_bytes, size_t commit_block_size_bytes) :
_vmm(NULL),
_vmm(nullptr),
_reservation_size_bytes(reservation_size_bytes),
_commit_block_size_bytes(commit_block_size_bytes),
_top_index(0),
@ -37,9 +37,9 @@ EdgeQueue::EdgeQueue(size_t reservation_size_bytes, size_t commit_block_size_byt
bool EdgeQueue::initialize() {
assert(_reservation_size_bytes >= _commit_block_size_bytes, "invariant");
assert(_vmm == NULL, "invariant");
assert(_vmm == nullptr, "invariant");
_vmm = new JfrVirtualMemory();
return _vmm != NULL && _vmm->initialize(_reservation_size_bytes, _commit_block_size_bytes, sizeof(Edge));
return _vmm != nullptr && _vmm->initialize(_reservation_size_bytes, _commit_block_size_bytes, sizeof(Edge));
}
EdgeQueue::~EdgeQueue() {
@ -51,7 +51,7 @@ void EdgeQueue::add(const Edge* parent, UnifiedOopRef ref) {
assert(!is_full(), "EdgeQueue is full. Check is_full before adding another Edge");
assert(!_vmm->is_full(), "invariant");
void* const allocation = _vmm->new_datum();
assert(allocation != NULL, "invariant");
assert(allocation != nullptr, "invariant");
new (allocation)Edge(parent, ref);
_top_index++;
assert(_vmm->count() == _top_index, "invariant");
@ -86,16 +86,16 @@ const Edge* EdgeQueue::element_at(size_t index) const {
}
size_t EdgeQueue::reserved_size() const {
assert(_vmm != NULL, "invariant");
assert(_vmm != nullptr, "invariant");
return _vmm->reserved_size();
}
size_t EdgeQueue::live_set() const {
assert(_vmm != NULL, "invariant");
assert(_vmm != nullptr, "invariant");
return _vmm->live_set();
}
size_t EdgeQueue::sizeof_edge() const {
assert(_vmm != NULL, "invariant");
assert(_vmm != nullptr, "invariant");
return _vmm->aligned_datum_size_bytes();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -43,60 +43,60 @@ bool EdgeStore::is_empty() const {
}
void EdgeStore::on_link(EdgeEntry* entry) {
assert(entry != NULL, "invariant");
assert(entry != nullptr, "invariant");
assert(entry->id() == 0, "invariant");
entry->set_id(++_edge_id_counter);
}
bool EdgeStore::on_equals(uintptr_t hash, const EdgeEntry* entry) {
assert(entry != NULL, "invariant");
assert(entry != nullptr, "invariant");
assert(entry->hash() == hash, "invariant");
return true;
}
void EdgeStore::on_unlink(EdgeEntry* entry) {
assert(entry != NULL, "invariant");
assert(entry != nullptr, "invariant");
// nothing
}
#ifdef ASSERT
bool EdgeStore::contains(UnifiedOopRef reference) const {
return get(reference) != NULL;
return get(reference) != nullptr;
}
#endif
StoredEdge* EdgeStore::get(UnifiedOopRef reference) const {
assert(!reference.is_null(), "invariant");
EdgeEntry* const entry = _edges->lookup_only(reference.addr<uintptr_t>());
return entry != NULL ? entry->literal_addr() : NULL;
return entry != nullptr ? entry->literal_addr() : nullptr;
}
StoredEdge* EdgeStore::put(UnifiedOopRef reference) {
assert(!reference.is_null(), "invariant");
const StoredEdge e(NULL, reference);
assert(NULL == _edges->lookup_only(reference.addr<uintptr_t>()), "invariant");
const StoredEdge e(nullptr, reference);
assert(nullptr == _edges->lookup_only(reference.addr<uintptr_t>()), "invariant");
EdgeEntry& entry = _edges->put(reference.addr<uintptr_t>(), e);
return entry.literal_addr();
}
traceid EdgeStore::get_id(const Edge* edge) const {
assert(edge != NULL, "invariant");
assert(edge != nullptr, "invariant");
EdgeEntry* const entry = _edges->lookup_only(edge->reference().addr<uintptr_t>());
assert(entry != NULL, "invariant");
assert(entry != nullptr, "invariant");
return entry->id();
}
traceid EdgeStore::gc_root_id(const Edge* edge) const {
assert(edge != NULL, "invariant");
assert(edge != nullptr, "invariant");
const traceid gc_root_id = static_cast<const StoredEdge*>(edge)->gc_root_id();
if (gc_root_id != 0) {
return gc_root_id;
}
// not cached
assert(edge != NULL, "invariant");
assert(edge != nullptr, "invariant");
const Edge* const root = EdgeUtils::root(*edge);
assert(root != NULL, "invariant");
assert(root->parent() == NULL, "invariant");
assert(root != nullptr, "invariant");
assert(root->parent() == nullptr, "invariant");
return get_id(root);
}
@ -105,15 +105,15 @@ static const Edge* get_skip_ancestor(const Edge** current, size_t distance_to_ro
assert(*skip_length == 0, "invariant");
*skip_length = distance_to_root - (EdgeUtils::root_context - 1);
const Edge* const target = EdgeUtils::ancestor(**current, *skip_length);
assert(target != NULL, "invariant");
assert(target != nullptr, "invariant");
assert(target->distance_to_root() + 1 == EdgeUtils::root_context, "invariant");
return target;
}
bool EdgeStore::put_skip_edge(StoredEdge** previous, const Edge** current, size_t distance_to_root) {
assert(*previous != NULL, "invariant");
assert((*previous)->parent() == NULL, "invariant");
assert(*current != NULL, "invariant");
assert(*previous != nullptr, "invariant");
assert((*previous)->parent() == nullptr, "invariant");
assert(*current != nullptr, "invariant");
assert((*current)->distance_to_root() == distance_to_root, "invariant");
if (distance_to_root < EdgeUtils::root_context) {
@ -123,20 +123,20 @@ bool EdgeStore::put_skip_edge(StoredEdge** previous, const Edge** current, size_
size_t skip_length = 0;
const Edge* const skip_ancestor = get_skip_ancestor(current, distance_to_root, &skip_length);
assert(skip_ancestor != NULL, "invariant");
assert(skip_ancestor != nullptr, "invariant");
(*previous)->set_skip_length(skip_length);
// lookup target
StoredEdge* stored_target = get(skip_ancestor->reference());
if (stored_target != NULL) {
if (stored_target != nullptr) {
(*previous)->set_parent(stored_target);
// linked to existing, complete
return true;
}
assert(stored_target == NULL, "invariant");
assert(stored_target == nullptr, "invariant");
stored_target = put(skip_ancestor->reference());
assert(stored_target != NULL, "invariant");
assert(stored_target != nullptr, "invariant");
(*previous)->set_parent(stored_target);
*previous = stored_target;
*current = skip_ancestor->parent();
@ -144,18 +144,18 @@ bool EdgeStore::put_skip_edge(StoredEdge** previous, const Edge** current, size_
}
static void link_edge(const StoredEdge* current_stored, StoredEdge** previous) {
assert(current_stored != NULL, "invariant");
assert(*previous != NULL, "invariant");
assert((*previous)->parent() == NULL, "invariant");
assert(current_stored != nullptr, "invariant");
assert(*previous != nullptr, "invariant");
assert((*previous)->parent() == nullptr, "invariant");
(*previous)->set_parent(current_stored);
}
static const StoredEdge* find_closest_skip_edge(const StoredEdge* edge, size_t* distance) {
assert(edge != NULL, "invariant");
assert(distance != NULL, "invariant");
assert(edge != nullptr, "invariant");
assert(distance != nullptr, "invariant");
const StoredEdge* current = edge;
*distance = 1;
while (current != NULL && !current->is_skip_edge()) {
while (current != nullptr && !current->is_skip_edge()) {
++(*distance);
current = current->parent();
}
@ -163,11 +163,11 @@ static const StoredEdge* find_closest_skip_edge(const StoredEdge* edge, size_t*
}
void EdgeStore::link_with_existing_chain(const StoredEdge* current_stored, StoredEdge** previous, size_t previous_length) {
assert(current_stored != NULL, "invariant");
assert((*previous)->parent() == NULL, "invariant");
assert(current_stored != nullptr, "invariant");
assert((*previous)->parent() == nullptr, "invariant");
size_t distance_to_skip_edge; // including the skip edge itself
const StoredEdge* const closest_skip_edge = find_closest_skip_edge(current_stored, &distance_to_skip_edge);
if (closest_skip_edge == NULL) {
if (closest_skip_edge == nullptr) {
// no skip edge found implies root
if (distance_to_skip_edge + previous_length <= EdgeUtils::max_ref_chain_depth) {
link_edge(current_stored, previous);
@ -188,33 +188,33 @@ void EdgeStore::link_with_existing_chain(const StoredEdge* current_stored, Store
}
StoredEdge* EdgeStore::link_new_edge(StoredEdge** previous, const Edge** current) {
assert(*previous != NULL, "invariant");
assert((*previous)->parent() == NULL, "invariant");
assert(*current != NULL, "invariant");
assert(*previous != nullptr, "invariant");
assert((*previous)->parent() == nullptr, "invariant");
assert(*current != nullptr, "invariant");
assert(!contains((*current)->reference()), "invariant");
StoredEdge* const stored_edge = put((*current)->reference());
assert(stored_edge != NULL, "invariant");
assert(stored_edge != nullptr, "invariant");
link_edge(stored_edge, previous);
return stored_edge;
}
bool EdgeStore::put_edges(StoredEdge** previous, const Edge** current, size_t limit) {
assert(*previous != NULL, "invariant");
assert(*current != NULL, "invariant");
assert(*previous != nullptr, "invariant");
assert(*current != nullptr, "invariant");
size_t depth = 1;
while (*current != NULL && depth < limit) {
while (*current != nullptr && depth < limit) {
StoredEdge* stored_edge = get((*current)->reference());
if (stored_edge != NULL) {
if (stored_edge != nullptr) {
link_with_existing_chain(stored_edge, previous, depth);
return true;
}
stored_edge = link_new_edge(previous, current);
assert((*previous)->parent() != NULL, "invariant");
assert((*previous)->parent() != nullptr, "invariant");
*previous = stored_edge;
*current = (*current)->parent();
++depth;
}
return NULL == *current;
return nullptr == *current;
}
static GrowableArray<const StoredEdge*>* _leak_context_edges = nullptr;
@ -222,7 +222,7 @@ static GrowableArray<const StoredEdge*>* _leak_context_edges = nullptr;
EdgeStore::EdgeStore() : _edges(new EdgeHashTable(this)) {}
EdgeStore::~EdgeStore() {
assert(_edges != NULL, "invariant");
assert(_edges != nullptr, "invariant");
delete _edges;
delete _leak_context_edges;
_leak_context_edges = nullptr;
@ -265,7 +265,7 @@ const StoredEdge* EdgeStore::get(const ObjectSample* sample) const {
static constexpr const int max_idx = right_n_bits(32 - markWord::lock_bits);
static void store_idx_precondition(oop sample_object, int idx) {
assert(sample_object != NULL, "invariant");
assert(sample_object != nullptr, "invariant");
assert(sample_object->mark().is_marked(), "invariant");
assert(idx > 0, "invariant");
assert(idx <= max_idx, "invariant");
@ -298,7 +298,7 @@ static void associate_with_candidate(const StoredEdge* leak_context_edge) {
}
StoredEdge* EdgeStore::associate_leak_context_with_candidate(const Edge* edge) {
assert(edge != NULL, "invariant");
assert(edge != nullptr, "invariant");
assert(!contains(edge->reference()), "invariant");
StoredEdge* const leak_context_edge = put(edge->reference());
associate_with_candidate(leak_context_edge);
@ -315,11 +315,11 @@ StoredEdge* EdgeStore::associate_leak_context_with_candidate(const Edge* edge) {
* The leak context edge is the edge adjacent to the leak candidate object, always an edge in the edge store.
*/
void EdgeStore::put_chain(const Edge* chain, size_t length) {
assert(chain != NULL, "invariant");
assert(chain != nullptr, "invariant");
assert(chain->distance_to_root() + 1 == length, "invariant");
StoredEdge* const leak_context_edge = associate_leak_context_with_candidate(chain);
assert(leak_context_edge != NULL, "invariant");
assert(leak_context_edge->parent() == NULL, "invariant");
assert(leak_context_edge != nullptr, "invariant");
assert(leak_context_edge->parent() == nullptr, "invariant");
if (1 == length) {
store_gc_root_id_in_leak_context_edge(leak_context_edge, leak_context_edge);
@ -327,13 +327,13 @@ void EdgeStore::put_chain(const Edge* chain, size_t length) {
}
const Edge* current = chain->parent();
assert(current != NULL, "invariant");
assert(current != nullptr, "invariant");
StoredEdge* previous = leak_context_edge;
// a leak context is the sequence of (limited) edges reachable from the leak candidate
if (put_edges(&previous, &current, EdgeUtils::leak_context)) {
// complete
assert(previous != NULL, "invariant");
assert(previous != nullptr, "invariant");
put_chain_epilogue(leak_context_edge, EdgeUtils::root(*previous));
return;
}
@ -345,9 +345,9 @@ void EdgeStore::put_chain(const Edge* chain, size_t length) {
// connecting the leak context sequence with the root context sequence
if (put_skip_edge(&previous, &current, distance_to_root)) {
// complete
assert(previous != NULL, "invariant");
assert(previous != nullptr, "invariant");
assert(previous->is_skip_edge(), "invariant");
assert(previous->parent() != NULL, "invariant");
assert(previous->parent() != nullptr, "invariant");
put_chain_epilogue(leak_context_edge, EdgeUtils::root(*previous->parent()));
return;
}
@ -356,13 +356,13 @@ void EdgeStore::put_chain(const Edge* chain, size_t length) {
// a root context is the sequence of (limited) edges reachable from the root
put_edges(&previous, &current, EdgeUtils::root_context);
assert(previous != NULL, "invariant");
assert(previous != nullptr, "invariant");
put_chain_epilogue(leak_context_edge, EdgeUtils::root(*previous));
}
void EdgeStore::put_chain_epilogue(StoredEdge* leak_context_edge, const Edge* root) const {
assert(leak_context_edge != NULL, "invariant");
assert(root != NULL, "invariant");
assert(leak_context_edge != nullptr, "invariant");
assert(root != nullptr, "invariant");
store_gc_root_id_in_leak_context_edge(leak_context_edge, root);
assert(leak_context_edge->distance_to_root() + 1 <= EdgeUtils::max_ref_chain_depth, "invariant");
}
@ -370,10 +370,10 @@ void EdgeStore::put_chain_epilogue(StoredEdge* leak_context_edge, const Edge* ro
// To avoid another traversal to resolve the root edge id later,
// cache it in the immediate leak context edge for fast retrieval.
void EdgeStore::store_gc_root_id_in_leak_context_edge(StoredEdge* leak_context_edge, const Edge* root) const {
assert(leak_context_edge != NULL, "invariant");
assert(leak_context_edge != nullptr, "invariant");
assert(leak_context_edge->gc_root_id() == 0, "invariant");
assert(root != NULL, "invariant");
assert(root->parent() == NULL, "invariant");
assert(root != nullptr, "invariant");
assert(root->parent() == nullptr, "invariant");
assert(root->distance_to_root() == 0, "invariant");
const StoredEdge* const stored_root = static_cast<const StoredEdge*>(root);
traceid root_id = stored_root->gc_root_id();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -36,14 +36,14 @@
#include "runtime/handles.inline.hpp"
static bool is_static_field(const oop ref_owner, const InstanceKlass* ik, int offset) {
assert(ref_owner != NULL, "invariant");
assert(ik != NULL, "invariant");
assert(ref_owner != nullptr, "invariant");
assert(ik != nullptr, "invariant");
assert(ref_owner->klass() == ik, "invariant");
return ik->is_mirror_instance_klass() && offset >= InstanceMirrorKlass::cast(ik)->offset_of_static_fields();
}
static int field_offset(const Edge& edge, const oop ref_owner) {
assert(ref_owner != NULL, "invariant");
assert(ref_owner != nullptr, "invariant");
assert(!ref_owner->is_array(), "invariant");
assert(ref_owner->is_instance(), "invariant");
UnifiedOopRef reference = edge.reference();
@ -57,9 +57,9 @@ static int field_offset(const Edge& edge, const oop ref_owner) {
const Symbol* EdgeUtils::field_name(const Edge& edge, jshort* modifiers) {
assert(!edge.is_root(), "invariant");
assert(!EdgeUtils::is_array_element(edge), "invariant");
assert(modifiers != NULL, "invariant");
assert(modifiers != nullptr, "invariant");
const oop ref_owner = edge.reference_owner();
assert(ref_owner != NULL, "invariant");
assert(ref_owner != nullptr, "invariant");
assert(ref_owner->klass()->is_instance_klass(), "invariant");
const InstanceKlass* ik = InstanceKlass::cast(ref_owner->klass());
const int offset = field_offset(edge, ref_owner);
@ -68,7 +68,7 @@ const Symbol* EdgeUtils::field_name(const Edge& edge, jshort* modifiers) {
assert(java_lang_Class::as_Klass(ref_owner)->is_instance_klass(), "invariant");
ik = InstanceKlass::cast(java_lang_Class::as_Klass(ref_owner));
}
while (ik != NULL) {
while (ik != nullptr) {
JavaFieldStream jfs(ik);
while (!jfs.done()) {
if (offset == jfs.offset()) {
@ -80,20 +80,20 @@ const Symbol* EdgeUtils::field_name(const Edge& edge, jshort* modifiers) {
ik = (const InstanceKlass*)ik->super();
}
*modifiers = 0;
return NULL;
return nullptr;
}
bool EdgeUtils::is_array_element(const Edge& edge) {
assert(!edge.is_root(), "invariant");
const oop ref_owner = edge.reference_owner();
assert(ref_owner != NULL, "invariant");
assert(ref_owner != nullptr, "invariant");
return ref_owner->is_objArray();
}
static int array_offset(const Edge& edge) {
assert(EdgeUtils::is_array_element(edge), "invariant");
const oop ref_owner = edge.reference_owner();
assert(ref_owner != NULL, "invariant");
assert(ref_owner != nullptr, "invariant");
UnifiedOopRef reference = edge.reference();
assert(!reference.is_null(), "invariant");
assert(ref_owner->is_array(), "invariant");
@ -110,7 +110,7 @@ int EdgeUtils::array_index(const Edge& edge) {
int EdgeUtils::array_size(const Edge& edge) {
assert(is_array_element(edge), "invariant");
const oop ref_owner = edge.reference_owner();
assert(ref_owner != NULL, "invariant");
assert(ref_owner != nullptr, "invariant");
assert(ref_owner->is_objArray(), "invariant");
return ((objArrayOop)ref_owner)->length();
}
@ -118,11 +118,11 @@ int EdgeUtils::array_size(const Edge& edge) {
const Edge* EdgeUtils::root(const Edge& edge) {
const Edge* current = &edge;
const Edge* parent = current->parent();
while (parent != NULL) {
while (parent != nullptr) {
current = parent;
parent = current->parent();
}
assert(current != NULL, "invariant");
assert(current != nullptr, "invariant");
return current;
}
@ -130,7 +130,7 @@ const Edge* EdgeUtils::ancestor(const Edge& edge, size_t distance) {
const Edge* current = &edge;
const Edge* parent = current->parent();
size_t seek = 0;
while (parent != NULL && seek != distance) {
while (parent != nullptr && seek != distance) {
seek++;
current = parent;
parent = parent->parent();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -45,7 +45,7 @@ class ObjectSampleMarker : public StackObj {
const markWord mark_word) : _obj(obj),
_mark_word(mark_word) {}
public:
ObjectSampleMarkWord() : _obj(NULL), _mark_word(markWord::zero()) {}
ObjectSampleMarkWord() : _obj(nullptr), _mark_word(markWord::zero()) {}
};
GrowableArray<ObjectSampleMarkWord>* _store;
@ -54,7 +54,7 @@ class ObjectSampleMarker : public StackObj {
ObjectSampleMarker() :
_store(new GrowableArray<ObjectSampleMarkWord>(16)) {}
~ObjectSampleMarker() {
assert(_store != NULL, "invariant");
assert(_store != nullptr, "invariant");
// restore the saved, original markWord for sample objects
while (_store->is_nonempty()) {
ObjectSampleMarkWord sample_oop = _store->pop();
@ -64,7 +64,7 @@ class ObjectSampleMarker : public StackObj {
}
void mark(oop obj) {
assert(obj != NULL, "invariant");
assert(obj != nullptr, "invariant");
// save the original markWord
_store->push(ObjectSampleMarkWord(obj, obj->mark()));
// now we will set the mark word to "marked" in order to quickly
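ObjectSampleMarker above is a plain save/mark/restore helper: the original mark word is pushed to a store when an object is marked, and the destructor restores every saved value. A simplified, self-contained sketch of the same pattern using standard containers; ScopedMarker and Obj are illustrative stand-ins, not the HotSpot types:
#include <utility>
#include <vector>
struct Obj { unsigned mark = 0; };
class ScopedMarker {
  std::vector<std::pair<Obj*, unsigned>> _saved;   // object plus its original mark
 public:
  void mark(Obj* obj) {
    _saved.emplace_back(obj, obj->mark);   // save the original value first
    obj->mark = 1u;                        // then tag the object as "marked"
  }
  ~ScopedMarker() {
    while (!_saved.empty()) {              // restore the originals on scope exit
      _saved.back().first->mark = _saved.back().second;
      _saved.pop_back();
    }
  }
};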

View File

@ -46,7 +46,7 @@ RootSetClosure<Delegate>::RootSetClosure(Delegate* delegate) : _delegate(delegat
template <typename Delegate>
void RootSetClosure<Delegate>::do_oop(oop* ref) {
assert(ref != NULL, "invariant");
assert(ref != nullptr, "invariant");
assert(is_aligned(ref, HeapWordSize), "invariant");
if (NativeAccess<>::oop_load(ref) != nullptr) {
_delegate->do_root(UnifiedOopRef::encode_in_native(ref));
@ -55,7 +55,7 @@ void RootSetClosure<Delegate>::do_oop(oop* ref) {
template <typename Delegate>
void RootSetClosure<Delegate>::do_oop(narrowOop* ref) {
assert(ref != NULL, "invariant");
assert(ref != nullptr, "invariant");
assert(is_aligned(ref, sizeof(narrowOop)), "invariant");
if (!CompressedOops::is_null(*ref)) {
_delegate->do_root(UnifiedOopRef::encode_in_native(ref));
@ -72,7 +72,7 @@ public:
RawRootClosure(Delegate* delegate) : _delegate(delegate) {}
void do_oop(oop* ref) {
assert(ref != NULL, "invariant");
assert(ref != nullptr, "invariant");
assert(is_aligned(ref, HeapWordSize), "invariant");
if (*ref != nullptr) {
_delegate->do_root(UnifiedOopRef::encode_as_raw(ref));
@ -80,7 +80,7 @@ public:
}
void do_oop(narrowOop* ref) {
assert(ref != NULL, "invariant");
assert(ref != nullptr, "invariant");
assert(is_aligned(ref, sizeof(narrowOop)), "invariant");
if (!CompressedOops::is_null(*ref)) {
_delegate->do_root(UnifiedOopRef::encode_as_raw(ref));
@ -99,7 +99,7 @@ void RootSetClosure<Delegate>::process() {
// We don't follow code blob oops, because they have misaligned oops.
RawRootClosure<Delegate> rrc(_delegate);
Threads::oops_do(&rrc, NULL);
Threads::oops_do(&rrc, nullptr);
}
template class RootSetClosure<BFSClosure>;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, Datadog, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -53,7 +53,7 @@ EventEmitter::~EventEmitter() {
}
void EventEmitter::emit(ObjectSampler* sampler, int64_t cutoff_ticks, bool emit_all, bool skip_bfs) {
assert(sampler != NULL, "invariant");
assert(sampler != nullptr, "invariant");
ResourceMark rm;
EdgeStore edge_store;
if (cutoff_ticks <= 0) {
@ -71,8 +71,8 @@ void EventEmitter::emit(ObjectSampler* sampler, int64_t cutoff_ticks, bool emit_
size_t EventEmitter::write_events(ObjectSampler* object_sampler, EdgeStore* edge_store, bool emit_all) {
assert(_thread == Thread::current(), "invariant");
assert(_thread->jfr_thread_local() == _jfr_thread_local, "invariant");
assert(object_sampler != NULL, "invariant");
assert(edge_store != NULL, "invariant");
assert(object_sampler != nullptr, "invariant");
assert(edge_store != nullptr, "invariant");
const jlong last_sweep = emit_all ? max_jlong : ObjectSampler::last_sweep();
size_t count = 0;
@ -80,7 +80,7 @@ size_t EventEmitter::write_events(ObjectSampler* object_sampler, EdgeStore* edge
// First pass associates a live sample with its immediate edge
// in preparation for writing checkpoint information.
const ObjectSample* current = object_sampler->first();
while (current != NULL) {
while (current != nullptr) {
ObjectSample* prev = current->prev();
if (current->is_alive_and_older_than(last_sweep)) {
link_sample_with_edge(current, edge_store);
@ -96,7 +96,7 @@ size_t EventEmitter::write_events(ObjectSampler* object_sampler, EdgeStore* edge
// Now we are ready to write the events
const ObjectSample* current = object_sampler->first();
while (current != NULL) {
while (current != nullptr) {
ObjectSample* prev = current->prev();
if (current->is_alive_and_older_than(last_sweep)) {
write_event(current, edge_store);
@ -108,7 +108,7 @@ size_t EventEmitter::write_events(ObjectSampler* object_sampler, EdgeStore* edge
}
static int array_size(const oop object) {
assert(object != NULL, "invariant");
assert(object != nullptr, "invariant");
if (object->is_array()) {
return arrayOop(object)->length();
}
@ -116,9 +116,9 @@ static int array_size(const oop object) {
}
void EventEmitter::link_sample_with_edge(const ObjectSample* sample, EdgeStore* edge_store) {
assert(sample != NULL, "invariant");
assert(sample != nullptr, "invariant");
assert(!sample->is_dead(), "invariant");
assert(edge_store != NULL, "invariant");
assert(edge_store != nullptr, "invariant");
if (SafepointSynchronize::is_at_safepoint()) {
if (edge_store->has_leak_context(sample)) {
// Associated with an edge (chain) already during heap traversal.
@ -132,13 +132,13 @@ void EventEmitter::link_sample_with_edge(const ObjectSample* sample, EdgeStore*
}
void EventEmitter::write_event(const ObjectSample* sample, EdgeStore* edge_store) {
assert(sample != NULL, "invariant");
assert(sample != nullptr, "invariant");
assert(!sample->is_dead(), "invariant");
assert(edge_store != NULL, "invariant");
assert(_jfr_thread_local != NULL, "invariant");
assert(edge_store != nullptr, "invariant");
assert(_jfr_thread_local != nullptr, "invariant");
const StoredEdge* const edge = edge_store->get(sample);
assert(edge != NULL, "invariant");
assert(edge != nullptr, "invariant");
assert(edge->pointee() == sample->object(), "invariant");
const traceid object_id = edge_store->get_id(edge);
assert(object_id != 0, "invariant");
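write_events above makes two passes over the sample list with the same filter: the first pass links live, old-enough samples with their reference edges, the second writes an event per qualifying sample and counts them. A hedged, self-contained sketch of that shape; Sample and the prepare/write callables are stand-ins for the HotSpot types:
#include <cstddef>
#include <cstdint>
#include <vector>
struct Sample {
  bool alive = false;
  int64_t allocation_time = 0;
};
template <typename Prepare, typename Write>
static size_t emit_samples(std::vector<Sample>& samples, int64_t last_sweep,
                           Prepare prepare, Write write) {
  // Pass 1: prepare checkpoint data for live samples older than the last sweep.
  for (Sample& s : samples) {
    if (s.alive && s.allocation_time < last_sweep) {
      prepare(s);
    }
  }
  // Pass 2: write one event per qualifying sample.
  size_t count = 0;
  for (Sample& s : samples) {
    if (s.alive && s.allocation_time < last_sweep) {
      write(s);
      ++count;
    }
  }
  return count;
}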

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -55,7 +55,7 @@ static GrowableArray<T>* c_heap_allocate_array(int size = initial_array_size) {
return new (mtTracing) GrowableArray<T>(size, mtTracing);
}
static GrowableArray<traceid>* unloaded_thread_id_set = NULL;
static GrowableArray<traceid>* unloaded_thread_id_set = nullptr;
class ThreadIdExclusiveAccess : public StackObj {
private:
@ -69,7 +69,7 @@ Semaphore ThreadIdExclusiveAccess::_mutex_semaphore(1);
static bool has_thread_exited(traceid tid) {
assert(tid != 0, "invariant");
if (unloaded_thread_id_set == NULL) {
if (unloaded_thread_id_set == nullptr) {
return false;
}
ThreadIdExclusiveAccess lock;
@ -78,7 +78,7 @@ static bool has_thread_exited(traceid tid) {
static void add_to_unloaded_thread_set(traceid tid) {
ThreadIdExclusiveAccess lock;
if (unloaded_thread_id_set == NULL) {
if (unloaded_thread_id_set == nullptr) {
unloaded_thread_id_set = c_heap_allocate_array<traceid>();
}
JfrMutablePredicate<traceid, compare_traceid>::test(unloaded_thread_id_set, tid);
@ -93,16 +93,16 @@ void ObjectSampleCheckpoint::on_thread_exit(traceid tid) {
void ObjectSampleCheckpoint::clear() {
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
if (unloaded_thread_id_set != NULL) {
if (unloaded_thread_id_set != nullptr) {
delete unloaded_thread_id_set;
unloaded_thread_id_set = NULL;
unloaded_thread_id_set = nullptr;
}
assert(unloaded_thread_id_set == NULL, "invariant");
assert(unloaded_thread_id_set == nullptr, "invariant");
}
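The unloaded thread id set above is allocated lazily and every access is serialized through a semaphore-backed scope lock (ThreadIdExclusiveAccess). A rough, self-contained equivalent using standard primitives, shown only to illustrate the shape; std::mutex and std::set stand in for the HotSpot Semaphore and GrowableArray:
#include <cstdint>
#include <mutex>
#include <set>
static std::set<uint64_t>* unloaded_ids = nullptr;   // lazily allocated on first use
static std::mutex unloaded_ids_mutex;
static void add_unloaded_thread(uint64_t tid) {
  std::lock_guard<std::mutex> lock(unloaded_ids_mutex);
  if (unloaded_ids == nullptr) {
    unloaded_ids = new std::set<uint64_t>();
  }
  unloaded_ids->insert(tid);
}
static bool has_thread_exited(uint64_t tid) {
  std::lock_guard<std::mutex> lock(unloaded_ids_mutex);
  return unloaded_ids != nullptr && unloaded_ids->count(tid) != 0;
}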
template <typename Processor>
static void do_samples(ObjectSample* sample, const ObjectSample* end, Processor& processor) {
assert(sample != NULL, "invariant");
assert(sample != nullptr, "invariant");
while (sample != end) {
processor.sample_do(sample);
sample = sample->next();
@ -112,10 +112,10 @@ static void do_samples(ObjectSample* sample, const ObjectSample* end, Processor&
template <typename Processor>
static void iterate_samples(Processor& processor, bool all = false) {
ObjectSampler* const sampler = ObjectSampler::sampler();
assert(sampler != NULL, "invariant");
assert(sampler != nullptr, "invariant");
ObjectSample* const last = sampler->last();
assert(last != NULL, "invariant");
do_samples(last, all ? NULL : sampler->last_resolved(), processor);
assert(last != nullptr, "invariant");
do_samples(last, all ? nullptr : sampler->last_resolved(), processor);
}
class SampleMarker {
@ -137,8 +137,8 @@ class SampleMarker {
};
int ObjectSampleCheckpoint::save_mark_words(const ObjectSampler* sampler, ObjectSampleMarker& marker, bool emit_all) {
assert(sampler != NULL, "invariant");
if (sampler->last() == NULL) {
assert(sampler != nullptr, "invariant");
if (sampler->last() == nullptr) {
return 0;
}
SampleMarker sample_marker(marker, emit_all ? max_jlong : ObjectSampler::last_sweep());
@ -163,45 +163,45 @@ class BlobCache {
};
JfrBlobHandle BlobCache::get(const ObjectSample* sample) {
assert(sample != NULL, "invariant");
assert(sample != nullptr, "invariant");
_lookup_id = sample->stack_trace_id();
assert(_lookup_id != 0, "invariant");
BlobEntry* const entry = _table.lookup_only(sample->stack_trace_hash());
return entry != NULL ? entry->literal() : JfrBlobHandle();
return entry != nullptr ? entry->literal() : JfrBlobHandle();
}
void BlobCache::put(const ObjectSample* sample, const JfrBlobHandle& blob) {
assert(sample != NULL, "invariant");
assert(_table.lookup_only(sample->stack_trace_hash()) == NULL, "invariant");
assert(sample != nullptr, "invariant");
assert(_table.lookup_only(sample->stack_trace_hash()) == nullptr, "invariant");
_lookup_id = sample->stack_trace_id();
assert(_lookup_id != 0, "invariant");
_table.put(sample->stack_trace_hash(), blob);
}
inline void BlobCache::on_link(const BlobEntry* entry) const {
assert(entry != NULL, "invariant");
assert(entry != nullptr, "invariant");
assert(entry->id() == 0, "invariant");
entry->set_id(_lookup_id);
}
inline bool BlobCache::on_equals(uintptr_t hash, const BlobEntry* entry) const {
assert(entry != NULL, "invariant");
assert(entry != nullptr, "invariant");
assert(entry->hash() == hash, "invariant");
return entry->id() == _lookup_id;
}
inline void BlobCache::on_unlink(BlobEntry* entry) const {
assert(entry != NULL, "invariant");
assert(entry != nullptr, "invariant");
}
static GrowableArray<traceid>* id_set = NULL;
static GrowableArray<traceid>* id_set = nullptr;
static void prepare_for_resolution() {
id_set = new GrowableArray<traceid>(JfrOptionSet::old_object_queue_size());
}
static bool stack_trace_precondition(const ObjectSample* sample) {
assert(sample != NULL, "invariant");
assert(sample != nullptr, "invariant");
return sample->has_stack_trace_id() && !sample->is_dead();
}
@ -227,7 +227,7 @@ class StackTraceBlobInstaller {
#ifdef ASSERT
static void validate_stack_trace(const ObjectSample* sample, const JfrStackTrace* stack_trace) {
assert(!sample->has_stacktrace(), "invariant");
assert(stack_trace != NULL, "invariant");
assert(stack_trace != nullptr, "invariant");
assert(stack_trace->hash() == sample->stack_trace_hash(), "invariant");
assert(stack_trace->id() == sample->stack_trace_id(), "invariant");
}
@ -255,7 +255,7 @@ void StackTraceBlobInstaller::install(ObjectSample* sample) {
}
static void install_stack_traces(const ObjectSampler* sampler) {
assert(sampler != NULL, "invariant");
assert(sampler != nullptr, "invariant");
const ObjectSample* const last = sampler->last();
if (last != sampler->last_resolved()) {
ResourceMark rm;
@ -266,7 +266,7 @@ static void install_stack_traces(const ObjectSampler* sampler) {
}
void ObjectSampleCheckpoint::on_rotation(const ObjectSampler* sampler) {
assert(sampler != NULL, "invariant");
assert(sampler != nullptr, "invariant");
assert(LeakProfiler::is_running(), "invariant");
JavaThread* const thread = JavaThread::current();
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(thread);)
@ -284,23 +284,23 @@ static bool is_klass_unloaded(traceid klass_id) {
static bool is_processed(traceid method_id) {
assert(method_id != 0, "invariant");
assert(id_set != NULL, "invariant");
assert(id_set != nullptr, "invariant");
return JfrMutablePredicate<traceid, compare_traceid>::test(id_set, method_id);
}
void ObjectSampleCheckpoint::add_to_leakp_set(const InstanceKlass* ik, traceid method_id) {
assert(ik != NULL, "invariant");
assert(ik != nullptr, "invariant");
if (is_processed(method_id) || is_klass_unloaded(JfrMethodLookup::klass_id(method_id))) {
return;
}
const Method* const method = JfrMethodLookup::lookup(ik, method_id);
assert(method != NULL, "invariant");
assert(method != nullptr, "invariant");
assert(method->method_holder() == ik, "invariant");
JfrTraceId::load_leakp(ik, method);
}
void ObjectSampleCheckpoint::write_stacktrace(const JfrStackTrace* trace, JfrCheckpointWriter& writer) {
assert(trace != NULL, "invariant");
assert(trace != nullptr, "invariant");
// JfrStackTrace
writer.write(trace->id());
writer.write((u1)!trace->_reached_root);
@ -341,7 +341,7 @@ static void write_stacktrace_blob(const ObjectSample* sample, JfrCheckpointWrite
}
static void write_blobs(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) {
assert(sample != NULL, "invariant");
assert(sample != nullptr, "invariant");
write_stacktrace_blob(sample, writer, reset);
write_thread_blob(sample, writer, reset);
write_type_set_blob(sample, writer, reset);
@ -378,9 +378,9 @@ static void write_sample_blobs(const ObjectSampler* sampler, bool emit_all, Thre
}
void ObjectSampleCheckpoint::write(const ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread) {
assert(sampler != NULL, "invariant");
assert(edge_store != NULL, "invariant");
assert(thread != NULL, "invariant");
assert(sampler != nullptr, "invariant");
assert(edge_store != nullptr, "invariant");
assert(thread != nullptr, "invariant");
write_sample_blobs(sampler, emit_all, thread);
// write reference chains
if (!edge_store->is_empty()) {
@ -430,7 +430,7 @@ void ObjectSampleCheckpoint::on_type_set(JfrCheckpointWriter& writer) {
assert(LeakProfiler::is_running(), "invariant");
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(JavaThread::current());)
const ObjectSample* last = ObjectSampler::sampler()->last();
if (writer.has_data() && last != NULL) {
if (writer.has_data() && last != nullptr) {
save_type_set_blob(writer);
install_type_set_blobs();
ObjectSampler::sampler()->set_last_resolved(last);
@ -440,7 +440,7 @@ void ObjectSampleCheckpoint::on_type_set(JfrCheckpointWriter& writer) {
void ObjectSampleCheckpoint::on_type_set_unload(JfrCheckpointWriter& writer) {
assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
assert(LeakProfiler::is_running(), "invariant");
if (writer.has_data() && ObjectSampler::sampler()->last() != NULL) {
if (writer.has_data() && ObjectSampler::sampler()->last() != nullptr) {
save_type_set_blob(writer, true);
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,7 +35,7 @@
#include "runtime/javaThread.hpp"
#include "utilities/ostream.hpp"
static Symbol* symbol_size = NULL;
static Symbol* symbol_size = nullptr;
ObjectDescriptionBuilder::ObjectDescriptionBuilder() {
reset();
@ -78,11 +78,11 @@ void ObjectDescriptionBuilder::print_description(outputStream* out) {
const char* ObjectDescriptionBuilder::description() {
if (_buffer[0] == '\0') {
return NULL;
return nullptr;
}
const size_t len = strlen(_buffer);
char* copy = NEW_RESOURCE_ARRAY(char, len + 1);
assert(copy != NULL, "invariant");
assert(copy != nullptr, "invariant");
strncpy(copy, _buffer, len + 1);
return copy;
}
@ -92,7 +92,7 @@ ObjectSampleDescription::ObjectSampleDescription(oop object) :
}
void ObjectSampleDescription::ensure_initialized() {
if (symbol_size == NULL) {
if (symbol_size == nullptr) {
symbol_size = SymbolTable::new_permanent_symbol("size");
}
}
@ -150,13 +150,13 @@ void ObjectSampleDescription::write_object_details() {
void ObjectSampleDescription::write_class_name() {
assert(_object->is_a(vmClasses::Class_klass()), "invariant");
const Klass* const k = java_lang_Class::as_Klass(_object);
if (k == NULL) {
if (k == nullptr) {
// might represent a primitive
const Klass* const ak = java_lang_Class::array_klass_acquire(_object);
// If ak is NULL, this is most likely a mirror associated with a
// If ak is null, this is most likely a mirror associated with a
// jvmti redefine/retransform scratch klass. We can't get any additional
// information from it.
if (ak != NULL) {
if (ak != nullptr) {
write_text(type2name(java_lang_Class::primitive_type(_object)));
}
return;
@ -168,7 +168,7 @@ void ObjectSampleDescription::write_class_name() {
return;
}
const Symbol* name = ik->name();
if (name != NULL) {
if (name != nullptr) {
write_text("Class Name: ");
write_text(name->as_klass_external_name());
}
@ -178,7 +178,7 @@ void ObjectSampleDescription::write_class_name() {
void ObjectSampleDescription::write_thread_group_name() {
assert(_object->is_a(vmClasses::ThreadGroup_klass()), "invariant");
const char* tg_name = java_lang_ThreadGroup::name(_object);
if (tg_name != NULL) {
if (tg_name != nullptr) {
write_text("Thread Group: ");
write_text(tg_name);
}
@ -187,9 +187,9 @@ void ObjectSampleDescription::write_thread_group_name() {
void ObjectSampleDescription::write_thread_name() {
assert(_object->is_a(vmClasses::Thread_klass()), "invariant");
oop name = java_lang_Thread::name(_object);
if (name != NULL) {
if (name != nullptr) {
char* p = java_lang_String::as_utf8_string(name);
if (p != NULL) {
if (p != nullptr) {
write_text("Thread Name: ");
write_text(p);
}
@ -208,7 +208,7 @@ bool ObjectSampleDescription::read_int_size(jint* result_size) {
Klass* klass = _object->klass();
if (klass->is_instance_klass()) {
InstanceKlass* ik = InstanceKlass::cast(klass);
if (ik->find_field(symbol_size, vmSymbols::int_signature(), false, &fd) != NULL) {
if (ik->find_field(symbol_size, vmSymbols::int_signature(), false, &fd) != nullptr) {
jint size = _object->int_field(fd.offset());
*result_size = size;
return true;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -58,7 +58,7 @@ class ObjectSampleFieldInfo : public ResourceObj {
public:
const Symbol* _field_name_symbol;
jshort _field_modifiers;
ObjectSampleFieldInfo() : _field_name_symbol(NULL), _field_modifiers(0) {}
ObjectSampleFieldInfo() : _field_name_symbol(nullptr), _field_modifiers(0) {}
};
class ObjectSampleRootDescriptionData {
@ -67,8 +67,8 @@ class ObjectSampleRootDescriptionData {
const char* _description;
OldObjectRoot::System _system;
OldObjectRoot::Type _type;
ObjectSampleRootDescriptionData() : _root_edge(NULL),
_description(NULL),
ObjectSampleRootDescriptionData() : _root_edge(nullptr),
_description(nullptr),
_system(OldObjectRoot::_system_undetermined),
_type(OldObjectRoot::_type_undetermined) {}
};
@ -94,26 +94,26 @@ class SampleSet : public ResourceObj {
private:
GrowableArray<Data>* _storage;
public:
SampleSet() : _storage(NULL) {}
SampleSet() : _storage(nullptr) {}
traceid store(Data data) {
assert(data != NULL, "invariant");
if (_storage == NULL) {
assert(data != nullptr, "invariant");
if (_storage == nullptr) {
_storage = new GrowableArray<Data>(initial_storage_size);
}
assert(_storage != NULL, "invariant");
assert(_storage != nullptr, "invariant");
assert(_storage->find(data) == -1, "invariant");
_storage->append(data);
return data->_id;
}
size_t size() const {
return _storage != NULL ? (size_t)_storage->length() : 0;
return _storage != nullptr ? (size_t)_storage->length() : 0;
}
template <typename Functor>
void iterate(Functor& functor) {
if (_storage != NULL) {
if (_storage != nullptr) {
for (int i = 0; i < _storage->length(); ++i) {
functor(_storage->at(i));
}
@ -147,30 +147,30 @@ class FieldTable : public ResourceObj {
const ObjectSampleFieldInfo* _lookup;
void on_link(FieldInfoEntry* entry) {
assert(entry != NULL, "invariant");
assert(entry != nullptr, "invariant");
entry->set_id(++_field_id_counter);
}
bool on_equals(uintptr_t hash, const FieldInfoEntry* entry) {
assert(hash == entry->hash(), "invariant");
assert(_lookup != NULL, "invariant");
assert(_lookup != nullptr, "invariant");
return entry->literal()->_field_modifiers == _lookup->_field_modifiers;
}
void on_unlink(FieldInfoEntry* entry) {
assert(entry != NULL, "invariant");
assert(entry != nullptr, "invariant");
// nothing
}
public:
FieldTable() : _table(new FieldInfoTable(this)), _lookup(NULL) {}
FieldTable() : _table(new FieldInfoTable(this)), _lookup(nullptr) {}
~FieldTable() {
assert(_table != NULL, "invariant");
assert(_table != nullptr, "invariant");
delete _table;
}
traceid store(const ObjectSampleFieldInfo* field_info) {
assert(field_info != NULL, "invariant");
assert(field_info != nullptr, "invariant");
_lookup = field_info;
const FieldInfoEntry& entry = _table->lookup_put(field_info->_field_name_symbol->identity_hash(), field_info);
return entry.id();
@ -193,18 +193,18 @@ typedef SampleSet<const ReferenceInfo*> RefInfo;
typedef SampleSet<const ObjectSampleArrayInfo*> ArrayInfo;
typedef SampleSet<const ObjectSampleRootDescriptionInfo*> RootDescriptionInfo;
static SampleInfo* sample_infos = NULL;
static RefInfo* ref_infos = NULL;
static ArrayInfo* array_infos = NULL;
static FieldTable* field_infos = NULL;
static RootDescriptionInfo* root_infos = NULL;
static SampleInfo* sample_infos = nullptr;
static RefInfo* ref_infos = nullptr;
static ArrayInfo* array_infos = nullptr;
static FieldTable* field_infos = nullptr;
static RootDescriptionInfo* root_infos = nullptr;
int __write_sample_info__(JfrCheckpointWriter* writer, const void* si) {
assert(writer != NULL, "invariant");
assert(si != NULL, "invariant");
assert(writer != nullptr, "invariant");
assert(si != nullptr, "invariant");
const OldObjectSampleInfo* const oosi = (const OldObjectSampleInfo*)si;
oop object = oosi->_data._object;
assert(object != NULL, "invariant");
assert(object != nullptr, "invariant");
writer->write(oosi->_id);
writer->write(cast_from_oop<u8>(object));
writer->write(const_cast<const Klass*>(object->klass()));
@ -218,15 +218,15 @@ typedef JfrTypeWriterImplHost<const OldObjectSampleInfo*, __write_sample_info__>
typedef JfrTypeWriterHost<SampleWriterImpl, TYPE_OLDOBJECT> SampleWriter;
static void write_sample_infos(JfrCheckpointWriter& writer) {
if (sample_infos != NULL) {
if (sample_infos != nullptr) {
SampleWriter sw(&writer);
sample_infos->iterate(sw);
}
}
int __write_reference_info__(JfrCheckpointWriter* writer, const void* ri) {
assert(writer != NULL, "invariant");
assert(ri != NULL, "invariant");
assert(writer != nullptr, "invariant");
assert(ri != nullptr, "invariant");
const ReferenceInfo* const ref_info = (const ReferenceInfo*)ri;
writer->write(ref_info->_id);
writer->write(ref_info->_data._array_info_id);
@ -240,15 +240,15 @@ typedef JfrTypeWriterImplHost<const ReferenceInfo*, __write_reference_info__> Re
typedef JfrTypeWriterHost<ReferenceWriterImpl, TYPE_REFERENCE> ReferenceWriter;
static void write_reference_infos(JfrCheckpointWriter& writer) {
if (ref_infos != NULL) {
if (ref_infos != nullptr) {
ReferenceWriter rw(&writer);
ref_infos->iterate(rw);
}
}
int __write_array_info__(JfrCheckpointWriter* writer, const void* ai) {
assert(writer != NULL, "invariant");
assert(ai != NULL, "invariant");
assert(writer != nullptr, "invariant");
assert(ai != nullptr, "invariant");
const ObjectSampleArrayInfo* const osai = (const ObjectSampleArrayInfo*)ai;
writer->write(osai->_id);
writer->write(osai->_data._array_size);
@ -260,13 +260,13 @@ static traceid get_array_info_id(const Edge& edge, traceid id) {
if (edge.is_root() || !EdgeUtils::is_array_element(edge)) {
return 0;
}
if (array_infos == NULL) {
if (array_infos == nullptr) {
array_infos = new ArrayInfo();
}
assert(array_infos != NULL, "invariant");
assert(array_infos != nullptr, "invariant");
ObjectSampleArrayInfo* const osai = new ObjectSampleArrayInfo();
assert(osai != NULL, "invariant");
assert(osai != nullptr, "invariant");
osai->_id = id;
osai->_data._array_size = EdgeUtils::array_size(edge);
osai->_data._array_index = EdgeUtils::array_index(edge);
@ -277,15 +277,15 @@ typedef JfrTypeWriterImplHost<const ObjectSampleArrayInfo*, __write_array_info__
typedef JfrTypeWriterHost<ArrayWriterImpl, TYPE_OLDOBJECTARRAY> ArrayWriter;
static void write_array_infos(JfrCheckpointWriter& writer) {
if (array_infos != NULL) {
if (array_infos != nullptr) {
ArrayWriter aw(&writer);
array_infos->iterate(aw);
}
}
int __write_field_info__(JfrCheckpointWriter* writer, const void* fi) {
assert(writer != NULL, "invariant");
assert(fi != NULL, "invariant");
assert(writer != nullptr, "invariant");
assert(fi != nullptr, "invariant");
const FieldTable::FieldInfoEntry* field_info_entry = (const FieldTable::FieldInfoEntry*)fi;
writer->write(field_info_entry->id());
const ObjectSampleFieldInfo* const osfi = field_info_entry->literal();
@ -301,15 +301,15 @@ static traceid get_field_info_id(const Edge& edge) {
assert(!EdgeUtils::is_array_element(edge), "invariant");
jshort field_modifiers;
const Symbol* const field_name_symbol = EdgeUtils::field_name(edge, &field_modifiers);
if (field_name_symbol == NULL) {
if (field_name_symbol == nullptr) {
return 0;
}
if (field_infos == NULL) {
if (field_infos == nullptr) {
field_infos = new FieldTable();
}
assert(field_infos != NULL, "invariant");
assert(field_infos != nullptr, "invariant");
ObjectSampleFieldInfo* const osfi = new ObjectSampleFieldInfo();
assert(osfi != NULL, "invariant");
assert(osfi != nullptr, "invariant");
osfi->_field_name_symbol = field_name_symbol;
osfi->_field_modifiers = field_modifiers;
return field_infos->store(osfi);
@ -319,17 +319,17 @@ typedef JfrTypeWriterImplHost<const FieldTable::FieldInfoEntry*, __write_field_i
typedef JfrTypeWriterHost<FieldWriterImpl, TYPE_OLDOBJECTFIELD> FieldWriter;
static void write_field_infos(JfrCheckpointWriter& writer) {
if (field_infos != NULL) {
if (field_infos != nullptr) {
FieldWriter fw(&writer);
field_infos->iterate(fw);
}
}
static const char* description(const ObjectSampleRootDescriptionInfo* osdi) {
assert(osdi != NULL, "invariant");
assert(osdi != nullptr, "invariant");
if (osdi->_data._description == NULL) {
return NULL;
if (osdi->_data._description == nullptr) {
return nullptr;
}
ObjectDescriptionBuilder description;
@ -341,8 +341,8 @@ static const char* description(const ObjectSampleRootDescriptionInfo* osdi) {
}
int __write_root_description_info__(JfrCheckpointWriter* writer, const void* di) {
assert(writer != NULL, "invariant");
assert(di != NULL, "invariant");
assert(writer != nullptr, "invariant");
assert(di != nullptr, "invariant");
const ObjectSampleRootDescriptionInfo* const osdi = (const ObjectSampleRootDescriptionInfo*)di;
writer->write(osdi->_id);
writer->write(description(osdi));
@ -353,10 +353,10 @@ int __write_root_description_info__(JfrCheckpointWriter* writer, const void* di)
static traceid get_gc_root_description_info_id(const Edge& edge, traceid id) {
assert(edge.is_root(), "invariant");
if (root_infos == NULL) {
if (root_infos == nullptr) {
root_infos = new RootDescriptionInfo();
}
assert(root_infos != NULL, "invariant");
assert(root_infos != nullptr, "invariant");
ObjectSampleRootDescriptionInfo* const oodi = new ObjectSampleRootDescriptionInfo();
oodi->_id = id;
oodi->_data._root_edge = &edge;
@ -381,7 +381,7 @@ static int find_sorted(const RootCallbackInfo& callback_info,
const GrowableArray<const ObjectSampleRootDescriptionInfo*>* arr,
int length,
bool& found) {
assert(arr != NULL, "invariant");
assert(arr != nullptr, "invariant");
assert(length >= 0, "invariant");
assert(length <= arr->length(), "invariant");
@ -417,14 +417,14 @@ class RootResolutionSet : public ResourceObj, public RootCallback {
}
bool in_set_address_range(const RootCallbackInfo& callback_info) const {
assert(callback_info._low == NULL, "invariant");
assert(callback_info._low == nullptr, "invariant");
const uintptr_t addr = (uintptr_t)callback_info._high;
return low() <= addr && high() >= addr;
}
int compare_to_range(const RootCallbackInfo& callback_info) const {
assert(callback_info._high != NULL, "invariant");
assert(callback_info._low != NULL, "invariant");
assert(callback_info._high != nullptr, "invariant");
assert(callback_info._low != nullptr, "invariant");
for (int i = 0; i < _unresolved_roots->length(); ++i) {
const uintptr_t ref_addr = _unresolved_roots->at(i)->_data._root_edge->reference().addr<uintptr_t>();
@ -436,7 +436,7 @@ class RootResolutionSet : public ResourceObj, public RootCallback {
}
int exact(const RootCallbackInfo& callback_info) const {
assert(callback_info._high != NULL, "invariant");
assert(callback_info._high != nullptr, "invariant");
assert(in_set_address_range(callback_info), "invariant");
bool found;
@ -450,7 +450,7 @@ class RootResolutionSet : public ResourceObj, public RootCallback {
ObjectSampleRootDescriptionInfo* const desc =
const_cast<ObjectSampleRootDescriptionInfo*>(_unresolved_roots->at(idx));
assert(desc != NULL, "invariant");
assert(desc != nullptr, "invariant");
assert((uintptr_t)callback_info._high == desc->_data._root_edge->reference().addr<uintptr_t>(), "invariant");
desc->_data._system = callback_info._system;
@ -458,7 +458,7 @@ class RootResolutionSet : public ResourceObj, public RootCallback {
if (callback_info._system == OldObjectRoot::_threads) {
const JavaThread* jt = (const JavaThread*)callback_info._context;
assert(jt != NULL, "invariant");
assert(jt != nullptr, "invariant");
desc->_data._description = jt->name();
}
@ -467,13 +467,13 @@ class RootResolutionSet : public ResourceObj, public RootCallback {
}
public:
RootResolutionSet(RootDescriptionInfo* info) : _unresolved_roots(NULL) {
assert(info != NULL, "invariant");
RootResolutionSet(RootDescriptionInfo* info) : _unresolved_roots(nullptr) {
assert(info != nullptr, "invariant");
// construct a sorted copy
const GrowableArray<const ObjectSampleRootDescriptionInfo*>& info_storage = info->storage();
const int length = info_storage.length();
_unresolved_roots = new GrowableArray<const ObjectSampleRootDescriptionInfo*>(length);
assert(_unresolved_roots != NULL, "invariant");
assert(_unresolved_roots != nullptr, "invariant");
for (int i = 0; i < length; ++i) {
_unresolved_roots->insert_sorted<_root_desc_compare_>(info_storage.at(i));
@ -481,14 +481,14 @@ class RootResolutionSet : public ResourceObj, public RootCallback {
}
bool process(const RootCallbackInfo& callback_info) {
if (NULL == callback_info._low) {
if (nullptr == callback_info._low) {
if (in_set_address_range(callback_info)) {
const int idx = exact(callback_info);
return idx == -1 ? false : resolve_root(callback_info, idx);
}
return false;
}
assert(callback_info._low != NULL, "invariant");
assert(callback_info._low != nullptr, "invariant");
const int idx = compare_to_range(callback_info);
return idx == -1 ? false : resolve_root(callback_info, idx);
}
@ -505,7 +505,7 @@ class RootResolutionSet : public ResourceObj, public RootCallback {
};
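process() above dispatches on whether the callback reports a single address or a [low, high] range, and both paths rely on the unresolved roots being kept sorted by reference address (insert_sorted, find_sorted) so a match can be located quickly. A self-contained sketch of that range lookup; find_in_range and the plain uintptr_t vector are illustrative only:
#include <algorithm>
#include <cstdint>
#include <vector>
// sorted_roots must be sorted ascending; returns the index of an unresolved
// root whose address falls inside [low, high], or -1 if there is none.
static int find_in_range(const std::vector<std::uintptr_t>& sorted_roots,
                         std::uintptr_t low, std::uintptr_t high) {
  auto it = std::lower_bound(sorted_roots.begin(), sorted_roots.end(), low);
  if (it != sorted_roots.end() && *it <= high) {
    return static_cast<int>(it - sorted_roots.begin());
  }
  return -1;
}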
static void write_root_descriptors(JfrCheckpointWriter& writer) {
if (root_infos != NULL) {
if (root_infos != nullptr) {
// resolve roots
RootResolutionSet rrs(root_infos);
RootResolver::resolve(rrs);
@ -516,28 +516,28 @@ static void write_root_descriptors(JfrCheckpointWriter& writer) {
}
static void add_old_object_sample_info(const StoredEdge* current, traceid id) {
assert(current != NULL, "invariant");
if (sample_infos == NULL) {
assert(current != nullptr, "invariant");
if (sample_infos == nullptr) {
sample_infos = new SampleInfo();
}
assert(sample_infos != NULL, "invariant");
assert(sample_infos != nullptr, "invariant");
OldObjectSampleInfo* const oosi = new OldObjectSampleInfo();
assert(oosi != NULL, "invariant");
assert(oosi != nullptr, "invariant");
oosi->_id = id;
oosi->_data._object = current->pointee();
oosi->_data._reference_id = current->parent() == NULL ? 0 : id;
oosi->_data._reference_id = current->parent() == nullptr ? 0 : id;
sample_infos->store(oosi);
}
static void add_reference_info(const StoredEdge* current, traceid id, traceid parent_id) {
assert(current != NULL, "invariant");
if (ref_infos == NULL) {
assert(current != nullptr, "invariant");
if (ref_infos == nullptr) {
ref_infos = new RefInfo();
}
assert(ref_infos != NULL, "invariant");
assert(ref_infos != nullptr, "invariant");
ReferenceInfo* const ri = new ReferenceInfo();
assert(ri != NULL, "invariant");
assert(ri != nullptr, "invariant");
ri->_id = id;
ri->_data._array_info_id = current->is_skip_edge() ? 0 : get_array_info_id(*current, id);
@ -548,22 +548,22 @@ static void add_reference_info(const StoredEdge* current, traceid id, traceid pa
}
static bool is_gc_root(const StoredEdge* current) {
assert(current != NULL, "invariant");
return current->parent() == NULL && current->gc_root_id() != 0;
assert(current != nullptr, "invariant");
return current->parent() == nullptr && current->gc_root_id() != 0;
}
static traceid add_gc_root_info(const StoredEdge* root, traceid id) {
assert(root != NULL, "invariant");
assert(root != nullptr, "invariant");
assert(is_gc_root(root), "invariant");
return get_gc_root_description_info_id(*root, id);
}
void ObjectSampleWriter::write(const StoredEdge* edge) {
assert(edge != NULL, "invariant");
assert(edge != nullptr, "invariant");
const traceid id = _store->get_id(edge);
add_old_object_sample_info(edge, id);
const StoredEdge* const parent = edge->parent();
if (parent != NULL) {
if (parent != nullptr) {
add_reference_info(edge, id, _store->get_id(parent));
return;
}
@ -609,14 +609,14 @@ static void register_serializers() {
ObjectSampleWriter::ObjectSampleWriter(JfrCheckpointWriter& writer, EdgeStore* store) :
_writer(writer),
_store(store) {
assert(store != NULL, "invariant");
assert(store != nullptr, "invariant");
assert(!store->is_empty(), "invariant");
register_serializers();
assert(field_infos == NULL, "Invariant");
assert(sample_infos == NULL, "Invariant");
assert(ref_infos == NULL, "Invariant");
assert(array_infos == NULL, "Invariant");
assert(root_infos == NULL, "Invariant");
assert(field_infos == nullptr, "Invariant");
assert(sample_infos == nullptr, "Invariant");
assert(ref_infos == nullptr, "Invariant");
assert(array_infos == nullptr, "Invariant");
assert(root_infos == nullptr, "Invariant");
}
ObjectSampleWriter::~ObjectSampleWriter() {
@ -627,14 +627,14 @@ ObjectSampleWriter::~ObjectSampleWriter() {
write_root_descriptors(_writer);
// The following are RA allocated; memory will be released automatically.
if (field_infos != NULL) {
if (field_infos != nullptr) {
field_infos->~FieldTable();
field_infos = NULL;
field_infos = nullptr;
}
sample_infos = NULL;
ref_infos = NULL;
array_infos = NULL;
root_infos = NULL;
sample_infos = nullptr;
ref_infos = nullptr;
array_infos = nullptr;
root_infos = nullptr;
}
bool ObjectSampleWriter::operator()(StoredEdge& e) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -60,8 +60,8 @@ class ReferenceLocateClosure : public OopClosure {
const void* context) : _callback(callback),
_info(),
_complete(false) {
_info._high = NULL;
_info._low = NULL;
_info._high = nullptr;
_info._low = nullptr;
_info._system = system;
_info._type = type;
_info._context = context;
@ -107,9 +107,9 @@ class ReferenceToRootClosure : public StackObj {
ReferenceToRootClosure(RootCallback& callback) : _callback(callback),
_info(),
_complete(false) {
_info._high = NULL;
_info._low = NULL;
_info._context = NULL;
_info._high = nullptr;
_info._low = nullptr;
_info._context = nullptr;
_info._system = OldObjectRoot::_system_undetermined;
_info._type = OldObjectRoot::_type_undetermined;
@ -124,7 +124,7 @@ class ReferenceToRootClosure : public StackObj {
bool ReferenceToRootClosure::do_cldg_roots() {
assert(!complete(), "invariant");
ReferenceLocateClosure rlc(_callback, OldObjectRoot::_class_loader_data, OldObjectRoot::_type_undetermined, NULL);
ReferenceLocateClosure rlc(_callback, OldObjectRoot::_class_loader_data, OldObjectRoot::_type_undetermined, nullptr);
CLDToOopClosure cldt_closure(&rlc, ClassLoaderData::_claim_none);
ClassLoaderDataGraph::always_strong_cld_do(&cldt_closure);
return rlc.complete();
@ -139,7 +139,7 @@ bool ReferenceToRootClosure::do_oop_storage_roots() {
OldObjectRoot::_global_jni_handle :
OldObjectRoot::_global_oop_handle;
OldObjectRoot::System system = OldObjectRoot::System(OldObjectRoot::_strong_oop_storage_set_first + Range().index(id));
ReferenceLocateClosure rlc(_callback, system, type, NULL);
ReferenceLocateClosure rlc(_callback, system, type, nullptr);
oop_storage->oops_do(&rlc);
if (rlc.complete()) {
return true;
@ -195,7 +195,7 @@ class ReferenceToThreadRootClosure : public StackObj {
};
bool ReferenceToThreadRootClosure::do_thread_handle_area(JavaThread* jt) {
assert(jt != NULL, "invariant");
assert(jt != nullptr, "invariant");
assert(!complete(), "invariant");
ReferenceLocateClosure rcl(_callback, OldObjectRoot::_threads, OldObjectRoot::_handle_area, jt);
jt->handle_area()->oops_do(&rcl);
@ -203,7 +203,7 @@ bool ReferenceToThreadRootClosure::do_thread_handle_area(JavaThread* jt) {
}
bool ReferenceToThreadRootClosure::do_thread_jni_handles(JavaThread* jt) {
assert(jt != NULL, "invariant");
assert(jt != nullptr, "invariant");
assert(!complete(), "invariant");
ReferenceLocateClosure rcl(_callback, OldObjectRoot::_threads, OldObjectRoot::_local_jni_handle, jt);
@ -212,7 +212,7 @@ bool ReferenceToThreadRootClosure::do_thread_jni_handles(JavaThread* jt) {
}
bool ReferenceToThreadRootClosure::do_thread_stack_fast(JavaThread* jt) {
assert(jt != NULL, "invariant");
assert(jt != nullptr, "invariant");
assert(!complete(), "invariant");
if (_callback.entries() == 0) {
@ -221,8 +221,8 @@ bool ReferenceToThreadRootClosure::do_thread_stack_fast(JavaThread* jt) {
}
RootCallbackInfo info;
info._high = NULL;
info._low = NULL;
info._high = nullptr;
info._low = nullptr;
info._context = jt;
info._system = OldObjectRoot::_threads;
info._type = OldObjectRoot::_stack_variable;
@ -242,7 +242,7 @@ bool ReferenceToThreadRootClosure::do_thread_stack_fast(JavaThread* jt) {
}
bool ReferenceToThreadRootClosure::do_thread_stack_detailed(JavaThread* jt) {
assert(jt != NULL, "invariant");
assert(jt != nullptr, "invariant");
assert(!complete(), "invariant");
ReferenceLocateClosure rcl(_callback, OldObjectRoot::_threads, OldObjectRoot::_stack_variable, jt);
@ -250,7 +250,7 @@ bool ReferenceToThreadRootClosure::do_thread_stack_detailed(JavaThread* jt) {
if (jt->has_last_Java_frame()) {
// Traverse the monitor chunks
MonitorChunk* chunk = jt->monitor_chunks();
for (; chunk != NULL; chunk = chunk->next()) {
for (; chunk != nullptr; chunk = chunk->next()) {
chunk->oops_do(&rcl);
}
@ -260,7 +260,7 @@ bool ReferenceToThreadRootClosure::do_thread_stack_detailed(JavaThread* jt) {
// Traverse the execution stack
for (StackFrameStream fst(jt, true /* update */, true /* process_frames */); !fst.is_done(); fst.next()) {
fst.current()->oops_do(&rcl, NULL, fst.register_map());
fst.current()->oops_do(&rcl, nullptr, fst.register_map());
}
} // last java frame
@ -270,7 +270,7 @@ bool ReferenceToThreadRootClosure::do_thread_stack_detailed(JavaThread* jt) {
}
GrowableArrayView<jvmtiDeferredLocalVariableSet*>* const list = JvmtiDeferredUpdates::deferred_locals(jt);
if (list != NULL) {
if (list != nullptr) {
for (int i = 0; i < list->length(); i++) {
list->at(i)->oops_do(&rcl);
}
@ -290,24 +290,24 @@ bool ReferenceToThreadRootClosure::do_thread_stack_detailed(JavaThread* jt) {
*/
JvmtiThreadState* const jvmti_thread_state = jt->jvmti_thread_state();
if (jvmti_thread_state != NULL) {
jvmti_thread_state->oops_do(&rcl, NULL);
if (jvmti_thread_state != nullptr) {
jvmti_thread_state->oops_do(&rcl, nullptr);
}
return rcl.complete();
}
bool ReferenceToThreadRootClosure::do_java_threads_oops(JavaThread* jt) {
assert(jt != NULL, "invariant");
assert(jt != nullptr, "invariant");
assert(!complete(), "invariant");
ReferenceLocateClosure rcl(_callback, OldObjectRoot::_threads, OldObjectRoot::_global_jni_handle, jt);
jt->oops_do(&rcl, NULL);
jt->oops_do(&rcl, nullptr);
return rcl.complete();
}
bool ReferenceToThreadRootClosure::do_thread_roots(JavaThread* jt) {
assert(jt != NULL, "invariant");
assert(jt != nullptr, "invariant");
if (do_thread_stack_fast(jt)) {
_complete = true;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -84,14 +84,14 @@ void LeakProfiler::emit_events(int64_t cutoff_ticks, bool emit_all, bool skip_bf
}
// exclusive access to object sampler instance
ObjectSampler* const sampler = ObjectSampler::acquire();
assert(sampler != NULL, "invariant");
assert(sampler != nullptr, "invariant");
EventEmitter::emit(sampler, cutoff_ticks, emit_all, skip_bfs);
ObjectSampler::release();
}
void LeakProfiler::sample(HeapWord* object, size_t size, JavaThread* thread) {
assert(is_running(), "invariant");
assert(thread != NULL, "invariant");
assert(thread != nullptr, "invariant");
assert(thread->thread_state() == _thread_in_vm, "invariant");
// exclude compiler threads

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,7 +40,7 @@ const oop ObjectSample::object() const {
}
bool ObjectSample::is_dead() const {
return _object.peek() == NULL;
return _object.peek() == nullptr;
}
const oop* ObjectSample::object_addr() const {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -69,8 +69,8 @@ class ObjectSample : public JfrCHeapObj {
void reset();
public:
ObjectSample() : _next(NULL),
_previous(NULL),
ObjectSample() : _next(nullptr),
_previous(nullptr),
_stacktrace(),
_thread(),
_type_set(),

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -57,13 +57,13 @@ static bool volatile _dead_samples = false;
// The OopStorage instance is used to hold weak references to sampled objects.
// It is constructed and registered during VM initialization. This is a singleton
// that persists independently of the state of the ObjectSampler.
static OopStorage* _oop_storage = NULL;
static OopStorage* _oop_storage = nullptr;
OopStorage* ObjectSampler::oop_storage() { return _oop_storage; }
// Callback invoked by the GC after an iteration over the oop storage
// that may have cleared dead referents. num_dead is the number of entries
// already NULL or cleared by the iteration.
// already nullptr or cleared by the iteration.
void ObjectSampler::oop_storage_gc_notification(size_t num_dead) {
if (num_dead != 0) {
// The ObjectSampler instance may have already been cleaned or a new
@ -76,15 +76,15 @@ void ObjectSampler::oop_storage_gc_notification(size_t num_dead) {
bool ObjectSampler::create_oop_storage() {
_oop_storage = OopStorageSet::create_weak("Weak JFR Old Object Samples", mtTracing);
assert(_oop_storage != NULL, "invariant");
assert(_oop_storage != nullptr, "invariant");
_oop_storage->register_num_dead_callback(&oop_storage_gc_notification);
return true;
}
static ObjectSampler* _instance = NULL;
static ObjectSampler* _instance = nullptr;
static ObjectSampler& instance() {
assert(_instance != NULL, "invariant");
assert(_instance != nullptr, "invariant");
return *_instance;
}
@ -100,22 +100,22 @@ ObjectSampler::ObjectSampler(size_t size) :
ObjectSampler::~ObjectSampler() {
delete _priority_queue;
_priority_queue = NULL;
_priority_queue = nullptr;
delete _list;
_list = NULL;
_list = nullptr;
}
bool ObjectSampler::create(size_t size) {
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
assert(_oop_storage != NULL, "should be already created");
assert(_oop_storage != nullptr, "should be already created");
ObjectSampleCheckpoint::clear();
assert(_instance == NULL, "invariant");
assert(_instance == nullptr, "invariant");
_instance = new ObjectSampler(size);
return _instance != NULL;
return _instance != nullptr;
}
bool ObjectSampler::is_created() {
return _instance != NULL;
return _instance != nullptr;
}
ObjectSampler* ObjectSampler::sampler() {
@ -125,9 +125,9 @@ ObjectSampler* ObjectSampler::sampler() {
void ObjectSampler::destroy() {
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
if (_instance != NULL) {
if (_instance != nullptr) {
ObjectSampler* const sampler = _instance;
_instance = NULL;
_instance = nullptr;
delete sampler;
}
}
@ -145,13 +145,13 @@ void ObjectSampler::release() {
}
static traceid get_thread_id(JavaThread* thread, bool* virtual_thread) {
assert(thread != NULL, "invariant");
assert(virtual_thread != NULL, "invariant");
if (thread->threadObj() == NULL) {
assert(thread != nullptr, "invariant");
assert(virtual_thread != nullptr, "invariant");
if (thread->threadObj() == nullptr) {
return 0;
}
const JfrThreadLocal* const tl = thread->jfr_thread_local();
assert(tl != NULL, "invariant");
assert(tl != nullptr, "invariant");
if (tl->is_excluded()) {
return 0;
}
@ -160,9 +160,9 @@ static traceid get_thread_id(JavaThread* thread, bool* virtual_thread) {
}
static JfrBlobHandle get_thread_blob(JavaThread* thread, traceid tid, bool virtual_thread) {
assert(thread != NULL, "invariant");
assert(thread != nullptr, "invariant");
JfrThreadLocal* const tl = thread->jfr_thread_local();
assert(tl != NULL, "invariant");
assert(tl != nullptr, "invariant");
assert(!tl->is_excluded(), "invariant");
if (virtual_thread) {
// TODO: blob cache for virtual threads
@ -195,7 +195,7 @@ class RecordStackTrace {
};
void ObjectSampler::sample(HeapWord* obj, size_t allocated, JavaThread* thread) {
assert(thread != NULL, "invariant");
assert(thread != nullptr, "invariant");
assert(is_created(), "invariant");
bool virtual_thread = false;
const traceid thread_id = get_thread_id(thread, &virtual_thread);
@ -215,9 +215,9 @@ void ObjectSampler::sample(HeapWord* obj, size_t allocated, JavaThread* thread)
}
void ObjectSampler::add(HeapWord* obj, size_t allocated, traceid thread_id, bool virtual_thread, const JfrBlobHandle& bh, JavaThread* thread) {
assert(obj != NULL, "invariant");
assert(obj != nullptr, "invariant");
assert(thread_id != 0, "invariant");
assert(thread != NULL, "invariant");
assert(thread != nullptr, "invariant");
if (Atomic::load(&_dead_samples)) {
// There's a small race where a GC scan might reset this to true, potentially
@ -241,7 +241,7 @@ void ObjectSampler::add(HeapWord* obj, size_t allocated, traceid thread_id, bool
sample = _list->get();
}
assert(sample != NULL, "invariant");
assert(sample != nullptr, "invariant");
sample->set_thread_id(thread_id);
if (virtual_thread) {
sample->set_thread_is_virtual();
@ -265,7 +265,7 @@ void ObjectSampler::add(HeapWord* obj, size_t allocated, traceid thread_id, bool
void ObjectSampler::scavenge() {
ObjectSample* current = _list->last();
while (current != NULL) {
while (current != nullptr) {
ObjectSample* next = current->next();
if (current->is_dead()) {
remove_dead(current);
@ -275,13 +275,13 @@ void ObjectSampler::scavenge() {
}
void ObjectSampler::remove_dead(ObjectSample* sample) {
assert(sample != NULL, "invariant");
assert(sample != nullptr, "invariant");
assert(sample->is_dead(), "invariant");
sample->release();
ObjectSample* const previous = sample->prev();
// push span onto previous
if (previous != NULL) {
if (previous != nullptr) {
_priority_queue->remove(previous);
previous->add_span(sample->span());
_priority_queue->push(previous);
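scavenge and remove_dead above unlink dead samples and fold each removed sample's span into the previous sample, so the total weight the sampler tracks is preserved. A simplified, self-contained sketch on a plain doubly linked list; Sample and scavenge here are illustrative, and delete stands in for releasing the sample back to the free list:
#include <cstddef>
struct Sample {
  Sample* prev = nullptr;
  Sample* next = nullptr;
  size_t span = 0;       // bytes this sample accounts for
  bool dead = false;
};
// Walks the list from 'last', removing dead samples; returns the new start node.
static Sample* scavenge(Sample* last) {
  Sample* current = last;
  while (current != nullptr) {
    Sample* next = current->next;
    if (current->dead) {
      if (current->prev != nullptr) {
        current->prev->span += current->span;   // push span onto previous
        current->prev->next = current->next;    // unlink the dead sample
      }
      if (current->next != nullptr) {
        current->next->prev = current->prev;
      }
      if (current == last) {
        last = next;
      }
      delete current;
    }
    current = next;
  }
  return last;
}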

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,7 +30,7 @@
SampleList::SampleList(size_t limit, size_t cache_size) :
_free_list(),
_in_use_list(),
_last_resolved(NULL),
_last_resolved(nullptr),
_allocated(0),
_limit(limit),
_cache_size(cache_size) {
@ -59,12 +59,12 @@ void SampleList::set_last_resolved(const ObjectSample* sample) {
}
void SampleList::link(ObjectSample* sample) {
assert(sample != NULL, "invariant");
assert(sample != nullptr, "invariant");
_in_use_list.prepend(sample);
}
void SampleList::unlink(ObjectSample* sample) {
assert(sample != NULL, "invariant");
assert(sample != nullptr, "invariant");
if (_last_resolved == sample) {
_last_resolved = sample->next();
}
@ -72,7 +72,7 @@ void SampleList::unlink(ObjectSample* sample) {
}
ObjectSample* SampleList::reuse(ObjectSample* sample) {
assert(sample != NULL, "invariant");
assert(sample != nullptr, "invariant");
unlink(sample);
link(sample);
return sample;
@ -83,7 +83,7 @@ void SampleList::populate_cache() {
const size_t cache_delta = _cache_size - _free_list.count();
for (size_t i = 0; i < cache_delta; ++i) {
ObjectSample* sample = newSample();
if (sample != NULL) {
if (sample != nullptr) {
_free_list.append(sample);
}
}
@ -92,7 +92,7 @@ void SampleList::populate_cache() {
ObjectSample* SampleList::newSample() const {
if (_limit == _allocated) {
return NULL;
return nullptr;
}
++_allocated;
return new ObjectSample();
@ -100,22 +100,22 @@ ObjectSample* SampleList::newSample() const {
ObjectSample* SampleList::get() {
ObjectSample* sample = _free_list.head();
if (sample != NULL) {
if (sample != nullptr) {
link(_free_list.remove(sample));
} else {
sample = newSample();
if (sample != NULL) {
if (sample != nullptr) {
_in_use_list.prepend(sample);
}
}
if (_cache_size > 0 && sample != NULL) {
if (_cache_size > 0 && sample != nullptr) {
populate_cache();
}
return sample;
}
void SampleList::release(ObjectSample* sample) {
assert(sample != NULL, "invariant");
assert(sample != nullptr, "invariant");
unlink(sample);
_free_list.append(sample);
}
@ -123,7 +123,7 @@ void SampleList::release(ObjectSample* sample) {
void SampleList::deallocate_samples(List& list) {
if (list.count() > 0) {
ObjectSample* sample = list.head();
while (sample != NULL) {
while (sample != nullptr) {
list.remove(sample);
delete sample;
sample = list.head();
@ -133,7 +133,7 @@ void SampleList::deallocate_samples(List& list) {
}
void SampleList::reset(ObjectSample* sample) {
assert(sample != NULL, "invariant");
assert(sample != nullptr, "invariant");
sample->reset();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,12 +38,12 @@ SamplePriorityQueue::SamplePriorityQueue(size_t size) :
SamplePriorityQueue::~SamplePriorityQueue() {
FREE_C_HEAP_ARRAY(ObjectSample*, _items);
_items = NULL;
_items = nullptr;
}
void SamplePriorityQueue::push(ObjectSample* item) {
assert(item != NULL, "invariant");
assert(_items[_count] == NULL, "invariant");
assert(item != nullptr, "invariant");
assert(_items[_count] == nullptr, "invariant");
_items[_count] = item;
_items[_count]->set_index(_count);
@ -58,16 +58,16 @@ size_t SamplePriorityQueue::total() const {
ObjectSample* SamplePriorityQueue::pop() {
if (_count == 0) {
return NULL;
return nullptr;
}
ObjectSample* const s = _items[0];
assert(s != NULL, "invariant");
assert(s != nullptr, "invariant");
swap(0, _count - 1);
_count--;
assert(s == _items[_count], "invariant");
// clear from heap
_items[_count] = NULL;
_items[_count] = nullptr;
moveDown(0);
_total -= s->span();
return s;
@ -128,7 +128,7 @@ void SamplePriorityQueue::moveUp(int i) {
}
void SamplePriorityQueue::remove(ObjectSample* s) {
assert(s != NULL, "invariant");
assert(s != nullptr, "invariant");
const size_t realSpan = s->span();
s->set_span(0);
moveUp(s->index());
@ -141,7 +141,7 @@ int SamplePriorityQueue::count() const {
}
const ObjectSample* SamplePriorityQueue::peek() const {
return _count == 0 ? NULL : _items[0];
return _count == 0 ? nullptr : _items[0];
}
ObjectSample* SamplePriorityQueue::item_at(int index) {


@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,12 +38,12 @@ OopStorage* OldObjectRoot::system_oop_storage(System system) {
auto id = static_cast<StrongId>(first + (val - _strong_oop_storage_set_first));
return OopStorageSet::storage(id);
}
return NULL;
return nullptr;
}
const char* OldObjectRoot::system_description(System system) {
OopStorage* oop_storage = system_oop_storage(system);
if (oop_storage != NULL) {
if (oop_storage != nullptr) {
return oop_storage->name();
}
switch (system) {
@ -64,7 +64,7 @@ const char* OldObjectRoot::system_description(System system) {
default:
ShouldNotReachHere();
}
return NULL;
return nullptr;
}
const char* OldObjectRoot::type_description(Type type) {
@ -84,5 +84,5 @@ const char* OldObjectRoot::type_description(Type type) {
default:
ShouldNotReachHere();
}
return NULL;
return nullptr;
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -63,7 +63,7 @@ inline bool UnifiedOopRef::is_null() const {
template <typename T>
inline UnifiedOopRef create_with_tag(T ref, uintptr_t tag) {
assert(ref != NULL, "invariant");
assert(ref != nullptr, "invariant");
uintptr_t value = reinterpret_cast<uintptr_t>(ref);


@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -36,17 +36,17 @@
#include "services/finalizerService.hpp"
static void send_event(const FinalizerEntry* fe, const InstanceKlass* ik, const JfrTicks& timestamp, Thread* thread) {
assert(ik != NULL, "invariant");
assert(ik != nullptr, "invariant");
assert(ik->has_finalizer(), "invariant");
assert(thread != NULL, "invariant");
assert(thread != nullptr, "invariant");
const char* const url = fe != nullptr ? fe->codesource() : nullptr;
const traceid url_symbol_id = url != NULL ? JfrSymbolTable::add(url) : 0;
const traceid url_symbol_id = url != nullptr ? JfrSymbolTable::add(url) : 0;
EventFinalizerStatistics event(UNTIMED);
event.set_starttime(timestamp);
event.set_endtime(timestamp);
event.set_finalizableClass(ik);
event.set_codeSource(url_symbol_id);
if (fe == NULL) {
if (fe == nullptr) {
event.set_objects(0);
event.set_totalFinalizersRun(0);
} else {
@ -75,7 +75,7 @@ class FinalizerStatisticsEventClosure : public FinalizerEntryClosure {
public:
FinalizerStatisticsEventClosure(Thread* thread) : _thread(thread), _timestamp(JfrTicks::now()) {}
virtual bool do_entry(const FinalizerEntry* fe) {
assert(fe != NULL, "invariant");
assert(fe != nullptr, "invariant");
send_event(fe, fe->klass(), _timestamp, _thread);
return true;
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -78,23 +78,23 @@ static void write_module_export_event(const void* package, const ModuleEntry* qu
void ModuleDependencyClosure::do_module(ModuleEntry* to_module) {
assert_locked_or_safepoint(Module_lock);
assert(to_module != NULL, "invariant");
assert(_module != NULL, "invariant");
assert(_event_func != NULL, "invariant");
assert(to_module != nullptr, "invariant");
assert(_module != nullptr, "invariant");
assert(_event_func != nullptr, "invariant");
_event_func(_module, to_module);
}
void ModuleExportClosure::do_module(ModuleEntry* qualified_export) {
assert_locked_or_safepoint(Module_lock);
assert(qualified_export != NULL, "invariant");
assert(_package != NULL, "invariant");
assert(_event_func != NULL, "invariant");
assert(qualified_export != nullptr, "invariant");
assert(_package != nullptr, "invariant");
assert(_event_func != nullptr, "invariant");
_event_func(_package, qualified_export);
}
static void module_dependency_event_callback(ModuleEntry* module) {
assert_locked_or_safepoint(Module_lock);
assert(module != NULL, "invariant");
assert(module != nullptr, "invariant");
if (module->has_reads_list()) {
// create an individual event for each directed edge
ModuleDependencyClosure directed_edges(module, &write_module_dependency_event);
@ -104,7 +104,7 @@ static void module_dependency_event_callback(ModuleEntry* module) {
static void module_export_event_callback(PackageEntry* package) {
assert_locked_or_safepoint(Module_lock);
assert(package != NULL, "invariant");
assert(package != nullptr, "invariant");
if (package->is_exported()) {
if (package->has_qual_exports_list()) {
// package is qualifiedly exported to a set of modules,
@ -116,9 +116,9 @@ static void module_export_event_callback(PackageEntry* package) {
assert(!package->is_qual_exported() || package->is_exported_allUnnamed(), "invariant");
// no qualified exports
// only create a single event with NULL
// only create a single event with nullptr
// for the qualified_exports module
write_module_export_event(package, NULL);
write_module_export_event(package, nullptr);
}
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -42,27 +42,27 @@ struct InterfaceEntry {
mutable bool written;
};
static GrowableArray<InterfaceEntry>* _interfaces = NULL;
static GrowableArray<InterfaceEntry>* _interfaces = nullptr;
void JfrNetworkUtilization::destroy() {
if (_interfaces != NULL) {
if (_interfaces != nullptr) {
for (int i = 0; i < _interfaces->length(); ++i) {
FREE_C_HEAP_ARRAY(char, _interfaces->at(i).name);
}
delete _interfaces;
_interfaces = NULL;
_interfaces = nullptr;
}
}
static InterfaceEntry& new_entry(const NetworkInterface* iface, GrowableArray<InterfaceEntry>* interfaces) {
assert(iface != NULL, "invariant");
assert(interfaces != NULL, "invariant");
assert(iface != nullptr, "invariant");
assert(interfaces != nullptr, "invariant");
// single threaded premise
static traceid interface_id = 0;
const char* name = iface->get_name();
assert(name != NULL, "invariant");
assert(name != nullptr, "invariant");
InterfaceEntry entry;
const size_t length = strlen(name);
@ -76,7 +76,7 @@ static InterfaceEntry& new_entry(const NetworkInterface* iface, GrowableArray<In
}
static GrowableArray<InterfaceEntry>* get_interfaces() {
if (_interfaces == NULL) {
if (_interfaces == nullptr) {
_interfaces = new (mtTracing) GrowableArray<InterfaceEntry>(10, mtTracing);
}
return _interfaces;
@ -88,7 +88,7 @@ static InterfaceEntry& get_entry(const NetworkInterface* iface) {
static int saved_index = -1;
GrowableArray<InterfaceEntry>* interfaces = get_interfaces();
assert(interfaces != NULL, "invariant");
assert(interfaces != nullptr, "invariant");
for (int i = 0; i < _interfaces->length(); ++i) {
saved_index = (saved_index + 1) % _interfaces->length();
if (strcmp(_interfaces->at(saved_index).name, iface->get_name()) == 0) {
@ -123,7 +123,7 @@ class JfrNetworkInterfaceName : public JfrSerializer {
};
static bool register_network_interface_name_serializer() {
assert(_interfaces != NULL, "invariant");
assert(_interfaces != nullptr, "invariant");
return JfrSerializer::register_serializer(TYPE_NETWORKINTERFACENAME,
false, // disallow caching; we want a callback every rotation
new JfrNetworkInterfaceName());
@ -160,7 +160,7 @@ void JfrNetworkUtilization::send_events() {
const JfrTicks cur_time = JfrTicks::now();
if (cur_time > last_sample_instant) {
const JfrTickspan interval = cur_time - last_sample_instant;
for (NetworkInterface *cur = network_interfaces; cur != NULL; cur = cur->next()) {
for (NetworkInterface *cur = network_interfaces; cur != nullptr; cur = cur->next()) {
InterfaceEntry& entry = get_entry(cur);
const uint64_t current_bytes_in = cur->get_bytes_in();
const uint64_t current_bytes_out = cur->get_bytes_out();


@ -35,23 +35,23 @@
#include <stdlib.h> // for environment variables
static JfrOSInterface* _instance = NULL;
static JfrOSInterface* _instance = nullptr;
JfrOSInterface& JfrOSInterface::instance() {
return *_instance;
}
JfrOSInterface* JfrOSInterface::create() {
assert(_instance == NULL, "invariant");
assert(_instance == nullptr, "invariant");
_instance = new JfrOSInterface();
return _instance;
}
void JfrOSInterface::destroy() {
JfrNetworkUtilization::destroy();
if (_instance != NULL) {
if (_instance != nullptr) {
delete _instance;
_instance = NULL;
_instance = nullptr;
}
}
@ -91,47 +91,47 @@ class JfrOSInterface::JfrOSInterfaceImpl : public JfrCHeapObj {
int network_utilization(NetworkInterface** network_interfaces);
};
JfrOSInterface::JfrOSInterfaceImpl::JfrOSInterfaceImpl() : _cpu_info_interface(NULL),
_cpu_perf_interface(NULL),
_system_process_interface(NULL),
_network_performance_interface(NULL) {}
JfrOSInterface::JfrOSInterfaceImpl::JfrOSInterfaceImpl() : _cpu_info_interface(nullptr),
_cpu_perf_interface(nullptr),
_system_process_interface(nullptr),
_network_performance_interface(nullptr) {}
template <typename T>
static T* create_interface() {
ResourceMark rm;
T* iface = new T();
if (iface != NULL) {
if (iface != nullptr) {
if (!iface->initialize()) {
delete iface;
iface = NULL;
iface = nullptr;
}
}
return iface;
}
CPUInformationInterface* JfrOSInterface::JfrOSInterfaceImpl::cpu_info_interface() {
if (_cpu_info_interface == NULL) {
if (_cpu_info_interface == nullptr) {
_cpu_info_interface = create_interface<CPUInformationInterface>();
}
return _cpu_info_interface;
}
CPUPerformanceInterface* JfrOSInterface::JfrOSInterfaceImpl::cpu_perf_interface() {
if (_cpu_perf_interface == NULL) {
if (_cpu_perf_interface == nullptr) {
_cpu_perf_interface = create_interface<CPUPerformanceInterface>();
}
return _cpu_perf_interface;
}
SystemProcessInterface* JfrOSInterface::JfrOSInterfaceImpl::system_process_interface() {
if (_system_process_interface == NULL) {
if (_system_process_interface == nullptr) {
_system_process_interface = create_interface<SystemProcessInterface>();
}
return _system_process_interface;
}
NetworkPerformanceInterface* JfrOSInterface::JfrOSInterfaceImpl::network_performance_interface() {
if (_network_performance_interface == NULL) {
if (_network_performance_interface == nullptr) {
_network_performance_interface = create_interface<NetworkPerformanceInterface>();
}
return _network_performance_interface;
@ -142,67 +142,67 @@ bool JfrOSInterface::JfrOSInterfaceImpl::initialize() {
}
JfrOSInterface::JfrOSInterfaceImpl::~JfrOSInterfaceImpl(void) {
if (_cpu_info_interface != NULL) {
if (_cpu_info_interface != nullptr) {
delete _cpu_info_interface;
_cpu_info_interface = NULL;
_cpu_info_interface = nullptr;
}
if (_cpu_perf_interface != NULL) {
if (_cpu_perf_interface != nullptr) {
delete _cpu_perf_interface;
_cpu_perf_interface = NULL;
_cpu_perf_interface = nullptr;
}
if (_system_process_interface != NULL) {
if (_system_process_interface != nullptr) {
delete _system_process_interface;
_system_process_interface = NULL;
_system_process_interface = nullptr;
}
if (_network_performance_interface != NULL) {
if (_network_performance_interface != nullptr) {
delete _network_performance_interface;
_network_performance_interface = NULL;
_network_performance_interface = nullptr;
}
}
int JfrOSInterface::JfrOSInterfaceImpl::cpu_information(CPUInformation& cpu_info) {
CPUInformationInterface* const iface = cpu_info_interface();
return iface == NULL ? OS_ERR : iface->cpu_information(cpu_info);
return iface == nullptr ? OS_ERR : iface->cpu_information(cpu_info);
}
int JfrOSInterface::JfrOSInterfaceImpl::cpu_load(int which_logical_cpu, double* cpu_load) {
CPUPerformanceInterface* const iface = cpu_perf_interface();
return iface == NULL ? OS_ERR : iface->cpu_load(which_logical_cpu, cpu_load);
return iface == nullptr ? OS_ERR : iface->cpu_load(which_logical_cpu, cpu_load);
}
int JfrOSInterface::JfrOSInterfaceImpl::context_switch_rate(double* rate) {
CPUPerformanceInterface* const iface = cpu_perf_interface();
return iface == NULL ? OS_ERR : iface->context_switch_rate(rate);
return iface == nullptr ? OS_ERR : iface->context_switch_rate(rate);
}
int JfrOSInterface::JfrOSInterfaceImpl::cpu_load_total_process(double* cpu_load) {
CPUPerformanceInterface* const iface = cpu_perf_interface();
return iface == NULL ? OS_ERR : iface->cpu_load_total_process(cpu_load);
return iface == nullptr ? OS_ERR : iface->cpu_load_total_process(cpu_load);
}
int JfrOSInterface::JfrOSInterfaceImpl::cpu_loads_process(double* pjvmUserLoad,
double* pjvmKernelLoad,
double* psystemTotal) {
CPUPerformanceInterface* const iface = cpu_perf_interface();
return iface == NULL ? OS_ERR : iface->cpu_loads_process(pjvmUserLoad, pjvmKernelLoad, psystemTotal);
return iface == nullptr ? OS_ERR : iface->cpu_loads_process(pjvmUserLoad, pjvmKernelLoad, psystemTotal);
}
int JfrOSInterface::JfrOSInterfaceImpl::system_processes(SystemProcess** system_processes, int* no_of_sys_processes) {
assert(system_processes != NULL, "system_processes pointer is NULL!");
assert(no_of_sys_processes != NULL, "no_of_sys_processes pointer is NULL!");
assert(system_processes != nullptr, "system_processes pointer is null!");
assert(no_of_sys_processes != nullptr, "no_of_sys_processes pointer is null!");
SystemProcessInterface* const iface = system_process_interface();
return iface == NULL ? OS_ERR : iface->system_processes(system_processes, no_of_sys_processes);
return iface == nullptr ? OS_ERR : iface->system_processes(system_processes, no_of_sys_processes);
}
int JfrOSInterface::JfrOSInterfaceImpl::network_utilization(NetworkInterface** network_interfaces) {
NetworkPerformanceInterface* const iface = network_performance_interface();
return iface == NULL ? OS_ERR : iface->network_utilization(network_interfaces);
return iface == nullptr ? OS_ERR : iface->network_utilization(network_interfaces);
}
// assigned char* is RESOURCE_HEAP_ALLOCATED
// caller need to ensure proper ResourceMark placement.
int JfrOSInterface::JfrOSInterfaceImpl::os_version(char** os_version) const {
assert(os_version != NULL, "os_version pointer is NULL!");
assert(os_version != nullptr, "os_version pointer is null!");
stringStream os_ver_info;
os::print_os_info_brief(&os_ver_info);
*os_version = os_ver_info.as_string();
@ -210,18 +210,18 @@ int JfrOSInterface::JfrOSInterfaceImpl::os_version(char** os_version) const {
}
JfrOSInterface::JfrOSInterface() {
_impl = NULL;
_impl = nullptr;
}
bool JfrOSInterface::initialize() {
_impl = new JfrOSInterface::JfrOSInterfaceImpl();
return _impl != NULL && _impl->initialize();
return _impl != nullptr && _impl->initialize();
}
JfrOSInterface::~JfrOSInterface() {
if (_impl != NULL) {
if (_impl != nullptr) {
delete _impl;
_impl = NULL;
_impl = nullptr;
}
}
@ -275,17 +275,17 @@ const char* JfrOSInterface::virtualization_name() {
}
int JfrOSInterface::generate_initial_environment_variable_events() {
if (os::get_environ() == NULL) {
if (os::get_environ() == nullptr) {
return OS_ERR;
}
if (EventInitialEnvironmentVariable::is_enabled()) {
// One time stamp for all events, so they can be grouped together
JfrTicks time_stamp = JfrTicks::now();
for (char** p = os::get_environ(); *p != NULL; p++) {
for (char** p = os::get_environ(); *p != nullptr; p++) {
char* variable = *p;
char* equal_sign = strchr(variable, '=');
if (equal_sign != NULL) {
if (equal_sign != nullptr) {
// Extract key/value
ResourceMark rm;
ptrdiff_t key_length = equal_sign - variable;


@ -231,7 +231,7 @@ TRACE_REQUEST_FUNC(CPUTimeStampCounter) {
TRACE_REQUEST_FUNC(SystemProcess) {
char pid_buf[16];
SystemProcess* processes = NULL;
SystemProcess* processes = nullptr;
int num_of_processes = 0;
JfrTicks start_time = JfrTicks::now();
int ret_val = JfrOSInterface::system_processes(&processes, &num_of_processes);
@ -245,16 +245,16 @@ TRACE_REQUEST_FUNC(SystemProcess) {
}
if (ret_val == OS_OK) {
// feature is implemented, write real event
while (processes != NULL) {
while (processes != nullptr) {
SystemProcess* tmp = processes;
const char* info = processes->command_line();
if (info == NULL) {
if (info == nullptr) {
info = processes->path();
}
if (info == NULL) {
if (info == nullptr) {
info = processes->name();
}
if (info == NULL) {
if (info == nullptr) {
info = "?";
}
jio_snprintf(pid_buf, sizeof(pid_buf), "%d", processes->pid());
@ -333,7 +333,7 @@ TRACE_REQUEST_FUNC(ThreadContextSwitchRate) {
#define SEND_FLAGS_OF_TYPE(eventType, flagType) \
do { \
JVMFlag *flag = JVMFlag::flags; \
while (flag->name() != NULL) { \
while (flag->name() != nullptr) { \
if (flag->is_ ## flagType()) { \
if (flag->is_unlocked()) { \
Event ## eventType event; \
@ -379,7 +379,7 @@ TRACE_REQUEST_FUNC(StringFlag) {
class VM_GC_SendObjectCountEvent : public VM_GC_HeapInspection {
public:
VM_GC_SendObjectCountEvent() : VM_GC_HeapInspection(NULL, true) {}
VM_GC_SendObjectCountEvent() : VM_GC_HeapInspection(nullptr, true) {}
virtual void doit() {
ObjectCountEventSender::enable_requestable_event();
collect();
@ -459,7 +459,7 @@ TRACE_REQUEST_FUNC(YoungGenerationConfiguration) {
TRACE_REQUEST_FUNC(InitialSystemProperty) {
SystemProperty* p = Arguments::system_properties();
JfrTicks time_stamp = JfrTicks::now();
while (p != NULL) {
while (p != nullptr) {
if (!p->internal()) {
EventInitialSystemProperty event(UNTIMED);
event.set_key(p->key());
@ -481,7 +481,7 @@ TRACE_REQUEST_FUNC(ThreadAllocationStatistics) {
JfrJavaThreadIterator iter;
while (iter.has_next()) {
JavaThread* const jt = iter.next();
assert(jt != NULL, "invariant");
assert(jt != nullptr, "invariant");
allocated.append(jt->cooked_allocated_bytes());
thread_ids.append(JFR_JVM_THREAD_ID(jt));
}
@ -539,13 +539,13 @@ TRACE_REQUEST_FUNC(ClassLoadingStatistics) {
class JfrClassLoaderStatsClosure : public ClassLoaderStatsClosure {
public:
JfrClassLoaderStatsClosure() : ClassLoaderStatsClosure(NULL) {}
JfrClassLoaderStatsClosure() : ClassLoaderStatsClosure(nullptr) {}
bool do_entry(oop const& key, ClassLoaderStats const& cls) {
const ClassLoaderData* this_cld = cls._class_loader != NULL ?
java_lang_ClassLoader::loader_data_acquire(cls._class_loader) : NULL;
const ClassLoaderData* parent_cld = cls._parent != NULL ?
java_lang_ClassLoader::loader_data_acquire(cls._parent) : NULL;
const ClassLoaderData* this_cld = cls._class_loader != nullptr ?
java_lang_ClassLoader::loader_data_acquire(cls._class_loader) : nullptr;
const ClassLoaderData* parent_cld = cls._parent != nullptr ?
java_lang_ClassLoader::loader_data_acquire(cls._parent) : nullptr;
EventClassLoaderStatistics event;
event.set_classLoader(this_cld);
event.set_parentClassLoader(parent_cld);
@ -567,7 +567,7 @@ public:
class JfrClassLoaderStatsVMOperation : public ClassLoaderStatsVMOperation {
public:
JfrClassLoaderStatsVMOperation() : ClassLoaderStatsVMOperation(NULL) { }
JfrClassLoaderStatsVMOperation() : ClassLoaderStatsVMOperation(nullptr) { }
void doit() {
JfrClassLoaderStatsClosure clsc;


@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -115,7 +115,7 @@ void JfrThreadCPULoadEvent::send_events() {
int number_of_threads = 0;
while (iter.has_next()) {
JavaThread* const jt = iter.next();
assert(jt != NULL, "invariant");
assert(jt != nullptr, "invariant");
++number_of_threads;
EventThreadCPULoad event(UNTIMED);
if (JfrThreadCPULoadEvent::update_event(event, jt, cur_wallclock_time, processor_count)) {


@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,7 +35,7 @@
#include "runtime/registerMap.hpp"
bool JfrGetCallTrace::find_top_frame(frame& top_frame, Method** method, frame& first_frame) {
assert(top_frame.cb() != NULL, "invariant");
assert(top_frame.cb() != nullptr, "invariant");
RegisterMap map(_thread,
RegisterMap::UpdateMap::skip,
RegisterMap::ProcessFrames::skip,
@ -44,7 +44,7 @@ bool JfrGetCallTrace::find_top_frame(frame& top_frame, Method** method, frame& f
for (u4 i = 0; i < MAX_STACK_DEPTH * 2; ++i) {
if (candidate.is_entry_frame()) {
JavaCallWrapper *jcw = candidate.entry_frame_call_wrapper_if_safe(_thread);
if (jcw == NULL || jcw->is_first_frame()) {
if (jcw == nullptr || jcw->is_first_frame()) {
return false;
}
}
@ -75,11 +75,11 @@ bool JfrGetCallTrace::find_top_frame(frame& top_frame, Method** method, frame& f
if (_in_java) {
PcDesc* pc_desc = nm->pc_desc_near(candidate.pc() + 1);
if (pc_desc == NULL || pc_desc->scope_decode_offset() == DebugInformationRecorder::serialized_null) {
if (pc_desc == nullptr || pc_desc->scope_decode_offset() == DebugInformationRecorder::serialized_null) {
return false;
}
candidate.set_pc(pc_desc->real_pc(nm));
assert(nm->pc_desc_at(candidate.pc()) != NULL, "invalid pc");
assert(nm->pc_desc_at(candidate.pc()) != nullptr, "invalid pc");
}
first_frame = candidate;
return true;
@ -92,7 +92,7 @@ bool JfrGetCallTrace::find_top_frame(frame& top_frame, Method** method, frame& f
}
candidate = candidate.sender(&map);
if (candidate.cb() == NULL) {
if (candidate.cb() == nullptr) {
return false;
}
}
@ -104,14 +104,14 @@ bool JfrGetCallTrace::get_topframe(void* ucontext, frame& topframe) {
return false;
}
if (topframe.cb() == NULL) {
if (topframe.cb() == nullptr) {
return false;
}
frame first_java_frame;
Method* method = NULL;
Method* method = nullptr;
if (find_top_frame(topframe, &method, first_java_frame)) {
if (method == NULL) {
if (method == nullptr) {
return false;
}
topframe = first_java_frame;


@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -54,7 +54,7 @@ enum JfrSampleType {
};
static bool thread_state_in_java(JavaThread* thread) {
assert(thread != NULL, "invariant");
assert(thread != nullptr, "invariant");
switch(thread->thread_state()) {
case _thread_new:
case _thread_uninitialized:
@ -77,7 +77,7 @@ static bool thread_state_in_java(JavaThread* thread) {
}
static bool thread_state_in_native(JavaThread* thread) {
assert(thread != NULL, "invariant");
assert(thread != nullptr, "invariant");
switch(thread->thread_state()) {
case _thread_new:
case _thread_uninitialized:
@ -241,12 +241,12 @@ void JfrNativeSamplerCallback::call() {
frame topframe = _jt->last_frame();
frame first_java_frame;
Method* method = NULL;
Method* method = nullptr;
JfrGetCallTrace gct(false, _jt);
if (!gct.find_top_frame(topframe, &method, first_java_frame)) {
return;
}
if (method == NULL) {
if (method == nullptr) {
return;
}
topframe = first_java_frame;
@ -387,7 +387,7 @@ static void clear_transition_block(JavaThread* jt) {
}
static bool is_excluded(JavaThread* thread) {
assert(thread != NULL, "invariant");
assert(thread != nullptr, "invariant");
return thread->is_hidden_from_external_view() || thread->in_deopt_handler() || thread->jfr_thread_local()->is_excluded();
}
@ -418,10 +418,10 @@ bool JfrThreadSampleClosure::do_sample_thread(JavaThread* thread, JfrStackFrame*
JfrThreadSampler::JfrThreadSampler(int64_t java_period_millis, int64_t native_period_millis, u4 max_frames) :
_sample(),
_sampler_thread(NULL),
_sampler_thread(nullptr),
_frames(JfrCHeapObj::new_array<JfrStackFrame>(max_frames)),
_last_thread_java(NULL),
_last_thread_native(NULL),
_last_thread_java(nullptr),
_last_thread_native(nullptr),
_java_period_millis(java_period_millis),
_native_period_millis(native_period_millis),
_min_size(max_frames * 2 * wordSize), // each frame tags at most 2 words, min size is a full stacktrace
@ -464,10 +464,10 @@ void JfrThreadSampler::on_javathread_suspend(JavaThread* thread) {
}
JavaThread* JfrThreadSampler::next_thread(ThreadsList* t_list, JavaThread* first_sampled, JavaThread* current) {
assert(t_list != NULL, "invariant");
assert(t_list != nullptr, "invariant");
assert(Threads_lock->owned_by_self(), "Holding the thread table lock.");
assert(_cur_index >= -1 && (uint)_cur_index + 1 <= t_list->length(), "invariant");
assert((current == NULL && -1 == _cur_index) || (t_list->find_index_of_JavaThread(current) == _cur_index), "invariant");
assert((current == nullptr && -1 == _cur_index) || (t_list->find_index_of_JavaThread(current) == _cur_index), "invariant");
if ((uint)_cur_index + 1 == t_list->length()) {
// wrap
_cur_index = 0;
@ -476,7 +476,7 @@ JavaThread* JfrThreadSampler::next_thread(ThreadsList* t_list, JavaThread* first
}
assert(_cur_index >= 0 && (uint)_cur_index < t_list->length(), "invariant");
JavaThread* const next = t_list->thread_at(_cur_index);
return next != first_sampled ? next : NULL;
return next != first_sampled ? next : nullptr;
}
void JfrThreadSampler::start_thread() {
@ -508,7 +508,7 @@ static int64_t get_monotonic_ms() {
}
void JfrThreadSampler::run() {
assert(_sampler_thread == NULL, "invariant");
assert(_sampler_thread == nullptr, "invariant");
_sampler_thread = this;
@ -588,7 +588,7 @@ void JfrThreadSampler::task_stacktrace(JfrSampleType type, JavaThread** last_thr
const uint sample_limit = JAVA_SAMPLE == type ? MAX_NR_OF_JAVA_SAMPLES : MAX_NR_OF_NATIVE_SAMPLES;
uint num_samples = 0;
JavaThread* start = NULL;
JavaThread* start = nullptr;
{
elapsedTimer sample_time;
sample_time.start();
@ -596,9 +596,9 @@ void JfrThreadSampler::task_stacktrace(JfrSampleType type, JavaThread** last_thr
MutexLocker tlock(Threads_lock);
ThreadsListHandle tlh;
// Resolve a sample session relative start position index into the thread list array.
// In cases where the last sampled thread is NULL or not-NULL but stale, find_index() returns -1.
// In cases where the last sampled thread is null or not-null but stale, find_index() returns -1.
_cur_index = tlh.list()->find_index_of_JavaThread(*last_thread);
JavaThread* current = _cur_index != -1 ? *last_thread : NULL;
JavaThread* current = _cur_index != -1 ? *last_thread : nullptr;
// Explicitly monitor the available space of the thread-local buffer used by the load barrier
// for enqueuing klasses as part of tagging methods. We do this because if space becomes sparse,
@ -611,10 +611,10 @@ void JfrThreadSampler::task_stacktrace(JfrSampleType type, JavaThread** last_thr
while (num_samples < sample_limit) {
current = next_thread(tlh.list(), start, current);
if (current == NULL) {
if (current == nullptr) {
break;
}
if (start == NULL) {
if (start == nullptr) {
start = current; // remember the thread where we started to attempt sampling
}
if (current->is_Compiler_thread()) {
@ -637,29 +637,29 @@ void JfrThreadSampler::task_stacktrace(JfrSampleType type, JavaThread** last_thr
}
}
static JfrThreadSampling* _instance = NULL;
static JfrThreadSampling* _instance = nullptr;
JfrThreadSampling& JfrThreadSampling::instance() {
return *_instance;
}
JfrThreadSampling* JfrThreadSampling::create() {
assert(_instance == NULL, "invariant");
assert(_instance == nullptr, "invariant");
_instance = new JfrThreadSampling();
return _instance;
}
void JfrThreadSampling::destroy() {
if (_instance != NULL) {
if (_instance != nullptr) {
delete _instance;
_instance = NULL;
_instance = nullptr;
}
}
JfrThreadSampling::JfrThreadSampling() : _sampler(NULL) {}
JfrThreadSampling::JfrThreadSampling() : _sampler(nullptr) {}
JfrThreadSampling::~JfrThreadSampling() {
if (_sampler != NULL) {
if (_sampler != nullptr) {
_sampler->disenroll();
}
}
@ -722,7 +722,7 @@ void JfrThreadSampling::set_sampling_period(bool is_java_period, int64_t period_
void JfrThreadSampling::set_java_sample_period(int64_t period_millis) {
assert(period_millis >= 0, "invariant");
if (_instance == NULL && 0 == period_millis) {
if (_instance == nullptr && 0 == period_millis) {
return;
}
instance().set_sampling_period(true, period_millis);
@ -730,7 +730,7 @@ void JfrThreadSampling::set_java_sample_period(int64_t period_millis) {
void JfrThreadSampling::set_native_sample_period(int64_t period_millis) {
assert(period_millis >= 0, "invariant");
if (_instance == NULL && 0 == period_millis) {
if (_instance == nullptr && 0 == period_millis) {
return;
}
instance().set_sampling_period(false, period_millis);


@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -59,22 +59,22 @@ typedef JfrCheckpointManager::BufferPtr BufferPtr;
typedef JfrCheckpointManager::ConstBufferPtr ConstBufferPtr;
static JfrSignal _new_checkpoint;
static JfrCheckpointManager* _instance = NULL;
static JfrCheckpointManager* _instance = nullptr;
JfrCheckpointManager& JfrCheckpointManager::instance() {
return *_instance;
}
JfrCheckpointManager* JfrCheckpointManager::create(JfrChunkWriter& cw) {
assert(_instance == NULL, "invariant");
assert(_instance == nullptr, "invariant");
_instance = new JfrCheckpointManager(cw);
return _instance;
}
void JfrCheckpointManager::destroy() {
assert(_instance != NULL, "invariant");
assert(_instance != nullptr, "invariant");
delete _instance;
_instance = NULL;
_instance = nullptr;
}
JfrCheckpointManager::JfrCheckpointManager(JfrChunkWriter& cw) :
@ -100,9 +100,9 @@ static const size_t virtual_thread_local_buffer_prealloc_count = 0;
static const size_t virtual_thread_local_buffer_size = 4 * K;
bool JfrCheckpointManager::initialize() {
assert(_global_mspace == NULL, "invariant");
assert(_global_mspace == nullptr, "invariant");
_global_mspace = create_mspace<JfrCheckpointMspace, JfrCheckpointManager>(global_buffer_size, 0, 0, false, this); // post-pone preallocation
if (_global_mspace == NULL) {
if (_global_mspace == nullptr) {
return false;
}
// preallocate buffer count to each of the epoch live lists
@ -112,17 +112,17 @@ bool JfrCheckpointManager::initialize() {
}
assert(_global_mspace->free_list_is_empty(), "invariant");
assert(_thread_local_mspace == NULL, "invariant");
assert(_thread_local_mspace == nullptr, "invariant");
_thread_local_mspace = new JfrThreadLocalCheckpointMspace();
if (_thread_local_mspace == NULL || !_thread_local_mspace->initialize(thread_local_buffer_size,
if (_thread_local_mspace == nullptr || !_thread_local_mspace->initialize(thread_local_buffer_size,
thread_local_buffer_prealloc_count,
thread_local_buffer_prealloc_count)) {
return false;
}
assert(_virtual_thread_local_mspace == NULL, "invariant");
assert(_virtual_thread_local_mspace == nullptr, "invariant");
_virtual_thread_local_mspace = new JfrThreadLocalCheckpointMspace();
if (_virtual_thread_local_mspace == NULL || !_virtual_thread_local_mspace->initialize(virtual_thread_local_buffer_size,
if (_virtual_thread_local_mspace == nullptr || !_virtual_thread_local_mspace->initialize(virtual_thread_local_buffer_size,
JFR_MSPACE_UNLIMITED_CACHE_SIZE,
virtual_thread_local_buffer_prealloc_count)) {
return false;
@ -140,13 +140,13 @@ static void assert_lease(ConstBufferPtr buffer) {
}
static void assert_release(ConstBufferPtr buffer) {
assert(buffer != NULL, "invariant");
assert(buffer != nullptr, "invariant");
assert(buffer->lease(), "invariant");
assert(buffer->acquired_by_self(), "invariant");
}
static void assert_retired(ConstBufferPtr buffer, Thread* thread) {
assert(buffer != NULL, "invariant");
assert(buffer != nullptr, "invariant");
assert(buffer->acquired_by(thread), "invariant");
assert(buffer->retired(), "invariant");
}
@ -158,28 +158,28 @@ void JfrCheckpointManager::register_full(BufferPtr buffer, Thread* thread) {
}
static inline bool is_global(ConstBufferPtr buffer) {
assert(buffer != NULL, "invariant");
assert(buffer != nullptr, "invariant");
return buffer->context() == JFR_GLOBAL;
}
static inline bool is_thread_local(ConstBufferPtr buffer) {
assert(buffer != NULL, "invariant");
assert(buffer != nullptr, "invariant");
return buffer->context() == JFR_THREADLOCAL;
}
static inline bool is_virtual_thread_local(ConstBufferPtr buffer) {
assert(buffer != NULL, "invariant");
assert(buffer != nullptr, "invariant");
return buffer->context() == JFR_VIRTUAL_THREADLOCAL;
}
BufferPtr JfrCheckpointManager::lease_global(Thread* thread, bool previous_epoch /* false */, size_t size /* 0 */) {
JfrCheckpointMspace* const mspace = instance()._global_mspace;
assert(mspace != NULL, "invariant");
assert(mspace != nullptr, "invariant");
static const size_t max_elem_size = mspace->min_element_size(); // min is max
BufferPtr buffer;
if (size <= max_elem_size) {
buffer = mspace_acquire_live(size, mspace, thread, previous_epoch);
if (buffer != NULL) {
if (buffer != nullptr) {
buffer->set_lease();
DEBUG_ONLY(assert_lease(buffer);)
return buffer;
@ -202,13 +202,13 @@ BufferPtr JfrCheckpointManager::lease_thread_local(Thread* thread, size_t size)
}
BufferPtr JfrCheckpointManager::get_virtual_thread_local(Thread* thread) {
assert(thread != NULL, "invariant");
assert(thread != nullptr, "invariant");
return JfrTraceIdEpoch::epoch() ? thread->jfr_thread_local()->_checkpoint_buffer_epoch_1 :
thread->jfr_thread_local()->_checkpoint_buffer_epoch_0;
}
void JfrCheckpointManager::set_virtual_thread_local(Thread* thread, BufferPtr buffer) {
assert(thread != NULL, "invariant");
assert(thread != nullptr, "invariant");
if (JfrTraceIdEpoch::epoch()) {
thread->jfr_thread_local()->_checkpoint_buffer_epoch_1 = buffer;
} else {
@ -239,7 +239,7 @@ BufferPtr JfrCheckpointManager::acquire_virtual_thread_local(Thread* thread, siz
}
BufferPtr JfrCheckpointManager::renew(ConstBufferPtr old, Thread* thread, size_t size, JfrCheckpointBufferKind kind /* JFR_THREADLOCAL */) {
assert(old != NULL, "invariant");
assert(old != nullptr, "invariant");
assert(old->acquired_by_self(), "invariant");
if (kind == JFR_GLOBAL) {
return lease_global(thread, instance()._global_mspace->in_previous_epoch_list(old), size);
@ -285,14 +285,14 @@ static inline JfrCheckpointBufferKind kind(ConstBufferPtr buffer) {
}
BufferPtr JfrCheckpointManager::flush(BufferPtr old, size_t used, size_t requested, Thread* thread) {
assert(old != NULL, "invariant");
assert(old != nullptr, "invariant");
if (0 == requested) {
// indicates a lease is being returned
assert(old->lease(), "invariant");
release(old);
// signal completion of a new checkpoint
_new_checkpoint.signal();
return NULL;
return nullptr;
}
BufferPtr new_buffer = renew(old, thread, used + requested, kind(old));
if (new_buffer != nullptr) {
@ -353,7 +353,7 @@ static uint64_t calculate_event_size_bytes(JfrChunkWriter& cw, const u1* data, i
}
static size_t write_checkpoint_event(JfrChunkWriter& cw, const u1* data) {
assert(data != NULL, "invariant");
assert(data != nullptr, "invariant");
const int64_t event_begin = cw.current_offset();
const int64_t last_checkpoint_event = cw.last_checkpoint_offset();
cw.set_last_checkpoint_offset(event_begin);
@ -373,7 +373,7 @@ static size_t write_checkpoint_event(JfrChunkWriter& cw, const u1* data) {
static size_t write_checkpoints(JfrChunkWriter& cw, const u1* data, size_t size) {
assert(cw.is_valid(), "invariant");
assert(data != NULL, "invariant");
assert(data != nullptr, "invariant");
assert(size > 0, "invariant");
const u1* const limit = data + size;
const u1* next = data;
@ -388,7 +388,7 @@ static size_t write_checkpoints(JfrChunkWriter& cw, const u1* data, size_t size)
}
static size_t write_thread_checkpoint_content(JfrChunkWriter& cw, const u1* data) {
assert(data != NULL, "invariant");
assert(data != nullptr, "invariant");
const size_t size = total_size(data);
assert(size > 0, "invariant");
assert(checkpoint_type(data) == THREADS, "invariant");
@ -400,7 +400,7 @@ static size_t write_thread_checkpoint_content(JfrChunkWriter& cw, const u1* data
static size_t write_thread_checkpoint_payloads(JfrChunkWriter& cw, const u1* data, size_t size, u4& elements) {
assert(cw.is_valid(), "invariant");
assert(data != NULL, "invariant");
assert(data != nullptr, "invariant");
assert(size > 0, "invariant");
const u1* const limit = data + size;
const u1* next = data;
@ -528,14 +528,14 @@ size_t JfrCheckpointManager::clear() {
}
size_t JfrCheckpointManager::write_static_type_set(Thread* thread) {
assert(thread != NULL, "invariant");
assert(thread != nullptr, "invariant");
JfrCheckpointWriter writer(true, thread, STATICS);
JfrTypeManager::write_static_types(writer);
return writer.used_size();
}
size_t JfrCheckpointManager::write_threads(JavaThread* thread) {
assert(thread != NULL, "invariant");
assert(thread != nullptr, "invariant");
// can safepoint here
ThreadInVMfromNative transition(thread);
ResourceMark rm(thread);
@ -585,7 +585,7 @@ void JfrCheckpointManager::write_type_set() {
ObjectSampleCheckpoint::on_type_set(leakp_writer);
} else {
JfrCheckpointWriter writer(true, thread);
JfrTypeSet::serialize(&writer, NULL, false, false);
JfrTypeSet::serialize(&writer, nullptr, false, false);
}
}
write();
@ -601,11 +601,11 @@ void JfrCheckpointManager::on_unloading_classes() {
}
static size_t flush_type_set(Thread* thread) {
assert(thread != NULL, "invariant");
assert(thread != nullptr, "invariant");
JfrCheckpointWriter writer(thread);
MutexLocker cld_lock(thread, ClassLoaderDataGraph_lock);
MutexLocker module_lock(thread, Module_lock);
return JfrTypeSet::serialize(&writer, NULL, false, true);
return JfrTypeSet::serialize(&writer, nullptr, false, true);
}
size_t JfrCheckpointManager::flush_type_set() {
@ -636,7 +636,7 @@ size_t JfrCheckpointManager::flush_type_set() {
}
JfrBlobHandle JfrCheckpointManager::create_thread_blob(JavaThread* jt, traceid tid /* 0 */, oop vthread /* nullptr */) {
assert(jt != NULL, "invariant");
assert(jt != nullptr, "invariant");
assert(Thread::current() == jt, "invariant");
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(jt));
return JfrTypeManager::create_thread_blob(jt, tid, vthread);
@ -649,7 +649,7 @@ void JfrCheckpointManager::write_checkpoint(Thread* thread, traceid tid /* 0 */,
class JfrNotifyClosure : public ThreadClosure {
public:
void do_thread(Thread* thread) {
assert(thread != NULL, "invariant");
assert(thread != nullptr, "invariant");
assert_locked_or_safepoint(Threads_lock);
JfrJavaEventWriter::notify(JavaThread::cast(thread));
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -103,8 +103,8 @@ class JfrCheckpointManager : public JfrCHeapObj {
void register_full(BufferPtr buffer, Thread* thread);
public:
static JfrBlobHandle create_thread_blob(JavaThread* jt, traceid tid = 0, oop vthread = NULL);
static void write_checkpoint(Thread* t, traceid tid = 0, oop vthread = NULL);
static JfrBlobHandle create_thread_blob(JavaThread* jt, traceid tid = 0, oop vthread = nullptr);
static void write_checkpoint(Thread* t, traceid tid = 0, oop vthread = nullptr);
size_t flush_type_set();
friend class Jfr;


@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -75,7 +75,7 @@ JfrCheckpointWriter::JfrCheckpointWriter(bool previous_epoch, Thread* thread, Jf
}
static void write_checkpoint_header(u1* pos, int64_t size, jlong time, u4 checkpoint_type, u4 type_count) {
assert(pos != NULL, "invariant");
assert(pos != nullptr, "invariant");
JfrBigEndianWriter be_writer(pos, sizeof(JfrCheckpointEntry));
be_writer.write(size);
be_writer.write(time);
@ -153,9 +153,9 @@ const u1* JfrCheckpointWriter::session_data(size_t* size, bool move /* false */,
assert(this->is_acquired(), "wrong state!");
if (!this->is_valid()) {
*size = 0;
return NULL;
return nullptr;
}
if (ctx != NULL) {
if (ctx != nullptr) {
const u1* session_start_pos = this->start_pos() + ctx->offset;
*size = this->current_pos() - session_start_pos;
return session_start_pos;
@ -195,7 +195,7 @@ JfrBlobHandle JfrCheckpointWriter::move(const JfrCheckpointContext* ctx /* 0 */)
size_t size = 0;
const u1* data = session_data(&size, true, ctx);
JfrBlobHandle blob = JfrBlob::make(data, size);
if (ctx != NULL) {
if (ctx != nullptr) {
const_cast<JfrCheckpointContext*>(ctx)->count = 0;
set_context(*ctx);
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -67,7 +67,7 @@ class JfrCheckpointWriter : public JfrCheckpointWriterBase {
u4 count() const;
void set_count(u4 count);
void increment();
const u1* session_data(size_t* size, bool move = false, const JfrCheckpointContext* ctx = NULL);
const u1* session_data(size_t* size, bool move = false, const JfrCheckpointContext* ctx = nullptr);
void release();
JfrCheckpointWriter(bool previous_epoch, Thread* thread, JfrCheckpointType type = GENERIC);
public:
@ -81,8 +81,8 @@ public:
const JfrCheckpointContext context() const;
void set_context(const JfrCheckpointContext ctx);
bool has_data() const;
JfrBlobHandle copy(const JfrCheckpointContext* ctx = NULL);
JfrBlobHandle move(const JfrCheckpointContext* ctx = NULL);
JfrBlobHandle copy(const JfrCheckpointContext* ctx = nullptr);
JfrBlobHandle move(const JfrCheckpointContext* ctx = nullptr);
};
#endif // SHARE_JFR_RECORDER_CHECKPOINT_JFRCHECKPOINTWRITER_HPP


@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,7 +35,7 @@
#include "runtime/javaThread.hpp"
#include "utilities/exceptions.hpp"
static jbyteArray metadata_blob = NULL;
static jbyteArray metadata_blob = nullptr;
static u8 metadata_id = 0;
static u8 last_metadata_id = 0;
@ -52,14 +52,14 @@ static void check_internal_types() {
static void write_metadata_blob(JfrChunkWriter& chunkwriter, JavaThread* thread) {
assert(chunkwriter.is_valid(), "invariant");
assert(thread != NULL, "invariant");
assert(metadata_blob != NULL, "invariant");
assert(thread != nullptr, "invariant");
assert(metadata_blob != nullptr, "invariant");
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(thread));
const typeArrayOop arr = (typeArrayOop)JfrJavaSupport::resolve_non_null(metadata_blob);
assert(arr != NULL, "invariant");
assert(arr != nullptr, "invariant");
const int length = arr->length();
const Klass* const k = arr->klass();
assert(k != NULL && k->is_array_klass(), "invariant");
assert(k != nullptr && k->is_array_klass(), "invariant");
const TypeArrayKlass* const byte_arr_klass = TypeArrayKlass::cast(k);
const jbyte* const data_address = arr->byte_at_addr(0);
chunkwriter.write_unbuffered(data_address, length);
@ -93,11 +93,11 @@ void JfrMetadataEvent::write(JfrChunkWriter& chunkwriter) {
void JfrMetadataEvent::update(jbyteArray metadata) {
JavaThread* thread = JavaThread::current();
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(thread));
if (metadata_blob != NULL) {
if (metadata_blob != nullptr) {
JfrJavaSupport::destroy_global_jni_handle(metadata_blob);
}
const oop new_desc_oop = JfrJavaSupport::resolve_non_null(metadata);
assert(new_desc_oop != NULL, "invariant");
assert(new_desc_oop != nullptr, "invariant");
metadata_blob = (jbyteArray)JfrJavaSupport::global_jni_handle(new_desc_oop, thread);
++metadata_id;
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -43,7 +43,7 @@ class ThreadGroupExclusiveAccess : public StackObj {
};
Semaphore ThreadGroupExclusiveAccess::_mutex_semaphore(1);
JfrThreadGroup* JfrThreadGroup::_instance = NULL;
JfrThreadGroup* JfrThreadGroup::_instance = nullptr;
class JfrThreadGroupPointers : public ResourceObj {
private:
@ -71,19 +71,19 @@ jweak JfrThreadGroupPointers::thread_group_weak_ref() const {
}
oopDesc* const JfrThreadGroupPointers::thread_group_oop() const {
assert(_thread_group_weak_ref == NULL ||
assert(_thread_group_weak_ref == nullptr ||
JNIHandles::resolve_non_null(_thread_group_weak_ref) == _thread_group_handle(), "invariant");
return _thread_group_handle();
}
jweak JfrThreadGroupPointers::transfer_weak_global_handle_ownership() {
jweak temp = _thread_group_weak_ref;
_thread_group_weak_ref = NULL;
_thread_group_weak_ref = nullptr;
return temp;
}
void JfrThreadGroupPointers::clear_weak_ref() {
if (NULL != _thread_group_weak_ref) {
if (nullptr != _thread_group_weak_ref) {
JNIHandles::destroy_weak_global(_thread_group_weak_ref);
}
}
@ -118,7 +118,7 @@ JfrThreadGroupsHelper::~JfrThreadGroupsHelper() {
}
JfrThreadGroupPointers& JfrThreadGroupsHelper::at(int index) {
assert(_thread_group_hierarchy != NULL, "invariant");
assert(_thread_group_hierarchy != nullptr, "invariant");
assert(index > invalid_iterator_pos && index < _thread_group_hierarchy->length(), "invariant");
return *(_thread_group_hierarchy->at(index));
}
@ -128,7 +128,7 @@ bool JfrThreadGroupsHelper::has_next() const {
}
bool JfrThreadGroupsHelper::is_valid() const {
return (_thread_group_hierarchy != NULL && _thread_group_hierarchy->length() > 0);
return (_thread_group_hierarchy != nullptr && _thread_group_hierarchy->length() > 0);
}
JfrThreadGroupPointers& JfrThreadGroupsHelper::next() {
@ -147,9 +147,9 @@ JfrThreadGroupPointers& JfrThreadGroupsHelper::next() {
* (not here).
*/
int JfrThreadGroupsHelper::populate_thread_group_hierarchy(const JavaThread* jt, Thread* current) {
assert(jt != NULL && jt->is_Java_thread(), "invariant");
assert(current != NULL, "invariant");
assert(_thread_group_hierarchy != NULL, "invariant");
assert(jt != nullptr && jt->is_Java_thread(), "invariant");
assert(current != nullptr, "invariant");
assert(_thread_group_hierarchy != nullptr, "invariant");
oop thread_oop = jt->threadObj();
if (thread_oop == nullptr) {
@ -157,12 +157,12 @@ int JfrThreadGroupsHelper::populate_thread_group_hierarchy(const JavaThread* jt,
}
// immediate thread group
Handle thread_group_handle(current, java_lang_Thread::threadGroup(thread_oop));
if (thread_group_handle == NULL) {
if (thread_group_handle == nullptr) {
return 0;
}
const bool use_weak_handles = !SafepointSynchronize::is_at_safepoint();
jweak thread_group_weak_ref = use_weak_handles ? JNIHandles::make_weak_global(thread_group_handle) : NULL;
jweak thread_group_weak_ref = use_weak_handles ? JNIHandles::make_weak_global(thread_group_handle) : nullptr;
JfrThreadGroupPointers* thread_group_pointers = new JfrThreadGroupPointers(thread_group_handle, thread_group_weak_ref);
_thread_group_hierarchy->append(thread_group_pointers);
@ -172,7 +172,7 @@ int JfrThreadGroupsHelper::populate_thread_group_hierarchy(const JavaThread* jt,
// and check parents parents...
while (parent_thread_group_handle != nullptr) {
const jweak parent_group_weak_ref = use_weak_handles ? JNIHandles::make_weak_global(parent_thread_group_handle) : NULL;
const jweak parent_group_weak_ref = use_weak_handles ? JNIHandles::make_weak_global(parent_thread_group_handle) : nullptr;
thread_group_pointers = new JfrThreadGroupPointers(parent_thread_group_handle, parent_group_weak_ref);
_thread_group_hierarchy->append(thread_group_pointers);
parent_thread_group_obj = java_lang_ThreadGroup::parent(parent_thread_group_handle());
@ -223,25 +223,25 @@ class JfrThreadGroup::JfrThreadGroupEntry : public JfrCHeapObj {
JfrThreadGroup::JfrThreadGroupEntry::JfrThreadGroupEntry(const char* tgname, JfrThreadGroupPointers& ptrs) :
_thread_group_id(0),
_parent_group_id(0),
_thread_group_name(NULL),
_thread_group_oop(NULL),
_thread_group_weak_ref(NULL) {
_thread_group_name(nullptr),
_thread_group_oop(nullptr),
_thread_group_weak_ref(nullptr) {
set_thread_group_name(tgname);
set_thread_group(ptrs);
}
JfrThreadGroup::JfrThreadGroupEntry::~JfrThreadGroupEntry() {
if (_thread_group_name != NULL) {
if (_thread_group_name != nullptr) {
JfrCHeapObj::free(_thread_group_name, strlen(_thread_group_name) + 1);
}
if (_thread_group_weak_ref != NULL) {
if (_thread_group_weak_ref != nullptr) {
JNIHandles::destroy_weak_global(_thread_group_weak_ref);
}
}
void JfrThreadGroup::JfrThreadGroupEntry::set_thread_group_name(const char* tgname) {
assert(_thread_group_name == NULL, "invariant");
if (tgname != NULL) {
assert(_thread_group_name == nullptr, "invariant");
if (tgname != nullptr) {
size_t len = strlen(tgname);
_thread_group_name = JfrCHeapObj::new_array<char>(len + 1);
strncpy(_thread_group_name, tgname, len + 1);
@ -249,16 +249,16 @@ void JfrThreadGroup::JfrThreadGroupEntry::set_thread_group_name(const char* tgna
}
const oop JfrThreadGroup::JfrThreadGroupEntry::thread_group() const {
return _thread_group_weak_ref != NULL ? JNIHandles::resolve(_thread_group_weak_ref) : _thread_group_oop;
return _thread_group_weak_ref != nullptr ? JNIHandles::resolve(_thread_group_weak_ref) : _thread_group_oop;
}
void JfrThreadGroup::JfrThreadGroupEntry::set_thread_group(JfrThreadGroupPointers& ptrs) {
_thread_group_weak_ref = ptrs.transfer_weak_global_handle_ownership();
if (_thread_group_weak_ref == NULL) {
if (_thread_group_weak_ref == nullptr) {
_thread_group_oop = ptrs.thread_group_oop();
assert(_thread_group_oop != NULL, "invariant");
assert(_thread_group_oop != nullptr, "invariant");
} else {
_thread_group_oop = NULL;
_thread_group_oop = nullptr;
}
}
@ -266,7 +266,7 @@ JfrThreadGroup::JfrThreadGroup() :
_list(new (mtTracing) GrowableArray<JfrThreadGroupEntry*>(initial_array_size, mtTracing)) {}
JfrThreadGroup::~JfrThreadGroup() {
if (_list != NULL) {
if (_list != nullptr) {
for (int i = 0; i < _list->length(); i++) {
JfrThreadGroupEntry* e = _list->at(i);
delete e;
@ -296,22 +296,22 @@ traceid JfrThreadGroup::thread_group_id(JavaThread* const jt) {
traceid JfrThreadGroup::thread_group_id_internal(JfrThreadGroupsHelper& helper) {
ThreadGroupExclusiveAccess lock;
JfrThreadGroup* tg_instance = instance();
if (tg_instance == NULL) {
if (tg_instance == nullptr) {
tg_instance = new JfrThreadGroup();
if (tg_instance == NULL) {
if (tg_instance == nullptr) {
return 0;
}
set_instance(tg_instance);
}
JfrThreadGroupEntry* tge = NULL;
JfrThreadGroupEntry* tge = nullptr;
int parent_thread_group_id = 0;
while (helper.has_next()) {
JfrThreadGroupPointers& ptrs = helper.next();
tge = tg_instance->find_entry(ptrs);
if (NULL == tge) {
if (nullptr == tge) {
tge = tg_instance->new_entry(ptrs);
assert(tge != NULL, "invariant");
assert(tge != nullptr, "invariant");
tge->set_parent_group_id(parent_thread_group_id);
}
parent_thread_group_id = tge->thread_group_id();
@ -332,7 +332,7 @@ JfrThreadGroup::find_entry(const JfrThreadGroupPointers& ptrs) const {
return curtge;
}
}
return (JfrThreadGroupEntry*) NULL;
return (JfrThreadGroupEntry*) nullptr;
}
// Assumes you already searched for the existence
@ -345,22 +345,22 @@ JfrThreadGroup::new_entry(JfrThreadGroupPointers& ptrs) {
}
int JfrThreadGroup::add_entry(JfrThreadGroupEntry* tge) {
assert(tge != NULL, "attempting to add a null entry!");
assert(tge != nullptr, "attempting to add a null entry!");
assert(0 == tge->thread_group_id(), "id must be unassigned!");
tge->set_thread_group_id(next_id());
return _list->append(tge);
}
void JfrThreadGroup::write_thread_group_entries(JfrCheckpointWriter& writer) const {
assert(_list != NULL && !_list->is_empty(), "should not need be here!");
assert(_list != nullptr && !_list->is_empty(), "should not need be here!");
const int number_of_tg_entries = _list->length();
writer.write_count(number_of_tg_entries + 1); // + VirtualThread group
writer.write_key(1); // 1 is reserved for VirtualThread group
writer.write<traceid>(0); // parent
const oop vgroup = java_lang_Thread_Constants::get_VTHREAD_GROUP();
assert(vgroup != (oop)NULL, "invariant");
assert(vgroup != (oop)nullptr, "invariant");
const char* const vgroup_name = java_lang_ThreadGroup::name(vgroup);
assert(vgroup_name != NULL, "invariant");
assert(vgroup_name != nullptr, "invariant");
writer.write(vgroup_name);
for (int index = 0; index < number_of_tg_entries; ++index) {
const JfrThreadGroupEntry* const curtge = _list->at(index);
@ -371,8 +371,8 @@ void JfrThreadGroup::write_thread_group_entries(JfrCheckpointWriter& writer) con
}
void JfrThreadGroup::write_selective_thread_group(JfrCheckpointWriter* writer, traceid thread_group_id) const {
assert(writer != NULL, "invariant");
assert(_list != NULL && !_list->is_empty(), "should not need be here!");
assert(writer != nullptr, "invariant");
assert(_list != nullptr && !_list->is_empty(), "should not need be here!");
assert(thread_group_id != 1, "should not need be here!");
const int number_of_tg_entries = _list->length();
@ -404,15 +404,15 @@ void JfrThreadGroup::write_selective_thread_group(JfrCheckpointWriter* writer, t
void JfrThreadGroup::serialize(JfrCheckpointWriter& writer) {
ThreadGroupExclusiveAccess lock;
JfrThreadGroup* tg_instance = instance();
assert(tg_instance != NULL, "invariant");
assert(tg_instance != nullptr, "invariant");
tg_instance->write_thread_group_entries(writer);
}
// for writing a particular thread group
void JfrThreadGroup::serialize(JfrCheckpointWriter* writer, traceid thread_group_id) {
assert(writer != NULL, "invariant");
assert(writer != nullptr, "invariant");
ThreadGroupExclusiveAccess lock;
JfrThreadGroup* const tg_instance = instance();
assert(tg_instance != NULL, "invariant");
assert(tg_instance != nullptr, "invariant");
tg_instance->write_selective_thread_group(writer, thread_group_id);
}


@ -97,13 +97,13 @@ traceid JfrThreadId::id(const Thread* t, oop vthread) {
}
traceid JfrThreadId::os_id(const Thread* t) {
assert(t != NULL, "invariant");
assert(t != nullptr, "invariant");
const OSThread* const os_thread = t->osthread();
return os_thread != NULL ? os_thread->thread_id() : 0;
return os_thread != nullptr ? os_thread->thread_id() : 0;
}
traceid JfrThreadId::jfr_id(const Thread* t, traceid tid) {
assert(t != NULL, "invariant");
assert(t != nullptr, "invariant");
return tid != 0 ? tid : JfrThreadLocal::jvm_thread_id(t);
}
@ -111,8 +111,8 @@ traceid JfrThreadId::jfr_id(const Thread* t, traceid tid) {
const char* get_java_thread_name(const JavaThread* jt, int& length, oop vthread) {
assert(jt != nullptr, "invariant");
const char* name_str = "<no-name - thread name unresolved>";
oop thread_obj = vthread != NULL ? vthread : jt->threadObj();
if (thread_obj == NULL) {
oop thread_obj = vthread != nullptr ? vthread : jt->threadObj();
if (thread_obj == nullptr) {
if (jt->is_attaching_via_jni()) {
name_str = "<no-name - thread is attaching>";
}


@ -37,7 +37,7 @@ class JfrThreadState : public AllStatic {
class JfrThreadId : public AllStatic {
public:
static traceid id(const Thread* t, oop vthread = NULL);
static traceid id(const Thread* t, oop vthread = nullptr);
static traceid os_id(const Thread* t);
static traceid jfr_id(const Thread* t, traceid tid = 0);
};
@ -45,7 +45,7 @@ public:
class JfrThreadName : public AllStatic {
public:
// Requires a ResourceMark for get_thread_name/as_utf8
static const char* name(const Thread* t, int& length, oop vthread = NULL);
static const char* name(const Thread* t, int& length, oop vthread = nullptr);
};
#endif // SHARE_JFR_RECORDER_CHECKPOINT_TYPES_JFRTHREADSTATE_HPP


@ -90,14 +90,14 @@ class JfrCheckpointThreadClosure : public ThreadClosure {
// Only static thread ids, virtual threads are handled dynamically.
void JfrCheckpointThreadClosure::do_thread(Thread* t) {
assert(t != NULL, "invariant");
assert(t != nullptr, "invariant");
++_count;
const traceid tid = JfrThreadId::jfr_id(t);
assert(tid != 0, "invariant");
_writer.write_key(tid);
int length = -1;
const char* const name = JfrThreadName::name(t, length);
assert(name != NULL, "invariant");
assert(name != nullptr, "invariant");
_writer.write(name);
_writer.write<traceid>(JfrThreadId::os_id(t));
if (!t->is_Java_thread()) {
@ -225,7 +225,7 @@ static const char* reference_type_to_string(ReferenceType rt) {
case REF_PHANTOM: return "Phantom reference";
default:
ShouldNotReachHere();
return NULL;
return nullptr;
}
}
@ -278,7 +278,7 @@ void JfrThreadConstant::write_name(JfrCheckpointWriter& writer, const char* name
}
void JfrThreadConstant::serialize(JfrCheckpointWriter& writer) {
assert(_thread != NULL, "invariant");
assert(_thread != nullptr, "invariant");
const bool vthread = _vthread != nullptr;
writer.write_key(JfrThreadId::jfr_id(_thread, _tid));
int length = -1;


@ -109,7 +109,7 @@ class JfrThreadConstant : public JfrSerializer {
oop _vthread;
void write_name(JfrCheckpointWriter& writer, const char* name, int length);
public:
JfrThreadConstant(Thread* t, traceid tid, oop vthread = NULL) : _thread(t), _tid(tid), _vthread(vthread) {}
JfrThreadConstant(Thread* t, traceid tid, oop vthread = nullptr) : _thread(t), _tid(tid), _vthread(vthread) {}
void serialize(JfrCheckpointWriter& writer);
};


@ -50,7 +50,7 @@ class JfrSerializerRegistration : public JfrCHeapObj {
bool _permit_cache;
public:
JfrSerializerRegistration(JfrTypeId id, bool permit_cache, JfrSerializer* serializer) :
_next(NULL), _serializer(serializer), _cache(), _id(id), _permit_cache(permit_cache) {}
_next(nullptr), _serializer(serializer), _cache(), _id(id), _permit_cache(permit_cache) {}
~JfrSerializerRegistration() {
delete _serializer;
}
@ -104,7 +104,7 @@ void JfrTypeManager::write_threads(JfrCheckpointWriter& writer) {
}
JfrBlobHandle JfrTypeManager::create_thread_blob(JavaThread* jt, traceid tid /* 0 */, oop vthread /* nullptr */) {
assert(jt != NULL, "invariant");
assert(jt != nullptr, "invariant");
ResourceMark rm(jt);
JfrCheckpointWriter writer(jt, true, THREADS, JFR_THREADLOCAL); // Thread local lease for blob creation.
// TYPE_THREAD and count is written unconditionally for blobs, also for vthreads.
@ -116,9 +116,9 @@ JfrBlobHandle JfrTypeManager::create_thread_blob(JavaThread* jt, traceid tid /*
}
void JfrTypeManager::write_checkpoint(Thread* t, traceid tid /* 0 */, oop vthread /* nullptr */) {
assert(t != NULL, "invariant");
assert(t != nullptr, "invariant");
Thread* const current = Thread::current(); // not necessarily the same as t
assert(current != NULL, "invariant");
assert(current != nullptr, "invariant");
const bool is_vthread = vthread != nullptr;
ResourceMark rm(current);
JfrCheckpointWriter writer(current, true, THREADS, is_vthread ? JFR_VIRTUAL_THREADLOCAL : JFR_THREADLOCAL);
@ -155,7 +155,7 @@ void JfrTypeManager::destroy() {
JfrSerializerRegistration* registration;
while (types.is_nonempty()) {
registration = types.remove();
assert(registration != NULL, "invariant");
assert(registration != nullptr, "invariant");
delete registration;
}
}
@ -163,7 +163,7 @@ void JfrTypeManager::destroy() {
class InvokeOnRotation {
public:
bool process(const JfrSerializerRegistration* r) {
assert(r != NULL, "invariant");
assert(r != nullptr, "invariant");
r->on_rotation();
return true;
}
@ -182,7 +182,7 @@ class Diversity {
public:
Diversity(JfrTypeId id) : _id(id) {}
bool process(const JfrSerializerRegistration* r) {
assert(r != NULL, "invariant");
assert(r != nullptr, "invariant");
assert(r->id() != _id, "invariant");
return true;
}
@ -195,9 +195,9 @@ static void assert_not_registered_twice(JfrTypeId id, List& list) {
#endif
static bool register_static_type(JfrTypeId id, bool permit_cache, JfrSerializer* serializer) {
assert(serializer != NULL, "invariant");
assert(serializer != nullptr, "invariant");
JfrSerializerRegistration* const registration = new JfrSerializerRegistration(id, permit_cache, serializer);
if (registration == NULL) {
if (registration == nullptr) {
delete serializer;
return false;
}
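The registration == nullptr check after new only has an effect when the class's operator new is non-throwing; a plain ::new expression reports failure with std::bad_alloc and never yields a null pointer, so the check here suggests the JfrCHeapObj allocation path uses return-null semantics. A short standalone sketch of the standard-library analogue, nothrow placement new (an illustration, not the JFR allocator):

#include <new>
#include <cstdio>

struct Registration {
  int id;
  explicit Registration(int i) : id(i) {}
};

int main() {
  Registration* r = new (std::nothrow) Registration(42);  // returns nullptr on failure
  if (r == nullptr) {
    std::puts("allocation failed");
    return 1;
  }
  std::puts("allocation succeeded");
  delete r;
  return 0;
}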
@ -256,7 +256,7 @@ class InvokeSerializer {
public:
InvokeSerializer(JfrCheckpointWriter& writer) : _writer(writer) {}
bool process(const JfrSerializerRegistration* r) {
assert(r != NULL, "invariant");
assert(r != nullptr, "invariant");
r->invoke(_writer);
return true;
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -58,10 +58,10 @@ typedef const Symbol* SymbolPtr;
typedef const JfrSymbolTable::SymbolEntry* SymbolEntryPtr;
typedef const JfrSymbolTable::StringEntry* StringEntryPtr;
static JfrCheckpointWriter* _writer = NULL;
static JfrCheckpointWriter* _leakp_writer = NULL;
static JfrArtifactSet* _artifacts = NULL;
static JfrArtifactClosure* _subsystem_callback = NULL;
static JfrCheckpointWriter* _writer = nullptr;
static JfrCheckpointWriter* _leakp_writer = nullptr;
static JfrArtifactSet* _artifacts = nullptr;
static JfrArtifactClosure* _subsystem_callback = nullptr;
static bool _class_unload = false;
static bool _flushpoint = false;
static bool _initial_type_set = true;
@ -83,11 +83,11 @@ static bool is_complete() {
}
static traceid mark_symbol(KlassPtr klass, bool leakp) {
return klass != NULL ? _artifacts->mark(klass, leakp) : 0;
return klass != nullptr ? _artifacts->mark(klass, leakp) : 0;
}
static traceid mark_symbol(Symbol* symbol, bool leakp) {
return symbol != NULL ? _artifacts->mark(symbol, leakp) : 0;
return symbol != nullptr ? _artifacts->mark(symbol, leakp) : 0;
}
static traceid get_bootstrap_name(bool leakp) {
@ -106,33 +106,33 @@ static const char* primitive_name(KlassPtr type_array_klass) {
case JVM_SIGNATURE_DOUBLE: return "double";
}
assert(false, "invalid type array klass");
return NULL;
return nullptr;
}
static Symbol* primitive_symbol(KlassPtr type_array_klass) {
if (type_array_klass == NULL) {
if (type_array_klass == nullptr) {
// void.class
static Symbol* const void_class_name = SymbolTable::probe("void", 4);
assert(void_class_name != NULL, "invariant");
assert(void_class_name != nullptr, "invariant");
return void_class_name;
}
const char* const primitive_type_str = primitive_name(type_array_klass);
assert(primitive_type_str != NULL, "invariant");
assert(primitive_type_str != nullptr, "invariant");
Symbol* const primitive_type_sym = SymbolTable::probe(primitive_type_str, (int)strlen(primitive_type_str));
assert(primitive_type_sym != NULL, "invariant");
assert(primitive_type_sym != nullptr, "invariant");
return primitive_type_sym;
}
template <typename T>
static traceid artifact_id(const T* ptr) {
assert(ptr != NULL, "invariant");
assert(ptr != nullptr, "invariant");
return JfrTraceId::load_raw(ptr);
}
static traceid package_id(KlassPtr klass, bool leakp) {
assert(klass != NULL, "invariant");
assert(klass != nullptr, "invariant");
PkgPtr pkg_entry = klass->package();
if (pkg_entry == NULL) {
if (pkg_entry == nullptr) {
return 0;
}
if (leakp) {
@ -143,9 +143,9 @@ static traceid package_id(KlassPtr klass, bool leakp) {
}
static traceid module_id(PkgPtr pkg, bool leakp) {
assert(pkg != NULL, "invariant");
assert(pkg != nullptr, "invariant");
ModPtr module_entry = pkg->module();
if (module_entry == NULL) {
if (module_entry == nullptr) {
return 0;
}
if (leakp) {
@ -157,13 +157,13 @@ static traceid module_id(PkgPtr pkg, bool leakp) {
}
static traceid method_id(KlassPtr klass, MethodPtr method) {
assert(klass != NULL, "invariant");
assert(method != NULL, "invariant");
assert(klass != nullptr, "invariant");
assert(method != nullptr, "invariant");
return METHOD_ID(klass, method);
}
static traceid cld_id(CldPtr cld, bool leakp) {
assert(cld != NULL, "invariant");
assert(cld != nullptr, "invariant");
if (leakp) {
SET_LEAKP(cld);
} else {
@ -174,7 +174,7 @@ static traceid cld_id(CldPtr cld, bool leakp) {
template <typename T>
static s4 get_flags(const T* ptr) {
assert(ptr != NULL, "invariant");
assert(ptr != nullptr, "invariant");
return ptr->access_flags().get_flags();
}
@ -184,17 +184,17 @@ static u4 get_primitive_flags() {
}
static ClassLoaderData* get_cld(const Klass* klass) {
assert(klass != NULL, "invariant");
assert(klass != nullptr, "invariant");
if (klass->is_objArray_klass()) {
klass = ObjArrayKlass::cast(klass)->bottom_klass();
}
if (klass->is_non_strong_hidden()) return NULL;
if (klass->is_non_strong_hidden()) return nullptr;
return klass->class_loader_data();
}
template <typename T>
static void set_serialized(const T* ptr) {
assert(ptr != NULL, "invariant");
assert(ptr != nullptr, "invariant");
SET_SERIALIZED(ptr);
assert(IS_SERIALIZED(ptr), "invariant");
if (current_epoch()) {
@ -211,12 +211,12 @@ static void set_serialized(const T* ptr) {
*/
static int write_klass(JfrCheckpointWriter* writer, KlassPtr klass, bool leakp) {
assert(writer != NULL, "invariant");
assert(_artifacts != NULL, "invariant");
assert(klass != NULL, "invariant");
assert(writer != nullptr, "invariant");
assert(_artifacts != nullptr, "invariant");
assert(klass != nullptr, "invariant");
writer->write(artifact_id(klass));
ClassLoaderData* cld = get_cld(klass);
writer->write(cld != NULL ? cld_id(cld, leakp) : 0);
writer->write(cld != nullptr ? cld_id(cld, leakp) : 0);
writer->write(mark_symbol(klass, leakp));
writer->write(package_id(klass, leakp));
writer->write(get_flags(klass));
@ -225,34 +225,34 @@ static int write_klass(JfrCheckpointWriter* writer, KlassPtr klass, bool leakp)
}
int write__klass(JfrCheckpointWriter* writer, const void* k) {
assert(k != NULL, "invariant");
assert(k != nullptr, "invariant");
KlassPtr klass = (KlassPtr)k;
set_serialized(klass);
return write_klass(writer, klass, false);
}
int write__klass__leakp(JfrCheckpointWriter* writer, const void* k) {
assert(k != NULL, "invariant");
assert(k != nullptr, "invariant");
KlassPtr klass = (KlassPtr)k;
CLEAR_LEAKP(klass);
return write_klass(writer, klass, true);
}
static bool is_implied(const Klass* klass) {
assert(klass != NULL, "invariant");
assert(klass != nullptr, "invariant");
return klass->is_subclass_of(vmClasses::ClassLoader_klass()) || klass == vmClasses::Object_klass();
}
static void do_klass(Klass* klass) {
assert(klass != NULL, "invariant");
assert(klass != nullptr, "invariant");
assert(_flushpoint ? USED_THIS_EPOCH(klass) : USED_PREVIOUS_EPOCH(klass), "invariant");
assert(_subsystem_callback != NULL, "invariant");
assert(_subsystem_callback != nullptr, "invariant");
_subsystem_callback->do_artifact(klass);
}
static traceid primitive_id(KlassPtr array_klass) {
if (array_klass == NULL) {
if (array_klass == nullptr) {
// The first klass id is reserved for the void.class.
return LAST_TYPE_ID + 1;
}
@ -261,8 +261,8 @@ static traceid primitive_id(KlassPtr array_klass) {
}
static void write_primitive(JfrCheckpointWriter* writer, KlassPtr type_array_klass) {
assert(writer != NULL, "invariant");
assert(_artifacts != NULL, "invariant");
assert(writer != nullptr, "invariant");
assert(_artifacts != nullptr, "invariant");
writer->write(primitive_id(type_array_klass));
writer->write(cld_id(get_cld(Universe::boolArrayKlassObj()), false));
writer->write(mark_symbol(primitive_symbol(type_array_klass), false));
@ -272,8 +272,8 @@ static void write_primitive(JfrCheckpointWriter* writer, KlassPtr type_array_kla
}
static void do_loader_klass(const Klass* klass) {
if (klass != NULL && _artifacts->should_do_loader_klass(klass)) {
if (_leakp_writer != NULL) {
if (klass != nullptr && _artifacts->should_do_loader_klass(klass)) {
if (_leakp_writer != nullptr) {
SET_LEAKP(klass);
}
SET_TRANSIENT(klass);
@ -282,7 +282,7 @@ static void do_loader_klass(const Klass* klass) {
}
static bool register_klass_unload(Klass* klass) {
assert(klass != NULL, "invariant");
assert(klass != nullptr, "invariant");
return JfrKlassUnloading::on_unload(klass);
}
@ -296,8 +296,8 @@ static size_t register_unloading_klasses() {
}
static void do_unloading_klass(Klass* klass) {
assert(klass != NULL, "invariant");
assert(_subsystem_callback != NULL, "invariant");
assert(klass != nullptr, "invariant");
assert(_subsystem_callback != nullptr, "invariant");
if (register_klass_unload(klass)) {
_subsystem_callback->do_artifact(klass);
do_loader_klass(klass->class_loader_data()->class_loader_klass());
@ -311,7 +311,7 @@ static void do_unloading_klass(Klass* klass) {
* trigger initialization.
*/
static bool is_classloader_klass_allowed(const Klass* k) {
assert(k != NULL, "invariant");
assert(k != nullptr, "invariant");
return !(k->is_abstract() || k->should_be_initialized());
}
@ -342,7 +342,7 @@ static void do_primitives() {
write_primitive(_writer, Universe::longArrayKlassObj());
write_primitive(_writer, Universe::floatArrayKlassObj());
write_primitive(_writer, Universe::doubleArrayKlassObj());
write_primitive(_writer, NULL); // void.class
write_primitive(_writer, nullptr); // void.class
}
}
@ -373,7 +373,7 @@ class LeakPredicate<const Klass*> {
public:
LeakPredicate(bool class_unload) {}
bool operator()(const Klass* klass) {
assert(klass != NULL, "invariant");
assert(klass != nullptr, "invariant");
return IS_LEAKP(klass) || is_implied(klass);
}
};
@ -388,11 +388,11 @@ typedef JfrArtifactCallbackHost<KlassPtr, CompositeKlassWriterRegistration> Comp
static bool write_klasses() {
assert(!_artifacts->has_klass_entries(), "invariant");
assert(_writer != NULL, "invariant");
assert(_writer != nullptr, "invariant");
KlassArtifactRegistrator reg(_artifacts);
KlassWriter kw(_writer, _class_unload);
KlassWriterRegistration kwr(&kw, &reg);
if (_leakp_writer == NULL) {
if (_leakp_writer == nullptr) {
KlassCallback callback(&_subsystem_callback, &kwr);
do_klasses();
} else {
@ -416,8 +416,8 @@ static bool write_klasses() {
template <typename T>
static void do_previous_epoch_artifact(JfrArtifactClosure* callback, T* value) {
assert(callback != NULL, "invariant");
assert(value != NULL, "invariant");
assert(callback != nullptr, "invariant");
assert(value != nullptr, "invariant");
if (USED_PREVIOUS_EPOCH(value)) {
callback->do_artifact(value);
}
@ -430,8 +430,8 @@ static void do_previous_epoch_artifact(JfrArtifactClosure* callback, T* value) {
typedef JfrArtifactCallbackHost<KlassPtr, KlassArtifactRegistrator> RegisterKlassCallback;
static void register_klass(Klass* klass) {
assert(klass != NULL, "invariant");
assert(_subsystem_callback != NULL, "invariant");
assert(klass != nullptr, "invariant");
assert(_subsystem_callback != nullptr, "invariant");
do_previous_epoch_artifact(_subsystem_callback, klass);
}
@ -443,9 +443,9 @@ static void register_klasses() {
}
static int write_package(JfrCheckpointWriter* writer, PkgPtr pkg, bool leakp) {
assert(writer != NULL, "invariant");
assert(_artifacts != NULL, "invariant");
assert(pkg != NULL, "invariant");
assert(writer != nullptr, "invariant");
assert(_artifacts != nullptr, "invariant");
assert(pkg != nullptr, "invariant");
writer->write(artifact_id(pkg));
writer->write(mark_symbol(pkg->name(), leakp));
writer->write(module_id(pkg, leakp));
@ -454,14 +454,14 @@ static int write_package(JfrCheckpointWriter* writer, PkgPtr pkg, bool leakp) {
}
int write__package(JfrCheckpointWriter* writer, const void* p) {
assert(p != NULL, "invariant");
assert(p != nullptr, "invariant");
PkgPtr pkg = (PkgPtr)p;
set_serialized(pkg);
return write_package(writer, pkg, false);
}
int write__package__leakp(JfrCheckpointWriter* writer, const void* p) {
assert(p != NULL, "invariant");
assert(p != nullptr, "invariant");
PkgPtr pkg = (PkgPtr)p;
CLEAR_LEAKP(pkg);
return write_package(writer, pkg, true);
@ -479,7 +479,7 @@ class PackageFieldSelector {
public:
typedef PkgPtr TypePtr;
static TypePtr select(KlassPtr klass) {
assert(klass != NULL, "invariant");
assert(klass != nullptr, "invariant");
return klass->package();
}
};
@ -502,7 +502,7 @@ typedef CompositeFunctor<PkgPtr, CompositePackageWriter, ClearArtifact<PkgPtr> >
typedef JfrArtifactCallbackHost<PkgPtr, CompositePackageWriterWithClear> CompositePackageCallback;
static void write_packages() {
assert(_writer != NULL, "invariant");
assert(_writer != nullptr, "invariant");
PackageWriter pw(_writer, _class_unload);
KlassPackageWriter kpw(&pw);
if (current_epoch()) {
@ -511,7 +511,7 @@ static void write_packages() {
return;
}
assert(previous_epoch(), "invariant");
if (_leakp_writer == NULL) {
if (_leakp_writer == nullptr) {
_artifacts->iterate_klasses(kpw);
ClearArtifact<PkgPtr> clear;
PackageWriterWithClear pwwc(&pw, &clear);
@ -539,8 +539,8 @@ static void clear_packages() {
}
static int write_module(JfrCheckpointWriter* writer, ModPtr mod, bool leakp) {
assert(mod != NULL, "invariant");
assert(_artifacts != NULL, "invariant");
assert(mod != nullptr, "invariant");
assert(_artifacts != nullptr, "invariant");
writer->write(artifact_id(mod));
writer->write(mark_symbol(mod->name(), leakp));
writer->write(mark_symbol(mod->version(), leakp));
@ -550,14 +550,14 @@ static int write_module(JfrCheckpointWriter* writer, ModPtr mod, bool leakp) {
}
int write__module(JfrCheckpointWriter* writer, const void* m) {
assert(m != NULL, "invariant");
assert(m != nullptr, "invariant");
ModPtr mod = (ModPtr)m;
set_serialized(mod);
return write_module(writer, mod, false);
}
int write__module__leakp(JfrCheckpointWriter* writer, const void* m) {
assert(m != NULL, "invariant");
assert(m != nullptr, "invariant");
ModPtr mod = (ModPtr)m;
CLEAR_LEAKP(mod);
return write_module(writer, mod, true);
@ -575,9 +575,9 @@ class ModuleFieldSelector {
public:
typedef ModPtr TypePtr;
static TypePtr select(KlassPtr klass) {
assert(klass != NULL, "invariant");
assert(klass != nullptr, "invariant");
PkgPtr pkg = klass->package();
return pkg != NULL ? pkg->module() : NULL;
return pkg != nullptr ? pkg->module() : nullptr;
}
};
@ -598,7 +598,7 @@ typedef CompositeFunctor<ModPtr, CompositeModuleWriter, ClearArtifact<ModPtr> >
typedef JfrArtifactCallbackHost<ModPtr, CompositeModuleWriterWithClear> CompositeModuleCallback;
static void write_modules() {
assert(_writer != NULL, "invariant");
assert(_writer != nullptr, "invariant");
ModuleWriter mw(_writer, _class_unload);
KlassModuleWriter kmw(&mw);
if (current_epoch()) {
@ -607,7 +607,7 @@ static void write_modules() {
return;
}
assert(previous_epoch(), "invariant");
if (_leakp_writer == NULL) {
if (_leakp_writer == nullptr) {
_artifacts->iterate_klasses(kmw);
ClearArtifact<ModPtr> clear;
ModuleWriterWithClear mwwc(&mw, &clear);
@ -635,10 +635,10 @@ static void clear_modules() {
}
static int write_classloader(JfrCheckpointWriter* writer, CldPtr cld, bool leakp) {
assert(cld != NULL, "invariant");
assert(cld != nullptr, "invariant");
// class loader type
const Klass* class_loader_klass = cld->class_loader_klass();
if (class_loader_klass == NULL) {
if (class_loader_klass == nullptr) {
// (primordial) boot class loader
writer->write(artifact_id(cld)); // class loader instance id
writer->write((traceid)0); // class loader type id (absence of)
@ -653,14 +653,14 @@ static int write_classloader(JfrCheckpointWriter* writer, CldPtr cld, bool leakp
}
int write__classloader(JfrCheckpointWriter* writer, const void* c) {
assert(c != NULL, "invariant");
assert(c != nullptr, "invariant");
CldPtr cld = (CldPtr)c;
set_serialized(cld);
return write_classloader(writer, cld, false);
}
int write__classloader__leakp(JfrCheckpointWriter* writer, const void* c) {
assert(c != NULL, "invariant");
assert(c != nullptr, "invariant");
CldPtr cld = (CldPtr)c;
CLEAR_LEAKP(cld);
return write_classloader(writer, cld, true);
@ -674,7 +674,7 @@ class KlassCldFieldSelector {
public:
typedef CldPtr TypePtr;
static TypePtr select(KlassPtr klass) {
assert(klass != NULL, "invariant");
assert(klass != nullptr, "invariant");
return get_cld(klass);
}
};
@ -683,9 +683,9 @@ class ModuleCldFieldSelector {
public:
typedef CldPtr TypePtr;
static TypePtr select(KlassPtr klass) {
assert(klass != NULL, "invariant");
assert(klass != nullptr, "invariant");
ModPtr mod = ModuleFieldSelector::select(klass);
return mod != NULL ? mod->loader_data() : NULL;
return mod != nullptr ? mod->loader_data() : nullptr;
}
};
@ -693,7 +693,7 @@ class CLDCallback : public CLDClosure {
public:
CLDCallback() {}
void do_cld(ClassLoaderData* cld) {
assert(cld != NULL, "invariant");
assert(cld != nullptr, "invariant");
if (cld->has_class_mirror_holder()) {
return;
}
@ -727,7 +727,7 @@ typedef CompositeFunctor<CldPtr, CompositeCldWriter, ClearArtifact<CldPtr> > Com
typedef JfrArtifactCallbackHost<CldPtr, CompositeCldWriterWithClear> CompositeCldCallback;
static void write_classloaders() {
assert(_writer != NULL, "invariant");
assert(_writer != nullptr, "invariant");
CldWriter cldw(_writer, _class_unload);
KlassCldWriter kcw(&cldw);
ModuleCldWriter mcw(&cldw);
@ -738,7 +738,7 @@ static void write_classloaders() {
return;
}
assert(previous_epoch(), "invariant");
if (_leakp_writer == NULL) {
if (_leakp_writer == nullptr) {
_artifacts->iterate_klasses(kmcw);
ClearArtifact<CldPtr> clear;
CldWriterWithClear cldwwc(&cldw, &clear);
@ -768,13 +768,13 @@ static void clear_classloaders() {
}
static u1 get_visibility(MethodPtr method) {
assert(method != NULL, "invariant");
assert(method != nullptr, "invariant");
return const_cast<Method*>(method)->is_hidden() ? (u1)1 : (u1)0;
}
template <>
void set_serialized<Method>(MethodPtr method) {
assert(method != NULL, "invariant");
assert(method != nullptr, "invariant");
SET_METHOD_SERIALIZED(method);
assert(IS_METHOD_SERIALIZED(method), "invariant");
if (current_epoch()) {
@ -783,11 +783,11 @@ void set_serialized<Method>(MethodPtr method) {
}
static int write_method(JfrCheckpointWriter* writer, MethodPtr method, bool leakp) {
assert(writer != NULL, "invariant");
assert(method != NULL, "invariant");
assert(_artifacts != NULL, "invariant");
assert(writer != nullptr, "invariant");
assert(method != nullptr, "invariant");
assert(_artifacts != nullptr, "invariant");
KlassPtr klass = method->method_holder();
assert(klass != NULL, "invariant");
assert(klass != nullptr, "invariant");
writer->write(method_id(klass, method));
writer->write(artifact_id(klass));
writer->write(mark_symbol(method->name(), leakp));
@ -798,14 +798,14 @@ static int write_method(JfrCheckpointWriter* writer, MethodPtr method, bool leak
}
int write__method(JfrCheckpointWriter* writer, const void* m) {
assert(m != NULL, "invariant");
assert(m != nullptr, "invariant");
MethodPtr method = (MethodPtr)m;
set_serialized(method);
return write_method(writer, method, false);
}
int write__method__leakp(JfrCheckpointWriter* writer, const void* m) {
assert(m != NULL, "invariant");
assert(m != nullptr, "invariant");
MethodPtr method = (MethodPtr)m;
CLEAR_LEAKP_METHOD(method);
return write_method(writer, method, true);
@ -855,7 +855,7 @@ class MethodIteratorHost {
bool operator()(KlassPtr klass) {
if (_method_used_predicate(klass)) {
const InstanceKlass* ik = InstanceKlass::cast(klass);
while (ik != NULL) {
while (ik != nullptr) {
const int len = ik->methods()->length();
for (int i = 0; i < len; ++i) {
MethodPtr method = ik->methods()->at(i);
@ -905,9 +905,9 @@ typedef MethodIteratorHost<LeakMethodWriterImpl, KlassCallbackStub, BitMapFilter
typedef CompositeFunctor<KlassPtr, LeakMethodWriter, MethodWriter> CompositeMethodWriter;
static void write_methods() {
assert(_writer != NULL, "invariant");
assert(_writer != nullptr, "invariant");
MethodWriter mw(_writer, current_epoch(), _class_unload);
if (_leakp_writer == NULL) {
if (_leakp_writer == nullptr) {
_artifacts->iterate_klasses(mw);
} else {
LeakMethodWriter lpmw(_leakp_writer, current_epoch(), _class_unload);
@ -919,21 +919,21 @@ static void write_methods() {
template <>
void set_serialized<JfrSymbolTable::SymbolEntry>(SymbolEntryPtr ptr) {
assert(ptr != NULL, "invariant");
assert(ptr != nullptr, "invariant");
ptr->set_serialized();
assert(ptr->is_serialized(), "invariant");
}
template <>
void set_serialized<JfrSymbolTable::StringEntry>(StringEntryPtr ptr) {
assert(ptr != NULL, "invariant");
assert(ptr != nullptr, "invariant");
ptr->set_serialized();
assert(ptr->is_serialized(), "invariant");
}
static int write_symbol(JfrCheckpointWriter* writer, SymbolEntryPtr entry, bool leakp) {
assert(writer != NULL, "invariant");
assert(entry != NULL, "invariant");
assert(writer != nullptr, "invariant");
assert(entry != nullptr, "invariant");
ResourceMark rm;
writer->write(entry->id());
writer->write(entry->value()->as_C_string());
@ -941,35 +941,35 @@ static int write_symbol(JfrCheckpointWriter* writer, SymbolEntryPtr entry, bool
}
int write__symbol(JfrCheckpointWriter* writer, const void* e) {
assert(e != NULL, "invariant");
assert(e != nullptr, "invariant");
SymbolEntryPtr entry = (SymbolEntryPtr)e;
set_serialized(entry);
return write_symbol(writer, entry, false);
}
int write__symbol__leakp(JfrCheckpointWriter* writer, const void* e) {
assert(e != NULL, "invariant");
assert(e != nullptr, "invariant");
SymbolEntryPtr entry = (SymbolEntryPtr)e;
return write_symbol(writer, entry, true);
}
static int write_string(JfrCheckpointWriter* writer, StringEntryPtr entry, bool leakp) {
assert(writer != NULL, "invariant");
assert(entry != NULL, "invariant");
assert(writer != nullptr, "invariant");
assert(entry != nullptr, "invariant");
writer->write(entry->id());
writer->write(entry->value());
return 1;
}
int write__string(JfrCheckpointWriter* writer, const void* e) {
assert(e != NULL, "invariant");
assert(e != nullptr, "invariant");
StringEntryPtr entry = (StringEntryPtr)e;
set_serialized(entry);
return write_string(writer, entry, false);
}
int write__string__leakp(JfrCheckpointWriter* writer, const void* e) {
assert(e != NULL, "invariant");
assert(e != nullptr, "invariant");
StringEntryPtr entry = (StringEntryPtr)e;
return write_string(writer, entry, true);
}
@ -991,7 +991,7 @@ typedef JfrTypeWriterHost<LeakStringEntryWriterImpl, TYPE_SYMBOL> LeakStringEntr
typedef CompositeFunctor<StringEntryPtr, LeakStringEntryWriter, StringEntryWriter> CompositeStringWriter;
static void write_symbols_with_leakp() {
assert(_leakp_writer != NULL, "invariant");
assert(_leakp_writer != nullptr, "invariant");
SymbolEntryWriter sw(_writer, _class_unload);
LeakSymbolEntryWriter lsw(_leakp_writer, _class_unload);
CompositeSymbolWriter csw(&lsw, &sw);
@ -1006,8 +1006,8 @@ static void write_symbols_with_leakp() {
}
static void write_symbols() {
assert(_writer != NULL, "invariant");
if (_leakp_writer != NULL) {
assert(_writer != nullptr, "invariant");
if (_leakp_writer != nullptr) {
write_symbols_with_leakp();
return;
}
@ -1029,7 +1029,7 @@ static void clear_klasses_and_methods() {
}
static size_t teardown() {
assert(_artifacts != NULL, "invariant");
assert(_artifacts != nullptr, "invariant");
const size_t total_count = _artifacts->total_count();
if (previous_epoch()) {
clear_klasses_and_methods();
@ -1047,7 +1047,7 @@ static void setup(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer
_leakp_writer = leakp_writer;
_class_unload = class_unload;
_flushpoint = flushpoint;
if (_artifacts == NULL) {
if (_artifacts == nullptr) {
_artifacts = new JfrArtifactSet(class_unload);
} else {
_artifacts->initialize(class_unload);
@ -1055,7 +1055,7 @@ static void setup(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer
if (!_class_unload) {
JfrKlassUnloading::sort(previous_epoch());
}
assert(_artifacts != NULL, "invariant");
assert(_artifacts != nullptr, "invariant");
assert(!_artifacts->has_klass_entries(), "invariant");
}
@ -1063,7 +1063,7 @@ static void setup(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer
* Write all "tagged" (in-use) constant artifacts and their dependencies.
*/
size_t JfrTypeSet::serialize(JfrCheckpointWriter* writer, JfrCheckpointWriter* leakp_writer, bool class_unload, bool flushpoint) {
assert(writer != NULL, "invariant");
assert(writer != nullptr, "invariant");
ResourceMark rm;
setup(writer, leakp_writer, class_unload, flushpoint);
// write order is important because an individual write step
@ -1085,10 +1085,10 @@ size_t JfrTypeSet::serialize(JfrCheckpointWriter* writer, JfrCheckpointWriter* l
void JfrTypeSet::clear() {
ResourceMark rm;
JfrKlassUnloading::clear();
if (_artifacts != NULL) {
if (_artifacts != nullptr) {
_artifacts->clear();
}
setup(NULL, NULL, false, false);
setup(nullptr, nullptr, false, false);
register_klasses();
clear_packages();
clear_modules();
@ -1102,7 +1102,7 @@ size_t JfrTypeSet::on_unloading_classes(JfrCheckpointWriter* writer) {
// happen in arbitrary threads, we invoke it explicitly.
JfrTraceIdEpoch::has_changed_tag_state_no_reset();
if (JfrRecorder::is_recording()) {
return serialize(writer, NULL, true, false);
return serialize(writer, nullptr, true, false);
}
return register_unloading_klasses();
}
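JfrTypeSet::serialize takes the leak-profiler writer as an optional second argument, and this call site simply passes nullptr when that output is not wanted. A hypothetical sketch of the convention, with std::ostream standing in for JfrCheckpointWriter:

#include <iostream>
#include <sstream>

static size_t serialize_types(std::ostream& out, std::ostream* leakp_out) {
  out << "all tagged artifacts\n";
  if (leakp_out != nullptr) {            // optional channel, written only when provided
    *leakp_out << "leak-profiler subset\n";
  }
  return 1;
}

int main() {
  std::ostringstream main_out, leak_out;
  serialize_types(main_out, &leak_out);  // both outputs requested
  serialize_types(main_out, nullptr);    // unloading path: no leakp writer
  std::cout << main_out.str();
  return 0;
}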


@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,22 +30,22 @@
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
JfrArtifactSet::JfrArtifactSet(bool class_unload) : _symbol_table(NULL),
_klass_list(NULL),
JfrArtifactSet::JfrArtifactSet(bool class_unload) : _symbol_table(nullptr),
_klass_list(nullptr),
_total_count(0) {
initialize(class_unload);
assert(_klass_list != NULL, "invariant");
assert(_klass_list != nullptr, "invariant");
}
static const size_t initial_klass_list_size = 256;
const int initial_klass_loader_set_size = 64;
void JfrArtifactSet::initialize(bool class_unload) {
if (_symbol_table == NULL) {
if (_symbol_table == nullptr) {
_symbol_table = JfrSymbolTable::create();
assert(_symbol_table != NULL, "invariant");
assert(_symbol_table != nullptr, "invariant");
}
assert(_symbol_table != NULL, "invariant");
assert(_symbol_table != nullptr, "invariant");
_symbol_table->set_class_unload(class_unload);
_total_count = 0;
// resource allocation
@ -54,7 +54,7 @@ void JfrArtifactSet::initialize(bool class_unload) {
}
void JfrArtifactSet::clear() {
if (_symbol_table != NULL) {
if (_symbol_table != nullptr) {
_symbol_table->clear();
}
}
@ -98,14 +98,14 @@ int JfrArtifactSet::entries() const {
}
bool JfrArtifactSet::should_do_loader_klass(const Klass* k) {
assert(k != NULL, "invariant");
assert(_klass_loader_set != NULL, "invariant");
assert(k != nullptr, "invariant");
assert(_klass_loader_set != nullptr, "invariant");
return !JfrMutablePredicate<const Klass*, compare_klasses>::test(_klass_loader_set, k);
}
void JfrArtifactSet::register_klass(const Klass* k) {
assert(k != NULL, "invariant");
assert(_klass_list != NULL, "invariant");
assert(k != nullptr, "invariant");
assert(_klass_list != nullptr, "invariant");
_klass_list->append(k);
}
@ -114,7 +114,7 @@ size_t JfrArtifactSet::total_count() const {
}
void JfrArtifactSet::increment_checkpoint_id() {
assert(_symbol_table != NULL, "invariant");
assert(_symbol_table != nullptr, "invariant");
_symbol_table->increment_checkpoint_id();
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -42,8 +42,8 @@ class CompositeFunctor {
Func2* _g;
public:
CompositeFunctor(Func1* f, Func2* g) : _f(f), _g(g) {
assert(f != NULL, "invariant");
assert(g != NULL, "invariant");
assert(f != nullptr, "invariant");
assert(g != nullptr, "invariant");
}
bool operator()(T const& value) {
return (*_f)(value) && (*_g)(value);
@ -63,11 +63,11 @@ class JfrArtifactCallbackHost : public JfrArtifactClosure {
public:
JfrArtifactCallbackHost(JfrArtifactClosure** subsystem_callback_loc, Callback* callback) :
_subsystem_callback_loc(subsystem_callback_loc), _callback(callback) {
assert(*_subsystem_callback_loc == NULL, "Subsystem callback should not be set yet");
assert(*_subsystem_callback_loc == nullptr, "Subsystem callback should not be set yet");
*_subsystem_callback_loc = this;
}
~JfrArtifactCallbackHost() {
*_subsystem_callback_loc = NULL;
*_subsystem_callback_loc = nullptr;
}
void do_artifact(const void* artifact) {
(*_callback)(reinterpret_cast<T const&>(artifact));
@ -81,7 +81,7 @@ class KlassToFieldEnvelope {
KlassToFieldEnvelope(Letter* letter) : _letter(letter) {}
bool operator()(const Klass* klass) {
typename FieldSelector::TypePtr t = FieldSelector::select(klass);
return t != NULL ? (*_letter)(t) : true;
return t != nullptr ? (*_letter)(t) : true;
}
};
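KlassToFieldEnvelope navigates from a klass to a possibly missing sub-artifact (package, module, class loader data) and forwards only non-null results, returning true so iteration continues when the field is absent. A hypothetical miniature of that pattern; Pkg, Klass and the selector names below are stand-ins, not the HotSpot types:

#include <iostream>

struct Pkg   { const char* name; };
struct Klass { const Pkg* pkg; };

struct PackageSelector {
  using TypePtr = const Pkg*;
  static TypePtr select(const Klass* k) { return k->pkg; }  // may be nullptr
};

struct PrintLetter {
  bool operator()(const Pkg* p) { std::cout << p->name << "\n"; return true; }
};

template <typename FieldSelector, typename Letter>
struct FieldEnvelope {
  Letter* _letter;
  explicit FieldEnvelope(Letter* l) : _letter(l) {}
  bool operator()(const Klass* k) {
    typename FieldSelector::TypePtr t = FieldSelector::select(k);
    return t != nullptr ? (*_letter)(t) : true;  // absent field: keep iterating
  }
};

int main() {
  Pkg java_lang{"java.lang"};
  Klass with_pkg{&java_lang};
  Klass bootstrap{nullptr};
  PrintLetter letter;
  FieldEnvelope<PackageSelector, PrintLetter> env(&letter);
  env(&with_pkg);   // prints "java.lang"
  env(&bootstrap);  // no package: nothing printed
  return 0;
}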
@ -116,7 +116,7 @@ class SerializePredicate {
public:
SerializePredicate(bool class_unload) : _class_unload(class_unload) {}
bool operator()(T const& value) {
assert(value != NULL, "invariant");
assert(value != nullptr, "invariant");
return _class_unload ? true : IS_NOT_SERIALIZED(value);
}
};
@ -127,7 +127,7 @@ class SerializePredicate<const Method*> {
public:
SerializePredicate(bool class_unload) : _class_unload(class_unload) {}
bool operator()(const Method* method) {
assert(method != NULL, "invariant");
assert(method != nullptr, "invariant");
return _class_unload ? true : METHOD_NOT_SERIALIZED(method);
}
};
@ -138,7 +138,7 @@ class SymbolPredicate {
public:
SymbolPredicate(bool class_unload) : _class_unload(class_unload) {}
bool operator()(T const& value) {
assert(value != NULL, "invariant");
assert(value != nullptr, "invariant");
if (_class_unload) {
return leakp ? value->is_leakp() : value->is_unloading();
}
@ -182,7 +182,7 @@ class LeakPredicate<const Method*> {
public:
LeakPredicate(bool class_unload) {}
bool operator()(const Method* method) {
assert(method != NULL, "invariant");
assert(method != nullptr, "invariant");
return IS_METHOD_LEAKP_USED(method);
}
};
@ -266,11 +266,11 @@ class KlassArtifactRegistrator {
public:
KlassArtifactRegistrator(JfrArtifactSet* artifacts) :
_artifacts(artifacts) {
assert(_artifacts != NULL, "invariant");
assert(_artifacts != nullptr, "invariant");
}
bool operator()(const Klass* klass) {
assert(klass != NULL, "invariant");
assert(klass != nullptr, "invariant");
_artifacts->register_klass(klass);
return true;
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -74,27 +74,27 @@ static bool found_jdk_internal_event_klass = false;
static bool found_jdk_jfr_event_klass = false;
static void check_klass(const Klass* klass) {
assert(klass != NULL, "invariant");
assert(klass != nullptr, "invariant");
if (found_jdk_internal_event_klass && found_jdk_jfr_event_klass) {
return;
}
static const Symbol* jdk_internal_event_sym = NULL;
if (jdk_internal_event_sym == NULL) {
static const Symbol* jdk_internal_event_sym = nullptr;
if (jdk_internal_event_sym == nullptr) {
// setup when loading the first TypeArrayKlass (Universe::genesis) hence single threaded invariant
jdk_internal_event_sym = SymbolTable::new_permanent_symbol("jdk/internal/event/Event");
}
assert(jdk_internal_event_sym != NULL, "invariant");
assert(jdk_internal_event_sym != nullptr, "invariant");
static const Symbol* jdk_jfr_event_sym = NULL;
if (jdk_jfr_event_sym == NULL) {
static const Symbol* jdk_jfr_event_sym = nullptr;
if (jdk_jfr_event_sym == nullptr) {
// setup when loading the first TypeArrayKlass (Universe::genesis) hence single threaded invariant
jdk_jfr_event_sym = SymbolTable::new_permanent_symbol("jdk/jfr/Event");
}
assert(jdk_jfr_event_sym != NULL, "invariant");
assert(jdk_jfr_event_sym != nullptr, "invariant");
const Symbol* const klass_name = klass->name();
if (!found_jdk_internal_event_klass) {
if (jdk_internal_event_sym == klass_name && klass->class_loader() == NULL) {
if (jdk_internal_event_sym == klass_name && klass->class_loader() == nullptr) {
found_jdk_internal_event_klass = true;
JfrTraceId::tag_as_jdk_jfr_event(klass);
return;
@ -102,7 +102,7 @@ static void check_klass(const Klass* klass) {
}
if (!found_jdk_jfr_event_klass) {
if (jdk_jfr_event_sym == klass_name && klass->class_loader() == NULL) {
if (jdk_jfr_event_sym == klass_name && klass->class_loader() == nullptr) {
found_jdk_jfr_event_klass = true;
JfrTraceId::tag_as_jdk_jfr_event(klass);
return;
@ -145,17 +145,17 @@ void JfrTraceId::assign(const Klass* klass) {
}
void JfrTraceId::assign(const ModuleEntry* module) {
assert(module != NULL, "invariant");
assert(module != nullptr, "invariant");
module->set_trace_id(next_module_id());
}
void JfrTraceId::assign(const PackageEntry* package) {
assert(package != NULL, "invariant");
assert(package != nullptr, "invariant");
package->set_trace_id(next_package_id());
}
void JfrTraceId::assign(const ClassLoaderData* cld) {
assert(cld != NULL, "invariant");
assert(cld != nullptr, "invariant");
if (cld->has_class_mirror_holder()) {
cld->set_trace_id(0);
return;
@ -176,7 +176,7 @@ static traceid load_primitive(const oop mirror) {
assert(java_lang_Class::is_primitive(mirror), "invariant");
const Klass* const tak = java_lang_Class::array_klass_acquire(mirror);
traceid id;
if (tak == NULL) {
if (tak == nullptr) {
// The first klass id is reserved for the void.class
id = LAST_TYPE_ID + 1;
} else {
@ -187,12 +187,12 @@ static traceid load_primitive(const oop mirror) {
}
traceid JfrTraceId::load(jclass jc, bool raw /* false */) {
assert(jc != NULL, "invariant");
assert(jc != nullptr, "invariant");
assert(JavaThread::current()->thread_state() == _thread_in_vm, "invariant");
const oop mirror = JNIHandles::resolve(jc);
assert(mirror != NULL, "invariant");
assert(mirror != nullptr, "invariant");
const Klass* const k = java_lang_Class::as_Klass(mirror);
return k != NULL ? (raw ? load_raw(k) : load(k)) : load_primitive(mirror);
return k != nullptr ? (raw ? load_raw(k) : load(k)) : load_primitive(mirror);
}
traceid JfrTraceId::load_raw(jclass jc) {
@ -202,7 +202,7 @@ traceid JfrTraceId::load_raw(jclass jc) {
#if INCLUDE_CDS
// used by CDS / APPCDS as part of "remove_unshareable_info"
void JfrTraceId::remove(const Klass* k) {
assert(k != NULL, "invariant");
assert(k != nullptr, "invariant");
// Mask off and store the event flags.
// This mechanism will retain the event specific flags
// in the archive, allowing for event flag restoration
@ -212,14 +212,14 @@ void JfrTraceId::remove(const Klass* k) {
// used by CDS / APPCDS as part of "remove_unshareable_info"
void JfrTraceId::remove(const Method* method) {
assert(method != NULL, "invariant");
assert(method != nullptr, "invariant");
// Clear all bits.
method->set_trace_flags(0);
}
// used by CDS / APPCDS as part of "restore_unshareable_info"
void JfrTraceId::restore(const Klass* k) {
assert(k != NULL, "invariant");
assert(k != nullptr, "invariant");
if (IS_JDK_JFR_EVENT_KLASS(k)) {
found_jdk_jfr_event_klass = true;
}
@ -234,61 +234,61 @@ void JfrTraceId::restore(const Klass* k) {
#endif // INCLUDE_CDS
bool JfrTraceId::in_visible_set(const jclass jc) {
assert(jc != NULL, "invariant");
assert(jc != nullptr, "invariant");
assert(JavaThread::current()->thread_state() == _thread_in_vm, "invariant");
const oop mirror = JNIHandles::resolve(jc);
assert(mirror != NULL, "invariant");
assert(mirror != nullptr, "invariant");
return in_visible_set(java_lang_Class::as_Klass(mirror));
}
bool JfrTraceId::in_jdk_jfr_event_hierarchy(const jclass jc) {
assert(jc != NULL, "invariant");
assert(jc != nullptr, "invariant");
const oop mirror = JNIHandles::resolve(jc);
assert(mirror != NULL, "invariant");
assert(mirror != nullptr, "invariant");
return in_jdk_jfr_event_hierarchy(java_lang_Class::as_Klass(mirror));
}
bool JfrTraceId::is_jdk_jfr_event_sub(const jclass jc) {
assert(jc != NULL, "invariant");
assert(jc != nullptr, "invariant");
const oop mirror = JNIHandles::resolve(jc);
assert(mirror != NULL, "invariant");
assert(mirror != nullptr, "invariant");
return is_jdk_jfr_event_sub(java_lang_Class::as_Klass(mirror));
}
bool JfrTraceId::is_jdk_jfr_event(const jclass jc) {
assert(jc != NULL, "invariant");
assert(jc != nullptr, "invariant");
const oop mirror = JNIHandles::resolve(jc);
assert(mirror != NULL, "invariant");
assert(mirror != nullptr, "invariant");
return is_jdk_jfr_event(java_lang_Class::as_Klass(mirror));
}
bool JfrTraceId::is_event_host(const jclass jc) {
assert(jc != NULL, "invariant");
assert(jc != nullptr, "invariant");
const oop mirror = JNIHandles::resolve(jc);
assert(mirror != NULL, "invariant");
assert(mirror != nullptr, "invariant");
return is_event_host(java_lang_Class::as_Klass(mirror));
}
void JfrTraceId::tag_as_jdk_jfr_event_sub(const jclass jc) {
assert(jc != NULL, "invariant");
assert(jc != nullptr, "invariant");
const oop mirror = JNIHandles::resolve(jc);
assert(mirror != NULL, "invariant");
assert(mirror != nullptr, "invariant");
const Klass* const k = java_lang_Class::as_Klass(mirror);
tag_as_jdk_jfr_event_sub(k);
assert(IS_JDK_JFR_EVENT_SUBKLASS(k), "invariant");
}
void JfrTraceId::tag_as_event_host(const jclass jc) {
assert(jc != NULL, "invariant");
assert(jc != nullptr, "invariant");
const oop mirror = JNIHandles::resolve(jc);
assert(mirror != NULL, "invariant");
assert(mirror != nullptr, "invariant");
const Klass* const k = java_lang_Class::as_Klass(mirror);
tag_as_event_host(k);
assert(IS_EVENT_HOST_KLASS(k), "invariant");
}
void JfrTraceId::untag_jdk_jfr_event_sub(const Klass* k) {
assert(k != NULL, "invariant");
assert(k != nullptr, "invariant");
if (JfrTraceId::is_jdk_jfr_event_sub(k)) {
CLEAR_JDK_JFR_EVENT_SUBKLASS(k);
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -66,7 +66,7 @@ inline traceid JfrTraceId::load_leakp(const Klass* klass, const Method* method)
template <typename T>
inline traceid raw_load(const T* t) {
assert(t != NULL, "invariant");
assert(t != nullptr, "invariant");
return TRACE_ID(t);
}
@ -91,29 +91,29 @@ inline traceid JfrTraceId::load_raw(const ClassLoaderData* cld) {
}
inline bool JfrTraceId::in_visible_set(const Klass* klass) {
assert(klass != NULL, "invariant");
assert(klass != nullptr, "invariant");
assert(JavaThread::current()->thread_state() == _thread_in_vm, "invariant");
return (IS_JDK_JFR_EVENT_SUBKLASS(klass) && !klass->is_abstract()) || IS_EVENT_HOST_KLASS(klass);
}
inline bool JfrTraceId::is_jdk_jfr_event(const Klass* k) {
assert(k != NULL, "invariant");
assert(k != nullptr, "invariant");
return IS_JDK_JFR_EVENT_KLASS(k);
}
inline void JfrTraceId::tag_as_jdk_jfr_event(const Klass* klass) {
assert(klass != NULL, "invariant");
assert(klass != nullptr, "invariant");
SET_JDK_JFR_EVENT_KLASS(klass);
assert(IS_JDK_JFR_EVENT_KLASS(klass), "invariant");
}
inline bool JfrTraceId::is_jdk_jfr_event_sub(const Klass* k) {
assert(k != NULL, "invariant");
assert(k != nullptr, "invariant");
return IS_JDK_JFR_EVENT_SUBKLASS(k);
}
inline void JfrTraceId::tag_as_jdk_jfr_event_sub(const Klass* k) {
assert(k != NULL, "invariant");
assert(k != nullptr, "invariant");
if (IS_NOT_AN_EVENT_SUB_KLASS(k)) {
SET_JDK_JFR_EVENT_SUBKLASS(k);
}
@ -121,21 +121,21 @@ inline void JfrTraceId::tag_as_jdk_jfr_event_sub(const Klass* k) {
}
inline bool JfrTraceId::in_jdk_jfr_event_hierarchy(const Klass* klass) {
assert(klass != NULL, "invariant");
assert(klass != nullptr, "invariant");
if (is_jdk_jfr_event(klass)) {
return true;
}
const Klass* const super = klass->super();
return super != NULL ? IS_EVENT_KLASS(super) : false;
return super != nullptr ? IS_EVENT_KLASS(super) : false;
}
inline bool JfrTraceId::is_event_host(const Klass* k) {
assert(k != NULL, "invariant");
assert(k != nullptr, "invariant");
return IS_EVENT_HOST_KLASS(k);
}
inline void JfrTraceId::tag_as_event_host(const Klass* k) {
assert(k != NULL, "invariant");
assert(k != nullptr, "invariant");
SET_EVENT_HOST_KLASS(k);
assert(IS_EVENT_HOST_KLASS(k), "invariant");
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,7 +40,7 @@ const int meta_offset = low_offset - 1;
#endif
inline jbyte* low_addr(jbyte* addr) {
assert(addr != NULL, "invariant");
assert(addr != nullptr, "invariant");
return addr + low_offset;
}
@ -49,7 +49,7 @@ inline jbyte* low_addr(traceid* addr) {
}
inline jbyte* meta_addr(jbyte* addr) {
assert(addr != NULL, "invariant");
assert(addr != nullptr, "invariant");
return addr + meta_offset;
}
@ -59,25 +59,25 @@ inline jbyte* meta_addr(traceid* addr) {
template <typename T>
inline jbyte* traceid_tag_byte(const T* ptr) {
assert(ptr != NULL, "invariant");
assert(ptr != nullptr, "invariant");
return low_addr(ptr->trace_id_addr());
}
template <>
inline jbyte* traceid_tag_byte<Method>(const Method* ptr) {
assert(ptr != NULL, "invariant");
assert(ptr != nullptr, "invariant");
return ptr->trace_flags_addr();
}
template <typename T>
inline jbyte* traceid_meta_byte(const T* ptr) {
assert(ptr != NULL, "invariant");
assert(ptr != nullptr, "invariant");
return meta_addr(ptr->trace_id_addr());
}
template <>
inline jbyte* traceid_meta_byte<Method>(const Method* ptr) {
assert(ptr != NULL, "invariant");
assert(ptr != nullptr, "invariant");
return ptr->trace_meta_addr();
}
@ -95,14 +95,14 @@ inline jbyte traceid_xor(jbyte bits, jbyte current) {
template <jbyte op(jbyte, jbyte)>
inline void set_form(jbyte bits, jbyte* dest) {
assert(dest != NULL, "invariant");
assert(dest != nullptr, "invariant");
*dest = op(bits, *dest);
OrderAccess::storestore();
}
template <jbyte op(jbyte, jbyte)>
inline void set_cas_form(jbyte bits, jbyte volatile* dest) {
assert(dest != NULL, "invariant");
assert(dest != nullptr, "invariant");
do {
const jbyte current = *dest;
const jbyte new_value = op(bits, current);
@ -114,24 +114,24 @@ inline void set_cas_form(jbyte bits, jbyte volatile* dest) {
template <typename T>
inline void JfrTraceIdBits::cas(jbyte bits, const T* ptr) {
assert(ptr != NULL, "invariant");
assert(ptr != nullptr, "invariant");
set_cas_form<traceid_or>(bits, traceid_tag_byte(ptr));
}
template <typename T>
inline traceid JfrTraceIdBits::load(const T* ptr) {
assert(ptr != NULL, "invariant");
assert(ptr != nullptr, "invariant");
return ptr->trace_id();
}
inline void set(jbyte bits, jbyte* dest) {
assert(dest != NULL, "invariant");
assert(dest != nullptr, "invariant");
set_form<traceid_or>(bits, dest);
}
template <typename T>
inline void JfrTraceIdBits::store(jbyte bits, const T* ptr) {
assert(ptr != NULL, "invariant");
assert(ptr != nullptr, "invariant");
// gcc12 warns "writing 1 byte into a region of size 0" when T == Klass.
// The warning seems to be a false positive. And there is no warning for
// other types that use the same mechanisms. The warning also sometimes
@ -145,7 +145,7 @@ inline void JfrTraceIdBits::store(jbyte bits, const T* ptr) {
template <typename T>
inline void JfrTraceIdBits::meta_store(jbyte bits, const T* ptr) {
assert(ptr != NULL, "invariant");
assert(ptr != nullptr, "invariant");
set(bits, traceid_meta_byte(ptr));
}
@ -155,13 +155,13 @@ inline void set_mask(jbyte mask, jbyte* dest) {
template <typename T>
inline void JfrTraceIdBits::mask_store(jbyte mask, const T* ptr) {
assert(ptr != NULL, "invariant");
assert(ptr != nullptr, "invariant");
set_mask(mask, traceid_tag_byte(ptr));
}
template <typename T>
inline void JfrTraceIdBits::meta_mask_store(jbyte mask, const T* ptr) {
assert(ptr != NULL, "invariant");
assert(ptr != nullptr, "invariant");
set_mask(mask, traceid_meta_byte(ptr));
}
@ -171,7 +171,7 @@ inline void clear_bits(jbyte bits, jbyte* dest) {
template <typename T>
inline void JfrTraceIdBits::clear(jbyte bits, const T* ptr) {
assert(ptr != NULL, "invariant");
assert(ptr != nullptr, "invariant");
clear_bits(bits, traceid_tag_byte(ptr));
}
@ -181,13 +181,13 @@ inline void clear_bits_cas(jbyte bits, jbyte* dest) {
template <typename T>
inline void JfrTraceIdBits::clear_cas(jbyte bits, const T* ptr) {
assert(ptr != NULL, "invariant");
assert(ptr != nullptr, "invariant");
clear_bits_cas(bits, traceid_tag_byte(ptr));
}
template <typename T>
inline void JfrTraceIdBits::meta_clear(jbyte bits, const T* ptr) {
assert(ptr != NULL, "invariant");
assert(ptr != nullptr, "invariant");
clear_bits(bits, traceid_meta_byte(ptr));
}
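set_cas_form above recomputes the new tag byte from the current value and retries until the compare-and-swap succeeds. A hedged sketch of the same loop written against std::atomic instead of the HotSpot Atomic primitives, so the memory-order choices are illustrative rather than taken from the original code:

#include <atomic>
#include <cstdint>
#include <iostream>

static void set_bits_cas(uint8_t bits, std::atomic<uint8_t>* dest) {
  uint8_t current = dest->load(std::memory_order_relaxed);
  uint8_t desired;
  do {
    desired = static_cast<uint8_t>(current | bits);   // the "traceid_or" form
    // compare_exchange_weak reloads `current` on failure, so the next
    // iteration recomputes `desired` from the freshly observed byte.
  } while (!dest->compare_exchange_weak(current, desired,
                                        std::memory_order_release,
                                        std::memory_order_relaxed));
}

int main() {
  std::atomic<uint8_t> flags{0x01};
  set_bits_cas(0x10, &flags);
  std::cout << std::hex << int(flags.load()) << "\n";  // prints 11
  return 0;
}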


@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -80,7 +80,7 @@ static bool can_compress_element(traceid id) {
}
static size_t element_size(const Klass* klass) {
assert(klass != NULL, "invariant");
assert(klass != nullptr, "invariant");
return element_size(can_compress_element(JfrTraceId::load_raw(klass)));
}
@ -117,7 +117,7 @@ static traceid read_uncompressed_element(const u1* pos, const Klass** klass) {
}
static traceid read_element(const u1* pos, const Klass** klass, bool compressed) {
assert(pos != NULL, "invariant");
assert(pos != nullptr, "invariant");
return compressed ? read_compressed_element(pos, klass) : read_uncompressed_element(pos, klass);
}
@ -143,8 +143,8 @@ static void store_uncompressed_element(traceid id, const Klass* klass, u1* pos)
}
static void store_element(const Klass* klass, u1* pos) {
assert(pos != NULL, "invariant");
assert(klass != NULL, "invariant");
assert(pos != nullptr, "invariant");
assert(klass != nullptr, "invariant");
const traceid id = JfrTraceId::load_raw(klass);
if (can_compress_element(id)) {
store_compressed_element(id, klass, pos);
@ -171,7 +171,7 @@ static bool _clear = false;
template <typename Buffer>
size_t JfrEpochQueueKlassPolicy<Buffer>::operator()(const u1* pos, KlassFunctor& callback, bool previous_epoch) {
assert(pos != NULL, "invariant");
assert(pos != nullptr, "invariant");
const bool compressed = is_compressed(pos);
const size_t size = ::element_size(compressed);
if (_clear || is_unloaded(pos)) {
@ -184,35 +184,35 @@ size_t JfrEpochQueueKlassPolicy<Buffer>::operator()(const u1* pos, KlassFunctor&
set_unloaded(pos);
return size;
}
assert(klass != NULL, "invariant");
assert(klass != nullptr, "invariant");
callback(const_cast<Klass*>(klass));
return size;
}
template <typename Buffer>
void JfrEpochQueueKlassPolicy<Buffer>::store_element(const Klass* klass, Buffer* buffer) {
assert(klass != NULL, "invariant");
assert(buffer != NULL, "invariant");
assert(klass != nullptr, "invariant");
assert(buffer != nullptr, "invariant");
assert(buffer->free_size() >= ::element_size(klass), "invariant");
::store_element(klass, buffer->pos());
}
template <typename Buffer>
inline size_t JfrEpochQueueKlassPolicy<Buffer>::element_size(const Klass* klass) {
assert(klass != NULL, "invariant");
assert(klass != nullptr, "invariant");
return ::element_size(klass);
}
template <typename Buffer>
inline Buffer* JfrEpochQueueKlassPolicy<Buffer>::thread_local_storage(Thread* thread) const {
assert(thread != NULL, "invariant");
assert(thread != nullptr, "invariant");
JfrThreadLocal* tl = thread->jfr_thread_local();
return JfrTraceIdEpoch::epoch() ? tl->_load_barrier_buffer_epoch_1 : tl->_load_barrier_buffer_epoch_0;
}
template <typename Buffer>
inline void JfrEpochQueueKlassPolicy<Buffer>::set_thread_local_storage(Buffer* buffer, Thread* thread) {
assert(thread != NULL, "invariant");
assert(thread != nullptr, "invariant");
JfrThreadLocal* tl = thread->jfr_thread_local();
if (JfrTraceIdEpoch::epoch()) {
tl->_load_barrier_buffer_epoch_1 = buffer;
@ -228,22 +228,22 @@ JfrTraceIdKlassQueue::~JfrTraceIdKlassQueue() {
}
bool JfrTraceIdKlassQueue::initialize(size_t min_elem_size, size_t free_list_cache_count_limit, size_t cache_prealloc_count) {
assert(_queue == NULL, "invariant");
assert(_queue == nullptr, "invariant");
_queue = new JfrEpochQueue<JfrEpochQueueKlassPolicy>();
return _queue != NULL && _queue->initialize(min_elem_size, free_list_cache_count_limit, cache_prealloc_count);
return _queue != nullptr && _queue->initialize(min_elem_size, free_list_cache_count_limit, cache_prealloc_count);
}
void JfrTraceIdKlassQueue::clear() {
if (_queue != NULL) {
if (_queue != nullptr) {
_clear = true;
KlassFunctor functor(NULL);
KlassFunctor functor(nullptr);
_queue->iterate(functor, true);
_clear = false;
}
}
void JfrTraceIdKlassQueue::enqueue(const Klass* klass) {
assert(klass != NULL, "invariant");
assert(klass != nullptr, "invariant");
_queue->enqueue(klass);
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -45,19 +45,19 @@ inline bool is_not_tagged(traceid value) {
template <typename T>
inline bool should_tag(const T* t) {
assert(t != NULL, "invariant");
assert(t != nullptr, "invariant");
return is_not_tagged(TRACE_ID_RAW(t));
}
template <>
inline bool should_tag<Method>(const Method* method) {
assert(method != NULL, "invariant");
assert(method != nullptr, "invariant");
return is_not_tagged((traceid)method->trace_flags());
}
template <typename T>
inline traceid set_used_and_get(const T* type) {
assert(type != NULL, "invariant");
assert(type != nullptr, "invariant");
if (should_tag(type)) {
SET_USED_THIS_EPOCH(type);
JfrTraceIdEpoch::set_changed_tag_state();
@ -73,7 +73,7 @@ inline void JfrTraceIdLoadBarrier::load_barrier(const Klass* klass) {
}
inline traceid JfrTraceIdLoadBarrier::load(const Klass* klass) {
assert(klass != NULL, "invariant");
assert(klass != nullptr, "invariant");
if (should_tag(klass)) {
load_barrier(klass);
}
@ -86,8 +86,8 @@ inline traceid JfrTraceIdLoadBarrier::load(const Method* method) {
}
inline traceid JfrTraceIdLoadBarrier::load(const Klass* klass, const Method* method) {
assert(klass != NULL, "invariant");
assert(method != NULL, "invariant");
assert(klass != nullptr, "invariant");
assert(method != nullptr, "invariant");
if (should_tag(method)) {
SET_METHOD_AND_CLASS_USED_THIS_EPOCH(klass);
SET_METHOD_FLAG_USED_THIS_EPOCH(method);
@ -108,7 +108,7 @@ inline traceid JfrTraceIdLoadBarrier::load(const PackageEntry* package) {
}
inline traceid JfrTraceIdLoadBarrier::load(const ClassLoaderData* cld) {
assert(cld != NULL, "invariant");
assert(cld != nullptr, "invariant");
if (cld->has_class_mirror_holder()) {
return 0;
}
@ -120,9 +120,9 @@ inline traceid JfrTraceIdLoadBarrier::load(const ClassLoaderData* cld) {
}
inline traceid JfrTraceIdLoadBarrier::load_leakp(const Klass* klass, const Method* method) {
assert(klass != NULL, "invariant");
assert(klass != nullptr, "invariant");
assert(METHOD_AND_CLASS_USED_THIS_EPOCH(klass), "invariant");
assert(method != NULL, "invariant");
assert(method != nullptr, "invariant");
assert(klass == method->method_holder(), "invariant");
if (should_tag(method)) {
// the method is already logically tagged, just like the klass,


@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -83,7 +83,7 @@ bool JfrRecorder::create_oop_storages() {
bool JfrRecorder::on_create_vm_1() {
if (!is_disabled()) {
if (FlightRecorder || StartFlightRecording != NULL) {
if (FlightRecorder || StartFlightRecording != nullptr) {
enable();
}
}
@ -94,16 +94,16 @@ bool JfrRecorder::on_create_vm_1() {
return JfrTime::initialize();
}
static GrowableArray<JfrStartFlightRecordingDCmd*>* dcmd_recordings_array = NULL;
static GrowableArray<JfrStartFlightRecordingDCmd*>* dcmd_recordings_array = nullptr;
static void release_recordings() {
if (dcmd_recordings_array != NULL) {
if (dcmd_recordings_array != nullptr) {
const int length = dcmd_recordings_array->length();
for (int i = 0; i < length; ++i) {
delete dcmd_recordings_array->at(i);
}
delete dcmd_recordings_array;
dcmd_recordings_array = NULL;
dcmd_recordings_array = nullptr;
}
}
@ -114,8 +114,8 @@ static void teardown_startup_support() {
// Parsing options here to detect errors as soon as possible
static bool parse_recording_options(const char* options, JfrStartFlightRecordingDCmd* dcmd_recording, TRAPS) {
assert(options != NULL, "invariant");
assert(dcmd_recording != NULL, "invariant");
assert(options != nullptr, "invariant");
assert(dcmd_recording != nullptr, "invariant");
CmdLine cmdline(options, strlen(options), true);
dcmd_recording->parse(&cmdline, ',', THREAD);
if (HAS_PENDING_EXCEPTION) {
@ -128,17 +128,17 @@ static bool parse_recording_options(const char* options, JfrStartFlightRecording
static bool validate_recording_options(TRAPS) {
const GrowableArray<const char*>* options = JfrOptionSet::start_flight_recording_options();
if (options == NULL) {
if (options == nullptr) {
return true;
}
const int length = options->length();
assert(length >= 1, "invariant");
assert(dcmd_recordings_array == NULL, "invariant");
assert(dcmd_recordings_array == nullptr, "invariant");
dcmd_recordings_array = new (mtTracing) GrowableArray<JfrStartFlightRecordingDCmd*>(length, mtTracing);
assert(dcmd_recordings_array != NULL, "invariant");
assert(dcmd_recordings_array != nullptr, "invariant");
for (int i = 0; i < length; ++i) {
JfrStartFlightRecordingDCmd* const dcmd_recording = new (mtTracing) JfrStartFlightRecordingDCmd(tty, true);
assert(dcmd_recording != NULL, "invariant");
assert(dcmd_recording != nullptr, "invariant");
dcmd_recordings_array->append(dcmd_recording);
if (!parse_recording_options(options->at(i), dcmd_recording, THREAD)) {
return false;
@ -148,7 +148,7 @@ static bool validate_recording_options(TRAPS) {
}
static bool launch_recording(JfrStartFlightRecordingDCmd* dcmd_recording, TRAPS) {
assert(dcmd_recording != NULL, "invariant");
assert(dcmd_recording != nullptr, "invariant");
log_trace(jfr, system)("Starting a recording");
dcmd_recording->execute(DCmd_Source_Internal, THREAD);
if (HAS_PENDING_EXCEPTION) {
@ -162,7 +162,7 @@ static bool launch_recording(JfrStartFlightRecordingDCmd* dcmd_recording, TRAPS)
static bool launch_command_line_recordings(TRAPS) {
bool result = true;
if (dcmd_recordings_array != NULL) {
if (dcmd_recordings_array != nullptr) {
const int length = dcmd_recordings_array->length();
assert(length >= 1, "invariant");
for (int i = 0; i < length; ++i) {
@ -185,7 +185,7 @@ static void log_jdk_jfr_module_resolution_error(TRAPS) {
static bool is_cds_dump_requested() {
// we will not be able to launch recordings on startup if a cds dump is being requested
if (Arguments::is_dumping_archive() && JfrOptionSet::start_flight_recording_options() != NULL) {
if (Arguments::is_dumping_archive() && JfrOptionSet::start_flight_recording_options() != nullptr) {
warning("JFR will be disabled during CDS dumping");
teardown_startup_support();
return true;
@ -300,14 +300,14 @@ bool JfrRecorder::create_components() {
}
// subsystems
static JfrPostBox* _post_box = NULL;
static JfrStorage* _storage = NULL;
static JfrCheckpointManager* _checkpoint_manager = NULL;
static JfrRepository* _repository = NULL;
static JfrPostBox* _post_box = nullptr;
static JfrStorage* _storage = nullptr;
static JfrCheckpointManager* _checkpoint_manager = nullptr;
static JfrRepository* _repository = nullptr;
static JfrStackTraceRepository* _stack_trace_repository;
static JfrStringPool* _stringpool = NULL;
static JfrOSInterface* _os_interface = NULL;
static JfrThreadSampling* _thread_sampling = NULL;
static JfrStringPool* _stringpool = nullptr;
static JfrOSInterface* _os_interface = nullptr;
static JfrThreadSampling* _thread_sampling = nullptr;
bool JfrRecorder::create_java_event_writer() {
return JfrJavaEventWriter::initialize();
@ -318,55 +318,55 @@ bool JfrRecorder::create_jvmti_agent() {
}
bool JfrRecorder::create_post_box() {
assert(_post_box == NULL, "invariant");
assert(_post_box == nullptr, "invariant");
_post_box = JfrPostBox::create();
return _post_box != NULL;
return _post_box != nullptr;
}
bool JfrRecorder::create_chunk_repository() {
assert(_repository == NULL, "invariant");
assert(_post_box != NULL, "invariant");
assert(_repository == nullptr, "invariant");
assert(_post_box != nullptr, "invariant");
_repository = JfrRepository::create(*_post_box);
return _repository != NULL && _repository->initialize();
return _repository != nullptr && _repository->initialize();
}
bool JfrRecorder::create_os_interface() {
assert(_os_interface == NULL, "invariant");
assert(_os_interface == nullptr, "invariant");
_os_interface = JfrOSInterface::create();
return _os_interface != NULL && _os_interface->initialize();
return _os_interface != nullptr && _os_interface->initialize();
}
bool JfrRecorder::create_storage() {
assert(_repository != NULL, "invariant");
assert(_post_box != NULL, "invariant");
assert(_repository != nullptr, "invariant");
assert(_post_box != nullptr, "invariant");
_storage = JfrStorage::create(_repository->chunkwriter(), *_post_box);
return _storage != NULL && _storage->initialize();
return _storage != nullptr && _storage->initialize();
}
bool JfrRecorder::create_checkpoint_manager() {
assert(_checkpoint_manager == NULL, "invariant");
assert(_repository != NULL, "invariant");
assert(_checkpoint_manager == nullptr, "invariant");
assert(_repository != nullptr, "invariant");
_checkpoint_manager = JfrCheckpointManager::create(_repository->chunkwriter());
return _checkpoint_manager != NULL && _checkpoint_manager->initialize();
return _checkpoint_manager != nullptr && _checkpoint_manager->initialize();
}
bool JfrRecorder::create_stacktrace_repository() {
assert(_stack_trace_repository == NULL, "invariant");
assert(_stack_trace_repository == nullptr, "invariant");
_stack_trace_repository = JfrStackTraceRepository::create();
return _stack_trace_repository != NULL && _stack_trace_repository->initialize();
return _stack_trace_repository != nullptr && _stack_trace_repository->initialize();
}
bool JfrRecorder::create_stringpool() {
assert(_stringpool == NULL, "invariant");
assert(_repository != NULL, "invariant");
assert(_stringpool == nullptr, "invariant");
assert(_repository != nullptr, "invariant");
_stringpool = JfrStringPool::create(_repository->chunkwriter());
return _stringpool != NULL && _stringpool->initialize();
return _stringpool != nullptr && _stringpool->initialize();
}
bool JfrRecorder::create_thread_sampling() {
assert(_thread_sampling == NULL, "invariant");
assert(_thread_sampling == nullptr, "invariant");
_thread_sampling = JfrThreadSampling::create();
return _thread_sampling != NULL;
return _thread_sampling != nullptr;
}
bool JfrRecorder::create_event_throttler() {
@ -375,37 +375,37 @@ bool JfrRecorder::create_event_throttler() {
void JfrRecorder::destroy_components() {
JfrJvmtiAgent::destroy();
if (_post_box != NULL) {
if (_post_box != nullptr) {
JfrPostBox::destroy();
_post_box = NULL;
_post_box = nullptr;
}
if (_repository != NULL) {
if (_repository != nullptr) {
JfrRepository::destroy();
_repository = NULL;
_repository = nullptr;
}
if (_storage != NULL) {
if (_storage != nullptr) {
JfrStorage::destroy();
_storage = NULL;
_storage = nullptr;
}
if (_checkpoint_manager != NULL) {
if (_checkpoint_manager != nullptr) {
JfrCheckpointManager::destroy();
_checkpoint_manager = NULL;
_checkpoint_manager = nullptr;
}
if (_stack_trace_repository != NULL) {
if (_stack_trace_repository != nullptr) {
JfrStackTraceRepository::destroy();
_stack_trace_repository = NULL;
_stack_trace_repository = nullptr;
}
if (_stringpool != NULL) {
if (_stringpool != nullptr) {
JfrStringPool::destroy();
_stringpool = NULL;
_stringpool = nullptr;
}
if (_os_interface != NULL) {
if (_os_interface != nullptr) {
JfrOSInterface::destroy();
_os_interface = NULL;
_os_interface = nullptr;
}
if (_thread_sampling != NULL) {
if (_thread_sampling != nullptr) {
JfrThreadSampling::destroy();
_thread_sampling = NULL;
_thread_sampling = nullptr;
}
JfrEventThrottler::destroy();
}
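The create_*/destroy_components pairs above all follow one small idiom: assert the static slot is empty, allocate, report success only if both allocation and initialization worked, and null the slot again on teardown. A condensed sketch of that idiom with a placeholder Subsystem type (not a HotSpot class):

#include <cassert>
#include <new>

class Subsystem {
 public:
  static Subsystem* create() { return new (std::nothrow) Subsystem(); }
  bool initialize() { return true; }   // real setup would go here
};

static Subsystem* _subsystem = nullptr;

static bool create_subsystem() {
  assert(_subsystem == nullptr && "invariant");
  _subsystem = Subsystem::create();
  return _subsystem != nullptr && _subsystem->initialize();
}

static void destroy_subsystem() {
  if (_subsystem != nullptr) {
    delete _subsystem;                 // the patched code calls a static destroy() per subsystem
    _subsystem = nullptr;
  }
}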

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -58,7 +58,7 @@ static jlong ticks_now() {
}
JfrChunk::JfrChunk() :
_path(NULL),
_path(nullptr),
_start_ticks(0),
_previous_start_ticks(invalid_time),
_start_nanos(0),
@ -74,9 +74,9 @@ JfrChunk::~JfrChunk() {
}
void JfrChunk::reset() {
if (_path != NULL) {
if (_path != nullptr) {
JfrCHeapObj::free(_path, strlen(_path) + 1);
_path = NULL;
_path = nullptr;
}
_last_checkpoint_offset = _last_metadata_offset = 0;
_generation = 1;
@ -180,7 +180,7 @@ int64_t JfrChunk::last_chunk_duration() const {
}
static char* copy_path(const char* path) {
assert(path != NULL, "invariant");
assert(path != nullptr, "invariant");
const size_t path_len = strlen(path);
char* new_path = JfrCHeapObj::new_array<char>(path_len + 1);
strncpy(new_path, path, path_len + 1);
@ -188,11 +188,11 @@ static char* copy_path(const char* path) {
}
void JfrChunk::set_path(const char* path) {
if (_path != NULL) {
if (_path != nullptr) {
JfrCHeapObj::free(_path, strlen(_path) + 1);
_path = NULL;
_path = nullptr;
}
if (path != NULL) {
if (path != nullptr) {
_path = copy_path(path);
}
}

View File

@ -30,12 +30,12 @@
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
static jobject chunk_monitor = NULL;
static jobject chunk_monitor = nullptr;
static int64_t threshold = 0;
static bool rotate = false;
static jobject install_chunk_monitor(JavaThread* thread) {
assert(chunk_monitor == NULL, "invariant");
assert(chunk_monitor == nullptr, "invariant");
// read static field
HandleMark hm(thread);
static const char klass[] = "jdk/jfr/internal/JVM";
@ -50,7 +50,7 @@ static jobject install_chunk_monitor(JavaThread* thread) {
// lazy install
static jobject get_chunk_monitor(JavaThread* thread) {
return chunk_monitor != NULL ? chunk_monitor : install_chunk_monitor(thread);
return chunk_monitor != nullptr ? chunk_monitor : install_chunk_monitor(thread);
}
static void notify() {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -46,12 +46,12 @@ static const int64_t FLAG_OFFSET = GENERATION_OFFSET + 2;
static const int64_t HEADER_SIZE = FLAG_OFFSET + 2;
static fio_fd open_chunk(const char* path) {
return path != NULL ? os::open(path, O_CREAT | O_RDWR, S_IREAD | S_IWRITE) : invalid_fd;
return path != nullptr ? os::open(path, O_CREAT | O_RDWR, S_IREAD | S_IWRITE) : invalid_fd;
}
#ifdef ASSERT
static void assert_writer_position(JfrChunkWriter* writer, int64_t offset) {
assert(writer != NULL, "invariant");
assert(writer != nullptr, "invariant");
assert(offset == writer->current_offset(), "invariant");
}
#endif
@ -133,7 +133,7 @@ class JfrChunkHeadWriter : public StackObj {
void flush(int64_t size, bool finalize) {
assert(_writer->is_valid(), "invariant");
assert(_chunk != NULL, "invariant");
assert(_chunk != nullptr, "invariant");
DEBUG_ONLY(assert_writer_position(_writer, SIZE_OFFSET);)
write_size_to_generation(size, finalize);
write_flags();
@ -142,7 +142,7 @@ class JfrChunkHeadWriter : public StackObj {
void initialize() {
assert(_writer->is_valid(), "invariant");
assert(_chunk != NULL, "invariant");
assert(_chunk != nullptr, "invariant");
DEBUG_ONLY(assert_writer_position(_writer, 0);)
write_magic();
write_version();
@ -153,9 +153,9 @@ class JfrChunkHeadWriter : public StackObj {
}
JfrChunkHeadWriter(JfrChunkWriter* writer, int64_t offset, bool guard = true) : _writer(writer), _chunk(writer->_chunk) {
assert(_writer != NULL, "invariant");
assert(_writer != nullptr, "invariant");
assert(_writer->is_valid(), "invariant");
assert(_chunk != NULL, "invariant");
assert(_chunk != nullptr, "invariant");
if (0 == _writer->current_offset()) {
assert(HEADER_SIZE == offset, "invariant");
initialize();
@ -213,12 +213,12 @@ int64_t JfrChunkWriter::write_chunk_header_checkpoint(bool flushpoint) {
}
void JfrChunkWriter::mark_chunk_final() {
assert(_chunk != NULL, "invariant");
assert(_chunk != nullptr, "invariant");
_chunk->mark_final();
}
int64_t JfrChunkWriter::flush_chunk(bool flushpoint) {
assert(_chunk != NULL, "invariant");
assert(_chunk != nullptr, "invariant");
const int64_t sz_written = write_chunk_header_checkpoint(flushpoint);
assert(size_written() == sz_written, "invariant");
JfrChunkHeadWriter head(this, SIZE_OFFSET);
@ -226,20 +226,20 @@ int64_t JfrChunkWriter::flush_chunk(bool flushpoint) {
return sz_written;
}
JfrChunkWriter::JfrChunkWriter() : JfrChunkWriterBase(NULL), _chunk(new JfrChunk()) {}
JfrChunkWriter::JfrChunkWriter() : JfrChunkWriterBase(nullptr), _chunk(new JfrChunk()) {}
JfrChunkWriter::~JfrChunkWriter() {
assert(_chunk != NULL, "invariant");
assert(_chunk != nullptr, "invariant");
delete _chunk;
}
void JfrChunkWriter::set_path(const char* path) {
assert(_chunk != NULL, "invariant");
assert(_chunk != nullptr, "invariant");
_chunk->set_path(path);
}
void JfrChunkWriter::set_time_stamp() {
assert(_chunk != NULL, "invariant");
assert(_chunk != nullptr, "invariant");
_chunk->set_time_stamp();
}
@ -248,32 +248,32 @@ int64_t JfrChunkWriter::size_written() const {
}
int64_t JfrChunkWriter::last_checkpoint_offset() const {
assert(_chunk != NULL, "invariant");
assert(_chunk != nullptr, "invariant");
return _chunk->last_checkpoint_offset();
}
int64_t JfrChunkWriter::current_chunk_start_nanos() const {
assert(_chunk != NULL, "invariant");
assert(_chunk != nullptr, "invariant");
return _chunk->start_nanos();
}
void JfrChunkWriter::set_last_checkpoint_offset(int64_t offset) {
assert(_chunk != NULL, "invariant");
assert(_chunk != nullptr, "invariant");
_chunk->set_last_checkpoint_offset(offset);
}
void JfrChunkWriter::set_last_metadata_offset(int64_t offset) {
assert(_chunk != NULL, "invariant");
assert(_chunk != nullptr, "invariant");
_chunk->set_last_metadata_offset(offset);
}
bool JfrChunkWriter::has_metadata() const {
assert(_chunk != NULL, "invariant");
assert(_chunk != nullptr, "invariant");
return _chunk->has_metadata();
}
bool JfrChunkWriter::open() {
assert(_chunk != NULL, "invariant");
assert(_chunk != nullptr, "invariant");
JfrChunkWriterBase::reset(open_chunk(_chunk->path()));
const bool is_open = this->has_valid_fd();
if (is_open) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -70,7 +70,7 @@ static bool is_path_empty() {
static size_t get_dump_directory() {
const char* dump_path = JfrEmergencyDump::get_dump_path();
if (*dump_path == '\0') {
if (os::get_current_directory(_path_buffer, sizeof(_path_buffer)) == NULL) {
if (os::get_current_directory(_path_buffer, sizeof(_path_buffer)) == nullptr) {
return 0;
}
} else {
@ -85,7 +85,7 @@ static size_t get_dump_directory() {
}
static fio_fd open_exclusivly(const char* path) {
assert((path != NULL) && (*path != '\0'), "invariant");
assert((path != nullptr) && (*path != '\0'), "invariant");
return os::open(path, O_CREAT | O_RDWR, S_IREAD | S_IWRITE);
}
@ -94,7 +94,7 @@ static bool is_emergency_dump_file_open() {
}
static bool open_emergency_dump_fd(const char* path) {
if (path == NULL) {
if (path == nullptr) {
return false;
}
assert(emergency_fd == invalid_fd, "invariant");
@ -113,9 +113,9 @@ static const char* create_emergency_dump_path() {
const size_t path_len = get_dump_directory();
if (path_len == 0) {
return NULL;
return nullptr;
}
const char* filename_fmt = NULL;
const char* filename_fmt = nullptr;
// fetch specific error cause
switch (JfrJavaSupport::cause()) {
case JfrJavaSupport::OUT_OF_MEMORY:
@ -128,7 +128,7 @@ static const char* create_emergency_dump_path() {
filename_fmt = vm_error_filename_fmt;
}
const bool result = Arguments::copy_expand_pid(filename_fmt, strlen(filename_fmt), _path_buffer + path_len, JVM_MAXPATHLEN - path_len);
return result ? _path_buffer : NULL;
return result ? _path_buffer : nullptr;
}
static bool open_emergency_dump_file() {
@ -149,12 +149,12 @@ static bool open_emergency_dump_file() {
}
static void report(outputStream* st, bool emergency_file_opened, const char* repository_path) {
assert(st != NULL, "invariant");
assert(st != nullptr, "invariant");
if (emergency_file_opened) {
st->print_raw("# JFR recording file will be written. Location: ");
st->print_raw_cr(_path_buffer);
st->print_raw_cr("#");
} else if (repository_path != NULL) {
} else if (repository_path != nullptr) {
st->print_raw("# The JFR repository may contain useful JFR files. Location: ");
st->print_raw_cr(repository_path);
st->print_raw_cr("#");
@ -166,7 +166,7 @@ static void report(outputStream* st, bool emergency_file_opened, const char* rep
}
void JfrEmergencyDump::set_dump_path(const char* path) {
if (path == NULL || *path == '\0') {
if (path == nullptr || *path == '\0') {
os::get_current_directory(_dump_path, sizeof(_dump_path));
} else {
if (strlen(path) < JVM_MAXPATHLEN) {
@ -181,24 +181,24 @@ const char* JfrEmergencyDump::get_dump_path() {
}
void JfrEmergencyDump::on_vm_error_report(outputStream* st, const char* repository_path) {
assert(st != NULL, "invariant");
assert(st != nullptr, "invariant");
Thread* thread = Thread::current_or_null_safe();
if (thread != NULL) {
if (thread != nullptr) {
report(st, open_emergency_dump_file(), repository_path);
} else if (repository_path != NULL) {
} else if (repository_path != nullptr) {
// a non-attached thread will not be able to write anything later
report(st, false, repository_path);
}
}
static int file_sort(const char** const file1, const char** file2) {
assert(NULL != *file1 && NULL != *file2, "invariant");
assert(nullptr != *file1 && nullptr != *file2, "invariant");
int cmp = strncmp(*file1, *file2, iso8601_len);
if (0 == cmp) {
const char* const dot1 = strchr(*file1, '.');
assert(NULL != dot1, "invariant");
assert(nullptr != dot1, "invariant");
const char* const dot2 = strchr(*file2, '.');
assert(NULL != dot2, "invariant");
assert(nullptr != dot2, "invariant");
ptrdiff_t file1_len = dot1 - *file1;
ptrdiff_t file2_len = dot2 - *file2;
if (file1_len < file2_len) {
@ -215,7 +215,7 @@ static int file_sort(const char** const file1, const char** file2) {
}
static void iso8601_to_date_time(char* iso8601_str) {
assert(iso8601_str != NULL, "invariant");
assert(iso8601_str != nullptr, "invariant");
assert(strlen(iso8601_str) == iso8601_len, "invariant");
// "YYYY-MM-DDTHH:MM:SS"
for (size_t i = 0; i < iso8601_len; ++i) {
@ -231,7 +231,7 @@ static void iso8601_to_date_time(char* iso8601_str) {
}
static void date_time(char* buffer, size_t buffer_len) {
assert(buffer != NULL, "invariant");
assert(buffer != nullptr, "invariant");
assert(buffer_len >= iso8601_len, "buffer too small");
os::iso8601_time(buffer, buffer_len);
assert(strlen(buffer) >= iso8601_len + 1, "invariant");
@ -264,7 +264,7 @@ class RepositoryIterator : public StackObj {
// append the file_name at the _path_buffer_file_name_offset position
const char* RepositoryIterator::fully_qualified(const char* file_name) const {
assert(NULL != file_name, "invariant");
assert(nullptr != file_name, "invariant");
assert(!is_path_empty(), "invariant");
assert(_path_buffer_file_name_offset != 0, "invariant");
@ -272,13 +272,13 @@ const char* RepositoryIterator::fully_qualified(const char* file_name) const {
sizeof(_path_buffer) - _path_buffer_file_name_offset,
"%s",
file_name);
return result != -1 ? _path_buffer : NULL;
return result != -1 ? _path_buffer : nullptr;
}
// caller responsible for deallocation
const char* RepositoryIterator::filter(const char* file_name) const {
if (file_name == NULL) {
return NULL;
if (file_name == nullptr) {
return nullptr;
}
const size_t len = strlen(file_name);
if ((len < chunk_file_extension_length) ||
@ -286,36 +286,36 @@ const char* RepositoryIterator::filter(const char* file_name) const {
chunk_file_jfr_ext,
chunk_file_extension_length) != 0)) {
// not a .jfr file
return NULL;
return nullptr;
}
const char* fqn = fully_qualified(file_name);
if (fqn == NULL) {
return NULL;
if (fqn == nullptr) {
return nullptr;
}
const fio_fd fd = open_exclusivly(fqn);
if (invalid_fd == fd) {
return NULL;
return nullptr;
}
const int64_t size = file_size(fd);
::close(fd);
if (size <= chunk_file_header_size) {
return NULL;
return nullptr;
}
char* const file_name_copy = (char*)os::malloc(len + 1, mtTracing);
if (file_name_copy == NULL) {
if (file_name_copy == nullptr) {
log_error(jfr, system)("Unable to malloc memory during jfr emergency dump");
return NULL;
return nullptr;
}
strncpy(file_name_copy, file_name, len + 1);
return file_name_copy;
}
RepositoryIterator::RepositoryIterator(const char* repository_path) :
_file_names(NULL),
_file_names(nullptr),
_path_buffer_file_name_offset(0),
_iterator(0) {
DIR* dirp = os::opendir(repository_path);
if (dirp == NULL) {
if (dirp == nullptr) {
log_error(jfr, system)("Unable to open repository %s", repository_path);
return;
}
@ -329,15 +329,15 @@ RepositoryIterator::RepositoryIterator(const char* repository_path) :
return;
}
_file_names = new (mtTracing) GrowableArray<const char*>(10, mtTracing);
if (_file_names == NULL) {
if (_file_names == nullptr) {
log_error(jfr, system)("Unable to malloc memory during jfr emergency dump");
return;
}
// iterate files in the repository and append filtered file names to the files array
struct dirent* dentry;
while ((dentry = os::readdir(dirp)) != NULL) {
while ((dentry = os::readdir(dirp)) != nullptr) {
const char* file_name = filter(dentry->d_name);
if (file_name != NULL) {
if (file_name != nullptr) {
_file_names->append(file_name);
}
}
@ -348,7 +348,7 @@ RepositoryIterator::RepositoryIterator(const char* repository_path) :
}
RepositoryIterator::~RepositoryIterator() {
if (_file_names != NULL) {
if (_file_names != nullptr) {
for (int i = 0; i < _file_names->length(); ++i) {
os::free(const_cast<char*>(_file_names->at(i)));
}
@ -357,11 +357,11 @@ RepositoryIterator::~RepositoryIterator() {
}
bool RepositoryIterator::has_next() const {
return _file_names != NULL && _iterator < _file_names->length();
return _file_names != nullptr && _iterator < _file_names->length();
}
const char* RepositoryIterator::next() const {
return _iterator >= _file_names->length() ? NULL : fully_qualified(_file_names->at(_iterator++));
return _iterator >= _file_names->length() ? nullptr : fully_qualified(_file_names->at(_iterator++));
}
static void write_repository_files(const RepositoryIterator& iterator, char* const copy_block, size_t block_size) {
@ -369,7 +369,7 @@ static void write_repository_files(const RepositoryIterator& iterator, char* con
while (iterator.has_next()) {
fio_fd current_fd = invalid_fd;
const char* const fqn = iterator.next();
assert(fqn != NULL, "invariant");
assert(fqn != nullptr, "invariant");
current_fd = open_exclusivly(fqn);
if (current_fd != invalid_fd) {
const int64_t size = file_size(current_fd);
@ -396,7 +396,7 @@ static void write_repository_files(const RepositoryIterator& iterator, char* con
static void write_emergency_dump_file(const RepositoryIterator& iterator) {
static const size_t block_size = 1 * M; // 1 mb
char* const copy_block = (char*)os::malloc(block_size, mtTracing);
if (copy_block == NULL) {
if (copy_block == nullptr) {
log_error(jfr, system)("Unable to malloc memory during jfr emergency dump");
log_error(jfr, system)("Unable to write jfr emergency dump file");
} else {
@ -406,7 +406,7 @@ static void write_emergency_dump_file(const RepositoryIterator& iterator) {
}
void JfrEmergencyDump::on_vm_error(const char* repository_path) {
assert(repository_path != NULL, "invariant");
assert(repository_path != nullptr, "invariant");
if (open_emergency_dump_file()) {
RepositoryIterator iterator(repository_path);
write_emergency_dump_file(iterator);
@ -426,13 +426,13 @@ static const char* create_emergency_chunk_path(const char* repository_path) {
os::file_separator(),
date_time_buffer,
chunk_file_jfr_ext);
return result == -1 ? NULL : _path_buffer;
return result == -1 ? nullptr : _path_buffer;
}
const char* JfrEmergencyDump::chunk_path(const char* repository_path) {
if (repository_path == NULL) {
if (repository_path == nullptr) {
if (!open_emergency_dump_file()) {
return NULL;
return nullptr;
}
// We can directly use the emergency dump file name as the chunk.
// The chunk writer will open its own fd so we close this descriptor.
@ -454,7 +454,7 @@ const char* JfrEmergencyDump::chunk_path(const char* repository_path) {
*
*/
static bool prepare_for_emergency_dump(Thread* thread) {
assert(thread != NULL, "invariant");
assert(thread != nullptr, "invariant");
if (thread->is_Watcher_thread()) {
// need WatcherThread as a safeguard against potential deadlocks
return false;
@ -462,7 +462,7 @@ static bool prepare_for_emergency_dump(Thread* thread) {
#ifdef ASSERT
Mutex* owned_lock = thread->owned_locks();
while (owned_lock != NULL) {
while (owned_lock != nullptr) {
Mutex* next = owned_lock->next();
owned_lock->unlock();
owned_lock = next;
@ -531,9 +531,9 @@ class JavaThreadInVMAndNative : public StackObj {
JavaThreadState _original_state;
public:
JavaThreadInVMAndNative(Thread* t) : _jt(t->is_Java_thread() ? JavaThread::cast(t) : NULL),
JavaThreadInVMAndNative(Thread* t) : _jt(t->is_Java_thread() ? JavaThread::cast(t) : nullptr),
_original_state(_thread_max_state) {
if (_jt != NULL) {
if (_jt != nullptr) {
_original_state = _jt->thread_state();
if (_original_state != _thread_in_vm) {
_jt->set_thread_state(_thread_in_vm);
@ -548,7 +548,7 @@ class JavaThreadInVMAndNative : public StackObj {
}
void transition_to_native() {
if (_jt != NULL) {
if (_jt != nullptr) {
assert(_jt->thread_state() == _thread_in_vm, "invariant");
_jt->set_thread_state(_thread_in_native);
}
@ -575,7 +575,7 @@ void JfrEmergencyDump::on_vm_shutdown(bool exception_handler) {
return;
}
Thread* thread = Thread::current_or_null_safe();
if (thread == NULL) {
if (thread == nullptr) {
return;
}
// Ensure a JavaThread is _thread_in_vm when we make this call

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -36,53 +36,53 @@
#include "runtime/mutex.hpp"
#include "runtime/os.hpp"
static JfrRepository* _instance = NULL;
static JfrRepository* _instance = nullptr;
JfrRepository& JfrRepository::instance() {
return *_instance;
}
static JfrChunkWriter* _chunkwriter = NULL;
static JfrChunkWriter* _chunkwriter = nullptr;
JfrChunkWriter& JfrRepository::chunkwriter() {
return *_chunkwriter;
}
JfrRepository::JfrRepository(JfrPostBox& post_box) : _path(NULL), _post_box(post_box) {}
JfrRepository::JfrRepository(JfrPostBox& post_box) : _path(nullptr), _post_box(post_box) {}
bool JfrRepository::initialize() {
assert(_chunkwriter == NULL, "invariant");
assert(_chunkwriter == nullptr, "invariant");
_chunkwriter = new JfrChunkWriter();
return _chunkwriter != NULL;
return _chunkwriter != nullptr;
}
JfrRepository::~JfrRepository() {
if (_path != NULL) {
if (_path != nullptr) {
JfrCHeapObj::free(_path, strlen(_path) + 1);
_path = NULL;
_path = nullptr;
}
if (_chunkwriter != NULL) {
if (_chunkwriter != nullptr) {
delete _chunkwriter;
_chunkwriter = NULL;
_chunkwriter = nullptr;
}
}
JfrRepository* JfrRepository::create(JfrPostBox& post_box) {
assert(_instance == NULL, "invariant");
assert(_instance == nullptr, "invariant");
_instance = new JfrRepository(post_box);
return _instance;
}
void JfrRepository::destroy() {
assert(_instance != NULL, "invariant");
assert(_instance != nullptr, "invariant");
delete _instance;
_instance = NULL;
_instance = nullptr;
}
void JfrRepository::on_vm_error() {
if (_path == NULL) {
if (_path == nullptr) {
// completed already
return;
}
@ -94,14 +94,14 @@ void JfrRepository::on_vm_error_report(outputStream* st) {
}
bool JfrRepository::set_path(const char* path) {
assert(path != NULL, "trying to set the repository path with a NULL string!");
if (_path != NULL) {
assert(path != nullptr, "trying to set the repository path with a null string!");
if (_path != nullptr) {
// delete existing
JfrCHeapObj::free(_path, strlen(_path) + 1);
}
const size_t path_len = strlen(path);
_path = JfrCHeapObj::new_array<char>(path_len + 1);
if (_path == NULL) {
if (_path == nullptr) {
return false;
}
strncpy(_path, path, path_len + 1);
@ -143,8 +143,8 @@ void JfrRepository::set_chunk_path(jstring path, JavaThread* jt) {
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(jt));
ResourceMark rm(jt);
const char* const canonical_chunk_path = JfrJavaSupport::c_str(path, jt);
if (NULL == canonical_chunk_path && !_chunkwriter->is_valid()) {
// new output is NULL and current output is NULL
if (nullptr == canonical_chunk_path && !_chunkwriter->is_valid()) {
// new output is null and current output is null
return;
}
instance().set_chunk_path(canonical_chunk_path);
@ -155,7 +155,7 @@ void JfrRepository::set_path(jstring location, JavaThread* jt) {
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(jt));
ResourceMark rm(jt);
const char* const path = JfrJavaSupport::c_str(location, jt);
if (path != NULL) {
if (path != nullptr) {
instance().set_path(path);
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, Datadog, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -35,7 +35,7 @@ constexpr static const JfrSamplerParams _disabled_params = {
false // reconfigure
};
static JfrEventThrottler* _throttler = NULL;
static JfrEventThrottler* _throttler = nullptr;
JfrEventThrottler::JfrEventThrottler(JfrEventId event_id) :
JfrAdaptiveSampler(),
@ -48,29 +48,29 @@ JfrEventThrottler::JfrEventThrottler(JfrEventId event_id) :
_update(false) {}
bool JfrEventThrottler::create() {
assert(_throttler == NULL, "invariant");
assert(_throttler == nullptr, "invariant");
_throttler = new JfrEventThrottler(JfrObjectAllocationSampleEvent);
return _throttler != NULL && _throttler->initialize();
return _throttler != nullptr && _throttler->initialize();
}
void JfrEventThrottler::destroy() {
delete _throttler;
_throttler = NULL;
_throttler = nullptr;
}
// There is currently only one throttler instance, for the jdk.ObjectAllocationSample event.
// When introducing additional throttlers, also add a lookup map keyed by event id.
JfrEventThrottler* JfrEventThrottler::for_event(JfrEventId event_id) {
assert(_throttler != NULL, "JfrEventThrottler has not been properly initialized");
assert(_throttler != nullptr, "JfrEventThrottler has not been properly initialized");
assert(event_id == JfrObjectAllocationSampleEvent, "Event type has an unconfigured throttler");
return event_id == JfrObjectAllocationSampleEvent ? _throttler : NULL;
return event_id == JfrObjectAllocationSampleEvent ? _throttler : nullptr;
}
void JfrEventThrottler::configure(JfrEventId event_id, int64_t sample_size, int64_t period_ms) {
if (event_id != JfrObjectAllocationSampleEvent) {
return;
}
assert(_throttler != NULL, "JfrEventThrottler has not been properly initialized");
assert(_throttler != nullptr, "JfrEventThrottler has not been properly initialized");
_throttler->configure(sample_size, period_ms);
}
@ -93,7 +93,7 @@ void JfrEventThrottler::configure(int64_t sample_size, int64_t period_ms) {
// Predicate for event selection.
bool JfrEventThrottler::accept(JfrEventId event_id, int64_t timestamp /* 0 */) {
JfrEventThrottler* const throttler = for_event(event_id);
if (throttler == NULL) return true;
if (throttler == nullptr) return true;
return _throttler->_disabled ? true : _throttler->sample(timestamp);
}
@ -168,8 +168,8 @@ inline void set_sample_points_and_window_duration(JfrSamplerParams& params, int6
* If the input event sample size is large enough, normalize to per 1000 ms
*/
inline void normalize(int64_t* sample_size, int64_t* period_ms) {
assert(sample_size != NULL, "invariant");
assert(period_ms != NULL, "invariant");
assert(sample_size != nullptr, "invariant");
assert(period_ms != nullptr, "invariant");
if (*period_ms == MILLIUNITS) {
return;
}
@ -245,7 +245,7 @@ inline double compute_ewma_alpha_coefficient(size_t lookback_count) {
* When introducing additional throttlers, also provide a map from the event id to the event name.
*/
static void log(const JfrSamplerWindow* expired, double* sample_size_ewma) {
assert(sample_size_ewma != NULL, "invariant");
assert(sample_size_ewma != nullptr, "invariant");
if (log_is_enabled(Debug, jfr, system, throttle)) {
*sample_size_ewma = exponentially_weighted_moving_average(expired->sample_size(), compute_ewma_alpha_coefficient(expired->params().window_lookback_count), *sample_size_ewma);
log_debug(jfr, system, throttle)("jdk.ObjectAllocationSample: avg.sample size: %0.4f, window set point: %zu, sample size: %zu, population size: %zu, ratio: %.4f, window duration: %zu ms\n",
@ -266,7 +266,7 @@ static void log(const JfrSamplerWindow* expired, double* sample_size_ewma) {
* in the process of rotating windows.
*/
const JfrSamplerParams& JfrEventThrottler::next_window_params(const JfrSamplerWindow* expired) {
assert(expired != NULL, "invariant");
assert(expired != nullptr, "invariant");
assert(_lock, "invariant");
log(expired, &_sample_size_ewma);
if (_update) {

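For reference, the throttler logging above maintains an exponentially weighted moving average of the expired window's sample size. A minimal sketch of the standard EWMA update it builds on; the coefficient helper here is only an illustrative assumption, the real value comes from compute_ewma_alpha_coefficient(lookback_count):

#include <cstddef>

// Standard EWMA step: alpha in (0, 1] weights the newest observation.
inline double ewma(double current, double alpha, double previous) {
  return alpha * current + (1.0 - alpha) * previous;
}

// Illustrative coefficient choice only: weight roughly 1 / lookback_count.
inline double ewma_alpha(size_t lookback_count) {
  return lookback_count <= 1 ? 1.0 : 1.0 / static_cast<double>(lookback_count);
}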
View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -339,7 +339,7 @@ static void assert_post_condition(const JfrMemoryOptions* options) {
// MEMORY SIZING ALGORITHM
bool JfrMemorySizer::adjust_options(JfrMemoryOptions* options) {
assert(options != NULL, "invariant");
assert(options != nullptr, "invariant");
enum MemoryOptions {
MEMORY_SIZE = 1,
@ -361,7 +361,7 @@ bool JfrMemorySizer::adjust_options(JfrMemoryOptions* options) {
//
// Unordered selection:
//
// C(4, 0) = {} = NULL set = 1
// C(4, 0) = {} = null set = 1
// C(4, 1) = { (M), (G), (C), (T) } = 4
// C(4, 2) = { (M, G), (M, C), (M, T), (G, C), (G, T), (C, T) } = 6
// C(4, 3) = { (M, G, C), (M, G, T), (M, C, T), (G, C, T) } = 4
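(Completing the enumeration with the standard binomial identity, not something from the patch: the remaining case is C(4, 4) = { (M, G, C, T) } = 1, so the four options combine in C(4, 0) + C(4, 1) + C(4, 2) + C(4, 3) + C(4, 4) = 1 + 4 + 6 + 4 + 1 = 2^4 = 16 distinct ways.)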

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -154,8 +154,8 @@ bool JfrOptionSet::allow_event_retransforms() {
}
// default options for the dcmd parser
const char* const default_repository = NULL;
const char* const default_dumppath = NULL;
const char* const default_repository = nullptr;
const char* const default_dumppath = nullptr;
const char* const default_global_buffer_size = "512k";
const char* const default_num_global_buffers = "20";
const char* const default_memory_size = "10m";
@ -281,7 +281,7 @@ static void register_parser_options() {
}
static bool parse_flight_recorder_options_internal(TRAPS) {
if (FlightRecorderOptions == NULL) {
if (FlightRecorderOptions == nullptr) {
return true;
}
const size_t length = strlen((const char*)FlightRecorderOptions);
@ -292,14 +292,14 @@ static bool parse_flight_recorder_options_internal(TRAPS) {
ObsoleteOption option = OBSOLETE_OPTIONS[index];
const char* p = strstr((const char*)FlightRecorderOptions, option.name);
const size_t option_length = strlen(option.name);
if (p != NULL && p[option_length] == '=') {
if (p != nullptr && p[option_length] == '=') {
log_error(arguments) ("-XX:FlightRecorderOptions=%s=... has been removed. %s", option.name, option.message);
return false;
}
}
ResourceMark rm(THREAD);
oop message = java_lang_Throwable::message(PENDING_EXCEPTION);
if (message != NULL) {
if (message != nullptr) {
const char* msg = java_lang_String::as_utf8_string(message);
log_error(arguments) ("%s", msg);
}
@ -336,7 +336,7 @@ bool JfrOptionSet::initialize(JavaThread* thread) {
}
bool JfrOptionSet::configure(TRAPS) {
if (FlightRecorderOptions == NULL) {
if (FlightRecorderOptions == nullptr) {
return true;
}
ResourceMark rm(THREAD);
@ -345,10 +345,10 @@ bool JfrOptionSet::configure(TRAPS) {
JfrConfigureFlightRecorderDCmd configure(&st, false);
configure._repository_path.set_is_set(_dcmd_repository.is_set());
char* repo = _dcmd_repository.value();
if (repo != NULL) {
if (repo != nullptr) {
const size_t len = strlen(repo);
char* repo_copy = JfrCHeapObj::new_array<char>(len + 1);
if (NULL == repo_copy) {
if (nullptr == repo_copy) {
return false;
}
strncpy(repo_copy, repo, len + 1);
@ -357,10 +357,10 @@ bool JfrOptionSet::configure(TRAPS) {
configure._dump_path.set_is_set(_dcmd_dumppath.is_set());
char* dumppath = _dcmd_dumppath.value();
if (dumppath != NULL) {
if (dumppath != nullptr) {
const size_t len = strlen(dumppath);
char* dumppath_copy = JfrCHeapObj::new_array<char>(len + 1);
if (NULL == dumppath_copy) {
if (nullptr == dumppath_copy) {
return false;
}
strncpy(dumppath_copy, dumppath, len + 1);
@ -743,9 +743,9 @@ bool JfrOptionSet::adjust_memory_options() {
}
bool JfrOptionSet::parse_flight_recorder_option(const JavaVMOption** option, char* delimiter) {
assert(option != NULL, "invariant");
assert(delimiter != NULL, "invariant");
assert((*option)->optionString != NULL, "invariant");
assert(option != nullptr, "invariant");
assert(delimiter != nullptr, "invariant");
assert((*option)->optionString != nullptr, "invariant");
assert(strncmp((*option)->optionString, "-XX:FlightRecorderOptions", 25) == 0, "invariant");
if (*delimiter == '\0') {
// -XX:FlightRecorderOptions without any delimiter and values
@ -757,14 +757,14 @@ bool JfrOptionSet::parse_flight_recorder_option(const JavaVMOption** option, cha
return false;
}
static GrowableArray<const char*>* start_flight_recording_options_array = NULL;
static GrowableArray<const char*>* start_flight_recording_options_array = nullptr;
bool JfrOptionSet::parse_start_flight_recording_option(const JavaVMOption** option, char* delimiter) {
assert(option != NULL, "invariant");
assert(delimiter != NULL, "invariant");
assert((*option)->optionString != NULL, "invariant");
assert(option != nullptr, "invariant");
assert(delimiter != nullptr, "invariant");
assert((*option)->optionString != nullptr, "invariant");
assert(strncmp((*option)->optionString, "-XX:StartFlightRecording", 24) == 0, "invariant");
const char* value = NULL;
const char* value = nullptr;
if (*delimiter == '\0') {
// -XX:StartFlightRecording without any delimiter and values
// Add dummy value "dumponexit=false" so -XX:StartFlightRecording can be used without explicit values.
@ -777,13 +777,13 @@ bool JfrOptionSet::parse_start_flight_recording_option(const JavaVMOption** opti
*delimiter = '=';
value = delimiter + 1;
}
assert(value != NULL, "invariant");
assert(value != nullptr, "invariant");
const size_t value_length = strlen(value);
if (start_flight_recording_options_array == NULL) {
if (start_flight_recording_options_array == nullptr) {
start_flight_recording_options_array = new (mtTracing) GrowableArray<const char*>(8, mtTracing);
}
assert(start_flight_recording_options_array != NULL, "invariant");
assert(start_flight_recording_options_array != nullptr, "invariant");
char* const startup_value = NEW_C_HEAP_ARRAY(char, value_length + 1, mtTracing);
strncpy(startup_value, value, value_length + 1);
assert(strncmp(startup_value, value, value_length) == 0, "invariant");
@ -796,12 +796,12 @@ const GrowableArray<const char*>* JfrOptionSet::start_flight_recording_options()
}
void JfrOptionSet::release_start_flight_recording_options() {
if (start_flight_recording_options_array != NULL) {
if (start_flight_recording_options_array != nullptr) {
const int length = start_flight_recording_options_array->length();
for (int i = 0; i < length; ++i) {
FREE_C_HEAP_ARRAY(char, start_flight_recording_options_array->at(i));
}
delete start_flight_recording_options_array;
start_flight_recording_options_array = NULL;
start_flight_recording_options_array = nullptr;
}
}
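For context, typical command lines handled by the parsing above (illustrative only; app.jar is a placeholder):

java -XX:StartFlightRecording -jar app.jar
    (no delimiter: the dummy value dumponexit=false is added so the option still parses)
java -XX:StartFlightRecording=duration=60s,filename=recording.jfr -jar app.jar
    (an '=' delimiter hands the comma-separated key/value list to the dcmd parser)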

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,22 +38,22 @@
(MSGBIT(MSG_FLUSHPOINT)) \
)
static JfrPostBox* _instance = NULL;
static JfrPostBox* _instance = nullptr;
JfrPostBox& JfrPostBox::instance() {
return *_instance;
}
JfrPostBox* JfrPostBox::create() {
assert(_instance == NULL, "invariant");
assert(_instance == nullptr, "invariant");
_instance = new JfrPostBox();
return _instance;
}
void JfrPostBox::destroy() {
assert(_instance != NULL, "invariant");
assert(_instance != nullptr, "invariant");
delete _instance;
_instance = NULL;
_instance = nullptr;
}
JfrPostBox::JfrPostBox() :

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -67,7 +67,7 @@ class JfrRotationLock : public StackObj {
static bool acquire(Thread* thread) {
if (Atomic::cmpxchg(&_lock, 0, 1) == 0) {
assert(_owner_thread == NULL, "invariant");
assert(_owner_thread == nullptr, "invariant");
_owner_thread = thread;
return true;
}
@ -86,7 +86,7 @@ class JfrRotationLock : public StackObj {
public:
JfrRotationLock() : _thread(Thread::current()), _recursive(false) {
assert(_thread != NULL, "invariant");
assert(_thread != nullptr, "invariant");
if (_thread == _owner_thread) {
// Recursive case is not supported.
_recursive = true;
@ -103,7 +103,7 @@ class JfrRotationLock : public StackObj {
if (_recursive) {
return;
}
_owner_thread = NULL;
_owner_thread = nullptr;
OrderAccess::storestore();
_lock = 0;
}
@ -117,7 +117,7 @@ class JfrRotationLock : public StackObj {
}
};
const Thread* JfrRotationLock::_owner_thread = NULL;
const Thread* JfrRotationLock::_owner_thread = nullptr;
const int JfrRotationLock::retry_wait_millis = 10;
volatile int JfrRotationLock::_lock = 0;
@ -589,13 +589,13 @@ void JfrRecorderService::post_safepoint_write() {
}
static JfrBuffer* thread_local_buffer(Thread* t) {
assert(t != NULL, "invariant");
assert(t != nullptr, "invariant");
return t->jfr_thread_local()->native_buffer();
}
static void reset_buffer(JfrBuffer* buffer, Thread* t) {
assert(buffer != NULL, "invariant");
assert(t != NULL, "invariant");
assert(buffer != nullptr, "invariant");
assert(t != nullptr, "invariant");
assert(buffer == thread_local_buffer(t), "invariant");
buffer->set_pos(const_cast<u1*>(buffer->top()));
}
@ -606,7 +606,7 @@ static void reset_thread_local_buffer(Thread* t) {
static void write_thread_local_buffer(JfrChunkWriter& chunkwriter, Thread* t) {
JfrBuffer * const buffer = thread_local_buffer(t);
assert(buffer != NULL, "invariant");
assert(buffer != nullptr, "invariant");
if (!buffer->empty()) {
chunkwriter.write_unbuffered(buffer->top(), buffer->pos() - buffer->top());
}
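The JfrRotationLock earlier in this file is a CAS-based spin lock that also records its owner so a recursive rotation attempt on the same thread can be detected. A reduced sketch of that pattern, using std::atomic in place of HotSpot's Atomic::cmpxchg and OrderAccess primitives (an approximation, not the patched code):

#include <atomic>
#include <thread>

static std::atomic<int> _lock{0};
static std::atomic<std::thread::id> _owner{};

static bool try_acquire() {
  int expected = 0;
  if (_lock.compare_exchange_strong(expected, 1, std::memory_order_acquire)) {
    _owner.store(std::this_thread::get_id(), std::memory_order_relaxed);
    return true;   // we now hold the lock
  }
  return false;    // contended; the real code retries after a short wait
}

static void release() {
  _owner.store(std::thread::id(), std::memory_order_relaxed);
  _lock.store(0, std::memory_order_release);
}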

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -39,23 +39,23 @@
static Thread* start_thread(instanceHandle thread_oop, ThreadFunction proc, TRAPS) {
assert(thread_oop.not_null(), "invariant");
assert(proc != NULL, "invariant");
assert(proc != nullptr, "invariant");
JavaThread* new_thread = new JavaThread(proc);
// At this point it may be possible that no
// osthread was created for the JavaThread due to lack of resources.
if (new_thread->osthread() == NULL) {
if (new_thread->osthread() == nullptr) {
delete new_thread;
JfrJavaSupport::throw_out_of_memory_error("Unable to create native recording thread for JFR", THREAD);
return NULL;
return nullptr;
} else {
JavaThread::start_internal_daemon(THREAD, new_thread, thread_oop, NormPriority);
return new_thread;
}
}
JfrPostBox* JfrRecorderThread::_post_box = NULL;
JfrPostBox* JfrRecorderThread::_post_box = nullptr;
JfrPostBox& JfrRecorderThread::post_box() {
return *_post_box;
@ -65,8 +65,8 @@ JfrPostBox& JfrRecorderThread::post_box() {
void recorderthread_entry(JavaThread*, JavaThread*);
bool JfrRecorderThread::start(JfrCheckpointManager* cp_manager, JfrPostBox* post_box, TRAPS) {
assert(cp_manager != NULL, "invariant");
assert(post_box != NULL, "invariant");
assert(cp_manager != nullptr, "invariant");
assert(post_box != nullptr, "invariant");
_post_box = post_box;
static const char klass[] = "jdk/jfr/internal/JVMUpcalls";

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,7 +40,7 @@
// The recorder thread executes service requests collected from the message system.
//
void recorderthread_entry(JavaThread* thread, JavaThread* unused) {
assert(thread != NULL, "invariant");
assert(thread != nullptr, "invariant");
#define START (msgs & (MSGBIT(MSG_START)))
#define SHUTDOWN (msgs & MSGBIT(MSG_SHUTDOWN))
#define ROTATE (msgs & (MSGBIT(MSG_ROTATE)|MSGBIT(MSG_STOP)))

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,8 +38,8 @@
#include "runtime/vframe.inline.hpp"
static void copy_frames(JfrStackFrame** lhs_frames, u4 length, const JfrStackFrame* rhs_frames) {
assert(lhs_frames != NULL, "invariant");
assert(rhs_frames != NULL, "invariant");
assert(lhs_frames != nullptr, "invariant");
assert(rhs_frames != nullptr, "invariant");
if (length > 0) {
*lhs_frames = NEW_C_HEAP_ARRAY(JfrStackFrame, length, mtTracing);
memcpy(*lhs_frames, rhs_frames, length * sizeof(JfrStackFrame));
@ -53,7 +53,7 @@ JfrStackFrame::JfrStackFrame(const traceid& id, int bci, int type, int lineno, c
_klass(ik), _methodid(id), _line(lineno), _bci(bci), _type(type) {}
JfrStackTrace::JfrStackTrace(JfrStackFrame* frames, u4 max_frames) :
_next(NULL),
_next(nullptr),
_frames(frames),
_id(0),
_hash(0),
@ -66,7 +66,7 @@ JfrStackTrace::JfrStackTrace(JfrStackFrame* frames, u4 max_frames) :
JfrStackTrace::JfrStackTrace(traceid id, const JfrStackTrace& trace, const JfrStackTrace* next) :
_next(next),
_frames(NULL),
_frames(nullptr),
_id(id),
_hash(trace._hash),
_nr_of_frames(trace._nr_of_frames),
@ -230,7 +230,7 @@ static inline bool is_full(const JfrBuffer* enqueue_buffer) {
}
bool JfrStackTrace::record_async(JavaThread* jt, const frame& frame) {
assert(jt != NULL, "invariant");
assert(jt != nullptr, "invariant");
assert(!_lineno, "invariant");
Thread* current_thread = Thread::current();
assert(jt != current_thread, "invariant");
@ -283,7 +283,7 @@ bool JfrStackTrace::record_async(JavaThread* jt, const frame& frame) {
}
bool JfrStackTrace::record(JavaThread* jt, const frame& frame, int skip) {
assert(jt != NULL, "invariant");
assert(jt != nullptr, "invariant");
assert(jt == Thread::current(), "invariant");
assert(!_lineno, "invariant");
// Must use ResetNoHandleMark here to bypass any NoHandleMark that may exist on the stack.
@ -333,7 +333,7 @@ bool JfrStackTrace::record(JavaThread* jt, const frame& frame, int skip) {
}
bool JfrStackTrace::record(JavaThread* current_thread, int skip) {
assert(current_thread != NULL, "invariant");
assert(current_thread != nullptr, "invariant");
assert(current_thread == Thread::current(), "invariant");
if (!current_thread->has_last_Java_frame()) {
return false;
@ -345,7 +345,7 @@ void JfrStackFrame::resolve_lineno() const {
assert(_klass, "no klass pointer");
assert(_line == 0, "already have linenumber");
const Method* const method = JfrMethodLookup::lookup(_klass, _methodid);
assert(method != NULL, "invariant");
assert(method != nullptr, "invariant");
assert(method->method_holder() == _klass, "invariant");
_line = method->line_number_from_bci(_bci);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,17 +37,17 @@
* which is a decision postponed and taken during rotation.
*/
static JfrStackTraceRepository* _instance = NULL;
static JfrStackTraceRepository* _leak_profiler_instance = NULL;
static JfrStackTraceRepository* _instance = nullptr;
static JfrStackTraceRepository* _leak_profiler_instance = nullptr;
static traceid _next_id = 0;
JfrStackTraceRepository& JfrStackTraceRepository::instance() {
assert(_instance != NULL, "invariant");
assert(_instance != nullptr, "invariant");
return *_instance;
}
static JfrStackTraceRepository& leak_profiler_instance() {
assert(_leak_profiler_instance != NULL, "invariant");
assert(_leak_profiler_instance != nullptr, "invariant");
return *_leak_profiler_instance;
}
@ -56,11 +56,11 @@ JfrStackTraceRepository::JfrStackTraceRepository() : _last_entries(0), _entries(
}
JfrStackTraceRepository* JfrStackTraceRepository::create() {
assert(_instance == NULL, "invariant");
assert(_leak_profiler_instance == NULL, "invariant");
assert(_instance == nullptr, "invariant");
assert(_leak_profiler_instance == nullptr, "invariant");
_leak_profiler_instance = new JfrStackTraceRepository();
if (_leak_profiler_instance == NULL) {
return NULL;
if (_leak_profiler_instance == nullptr) {
return nullptr;
}
_instance = new JfrStackTraceRepository();
return _instance;
@ -86,11 +86,11 @@ bool JfrStackTraceRepository::initialize() {
}
void JfrStackTraceRepository::destroy() {
assert(_instance != NULL, "invariant");
assert(_instance != nullptr, "invariant");
delete _instance;
_instance = NULL;
_instance = nullptr;
delete _leak_profiler_instance;
_leak_profiler_instance = NULL;
_leak_profiler_instance = nullptr;
}
bool JfrStackTraceRepository::is_modified() const {
@ -106,7 +106,7 @@ size_t JfrStackTraceRepository::write(JfrChunkWriter& sw, bool clear) {
int count = 0;
for (u4 i = 0; i < TABLE_SIZE; ++i) {
JfrStackTrace* stacktrace = _table[i];
while (stacktrace != NULL) {
while (stacktrace != nullptr) {
JfrStackTrace* next = const_cast<JfrStackTrace*>(stacktrace->next());
if (stacktrace->should_write()) {
stacktrace->write(sw);
@ -133,7 +133,7 @@ size_t JfrStackTraceRepository::clear(JfrStackTraceRepository& repo) {
}
for (u4 i = 0; i < TABLE_SIZE; ++i) {
JfrStackTrace* stacktrace = repo._table[i];
while (stacktrace != NULL) {
while (stacktrace != nullptr) {
JfrStackTrace* next = const_cast<JfrStackTrace*>(stacktrace->next());
delete stacktrace;
stacktrace = next;
@ -149,7 +149,7 @@ size_t JfrStackTraceRepository::clear(JfrStackTraceRepository& repo) {
traceid JfrStackTraceRepository::record(Thread* current_thread, int skip /* 0 */) {
assert(current_thread == Thread::current(), "invariant");
JfrThreadLocal* const tl = current_thread->jfr_thread_local();
assert(tl != NULL, "invariant");
assert(tl != nullptr, "invariant");
if (tl->has_cached_stack_trace()) {
return tl->cached_stack_trace_id();
}
@ -157,11 +157,11 @@ traceid JfrStackTraceRepository::record(Thread* current_thread, int skip /* 0 */
return 0;
}
JfrStackFrame* frames = tl->stackframes();
if (frames == NULL) {
if (frames == nullptr) {
// pending oom
return 0;
}
assert(frames != NULL, "invariant");
assert(frames != nullptr, "invariant");
assert(tl->stackframes() == frames, "invariant");
return instance().record(JavaThread::cast(current_thread), skip, frames, tl->stackdepth());
}
@ -185,10 +185,10 @@ traceid JfrStackTraceRepository::add(const JfrStackTrace& stacktrace) {
}
void JfrStackTraceRepository::record_for_leak_profiler(JavaThread* current_thread, int skip /* 0 */) {
assert(current_thread != NULL, "invariant");
assert(current_thread != nullptr, "invariant");
assert(current_thread == Thread::current(), "invariant");
JfrThreadLocal* const tl = current_thread->jfr_thread_local();
assert(tl != NULL, "invariant");
assert(tl != nullptr, "invariant");
assert(!tl->has_cached_stack_trace(), "invariant");
JfrStackTrace stacktrace(tl->stackframes(), tl->stackdepth());
stacktrace.record(current_thread, skip);
@ -204,7 +204,7 @@ traceid JfrStackTraceRepository::add_trace(const JfrStackTrace& stacktrace) {
const size_t index = stacktrace._hash % TABLE_SIZE;
const JfrStackTrace* table_entry = _table[index];
while (table_entry != NULL) {
while (table_entry != nullptr) {
if (table_entry->equals(stacktrace)) {
return table_entry->id();
}
@ -225,10 +225,10 @@ traceid JfrStackTraceRepository::add_trace(const JfrStackTrace& stacktrace) {
const JfrStackTrace* JfrStackTraceRepository::lookup_for_leak_profiler(unsigned int hash, traceid id) {
const size_t index = (hash % TABLE_SIZE);
const JfrStackTrace* trace = leak_profiler_instance()._table[index];
while (trace != NULL && trace->id() != id) {
while (trace != nullptr && trace->id() != id) {
trace = trace->next();
}
assert(trace != NULL, "invariant");
assert(trace != nullptr, "invariant");
assert(trace->hash() == hash, "invariant");
assert(trace->id() == id, "invariant");
return trace;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,12 +26,12 @@
#include "jfr/recorder/storage/jfrBuffer.hpp"
#include "runtime/javaThread.hpp"
static const u1* const TOP_CRITICAL_SECTION = NULL;
static const u1* const TOP_CRITICAL_SECTION = nullptr;
JfrBuffer::JfrBuffer() : _next(NULL),
_identity(NULL),
_pos(NULL),
_top(NULL),
JfrBuffer::JfrBuffer() : _next(nullptr),
_identity(nullptr),
_pos(nullptr),
_top(nullptr),
_size(0),
_header_size(0),
_flags(0),
@ -39,8 +39,8 @@ JfrBuffer::JfrBuffer() : _next(NULL),
LP64_ONLY(COMMA _pad(0)) {}
void JfrBuffer::initialize(size_t header_size, size_t size) {
assert(_next == NULL, "invariant");
assert(_identity == NULL, "invariant");
assert(_next == nullptr, "invariant");
assert(_identity == nullptr, "invariant");
assert(header_size <= max_jushort, "invariant");
_header_size = static_cast<u2>(header_size);
_size = size;
@ -102,34 +102,34 @@ bool JfrBuffer::acquired_by_self() const {
}
void JfrBuffer::acquire(const void* id) {
assert(id != NULL, "invariant");
assert(id != nullptr, "invariant");
const void* current_id;
do {
current_id = identity();
} while (current_id != NULL || Atomic::cmpxchg(&_identity, current_id, id) != current_id);
} while (current_id != nullptr || Atomic::cmpxchg(&_identity, current_id, id) != current_id);
}
bool JfrBuffer::try_acquire(const void* id) {
assert(id != NULL, "invariant");
assert(id != nullptr, "invariant");
const void* const current_id = identity();
return current_id == NULL && Atomic::cmpxchg(&_identity, current_id, id) == current_id;
return current_id == nullptr && Atomic::cmpxchg(&_identity, current_id, id) == current_id;
}
void JfrBuffer::set_identity(const void* id) {
assert(id != NULL, "invariant");
assert(_identity == NULL, "invariant");
assert(id != nullptr, "invariant");
assert(_identity == nullptr, "invariant");
OrderAccess::storestore();
_identity = id;
}
void JfrBuffer::release() {
assert(identity() != NULL, "invariant");
Atomic::release_store(&_identity, (const void*)NULL);
assert(identity() != nullptr, "invariant");
Atomic::release_store(&_identity, (const void*)nullptr);
}
#ifdef ASSERT
static bool validate_to(const JfrBuffer* const to, size_t size) {
assert(to != NULL, "invariant");
assert(to != nullptr, "invariant");
assert(to->acquired_by_self(), "invariant");
assert(to->free_size() >= size, "invariant");
return true;
@ -178,18 +178,18 @@ enum FLAG {
};
inline u1 load(const volatile u1* dest) {
assert(dest != NULL, "invariant");
assert(dest != nullptr, "invariant");
return Atomic::load_acquire(dest);
}
inline void set(u1* dest, u1 data) {
assert(dest != NULL, "invariant");
assert(dest != nullptr, "invariant");
OrderAccess::storestore();
*dest |= data;
}
inline void clear(u1* dest, u1 data) {
assert(dest != NULL, "invariant");
assert(dest != nullptr, "invariant");
OrderAccess::storestore();
*dest ^= data;
}
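
Not part of the patch: a minimal standalone sketch of the ownership protocol the jfrBuffer.cpp hunks above implement, where the _identity field uses nullptr as the "unowned" sentinel. std::atomic stands in for HotSpot's Atomic and OrderAccess primitives, so the memory-ordering choices below are only an approximation of the original.

#include <atomic>
#include <cassert>

class ToyBuffer {
  std::atomic<const void*> _identity{nullptr};
 public:
  void acquire(const void* id) {
    assert(id != nullptr);
    const void* expected = nullptr;
    // spin until the buffer is unowned (nullptr), then claim it
    while (!_identity.compare_exchange_weak(expected, id,
                                            std::memory_order_acquire,
                                            std::memory_order_relaxed)) {
      expected = nullptr;
    }
  }
  bool try_acquire(const void* id) {
    assert(id != nullptr);
    const void* expected = nullptr;
    return _identity.compare_exchange_strong(expected, id,
                                             std::memory_order_acquire);
  }
  void release() {
    assert(_identity.load() != nullptr);
    _identity.store(nullptr, std::memory_order_release);  // publish
  }
};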

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,7 +34,7 @@
#include "logging/log.hpp"
template <typename NodeType, template <typename> class RetrievalPolicy, bool EagerReclaim>
JfrEpochStorageHost<NodeType, RetrievalPolicy, EagerReclaim>::JfrEpochStorageHost() : _mspace(NULL) {}
JfrEpochStorageHost<NodeType, RetrievalPolicy, EagerReclaim>::JfrEpochStorageHost() : _mspace(nullptr) {}
template <typename NodeType, template <typename> class RetrievalPolicy, bool EagerReclaim>
JfrEpochStorageHost<NodeType, RetrievalPolicy, EagerReclaim>::~JfrEpochStorageHost() {
@ -43,17 +43,17 @@ JfrEpochStorageHost<NodeType, RetrievalPolicy, EagerReclaim>::~JfrEpochStorageHo
template <typename NodeType, template <typename> class RetrievalPolicy, bool EagerReclaim>
bool JfrEpochStorageHost<NodeType, RetrievalPolicy, EagerReclaim>::initialize(size_t min_elem_size, size_t free_list_cache_count_limit, size_t cache_prealloc_count) {
assert(_mspace == NULL, "invariant");
assert(_mspace == nullptr, "invariant");
_mspace = new EpochMspace(min_elem_size, free_list_cache_count_limit, this);
return _mspace != NULL && _mspace->initialize(cache_prealloc_count);
return _mspace != nullptr && _mspace->initialize(cache_prealloc_count);
}
template <typename NodeType, template <typename> class RetrievalPolicy, bool EagerReclaim>
inline NodeType* JfrEpochStorageHost<NodeType, RetrievalPolicy, EagerReclaim>::acquire(size_t size, Thread* thread) {
BufferPtr buffer = mspace_acquire_to_live_list(size, _mspace, thread);
if (buffer == NULL) {
if (buffer == nullptr) {
log_warning(jfr)("Unable to allocate " SIZE_FORMAT " bytes of %s.", _mspace->min_element_size(), "epoch storage");
return NULL;
return nullptr;
}
assert(buffer->acquired_by_self(), "invariant");
return buffer;
@ -61,7 +61,7 @@ inline NodeType* JfrEpochStorageHost<NodeType, RetrievalPolicy, EagerReclaim>::a
template <typename NodeType, template <typename> class RetrievalPolicy, bool EagerReclaim>
void JfrEpochStorageHost<NodeType, RetrievalPolicy, EagerReclaim>::release(NodeType* buffer) {
assert(buffer != NULL, "invariant");
assert(buffer != nullptr, "invariant");
buffer->set_retired();
}
@ -101,7 +101,7 @@ class EmptyVerifier {
typedef typename Mspace::NodePtr NodePtr;
EmptyVerifier(Mspace* mspace) : _mspace(mspace) {}
bool process(NodePtr node) {
assert(node != NULL, "invariant");
assert(node != nullptr, "invariant");
assert(node->empty(), "invariant");
return true;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,7 +32,7 @@
template <typename ValueType, template <typename> class NodeType, typename AllocPolicy>
JfrFullStorage<ValueType, NodeType, AllocPolicy>
::JfrFullStorage(JfrStorageControl& control) : _control(control), _free_node_list(NULL), _queue(NULL) {}
::JfrFullStorage(JfrStorageControl& control) : _control(control), _free_node_list(nullptr), _queue(nullptr) {}
template <typename ValueType, template <typename> class NodeType, typename AllocPolicy>
JfrFullStorage<ValueType, NodeType, AllocPolicy>::~JfrFullStorage() {
@ -52,21 +52,21 @@ JfrFullStorage<ValueType, NodeType, AllocPolicy>::~JfrFullStorage() {
template <typename ValueType, template <typename> class NodeType, typename AllocPolicy>
bool JfrFullStorage<ValueType, NodeType, AllocPolicy>::initialize(size_t free_list_prealloc_count) {
assert(_free_node_list == NULL, "invariant");
assert(_free_node_list == nullptr, "invariant");
_free_node_list = new JfrConcurrentQueue<Node>();
if (_free_node_list == NULL || !_free_node_list->initialize()) {
if (_free_node_list == nullptr || !_free_node_list->initialize()) {
return false;
}
for (size_t i = 0; i < free_list_prealloc_count; ++i) {
NodePtr node = new Node();
if (node == NULL) {
if (node == nullptr) {
return false;
}
_free_node_list->add(node);
}
assert(_queue == NULL, "invariant");
assert(_queue == nullptr, "invariant");
_queue = new JfrConcurrentQueue<Node>();
return _queue != NULL && _queue->initialize();
return _queue != nullptr && _queue->initialize();
}
template <typename ValueType, template <typename> class NodeType, typename AllocPolicy>
@ -83,21 +83,21 @@ template <typename ValueType, template <typename> class NodeType, typename Alloc
inline typename JfrFullStorage<ValueType, NodeType, AllocPolicy>::NodePtr
JfrFullStorage<ValueType, NodeType, AllocPolicy>::acquire() {
NodePtr node = _free_node_list->remove();
return node != NULL ? node : new Node();
return node != nullptr ? node : new Node();
}
template <typename ValueType, template <typename> class NodeType, typename AllocPolicy>
inline void JfrFullStorage<ValueType, NodeType, AllocPolicy>
::release(typename JfrFullStorage<ValueType, NodeType, AllocPolicy>::NodePtr node) {
assert(node != NULL, "invariant");
assert(node != nullptr, "invariant");
_free_node_list->add(node);
}
template <typename ValueType, template <typename> class NodeType, typename AllocPolicy>
inline bool JfrFullStorage<ValueType, NodeType, AllocPolicy>::add(ValueType value) {
assert(value != NULL, "invariant");
assert(value != nullptr, "invariant");
NodePtr node = acquire();
assert(node != NULL, "invariant");
assert(node != nullptr, "invariant");
node->set_value(value);
const bool notify = _control.increment_full();
_queue->add(node);
@ -106,9 +106,9 @@ inline bool JfrFullStorage<ValueType, NodeType, AllocPolicy>::add(ValueType valu
template <typename ValueType, template <typename> class NodeType, typename AllocPolicy>
inline ValueType JfrFullStorage<ValueType, NodeType, AllocPolicy>::remove() {
Value value = NULL;
Value value = nullptr;
NodePtr node = _queue->remove();
if (node != NULL) {
if (node != nullptr) {
_control.decrement_full();
value = node->value();
release(node);
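
Not part of the patch: the jfrFullStorage hunks above recycle queue nodes through a free list. acquire() takes a cached node when one is available and heap-allocates otherwise, and release() pushes the node back instead of deleting it. A sketch of that idiom with a plain std::vector standing in for JfrConcurrentQueue; all names here are invented.

#include <vector>

struct ToyNode {
  const void* _value = nullptr;
  void set_value(const void* v) { _value = v; }
};

class ToyNodeCache {
  std::vector<ToyNode*> _free_nodes;      // stand-in for the free-node queue
 public:
  ToyNode* acquire() {
    if (_free_nodes.empty()) {
      return new ToyNode();               // cache miss: heap-allocate
    }
    ToyNode* const node = _free_nodes.back();
    _free_nodes.pop_back();
    return node;
  }
  void release(ToyNode* node) {
    node->set_value(nullptr);
    _free_nodes.push_back(node);          // recycle instead of deleting
  }
};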

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -63,7 +63,7 @@ bool JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_a
// pre-allocate elements to be cached in the requested list
for (size_t i = 0; i < cache_prealloc_count; ++i) {
NodePtr const node = allocate(_min_element_size);
if (node == NULL) {
if (node == nullptr) {
return false;
}
if (prealloc_to_free_list) {
@ -218,25 +218,25 @@ template <typename Client, template <typename> class RetrievalPolicy, typename F
inline typename FreeListType::NodePtr JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::allocate(size_t size) {
const size_t aligned_size_bytes = align_allocation_size(size, _min_element_size);
if (aligned_size_bytes == 0) {
return NULL;
return nullptr;
}
void* const allocation = JfrCHeapObj::new_array<u1>(aligned_size_bytes + sizeof(Node));
if (allocation == NULL) {
return NULL;
if (allocation == nullptr) {
return nullptr;
}
NodePtr node = new (allocation) Node();
assert(node != NULL, "invariant");
assert(node != nullptr, "invariant");
node->initialize(sizeof(Node), aligned_size_bytes);
return node;
}
template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
inline void JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::deallocate(typename FreeListType::NodePtr node) {
assert(node != NULL, "invariant");
assert(node != nullptr, "invariant");
assert(!in_free_list(node), "invariant");
assert(!_live_list_epoch_0.in_list(node), "invariant");
assert(!_live_list_epoch_1.in_list(node), "invariant");
assert(node != NULL, "invariant");
assert(node != nullptr, "invariant");
JfrCHeapObj::free(node, node->total_size());
}
@ -247,14 +247,14 @@ inline typename FreeListType::NodePtr JfrMemorySpace<Client, RetrievalPolicy, Fr
template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
inline void JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::release(typename FreeListType::NodePtr node) {
assert(node != NULL, "invariant");
assert(node != nullptr, "invariant");
if (node->transient()) {
deallocate(node);
return;
}
assert(node->empty(), "invariant");
assert(!node->retired(), "invariant");
assert(node->identity() == NULL, "invariant");
assert(node->identity() == nullptr, "invariant");
if (should_populate_free_list_cache()) {
add_to_free_list(node);
} else {
@ -264,7 +264,7 @@ inline void JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType,
template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
inline void JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::add_to_free_list(typename FreeListType::NodePtr node) {
assert(node != NULL, "invariant");
assert(node != nullptr, "invariant");
_free_list.add(node);
if (is_free_list_cache_limited()) {
Atomic::inc(&_free_list_cache_count);
@ -273,7 +273,7 @@ inline void JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType,
template <typename Client, template <typename> class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware>
inline void JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType, epoch_aware>::add_to_live_list(typename FreeListType::NodePtr node, bool previous_epoch) {
assert(node != NULL, "invariant");
assert(node != nullptr, "invariant");
live_list(previous_epoch).add(node);
}
@ -308,7 +308,7 @@ inline void JfrMemorySpace<Client, RetrievalPolicy, FreeListType, FullListType,
template <typename Mspace, typename Client>
static inline Mspace* create_mspace(size_t min_element_size, size_t free_list_cache_count_limit, size_t cache_prealloc_count, bool prealloc_to_free_list, Client* cb) {
Mspace* const mspace = new Mspace(min_element_size, free_list_cache_count_limit, cb);
if (mspace != NULL) {
if (mspace != nullptr) {
mspace->initialize(cache_prealloc_count, prealloc_to_free_list);
}
return mspace;
@ -322,7 +322,7 @@ inline typename Mspace::NodePtr mspace_allocate(size_t size, Mspace* mspace) {
template <typename Mspace>
inline typename Mspace::NodePtr mspace_allocate_acquired(size_t size, Mspace* mspace, Thread* thread) {
typename Mspace::NodePtr node = mspace_allocate(size, mspace);
if (node == NULL) return NULL;
if (node == nullptr) return nullptr;
node->set_identity(thread);
return node;
}
@ -330,7 +330,7 @@ inline typename Mspace::NodePtr mspace_allocate_acquired(size_t size, Mspace* ms
template <typename Mspace>
inline typename Mspace::NodePtr mspace_allocate_transient(size_t size, Mspace* mspace, Thread* thread) {
typename Mspace::NodePtr node = mspace_allocate_acquired(size, mspace, thread);
if (node == NULL) return NULL;
if (node == nullptr) return nullptr;
assert(node->acquired_by_self(), "invariant");
node->set_transient();
return node;
@ -339,7 +339,7 @@ inline typename Mspace::NodePtr mspace_allocate_transient(size_t size, Mspace* m
template <typename Mspace>
inline typename Mspace::NodePtr mspace_allocate_transient_lease(size_t size, Mspace* mspace, Thread* thread) {
typename Mspace::NodePtr node = mspace_allocate_transient(size, mspace, thread);
if (node == NULL) return NULL;
if (node == nullptr) return nullptr;
assert(node->transient(), "invariant");
node->set_lease();
return node;
@ -348,7 +348,7 @@ inline typename Mspace::NodePtr mspace_allocate_transient_lease(size_t size, Msp
template <typename Mspace>
inline typename Mspace::NodePtr mspace_allocate_transient_lease_to_free(size_t size, Mspace* mspace, Thread* thread) {
typename Mspace::NodePtr node = mspace_allocate_transient_lease(size, mspace, thread);
if (node == NULL) return NULL;
if (node == nullptr) return nullptr;
assert(node->lease(), "invariant");
mspace->add_to_free_list(node);
return node;
@ -364,17 +364,17 @@ inline typename Mspace::NodePtr mspace_acquire_free_with_retry(size_t size, Mspa
assert(size <= mspace->min_element_size(), "invariant");
for (size_t i = 0; i < retry_count; ++i) {
typename Mspace::NodePtr node = mspace_acquire_free(size, mspace, thread);
if (node != NULL) {
if (node != nullptr) {
return node;
}
}
return NULL;
return nullptr;
}
template <typename Mspace>
inline typename Mspace::NodePtr mspace_allocate_to_live_list(size_t size, Mspace* mspace, Thread* thread) {
typename Mspace::NodePtr node = mspace_allocate_acquired(size, mspace, thread);
if (node == NULL) return NULL;
if (node == nullptr) return nullptr;
assert(node->acquired_by_self(), "invariant");
mspace->add_to_live_list(node);
return node;
@ -383,7 +383,7 @@ inline typename Mspace::NodePtr mspace_allocate_to_live_list(size_t size, Mspace
template <typename Mspace>
inline typename Mspace::NodePtr mspace_allocate_transient_to_live_list(size_t size, Mspace* mspace, Thread* thread, bool previous_epoch = false) {
typename Mspace::NodePtr node = mspace_allocate_transient(size, mspace, thread);
if (node == NULL) return NULL;
if (node == nullptr) return nullptr;
assert(node->transient(), "invariant");
mspace->add_to_live_list(node, previous_epoch);
return node;
@ -392,7 +392,7 @@ inline typename Mspace::NodePtr mspace_allocate_transient_to_live_list(size_t si
template <typename Mspace>
inline typename Mspace::NodePtr mspace_allocate_transient_lease_to_live_list(size_t size, Mspace* mspace, Thread* thread, bool previous_epoch = false) {
typename Mspace::NodePtr node = mspace_allocate_transient_lease(size, mspace, thread);
if (node == NULL) return NULL;
if (node == nullptr) return nullptr;
assert(node->lease(), "invariant");
mspace->add_to_live_list(node, previous_epoch);
return node;
@ -402,8 +402,8 @@ template <typename Mspace>
inline typename Mspace::NodePtr mspace_acquire_free_to_live_list(size_t size, Mspace* mspace, Thread* thread, bool previous_epoch = false) {
assert(size <= mspace->min_element_size(), "invariant");
typename Mspace::NodePtr node = mspace_acquire_free(size, mspace, thread);
if (node == NULL) {
return NULL;
if (node == nullptr) {
return nullptr;
}
assert(node->acquired_by_self(), "invariant");
mspace->add_to_live_list(node, previous_epoch);
@ -414,7 +414,7 @@ template <typename Mspace>
inline typename Mspace::NodePtr mspace_acquire_to_live_list(size_t size, Mspace* mspace, Thread* thread, bool previous_epoch = false) {
if (size <= mspace->min_element_size()) {
typename Mspace::NodePtr node = mspace_acquire_free_to_live_list(size, mspace, thread, previous_epoch);
if (node != NULL) {
if (node != nullptr) {
return node;
}
}
@ -431,17 +431,17 @@ inline typename Mspace::NodePtr mspace_acquire_live_with_retry(size_t size, Mspa
assert(size <= mspace->min_element_size(), "invariant");
for (size_t i = 0; i < retry_count; ++i) {
typename Mspace::NodePtr const node = mspace_acquire_live(size, mspace, thread, previous_epoch);
if (node != NULL) {
if (node != nullptr) {
return node;
}
}
return NULL;
return nullptr;
}
template <typename Mspace>
inline typename Mspace::NodePtr mspace_acquire_lease_with_retry(size_t size, Mspace* mspace, size_t retry_count, Thread* thread, bool previous_epoch = false) {
typename Mspace::NodePtr node = mspace_acquire_live_with_retry(size, mspace, retry_count, thread, previous_epoch);
if (node != NULL) {
if (node != nullptr) {
node->set_lease();
}
return node;
@ -449,21 +449,21 @@ inline typename Mspace::NodePtr mspace_acquire_lease_with_retry(size_t size, Msp
template <typename Mspace>
inline void mspace_release(typename Mspace::NodePtr node, Mspace* mspace) {
assert(node != NULL, "invariant");
assert(node != nullptr, "invariant");
assert(node->unflushed_size() == 0, "invariant");
assert(mspace != NULL, "invariant");
assert(mspace != nullptr, "invariant");
mspace->release(node);
}
template <typename Callback, typename Mspace>
inline void process_live_list(Callback& callback, Mspace* mspace, bool previous_epoch = false) {
assert(mspace != NULL, "invariant");
assert(mspace != nullptr, "invariant");
mspace->iterate_live_list(callback, previous_epoch);
}
template <typename Callback, typename Mspace>
inline void process_free_list(Callback& callback, Mspace* mspace) {
assert(mspace != NULL, "invariant");
assert(mspace != nullptr, "invariant");
assert(mspace->free_list_is_nonempty(), "invariant");
mspace->iterate_free_list(callback);
}
@ -482,7 +482,7 @@ class ReleaseOp : public StackObj {
template <typename Mspace>
inline bool ReleaseOp<Mspace>::process(typename Mspace::NodePtr node) {
assert(node != NULL, "invariant");
assert(node != nullptr, "invariant");
// assumes some means of exclusive access to the node
if (node->transient()) {
// make sure the transient node is already detached
@ -490,7 +490,7 @@ inline bool ReleaseOp<Mspace>::process(typename Mspace::NodePtr node) {
return true;
}
node->reinitialize();
if (node->identity() != NULL) {
if (node->identity() != nullptr) {
assert(node->empty(), "invariant");
assert(!node->retired(), "invariant");
node->release(); // publish
@ -507,7 +507,7 @@ class ReleaseWithExcisionOp : public ReleaseOp<Mspace> {
size_t _amount;
public:
ReleaseWithExcisionOp(Mspace* mspace, List& list) :
ReleaseOp<Mspace>(mspace), _list(list), _prev(NULL), _count(0), _amount(0) {}
ReleaseOp<Mspace>(mspace), _list(list), _prev(nullptr), _count(0), _amount(0) {}
bool process(typename List::NodePtr node);
size_t processed() const { return _count; }
size_t amount() const { return _amount; }
@ -515,7 +515,7 @@ class ReleaseWithExcisionOp : public ReleaseOp<Mspace> {
template <typename Mspace, typename List>
inline bool ReleaseWithExcisionOp<Mspace, List>::process(typename List::NodePtr node) {
assert(node != NULL, "invariant");
assert(node != nullptr, "invariant");
if (node->transient()) {
_prev = _list.excise(_prev, node);
} else {
@ -536,7 +536,7 @@ class ScavengingReleaseOp : public StackObj {
public:
typedef typename List::Node Node;
ScavengingReleaseOp(Mspace* mspace, List& list) :
_mspace(mspace), _list(list), _prev(NULL), _count(0), _amount(0) {}
_mspace(mspace), _list(list), _prev(nullptr), _count(0), _amount(0) {}
bool process(typename List::NodePtr node);
size_t processed() const { return _count; }
size_t amount() const { return _amount; }
@ -544,7 +544,7 @@ class ScavengingReleaseOp : public StackObj {
template <typename Mspace, typename List>
inline bool ScavengingReleaseOp<Mspace, List>::process(typename List::NodePtr node) {
assert(node != NULL, "invariant");
assert(node != nullptr, "invariant");
assert(!node->transient(), "invariant");
if (node->retired()) {
return excise_with_release(node);
@ -555,14 +555,14 @@ inline bool ScavengingReleaseOp<Mspace, List>::process(typename List::NodePtr no
template <typename Mspace, typename List>
inline bool ScavengingReleaseOp<Mspace, List>::excise_with_release(typename List::NodePtr node) {
assert(node != NULL, "invariant");
assert(node != nullptr, "invariant");
assert(node->retired(), "invariant");
_prev = _list.excise(_prev, node);
if (node->transient()) {
_mspace->deallocate(node);
return true;
}
assert(node->identity() != NULL, "invariant");
assert(node->identity() != nullptr, "invariant");
assert(node->empty(), "invariant");
assert(!node->lease(), "invariant");
++_count;
@ -583,13 +583,13 @@ private:
public:
typedef typename Mspace::Node Node;
ReleaseRetiredOp(Functor& functor, Mspace* mspace, FromList& list) :
_functor(functor), _mspace(mspace), _list(list), _prev(NULL) {}
_functor(functor), _mspace(mspace), _list(list), _prev(nullptr) {}
bool process(Node* node);
};
template <typename Functor, typename Mspace, typename FromList>
inline bool ReleaseRetiredOp<Functor, Mspace, FromList>::process(typename Mspace::Node* node) {
assert(node != NULL, "invariant");
assert(node != nullptr, "invariant");
const bool is_retired = node->retired();
const bool result = _functor.process(node);
if (is_retired) {
@ -615,13 +615,13 @@ private:
public:
typedef typename Mspace::Node Node;
ReinitializeAllReleaseRetiredOp(Mspace* mspace, FromList& list) :
_mspace(mspace), _list(list), _prev(NULL) {}
_mspace(mspace), _list(list), _prev(nullptr) {}
bool process(Node* node);
};
template <typename Mspace, typename FromList>
inline bool ReinitializeAllReleaseRetiredOp<Mspace, FromList>::process(typename Mspace::Node* node) {
assert(node != NULL, "invariant");
assert(node != nullptr, "invariant");
// assumes some means of exclusive access to node
const bool retired = node->retired();
node->reinitialize();
@ -640,8 +640,8 @@ inline bool ReinitializeAllReleaseRetiredOp<Mspace, FromList>::process(typename
#ifdef ASSERT
template <typename Node>
inline void assert_migration_state(const Node* old, const Node* new_node, size_t used, size_t requested) {
assert(old != NULL, "invariant");
assert(new_node != NULL, "invariant");
assert(old != nullptr, "invariant");
assert(new_node != nullptr, "invariant");
assert(old->pos() >= old->start(), "invariant");
assert(old->pos() + used <= old->end(), "invariant");
assert(new_node->free_size() >= (used + requested), "invariant");
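
Not part of the patch: allocate() and deallocate() in the hunks above carve a node header and its payload out of one raw allocation, constructing the header with placement new at the front of the block. A sketch of that layout using std::malloc in place of JfrCHeapObj; the names are invented and alignment handling is omitted.

#include <cstddef>
#include <cstdlib>
#include <new>

struct ToyNode {
  size_t _header_size = 0;
  size_t _size = 0;
  void initialize(size_t header_size, size_t size) {
    _header_size = header_size;
    _size = size;
  }
  size_t total_size() const { return _header_size + _size; }
};

// One raw allocation holds the node header followed by its payload.
static ToyNode* allocate_node(size_t payload_bytes) {
  void* const allocation = std::malloc(payload_bytes + sizeof(ToyNode));
  if (allocation == nullptr) {
    return nullptr;                                   // propagate failure as nullptr
  }
  ToyNode* const node = new (allocation) ToyNode();   // header at the front
  node->initialize(sizeof(ToyNode), payload_bytes);
  return node;
}

static void deallocate_node(ToyNode* node) {
  if (node != nullptr) {
    node->~ToyNode();                                 // trivial here, shown for symmetry
    std::free(node);
  }
}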

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -44,7 +44,7 @@ class JfrMspaceRetrieval {
private:
template <typename Iterator>
static Node* acquire(Mspace* mspace, Iterator& iterator, Thread* thread, size_t size) {
assert(mspace != NULL, "invariant");
assert(mspace != nullptr, "invariant");
while (iterator.has_next()) {
Node* const node = iterator.next();
if (node->retired()) continue;
@ -57,7 +57,7 @@ class JfrMspaceRetrieval {
mspace->register_full(node, thread);
}
}
return NULL;
return nullptr;
}
};
@ -70,7 +70,7 @@ class JfrMspaceRemoveRetrieval : AllStatic {
if (free_list) {
StopOnNullConditionRemoval<typename Mspace::FreeList> iterator(mspace->free_list());
Node* const node = acquire(iterator, thread, size);
if (node != NULL) {
if (node != nullptr) {
mspace->decrement_free_list_count();
}
return node;
@ -83,14 +83,14 @@ class JfrMspaceRemoveRetrieval : AllStatic {
static Node* acquire(Iterator& iterator, Thread* thread, size_t size) {
while (iterator.has_next()) {
Node* const node = iterator.next();
if (node == NULL) return NULL;
if (node == nullptr) return nullptr;
assert(node->free_size() >= size, "invariant");
assert(!node->retired(), "invariant");
assert(node->identity() == NULL, "invariant");
assert(node->identity() == nullptr, "invariant");
node->set_identity(thread);
return node;
}
return NULL;
return nullptr;
}
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -46,7 +46,7 @@
typedef JfrStorage::BufferPtr BufferPtr;
static JfrStorage* _instance = NULL;
static JfrStorage* _instance = nullptr;
static JfrStorageControl* _control;
JfrStorage& JfrStorage::instance() {
@ -54,39 +54,39 @@ JfrStorage& JfrStorage::instance() {
}
JfrStorage* JfrStorage::create(JfrChunkWriter& chunkwriter, JfrPostBox& post_box) {
assert(_instance == NULL, "invariant");
assert(_instance == nullptr, "invariant");
_instance = new JfrStorage(chunkwriter, post_box);
return _instance;
}
void JfrStorage::destroy() {
if (_instance != NULL) {
if (_instance != nullptr) {
delete _instance;
_instance = NULL;
_instance = nullptr;
}
}
JfrStorage::JfrStorage(JfrChunkWriter& chunkwriter, JfrPostBox& post_box) :
_control(NULL),
_global_mspace(NULL),
_thread_local_mspace(NULL),
_control(nullptr),
_global_mspace(nullptr),
_thread_local_mspace(nullptr),
_chunkwriter(chunkwriter),
_post_box(post_box) {}
JfrStorage::~JfrStorage() {
if (_control != NULL) {
if (_control != nullptr) {
delete _control;
}
if (_global_mspace != NULL) {
if (_global_mspace != nullptr) {
delete _global_mspace;
}
if (_thread_local_mspace != NULL) {
if (_thread_local_mspace != nullptr) {
delete _thread_local_mspace;
}
if (_full_list != NULL) {
if (_full_list != nullptr) {
delete _full_list;
}
_instance = NULL;
_instance = nullptr;
}
static const size_t thread_local_cache_count = 8;
@ -94,9 +94,9 @@ static const size_t thread_local_cache_count = 8;
static const size_t in_memory_discard_threshold_delta = 2;
bool JfrStorage::initialize() {
assert(_control == NULL, "invariant");
assert(_global_mspace == NULL, "invariant");
assert(_thread_local_mspace == NULL, "invariant");
assert(_control == nullptr, "invariant");
assert(_global_mspace == nullptr, "invariant");
assert(_thread_local_mspace == nullptr, "invariant");
const size_t num_global_buffers = (size_t)JfrOptionSet::num_global_buffers();
assert(num_global_buffers >= in_memory_discard_threshold_delta, "invariant");
@ -104,7 +104,7 @@ bool JfrStorage::initialize() {
const size_t thread_buffer_size = (size_t)JfrOptionSet::thread_buffer_size();
_control = new JfrStorageControl(num_global_buffers, num_global_buffers - in_memory_discard_threshold_delta);
if (_control == NULL) {
if (_control == nullptr) {
return false;
}
_global_mspace = create_mspace<JfrStorageMspace>(global_buffer_size,
@ -112,7 +112,7 @@ bool JfrStorage::initialize() {
num_global_buffers, // cache_preallocate count
false, // preallocate_to_free_list (== preallocate directly to live list)
this);
if (_global_mspace == NULL) {
if (_global_mspace == nullptr) {
return false;
}
assert(_global_mspace->live_list_is_nonempty(), "invariant");
@ -121,13 +121,13 @@ bool JfrStorage::initialize() {
thread_local_cache_count, // cache preallocate count
true, // preallocate_to_free_list
this);
if (_thread_local_mspace == NULL) {
if (_thread_local_mspace == nullptr) {
return false;
}
assert(_thread_local_mspace->free_list_is_nonempty(), "invariant");
// The full list will contain nodes pointing to retired global and transient buffers.
_full_list = new JfrFullList(*_control);
return _full_list != NULL && _full_list->initialize(num_global_buffers * 2);
return _full_list != nullptr && _full_list->initialize(num_global_buffers * 2);
}
JfrStorageControl& JfrStorage::control() {
@ -140,9 +140,9 @@ static void log_allocation_failure(const char* msg, size_t size) {
BufferPtr JfrStorage::acquire_thread_local(Thread* thread, size_t size /* 0 */) {
BufferPtr buffer = mspace_acquire_to_live_list(size, instance()._thread_local_mspace, thread);
if (buffer == NULL) {
if (buffer == nullptr) {
log_allocation_failure("thread local_memory", size);
return NULL;
return nullptr;
}
assert(buffer->acquired_by_self(), "invariant");
return buffer;
@ -150,9 +150,9 @@ BufferPtr JfrStorage::acquire_thread_local(Thread* thread, size_t size /* 0 */)
BufferPtr JfrStorage::acquire_transient(size_t size, Thread* thread) {
BufferPtr buffer = mspace_allocate_transient_lease(size, instance()._thread_local_mspace, thread);
if (buffer == NULL) {
if (buffer == nullptr) {
log_allocation_failure("transient memory", size);
return NULL;
return nullptr;
}
assert(buffer->acquired_by_self(), "invariant");
assert(buffer->transient(), "invariant");
@ -164,7 +164,7 @@ static BufferPtr acquire_lease(size_t size, JfrStorageMspace* mspace, JfrStorage
assert(size <= mspace->min_element_size(), "invariant");
while (true) {
BufferPtr buffer = mspace_acquire_lease_with_retry(size, mspace, retry_count, thread);
if (buffer == NULL && storage_instance.control().should_discard()) {
if (buffer == nullptr && storage_instance.control().should_discard()) {
storage_instance.discard_oldest(thread);
continue;
}
@ -176,7 +176,7 @@ static BufferPtr acquire_promotion_buffer(size_t size, JfrStorageMspace* mspace,
assert(size <= mspace->min_element_size(), "invariant");
while (true) {
BufferPtr buffer= mspace_acquire_live_with_retry(size, mspace, retry_count, thread);
if (buffer == NULL && storage_instance.control().should_discard()) {
if (buffer == nullptr && storage_instance.control().should_discard()) {
storage_instance.discard_oldest(thread);
continue;
}
@ -192,7 +192,7 @@ BufferPtr JfrStorage::acquire_large(size_t size, Thread* thread) {
// if not too large and capacity is still available, ask for a lease from the global system
if (size < max_elem_size && storage_instance.control().is_global_lease_allowed()) {
BufferPtr const buffer = acquire_lease(size, storage_instance._global_mspace, storage_instance, lease_retry, thread);
if (buffer != NULL) {
if (buffer != nullptr) {
assert(buffer->acquired_by_self(), "invariant");
assert(!buffer->transient(), "invariant");
assert(buffer->lease(), "invariant");
@ -204,7 +204,7 @@ BufferPtr JfrStorage::acquire_large(size_t size, Thread* thread) {
}
static void write_data_loss_event(JfrBuffer* buffer, u8 unflushed_size, Thread* thread) {
assert(buffer != NULL, "invariant");
assert(buffer != nullptr, "invariant");
assert(buffer->empty(), "invariant");
const u8 total_data_loss = thread->jfr_thread_local()->add_data_lost(unflushed_size);
if (EventDataLoss::is_enabled()) {
@ -219,7 +219,7 @@ static void write_data_loss_event(JfrBuffer* buffer, u8 unflushed_size, Thread*
}
static void write_data_loss(BufferPtr buffer, Thread* thread) {
assert(buffer != NULL, "invariant");
assert(buffer != nullptr, "invariant");
const size_t unflushed_size = buffer->unflushed_size();
buffer->reinitialize();
if (unflushed_size == 0) {
@ -231,7 +231,7 @@ static void write_data_loss(BufferPtr buffer, Thread* thread) {
static const size_t promotion_retry = 100;
bool JfrStorage::flush_regular_buffer(BufferPtr buffer, Thread* thread) {
assert(buffer != NULL, "invariant");
assert(buffer != nullptr, "invariant");
assert(!buffer->lease(), "invariant");
assert(!buffer->transient(), "invariant");
const size_t unflushed_size = buffer->unflushed_size();
@ -242,7 +242,7 @@ bool JfrStorage::flush_regular_buffer(BufferPtr buffer, Thread* thread) {
}
BufferPtr const promotion_buffer = acquire_promotion_buffer(unflushed_size, _global_mspace, *this, promotion_retry, thread);
if (promotion_buffer == NULL) {
if (promotion_buffer == nullptr) {
write_data_loss(buffer, thread);
return false;
}
@ -261,7 +261,7 @@ bool JfrStorage::flush_regular_buffer(BufferPtr buffer, Thread* thread) {
* and the caller should take means to ensure that it is not referenced any longer.
*/
void JfrStorage::release_large(BufferPtr buffer, Thread* thread) {
assert(buffer != NULL, "invariant");
assert(buffer != nullptr, "invariant");
assert(buffer->lease(), "invariant");
assert(buffer->acquired_by_self(), "invariant");
buffer->clear_lease();
@ -275,7 +275,7 @@ void JfrStorage::release_large(BufferPtr buffer, Thread* thread) {
}
void JfrStorage::register_full(BufferPtr buffer, Thread* thread) {
assert(buffer != NULL, "invariant");
assert(buffer != nullptr, "invariant");
assert(buffer->acquired_by(thread), "invariant");
assert(buffer->retired(), "invariant");
if (_full_list->add(buffer)) {
@ -285,7 +285,7 @@ void JfrStorage::register_full(BufferPtr buffer, Thread* thread) {
// don't use buffer on return, it is gone
void JfrStorage::release(BufferPtr buffer, Thread* thread) {
assert(buffer != NULL, "invariant");
assert(buffer != nullptr, "invariant");
assert(!buffer->lease(), "invariant");
assert(!buffer->transient(), "invariant");
assert(!buffer->retired(), "invariant");
@ -295,12 +295,12 @@ void JfrStorage::release(BufferPtr buffer, Thread* thread) {
}
}
assert(buffer->empty(), "invariant");
assert(buffer->identity() != NULL, "invariant");
assert(buffer->identity() != nullptr, "invariant");
buffer->set_retired();
}
void JfrStorage::release_thread_local(BufferPtr buffer, Thread* thread) {
assert(buffer != NULL, "invariant");
assert(buffer != nullptr, "invariant");
JfrStorage& storage_instance = instance();
storage_instance.release(buffer, thread);
}
@ -325,8 +325,8 @@ void JfrStorage::discard_oldest(Thread* thread) {
size_t discarded_size = 0;
while (_full_list->is_nonempty()) {
BufferPtr oldest = _full_list->remove();
assert(oldest != NULL, "invariant");
assert(oldest->identity() != NULL, "invariant");
assert(oldest != nullptr, "invariant");
assert(oldest->identity() != nullptr, "invariant");
discarded_size += oldest->discard();
assert(oldest->unflushed_size() == 0, "invariant");
if (oldest->transient()) {
@ -347,34 +347,34 @@ void JfrStorage::discard_oldest(Thread* thread) {
typedef const BufferPtr ConstBufferPtr;
static void assert_flush_precondition(ConstBufferPtr cur, size_t used, bool native, const Thread* t) {
assert(t != NULL, "invariant");
assert(cur != NULL, "invariant");
assert(t != nullptr, "invariant");
assert(cur != nullptr, "invariant");
assert(cur->pos() + used <= cur->end(), "invariant");
assert(native ? t->jfr_thread_local()->native_buffer() == cur : t->jfr_thread_local()->java_buffer() == cur, "invariant");
}
static void assert_flush_regular_precondition(ConstBufferPtr cur, const u1* const cur_pos, size_t used, size_t req, const Thread* t) {
assert(t != NULL, "invariant");
assert(cur != NULL, "invariant");
assert(t != nullptr, "invariant");
assert(cur != nullptr, "invariant");
assert(!cur->lease(), "invariant");
assert(cur_pos != NULL, "invariant");
assert(cur_pos != nullptr, "invariant");
assert(req >= used, "invariant");
}
static void assert_provision_large_precondition(ConstBufferPtr cur, size_t used, size_t req, const Thread* t) {
assert(cur != NULL, "invariant");
assert(t != NULL, "invariant");
assert(t->jfr_thread_local()->shelved_buffer() != NULL, "invariant");
assert(cur != nullptr, "invariant");
assert(t != nullptr, "invariant");
assert(t->jfr_thread_local()->shelved_buffer() != nullptr, "invariant");
assert(req >= used, "invariant");
}
static void assert_flush_large_precondition(ConstBufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
assert(t != NULL, "invariant");
assert(cur != NULL, "invariant");
assert(t != nullptr, "invariant");
assert(cur != nullptr, "invariant");
assert(cur->lease(), "invariant");
assert(cur_pos != NULL, "invariant");
assert(cur_pos != nullptr, "invariant");
assert(native ? t->jfr_thread_local()->native_buffer() == cur : t->jfr_thread_local()->java_buffer() == cur, "invariant");
assert(t->jfr_thread_local()->shelved_buffer() != NULL, "invariant");
assert(t->jfr_thread_local()->shelved_buffer() != nullptr, "invariant");
assert(req >= used, "invariant");
assert(cur != t->jfr_thread_local()->shelved_buffer(), "invariant");
}
@ -408,13 +408,13 @@ BufferPtr JfrStorage::flush_regular(BufferPtr cur, const u1* const cur_pos, size
}
// Going for a "larger-than-regular" buffer.
// Shelve the current buffer to make room for a temporary lease.
assert(t->jfr_thread_local()->shelved_buffer() == NULL, "invariant");
assert(t->jfr_thread_local()->shelved_buffer() == nullptr, "invariant");
t->jfr_thread_local()->shelve_buffer(cur);
return provision_large(cur, cur_pos, used, req, native, t);
}
static BufferPtr store_buffer_to_thread_local(BufferPtr buffer, JfrThreadLocal* jfr_thread_local, bool native) {
assert(buffer != NULL, "invariant");
assert(buffer != nullptr, "invariant");
if (native) {
jfr_thread_local->set_native_buffer(buffer);
} else {
@ -426,8 +426,8 @@ static BufferPtr store_buffer_to_thread_local(BufferPtr buffer, JfrThreadLocal*
static BufferPtr restore_shelved_buffer(bool native, Thread* t) {
JfrThreadLocal* const tl = t->jfr_thread_local();
BufferPtr shelved = tl->shelved_buffer();
assert(shelved != NULL, "invariant");
tl->shelve_buffer(NULL);
assert(shelved != nullptr, "invariant");
tl->shelve_buffer(nullptr);
// restore shelved buffer back as primary
return store_buffer_to_thread_local(shelved, tl, native);
}
@ -436,7 +436,7 @@ BufferPtr JfrStorage::flush_large(BufferPtr cur, const u1* const cur_pos, size_t
debug_only(assert_flush_large_precondition(cur, cur_pos, used, req, native, t);)
// Can the "regular" buffer (now shelved) accommodate the requested size?
BufferPtr shelved = t->jfr_thread_local()->shelved_buffer();
assert(shelved != NULL, "invariant");
assert(shelved != nullptr, "invariant");
if (shelved->free_size() >= req) {
if (req > 0) {
memcpy(shelved->pos(), (void*)cur_pos, (size_t)used);
@ -450,8 +450,8 @@ BufferPtr JfrStorage::flush_large(BufferPtr cur, const u1* const cur_pos, size_t
}
static BufferPtr large_fail(BufferPtr cur, bool native, JfrStorage& storage_instance, Thread* t) {
assert(cur != NULL, "invariant");
assert(t != NULL, "invariant");
assert(cur != nullptr, "invariant");
assert(t != nullptr, "invariant");
if (cur->lease()) {
storage_instance.release_large(cur, t);
}
@ -464,9 +464,9 @@ static BufferPtr large_fail(BufferPtr cur, bool native, JfrStorage& storage_inst
// Caller needs to ensure if the size was successfully accommodated.
BufferPtr JfrStorage::provision_large(BufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
debug_only(assert_provision_large_precondition(cur, used, req, t);)
assert(t->jfr_thread_local()->shelved_buffer() != NULL, "invariant");
assert(t->jfr_thread_local()->shelved_buffer() != nullptr, "invariant");
BufferPtr const buffer = acquire_large(req, t);
if (buffer == NULL) {
if (buffer == nullptr) {
// unable to allocate and serve the request
return large_fail(cur, native, *this, t);
}
@ -531,12 +531,12 @@ size_t JfrStorage::clear() {
template <typename Processor>
static size_t process_full(Processor& processor, JfrFullList* list, JfrStorageControl& control) {
assert(list != NULL, "invariant");
assert(list != nullptr, "invariant");
assert(list->is_nonempty(), "invariant");
size_t count = 0;
do {
BufferPtr full = list->remove();
if (full == NULL) break;
if (full == nullptr) break;
assert(full->retired(), "invariant");
processor.process(full);
// at this point, the buffer is already live or destroyed
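
Not part of the patch: the flush_regular, provision_large and flush_large hunks above park ("shelve") the thread's regular buffer, borrow a larger lease for an oversized write, and restore the shelved buffer once the lease is done or cannot be obtained. A much-reduced sketch of that dance; ToyBuffer, the thread-local slots and acquire_large_lease are invented stand-ins, and the copy-back of in-flight bytes is left out.

#include <cassert>
#include <cstddef>

struct ToyBuffer {
  size_t capacity;
  bool   lease;
};

struct ToyThreadLocal {
  ToyBuffer* _primary = nullptr;      // the buffer events are written to
  ToyBuffer* _shelved = nullptr;      // the regular buffer parked during a lease
};

// Invented stand-in for acquiring a temporary oversized buffer; may fail.
static ToyBuffer* acquire_large_lease(size_t size) {
  return new ToyBuffer{size, true};
}

static ToyBuffer* provision_large(ToyThreadLocal* tl, size_t req) {
  assert(tl->_shelved == nullptr);
  tl->_shelved = tl->_primary;        // shelve the regular buffer
  ToyBuffer* const lease = acquire_large_lease(req);
  if (lease == nullptr) {
    tl->_primary = tl->_shelved;      // failure: restore and give up
    tl->_shelved = nullptr;
    return nullptr;
  }
  tl->_primary = lease;               // write to the lease for now
  return lease;
}

static void flush_large(ToyThreadLocal* tl) {
  assert(tl->_primary != nullptr && tl->_primary->lease);
  delete tl->_primary;                // stand-in for releasing the lease
  tl->_primary = tl->_shelved;        // restore the shelved buffer
  tl->_shelved = nullptr;
}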

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -52,18 +52,18 @@ class CompositeOperation {
NextOperation* _next;
public:
CompositeOperation(Operation* op, NextOperation* next) : _op(op), _next(next) {
assert(_op != NULL, "invariant");
assert(_op != nullptr, "invariant");
}
typedef typename Operation::Type Type;
bool process(Type* t) {
const bool op_result = _op->process(t);
return _next == NULL ? op_result : TruthFunction::evaluate(op_result) ? _next->process(t) : op_result;
return _next == nullptr ? op_result : TruthFunction::evaluate(op_result) ? _next->process(t) : op_result;
}
size_t elements() const {
return _next == NULL ? _op->elements() : _op->elements() + _next->elements();
return _next == nullptr ? _op->elements() : _op->elements() + _next->elements();
}
size_t size() const {
return _next == NULL ? _op->size() : _op->size() + _next->size();
return _next == nullptr ? _op->size() : _op->size() + _next->size();
}
};
@ -99,7 +99,7 @@ class Retired {
public:
typedef T Type;
bool process(Type* t) {
assert(t != NULL, "invariant");
assert(t != nullptr, "invariant");
return negation ? !t->retired() : t->retired();
}
};
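
Not part of the patch: a small usage sketch of the composition idiom above, where two operations are chained through a nullptr-terminated next pointer and a truth function decides whether the chain continues. The Composite restatement, the NonNegative and Count operations and the And policy are all invented here; the real operations work on buffer types.

#include <cstddef>

struct And { static bool evaluate(bool value) { return value; } };

template <typename Operation, typename TruthFunction = And,
          typename NextOperation = Operation>
class Composite {
  Operation* _op;
  NextOperation* _next;
 public:
  typedef typename Operation::Type Type;
  Composite(Operation* op, NextOperation* next = nullptr) : _op(op), _next(next) {}
  bool process(Type* t) {
    const bool r = _op->process(t);
    // stop at the end of the chain, or when the truth function says stop
    return r && _next != nullptr ? _next->process(t) : r;
  }
};

struct NonNegative {
  typedef int Type;
  bool process(Type* t) { return *t >= 0; }
};

struct Count {
  typedef int Type;
  size_t _count = 0;
  bool process(Type*) { ++_count; return true; }
};

// Counts an element only when the first operation accepts it:
//   NonNegative check;
//   Count counter;
//   Composite<NonNegative, And, Count> both(&check, &counter);
//   int v = 7;
//   both.process(&v);   // counter._count == 1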

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -48,7 +48,7 @@ inline bool DefaultDiscarder<T>::discard(T* t, const u1* data, size_t size) {
template <typename Type>
inline size_t get_unflushed_size(const u1* top, Type* t) {
assert(t != NULL, "invariant");
assert(t != nullptr, "invariant");
return Atomic::load_acquire(t->pos_address()) - top;
}
@ -78,7 +78,7 @@ inline bool ConcurrentWriteOp<Operation>::process(typename Operation::Type* t) {
template <typename Operation>
inline bool MutexedWriteOp<Operation>::process(typename Operation::Type* t) {
assert(t != NULL, "invariant");
assert(t != nullptr, "invariant");
const u1* const top = t->top();
const size_t unflushed_size = get_unflushed_size(top, t);
assert((intptr_t)unflushed_size >= 0, "invariant");
@ -92,7 +92,7 @@ inline bool MutexedWriteOp<Operation>::process(typename Operation::Type* t) {
template <typename Type>
static void retired_sensitive_acquire(Type* t, Thread* thread) {
assert(t != NULL, "invariant");
assert(t != nullptr, "invariant");
assert(thread != nullptr, "invariant");
assert(thread == Thread::current(), "invariant");
if (t->retired()) {
@ -118,7 +118,7 @@ inline bool ExclusiveOp<Operation>::process(typename Operation::Type* t) {
template <typename Operation>
inline bool DiscardOp<Operation>::process(typename Operation::Type* t) {
assert(t != NULL, "invariant");
assert(t != nullptr, "invariant");
const u1* const top = _mode == concurrent ? t->acquire_critical_section_top() : t->top();
const size_t unflushed_size = get_unflushed_size(top, t);
assert((intptr_t)unflushed_size >= 0, "invariant");
@ -150,7 +150,7 @@ inline bool ExclusiveDiscardOp<Operation>::process(typename Operation::Type* t)
template <typename Operation>
inline bool EpochDispatchOp<Operation>::process(typename Operation::Type* t) {
assert(t != NULL, "invariant");
assert(t != nullptr, "invariant");
const u1* const current_top = _previous_epoch ? t->start() : t->top();
const size_t unflushed_size = Atomic::load_acquire(t->pos_address()) - current_top;
assert((intptr_t)unflushed_size >= 0, "invariant");
@ -164,7 +164,7 @@ inline bool EpochDispatchOp<Operation>::process(typename Operation::Type* t) {
template <typename Operation>
size_t EpochDispatchOp<Operation>::dispatch(bool previous_epoch, const u1* element, size_t size) {
assert(element != NULL, "invariant");
assert(element != nullptr, "invariant");
const u1* const limit = element + size;
size_t elements = 0;
while (element < limit) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -90,8 +90,8 @@ class JfrVirtualMemorySegment : public JfrCHeapObj {
};
JfrVirtualMemorySegment::JfrVirtualMemorySegment() :
_next(NULL),
_top(NULL),
_next(nullptr),
_top(nullptr),
_rs(),
_virtual_memory() {}
@ -108,7 +108,7 @@ bool JfrVirtualMemorySegment::initialize(size_t reservation_size_request_bytes)
if (!_rs.is_reserved()) {
return false;
}
assert(_rs.base() != NULL, "invariant");
assert(_rs.base() != nullptr, "invariant");
assert(_rs.size() != 0, "invariant");
assert(is_aligned(_rs.base(), os::vm_allocation_granularity()), "invariant");
assert(is_aligned(_rs.size(), os::vm_allocation_granularity()), "invariant");
@ -169,10 +169,10 @@ void* JfrVirtualMemorySegment::take_from_committed(size_t block_size_request_wor
assert(_virtual_memory.committed_size() == _virtual_memory.actual_committed_size(),
"The committed memory doesn't match the expanded memory.");
if (!is_available(block_size_request_words)) {
return NULL;
return nullptr;
}
void* const block = top();
assert(block != NULL, "invariant");
assert(block != nullptr, "invariant");
inc_top(block_size_request_words);
return block;
}
@ -221,8 +221,8 @@ class JfrVirtualMemoryManager : public JfrCHeapObj {
};
JfrVirtualMemoryManager::JfrVirtualMemoryManager() :
_segments(NULL),
_current_segment(NULL),
_segments(nullptr),
_current_segment(nullptr),
_reservation_size_request_words(0),
_reservation_size_request_limit_words(0),
_current_reserved_words(0),
@ -230,7 +230,7 @@ JfrVirtualMemoryManager::JfrVirtualMemoryManager() :
JfrVirtualMemoryManager::~JfrVirtualMemoryManager() {
JfrVirtualMemorySegment* segment = _segments;
while (segment != NULL) {
while (segment != nullptr) {
JfrVirtualMemorySegment* next_segment = segment->next();
delete segment;
segment = next_segment;
@ -256,7 +256,7 @@ bool JfrVirtualMemoryManager::new_segment(size_t reservation_size_request_words)
assert(reservation_size_request_words > 0, "invariant");
assert(is_aligned(reservation_size_request_words * BytesPerWord, os::vm_allocation_granularity()), "invariant");
Segment* segment = new Segment();
if (NULL == segment) {
if (nullptr == segment) {
return false;
}
if (!segment->initialize(reservation_size_request_words * BytesPerWord)) {
@ -270,7 +270,7 @@ bool JfrVirtualMemoryManager::new_segment(size_t reservation_size_request_words)
}
bool JfrVirtualMemoryManager::expand_segment_by(JfrVirtualMemorySegment* segment, size_t block_size_request_words) {
assert(segment != NULL, "invariant");
assert(segment != nullptr, "invariant");
const size_t before = segment->committed_words();
const bool result = segment->expand_by(block_size_request_words);
const size_t after = segment->committed_words();
@ -324,11 +324,11 @@ bool JfrVirtualMemoryManager::expand_by(size_t block_size_request_words, size_t
}
void JfrVirtualMemoryManager::link(JfrVirtualMemorySegment* segment) {
assert(segment != NULL, "invariant");
if (_segments == NULL) {
assert(segment != nullptr, "invariant");
if (_segments == nullptr) {
_segments = segment;
} else {
assert(_current_segment != NULL, "invariant");
assert(_current_segment != nullptr, "invariant");
assert(_segments == _current_segment, "invariant");
_current_segment->set_next(segment);
}
@ -340,32 +340,32 @@ void JfrVirtualMemoryManager::link(JfrVirtualMemorySegment* segment) {
void* JfrVirtualMemoryManager::commit(size_t block_size_request_words) {
assert(is_aligned(block_size_request_words * BytesPerWord, os::vm_allocation_granularity()), "invariant");
void* block = current()->commit(block_size_request_words);
if (block != NULL) {
if (block != nullptr) {
return block;
}
assert(block == NULL, "invariant");
assert(block == nullptr, "invariant");
if (is_full()) {
return NULL;
return nullptr;
}
assert(block_size_request_words <= _reservation_size_request_words, "invariant");
if (expand_by(block_size_request_words, _reservation_size_request_words)) {
block = current()->commit(block_size_request_words);
assert(block != NULL, "The allocation was expected to succeed after the expansion");
assert(block != nullptr, "The allocation was expected to succeed after the expansion");
}
return block;
}
JfrVirtualMemory::JfrVirtualMemory() :
_vmm(NULL),
_vmm(nullptr),
_reserved_low(),
_reserved_high(),
_top(NULL),
_commit_point(NULL),
_top(nullptr),
_commit_point(nullptr),
_physical_commit_size_request_words(0),
_aligned_datum_size_bytes(0) {}
JfrVirtualMemory::~JfrVirtualMemory() {
assert(_vmm != NULL, "invariant");
assert(_vmm != nullptr, "invariant");
delete _vmm;
}
@ -374,9 +374,9 @@ size_t JfrVirtualMemory::aligned_datum_size_bytes() const {
}
static void adjust_allocation_ratio(size_t* const reservation_size_bytes, size_t* const commit_size_bytes) {
assert(reservation_size_bytes != NULL, "invariant");
assert(reservation_size_bytes != nullptr, "invariant");
assert(*reservation_size_bytes > 0, "invariant");
assert(commit_size_bytes != NULL, "invariant");
assert(commit_size_bytes != nullptr, "invariant");
assert(*commit_size_bytes > 0, "invariant");
assert(*reservation_size_bytes >= *commit_size_bytes, "invariant");
assert(is_aligned(*reservation_size_bytes, os::vm_allocation_granularity()), "invariant");
@ -409,11 +409,11 @@ static void adjust_allocation_ratio(size_t* const reservation_size_bytes, size_t
void* JfrVirtualMemory::initialize(size_t reservation_size_request_bytes,
size_t block_size_request_bytes,
size_t datum_size_bytes /* 1 */) {
assert(_vmm == NULL, "invariant");
assert(_vmm == nullptr, "invariant");
_vmm = new JfrVirtualMemoryManager();
if (_vmm == NULL) {
return NULL;
if (_vmm == nullptr) {
return nullptr;
}
assert(reservation_size_request_bytes > 0, "invariant");
@ -440,7 +440,7 @@ void* JfrVirtualMemory::initialize(size_t reservation_size_request_bytes,
if (!_vmm->initialize(reservation_size_request_words)) {
// is implicitly "full" if reservation fails
assert(is_full(), "invariant");
return NULL;
return nullptr;
}
_reserved_low = (const u1*)_vmm->reserved_low();
_reserved_high = (const u1*)_vmm->reserved_high();
@ -454,7 +454,7 @@ void* JfrVirtualMemory::initialize(size_t reservation_size_request_bytes,
}
void* JfrVirtualMemory::commit(size_t block_size_request_words) {
assert(_vmm != NULL, "invariant");
assert(_vmm != nullptr, "invariant");
assert(is_aligned(block_size_request_words * BytesPerWord, os::vm_allocation_granularity()), "invariant");
return _vmm->commit(block_size_request_words);
}
@ -468,26 +468,26 @@ bool JfrVirtualMemory::is_empty() const {
}
bool JfrVirtualMemory::commit_memory_block() {
assert(_vmm != NULL, "invariant");
assert(_vmm != nullptr, "invariant");
assert(!is_full(), "invariant");
void* const block = _vmm->commit(_physical_commit_size_request_words);
if (block != NULL) {
if (block != nullptr) {
_commit_point = _vmm->committed_high();
return true;
}
// all reserved virtual memory is committed
assert(block == NULL, "invariant");
assert(block == nullptr, "invariant");
assert(_vmm->reserved_high() == _vmm->committed_high(), "invariant");
return false;
}
void* JfrVirtualMemory::new_datum() {
assert(_vmm != NULL, "invariant");
assert(_vmm != nullptr, "invariant");
assert(!is_full(), "invariant");
if (_top == _commit_point) {
if (!commit_memory_block()) {
assert(is_full(), "invariant");
return NULL;
return nullptr;
}
}
assert(_top + _aligned_datum_size_bytes <= _commit_point, "invariant");
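
Not part of the patch: the jfrVirtualMemory hunks above separate reserving address space from committing it, handing out data by bumping _top and committing another block whenever _top reaches the commit point. A sketch of that shape over a caller-supplied byte range, with the actual page reservation and commit calls left out; it assumes each datum fits inside one block and that the range is a multiple of the block size.

#include <cstddef>
#include <cstdint>

class ToyVirtualMemory {
  uint8_t* _reserved_high = nullptr;   // end of the reserved range
  uint8_t* _top           = nullptr;   // next datum is handed out here
  uint8_t* _commit_point  = nullptr;   // end of the committed prefix
  size_t   _block_bytes   = 0;
 public:
  // 'reservation' stands in for a mapped but uncommitted address range.
  bool initialize(uint8_t* reservation, size_t reserved_bytes, size_t block_bytes) {
    _top = _commit_point = reservation;
    _reserved_high = reservation + reserved_bytes;
    _block_bytes = block_bytes;
    return commit_block();             // commit the first block up front
  }
  bool is_full() const { return _commit_point == _reserved_high; }
  bool commit_block() {
    if (is_full()) {
      return false;                    // all reserved memory is committed
    }
    _commit_point += _block_bytes;     // real code would commit pages here
    return true;
  }
  void* new_datum(size_t datum_bytes) {
    if (_top + datum_bytes > _commit_point && !commit_block()) {
      return nullptr;                  // reservation exhausted
    }
    void* const datum = _top;
    _top += datum_bytes;
    return datum;
  }
};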

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -47,25 +47,25 @@ bool JfrStringPool::is_modified() {
return _new_string.is_signaled_with_reset();
}
static JfrStringPool* _instance = NULL;
static JfrStringPool* _instance = nullptr;
JfrStringPool& JfrStringPool::instance() {
return *_instance;
}
JfrStringPool* JfrStringPool::create(JfrChunkWriter& cw) {
assert(_instance == NULL, "invariant");
assert(_instance == nullptr, "invariant");
_instance = new JfrStringPool(cw);
return _instance;
}
void JfrStringPool::destroy() {
assert(_instance != NULL, "invariant");
assert(_instance != nullptr, "invariant");
delete _instance;
_instance = NULL;
_instance = nullptr;
}
JfrStringPool::JfrStringPool(JfrChunkWriter& cw) : _mspace(NULL), _chunkwriter(cw) {}
JfrStringPool::JfrStringPool(JfrChunkWriter& cw) : _mspace(nullptr), _chunkwriter(cw) {}
JfrStringPool::~JfrStringPool() {
delete _mspace;
@ -75,13 +75,13 @@ static const size_t string_pool_cache_count = 2;
static const size_t string_pool_buffer_size = 512 * K;
bool JfrStringPool::initialize() {
assert(_mspace == NULL, "invariant");
assert(_mspace == nullptr, "invariant");
_mspace = create_mspace<JfrStringPoolMspace>(string_pool_buffer_size,
string_pool_cache_count, // cache limit
string_pool_cache_count, // cache preallocate count
false, // preallocate_to_free_list (== preallocate directly to live list)
this);
return _mspace != NULL;
return _mspace != nullptr;
}
/*
@ -91,7 +91,7 @@ bool JfrStringPool::initialize() {
* and the caller should take means to ensure that it is not referenced any longer.
*/
static void release(BufferPtr buffer, Thread* thread) {
assert(buffer != NULL, "invariant");
assert(buffer != nullptr, "invariant");
assert(buffer->lease(), "invariant");
assert(buffer->acquired_by_self(), "invariant");
buffer->clear_lease();
@ -103,27 +103,27 @@ static void release(BufferPtr buffer, Thread* thread) {
}
BufferPtr JfrStringPool::flush(BufferPtr old, size_t used, size_t requested, Thread* thread) {
assert(old != NULL, "invariant");
assert(old != nullptr, "invariant");
assert(old->lease(), "invariant");
if (0 == requested) {
// indicates a lease is being returned
release(old, thread);
return NULL;
return nullptr;
}
// migration of in-flight information
BufferPtr const new_buffer = lease(thread, used + requested);
if (new_buffer != NULL) {
if (new_buffer != nullptr) {
migrate_outstanding_writes(old, new_buffer, used, requested);
}
release(old, thread);
return new_buffer; // might be NULL
return new_buffer; // might be nullptr
}
static const size_t lease_retry = 10;
BufferPtr JfrStringPool::lease(Thread* thread, size_t size /* 0 */) {
BufferPtr buffer = mspace_acquire_lease_with_retry(size, instance()._mspace, lease_retry, thread);
if (buffer == NULL) {
if (buffer == nullptr) {
buffer = mspace_allocate_transient_lease_to_live_list(size, instance()._mspace, thread);
}
assert(buffer->acquired_by_self(), "invariant");
@ -132,7 +132,7 @@ BufferPtr JfrStringPool::lease(Thread* thread, size_t size /* 0 */) {
}
jboolean JfrStringPool::add(jlong id, jstring string, JavaThread* jt) {
assert(jt != NULL, "invariant");
assert(jt != nullptr, "invariant");
{
JfrStringPoolWriter writer(jt);
writer.write(id);
@ -209,7 +209,7 @@ size_t JfrStringPool::clear() {
void JfrStringPool::register_full(BufferPtr buffer, Thread* thread) {
// nothing here at the moment
assert(buffer != NULL, "invariant");
assert(buffer != nullptr, "invariant");
assert(buffer->acquired_by(thread), "invariant");
assert(buffer->retired(), "invariant");
}

View File

@ -44,9 +44,9 @@ JfrSamplerWindow::JfrSamplerWindow() :
JfrAdaptiveSampler::JfrAdaptiveSampler() :
_prng(this),
_window_0(NULL),
_window_1(NULL),
_active_window(NULL),
_window_0(nullptr),
_window_1(nullptr),
_active_window(nullptr),
_avg_population_size(0),
_ewma_population_size_alpha(0),
_acc_debt_carry_limit(0),
@ -59,14 +59,14 @@ JfrAdaptiveSampler::~JfrAdaptiveSampler() {
}
bool JfrAdaptiveSampler::initialize() {
assert(_window_0 == NULL, "invariant");
assert(_window_0 == nullptr, "invariant");
_window_0 = new JfrSamplerWindow();
if (_window_0 == NULL) {
if (_window_0 == nullptr) {
return false;
}
assert(_window_1 == NULL, "invariant");
assert(_window_1 == nullptr, "invariant");
_window_1 = new JfrSamplerWindow();
if (_window_1 == NULL) {
if (_window_1 == nullptr) {
return false;
}
_active_window = _window_0;
@ -102,7 +102,7 @@ inline bool JfrSamplerWindow::is_expired(int64_t timestamp) const {
}
bool JfrSamplerWindow::sample(int64_t timestamp, bool* expired_window) const {
assert(expired_window != NULL, "invariant");
assert(expired_window != nullptr, "invariant");
*expired_window = is_expired(timestamp);
return *expired_window ? false : sample();
}
@ -116,7 +116,7 @@ inline bool JfrSamplerWindow::sample() const {
void JfrAdaptiveSampler::rotate_window(int64_t timestamp) {
assert(_lock, "invariant");
const JfrSamplerWindow* const current = active_window();
assert(current != NULL, "invariant");
assert(current != nullptr, "invariant");
if (!current->is_expired(timestamp)) {
// Someone took care of it.
return;
@ -229,7 +229,7 @@ JfrSamplerWindow* JfrAdaptiveSampler::set_rate(const JfrSamplerParams& params, c
}
inline JfrSamplerWindow* JfrAdaptiveSampler::next_window(const JfrSamplerWindow* expired) const {
assert(expired != NULL, "invariant");
assert(expired != nullptr, "invariant");
return expired == _window_0 ? _window_1 : _window_0;
}
@ -257,7 +257,7 @@ size_t JfrAdaptiveSampler::project_sample_size(const JfrSamplerParams& params, c
* or 'amortize' debt accumulated by its predecessor(s).
*/
size_t JfrAdaptiveSampler::amortize_debt(const JfrSamplerWindow* expired) {
assert(expired != NULL, "invariant");
assert(expired != nullptr, "invariant");
const intptr_t accumulated_debt = expired->accumulated_debt();
assert(accumulated_debt <= 0, "invariant");
if (_acc_debt_carry_count == _acc_debt_carry_limit) {
@ -326,7 +326,7 @@ size_t JfrAdaptiveSampler::derive_sampling_interval(double sample_size, const Jf
// The projected population size is an exponentially weighted moving average, a function of the window_lookback_count.
inline size_t JfrAdaptiveSampler::project_population_size(const JfrSamplerWindow* expired) {
assert(expired != NULL, "invariant");
assert(expired != nullptr, "invariant");
_avg_population_size = exponentially_weighted_moving_average(expired->population_size(), _ewma_population_size_alpha, _avg_population_size);
return _avg_population_size;
}
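The comment above describes the projected population size as an exponentially weighted moving average of the population sizes of expired windows. Assuming the conventional EWMA form, with the alpha coefficient derived elsewhere from window_lookback_count, the update has this shape (a sketch matching the call shape seen above, not necessarily the exact JFR helper):
// Hedged sketch: conventional EWMA update, arguments ordered as in the call above.
static inline double ewma_sketch(double current, double alpha, double previous) {
  return alpha * current + (1.0 - alpha) * previous;
}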
@ -360,7 +360,7 @@ bool JfrGTestFixedRateSampler::initialize() {
*
*/
static void log(const JfrSamplerWindow* expired, double* sample_size_ewma) {
assert(sample_size_ewma != NULL, "invariant");
assert(sample_size_ewma != nullptr, "invariant");
if (log_is_enabled(Debug, jfr, system, throttle)) {
*sample_size_ewma = exponentially_weighted_moving_average(expired->sample_size(), compute_ewma_alpha_coefficient(expired->params().window_lookback_count), *sample_size_ewma);
log_debug(jfr, system, throttle)("JfrGTestFixedRateSampler: avg.sample size: %0.4f, window set point: %zu, sample size: %zu, population size: %zu, ratio: %.4f, window duration: %zu ms\n",
@ -378,7 +378,7 @@ static void log(const JfrSamplerWindow* expired, double* sample_size_ewma) {
* parameters, possibly updated, for the engine to apply to the next window.
*/
const JfrSamplerParams& JfrGTestFixedRateSampler::next_window_params(const JfrSamplerWindow* expired) {
assert(expired != NULL, "invariant");
assert(expired != nullptr, "invariant");
assert(_lock, "invariant");
log(expired, &_sample_size_ewma);
return _params;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -39,7 +39,7 @@ template <typename T>
class LessThanHalfBufferSize : AllStatic {
public:
static bool evaluate(T* t) {
assert(t != NULL, "invariant");
assert(t != nullptr, "invariant");
return t->free_size() < t->size() / 2;
}
};
@ -48,7 +48,7 @@ template <typename T>
class LessThanSize : AllStatic {
public:
static bool evaluate(T* t, size_t size) {
assert(t != NULL, "invariant");
assert(t != nullptr, "invariant");
return t->free_size() < size;
}
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,7 +34,7 @@
#ifdef ASSERT
static void assert_precondition(JavaThread* jt) {
assert(jt != NULL, "invariant");
assert(jt != nullptr, "invariant");
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_java(jt);)
assert(jt->has_last_Java_frame(), "invariant");
}
@ -72,7 +72,7 @@ void* JfrIntrinsicSupport::write_checkpoint(JavaThread* jt) {
}
void JfrIntrinsicSupport::load_barrier(const Klass* klass) {
assert(klass != NULL, "sanity");
assert(klass != nullptr, "sanity");
JfrTraceIdLoadBarrier::load_barrier(klass);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,7 +37,7 @@
#include "runtime/javaThread.hpp"
#include "utilities/stack.inline.hpp"
static jobject empty_java_util_arraylist = NULL;
static jobject empty_java_util_arraylist = nullptr;
static oop new_java_util_arraylist(TRAPS) {
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
@ -57,10 +57,10 @@ static GrowableArray<T>* c_heap_allocate_array(int size = initial_array_size) {
static bool initialize(TRAPS) {
static bool initialized = false;
if (!initialized) {
assert(NULL == empty_java_util_arraylist, "invariant");
assert(nullptr == empty_java_util_arraylist, "invariant");
const oop array_list = new_java_util_arraylist(CHECK_false);
empty_java_util_arraylist = JfrJavaSupport::global_jni_handle(array_list, THREAD);
initialized = empty_java_util_arraylist != NULL;
initialized = empty_java_util_arraylist != nullptr;
}
return initialized;
}
@ -72,7 +72,7 @@ static bool initialize(TRAPS) {
* trigger initialization.
*/
static bool is_allowed(const Klass* k) {
assert(k != NULL, "invariant");
assert(k != nullptr, "invariant");
if (!JfrTraceId::is_jdk_jfr_event_sub(k)) {
// Was excluded during initial class load.
return false;
@ -82,7 +82,7 @@ static bool is_allowed(const Klass* k) {
static void fill_klasses(GrowableArray<const void*>& event_subklasses, const InstanceKlass* event_klass, JavaThread* thread) {
assert(event_subklasses.length() == 0, "invariant");
assert(event_klass != NULL, "invariant");
assert(event_klass != nullptr, "invariant");
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(thread));
for (ClassHierarchyIterator iter(const_cast<InstanceKlass*>(event_klass)); !iter.done(); iter.next()) {
@ -107,21 +107,21 @@ static void transform_klasses_to_local_jni_handles(GrowableArray<const void*>& e
jobject JdkJfrEvent::get_all_klasses(TRAPS) {
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
initialize(THREAD);
assert(empty_java_util_arraylist != NULL, "should have been setup already!");
assert(empty_java_util_arraylist != nullptr, "should have been setup already!");
static const char jdk_jfr_event_name[] = "jdk/internal/event/Event";
Symbol* const event_klass_name = SymbolTable::probe(jdk_jfr_event_name, sizeof jdk_jfr_event_name - 1);
if (NULL == event_klass_name) {
if (nullptr == event_klass_name) {
// not loaded yet
return empty_java_util_arraylist;
}
const Klass* const klass = SystemDictionary::resolve_or_null(event_klass_name, THREAD);
assert(klass != NULL, "invariant");
assert(klass != nullptr, "invariant");
assert(klass->is_instance_klass(), "invariant");
assert(JdkJfrEvent::is(klass), "invariant");
if (klass->subklass() == NULL) {
if (klass->subklass() == nullptr) {
return empty_java_util_arraylist;
}
@ -141,13 +141,12 @@ jobject JdkJfrEvent::get_all_klasses(TRAPS) {
static const char add_method_name[] = "add";
static const char add_method_signature[] = "(Ljava/lang/Object;)Z";
const Klass* const array_list_klass = JfrJavaSupport::klass(empty_java_util_arraylist);
assert(array_list_klass != NULL, "invariant");
assert(array_list_klass != nullptr, "invariant");
const Symbol* const add_method_sym = SymbolTable::new_symbol(add_method_name);
assert(add_method_sym != NULL, "invariant");
assert(add_method_sym != nullptr, "invariant");
const Symbol* const add_method_sig_sym = SymbolTable::new_symbol(add_method_signature);
assert(add_method_signature != NULL, "invariant");
JavaValue result(T_BOOLEAN);
for (int i = 0; i < event_subklasses.length(); ++i) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,20 +40,20 @@ static GrowableArray<T>* c_heap_allocate_array(int size = initial_array_size) {
}
// Track the set of unloaded klasses during a chunk / epoch.
static GrowableArray<traceid>* _unload_set_epoch_0 = NULL;
static GrowableArray<traceid>* _unload_set_epoch_1 = NULL;
static GrowableArray<traceid>* _unload_set_epoch_0 = nullptr;
static GrowableArray<traceid>* _unload_set_epoch_1 = nullptr;
static s8 event_klass_unloaded_count = 0;
static GrowableArray<traceid>* unload_set_epoch_0() {
if (_unload_set_epoch_0 == NULL) {
if (_unload_set_epoch_0 == nullptr) {
_unload_set_epoch_0 = c_heap_allocate_array<traceid>(initial_array_size);
}
return _unload_set_epoch_0;
}
static GrowableArray<traceid>* unload_set_epoch_1() {
if (_unload_set_epoch_1 == NULL) {
if (_unload_set_epoch_1 == nullptr) {
_unload_set_epoch_1 = c_heap_allocate_array<traceid>(initial_array_size);
}
return _unload_set_epoch_1;
@ -72,16 +72,16 @@ static GrowableArray<traceid>* get_unload_set_previous_epoch() {
}
static void sort_set(GrowableArray<traceid>* set) {
assert(set != NULL, "invariant");
assert(set != nullptr, "invariant");
assert(set->is_nonempty(), "invariant");
set->sort(sort_traceid);
}
static bool is_nonempty_set(u1 epoch) {
if (epoch == 0) {
return _unload_set_epoch_0 != NULL && _unload_set_epoch_0->is_nonempty();
return _unload_set_epoch_0 != nullptr && _unload_set_epoch_0->is_nonempty();
}
return _unload_set_epoch_1 != NULL && _unload_set_epoch_1->is_nonempty();
return _unload_set_epoch_1 != nullptr && _unload_set_epoch_1->is_nonempty();
}
void JfrKlassUnloading::sort(bool previous_epoch) {
@ -104,7 +104,7 @@ void JfrKlassUnloading::clear() {
static bool add_to_unloaded_klass_set(traceid klass_id, bool current_epoch) {
assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
GrowableArray<traceid>* const unload_set = current_epoch ? get_unload_set() : get_unload_set_previous_epoch();
assert(unload_set != NULL, "invariant");
assert(unload_set != nullptr, "invariant");
assert(unload_set->find(klass_id) == -1, "invariant");
unload_set->append(klass_id);
return true;
@ -123,7 +123,7 @@ static void send_finalizer_event(const Klass* k) {
#endif
bool JfrKlassUnloading::on_unload(const Klass* k) {
assert(k != NULL, "invariant");
assert(k != nullptr, "invariant");
assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
MANAGEMENT_ONLY(send_finalizer_event(k);)
if (IS_JDK_JFR_EVENT_SUBKLASS(k)) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,11 +32,11 @@
// The InstanceKlass is assumed to be the method holder for the method to be looked up.
static const Method* lookup_method(InstanceKlass* ik, int orig_method_id_num) {
assert(ik != NULL, "invariant");
assert(ik != nullptr, "invariant");
assert(orig_method_id_num >= 0, "invariant");
assert(orig_method_id_num < ik->methods()->length(), "invariant");
const Method* const m = ik->method_with_orig_idnum(orig_method_id_num);
assert(m != NULL, "invariant");
assert(m != nullptr, "invariant");
assert(m->orig_method_idnum() == orig_method_id_num, "invariant");
assert(!m->is_obsolete(), "invariant");
assert(ik == m->method_holder(), "invariant");
@ -44,7 +44,7 @@ static const Method* lookup_method(InstanceKlass* ik, int orig_method_id_num) {
}
const Method* JfrMethodLookup::lookup(const InstanceKlass* ik, traceid method_id) {
assert(ik != NULL, "invariant");
assert(ik != nullptr, "invariant");
return lookup_method(const_cast<InstanceKlass*>(ik), method_id_num(method_id));
}
@ -53,7 +53,7 @@ int JfrMethodLookup::method_id_num(traceid method_id) {
}
traceid JfrMethodLookup::method_id(const Method* method) {
assert(method != NULL, "invariant");
assert(method != nullptr, "invariant");
return METHOD_ID(method->method_holder(), method);
}

View File

@ -67,7 +67,7 @@ inline int64_t estimate_tlab_size_bytes(Thread* thread) {
}
inline int64_t load_allocated_bytes(Thread* thread) {
assert(thread != NULL, "invariant");
assert(thread != nullptr, "invariant");
const int64_t allocated_bytes = thread->allocated_bytes();
if (allocated_bytes < _last_allocated_bytes) {
// A hw thread can detach and reattach to the VM, and when it does,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -47,7 +47,7 @@ JfrStackTraceMark::JfrStackTraceMark(Thread* t) : _t(t), _previous_id(0), _previ
tl->set_cached_stack_trace_id(JfrStackTraceRepository::record(t));
}
JfrStackTraceMark::JfrStackTraceMark(JfrEventId eventId) : _t(NULL), _previous_id(0), _previous_hash(0) {
JfrStackTraceMark::JfrStackTraceMark(JfrEventId eventId) : _t(nullptr), _previous_id(0), _previous_hash(0) {
if (JfrEventSetting::has_stacktrace(eventId)) {
_t = Thread::current();
JfrThreadLocal* const tl = _t->jfr_thread_local();
@ -59,7 +59,7 @@ JfrStackTraceMark::JfrStackTraceMark(JfrEventId eventId) : _t(NULL), _previous_i
}
}
JfrStackTraceMark::JfrStackTraceMark(JfrEventId eventId, Thread* t) : _t(NULL), _previous_id(0), _previous_hash(0) {
JfrStackTraceMark::JfrStackTraceMark(JfrEventId eventId, Thread* t) : _t(nullptr), _previous_id(0), _previous_hash(0) {
if (JfrEventSetting::has_stacktrace(eventId)) {
_t = t;
JfrThreadLocal* const tl = _t->jfr_thread_local();
@ -75,7 +75,7 @@ JfrStackTraceMark::~JfrStackTraceMark() {
if (_previous_id != 0) {
_t->jfr_thread_local()->set_cached_stack_trace_id(_previous_id, _previous_hash);
} else {
if (_t != NULL) {
if (_t != nullptr) {
_t->jfr_thread_local()->clear_cached_stack_trace();
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -46,17 +46,17 @@ static uintptr_t string_hash(const char* str) {
return java_lang_String::hash_code(reinterpret_cast<const jbyte*>(str), static_cast<int>(strlen(str)));
}
static JfrSymbolTable::StringEntry* bootstrap = NULL;
static JfrSymbolTable::StringEntry* bootstrap = nullptr;
static JfrSymbolTable* _instance = NULL;
static JfrSymbolTable* _instance = nullptr;
static JfrSymbolTable& instance() {
assert(_instance != NULL, "invariant");
assert(_instance != nullptr, "invariant");
return *_instance;
}
JfrSymbolTable* JfrSymbolTable::create() {
assert(_instance == NULL, "invariant");
assert(_instance == nullptr, "invariant");
assert_lock_strong(ClassLoaderDataGraph_lock);
_instance = new JfrSymbolTable();
return _instance;
@ -64,26 +64,26 @@ JfrSymbolTable* JfrSymbolTable::create() {
void JfrSymbolTable::destroy() {
assert_lock_strong(ClassLoaderDataGraph_lock);
if (_instance != NULL) {
if (_instance != nullptr) {
delete _instance;
_instance = NULL;
_instance = nullptr;
}
assert(_instance == NULL, "invariant");
assert(_instance == nullptr, "invariant");
}
JfrSymbolTable::JfrSymbolTable() :
_symbols(new Symbols(this)),
_strings(new Strings(this)),
_symbol_list(NULL),
_string_list(NULL),
_symbol_query(NULL),
_string_query(NULL),
_symbol_list(nullptr),
_string_list(nullptr),
_symbol_query(nullptr),
_string_query(nullptr),
_id_counter(1),
_class_unload(false) {
assert(_symbols != NULL, "invariant");
assert(_strings != NULL, "invariant");
assert(_symbols != nullptr, "invariant");
assert(_strings != nullptr, "invariant");
bootstrap = new StringEntry(0, (const char*)&BOOTSTRAP_LOADER_NAME);
assert(bootstrap != NULL, "invariant");
assert(bootstrap != nullptr, "invariant");
bootstrap->set_id(create_symbol_id(1));
_string_list = bootstrap;
}
@ -96,25 +96,25 @@ JfrSymbolTable::~JfrSymbolTable() {
}
void JfrSymbolTable::clear() {
assert(_symbols != NULL, "invariant");
assert(_symbols != nullptr, "invariant");
if (_symbols->has_entries()) {
_symbols->clear_entries();
}
assert(!_symbols->has_entries(), "invariant");
assert(_strings != NULL, "invariant");
assert(_strings != nullptr, "invariant");
if (_strings->has_entries()) {
_strings->clear_entries();
}
assert(!_strings->has_entries(), "invariant");
_symbol_list = NULL;
_symbol_list = nullptr;
_id_counter = 1;
_symbol_query = NULL;
_string_query = NULL;
_symbol_query = nullptr;
_string_query = nullptr;
assert(bootstrap != NULL, "invariant");
assert(bootstrap != nullptr, "invariant");
bootstrap->reset();
_string_list = bootstrap;
}
@ -131,7 +131,7 @@ void JfrSymbolTable::increment_checkpoint_id() {
template <typename T>
inline void JfrSymbolTable::assign_id(T* entry) {
assert(entry != NULL, "invariant");
assert(entry != nullptr, "invariant");
assert(entry->id() == 0, "invariant");
entry->set_id(create_symbol_id(++_id_counter));
}
@ -144,22 +144,22 @@ void JfrSymbolTable::on_link(const SymbolEntry* entry) {
}
bool JfrSymbolTable::on_equals(uintptr_t hash, const SymbolEntry* entry) {
assert(entry != NULL, "invariant");
assert(entry != nullptr, "invariant");
assert(entry->hash() == hash, "invariant");
assert(_symbol_query != NULL, "invariant");
assert(_symbol_query != nullptr, "invariant");
return _symbol_query == entry->literal();
}
void JfrSymbolTable::on_unlink(const SymbolEntry* entry) {
assert(entry != NULL, "invariant");
assert(entry != nullptr, "invariant");
const_cast<Symbol*>(entry->literal())->decrement_refcount();
}
static const char* resource_to_c_heap_string(const char* resource_str) {
assert(resource_str != NULL, "invariant");
assert(resource_str != nullptr, "invariant");
const size_t length = strlen(resource_str);
char* const c_string = JfrCHeapObj::new_array<char>(length + 1);
assert(c_string != NULL, "invariant");
assert(c_string != nullptr, "invariant");
strncpy(c_string, resource_str, length + 1);
return c_string;
}
@ -172,26 +172,26 @@ void JfrSymbolTable::on_link(const StringEntry* entry) {
}
static bool string_compare(const char* query, const char* candidate) {
assert(query != NULL, "invariant");
assert(candidate != NULL, "invariant");
assert(query != nullptr, "invariant");
assert(candidate != nullptr, "invariant");
const size_t length = strlen(query);
return strncmp(query, candidate, length) == 0;
}
bool JfrSymbolTable::on_equals(uintptr_t hash, const StringEntry* entry) {
assert(entry != NULL, "invariant");
assert(entry != nullptr, "invariant");
assert(entry->hash() == hash, "invariant");
assert(_string_query != NULL, "invariant");
assert(_string_query != nullptr, "invariant");
return string_compare(_string_query, entry->literal());
}
void JfrSymbolTable::on_unlink(const StringEntry* entry) {
assert(entry != NULL, "invariant");
assert(entry != nullptr, "invariant");
JfrCHeapObj::free(const_cast<char*>(entry->literal()), strlen(entry->literal() + 1));
}
traceid JfrSymbolTable::bootstrap_name(bool leakp) {
assert(bootstrap != NULL, "invariant");
assert(bootstrap != nullptr, "invariant");
if (leakp) {
bootstrap->set_leakp();
}
@ -199,13 +199,13 @@ traceid JfrSymbolTable::bootstrap_name(bool leakp) {
}
traceid JfrSymbolTable::mark(const Symbol* sym, bool leakp /* false */) {
assert(sym != NULL, "invariant");
assert(sym != nullptr, "invariant");
return mark((uintptr_t)sym->identity_hash(), sym, leakp);
}
traceid JfrSymbolTable::mark(uintptr_t hash, const Symbol* sym, bool leakp) {
assert(sym != NULL, "invariant");
assert(_symbols != NULL, "invariant");
assert(sym != nullptr, "invariant");
assert(_symbols != nullptr, "invariant");
_symbol_query = sym;
const SymbolEntry& entry = _symbols->lookup_put(hash, sym);
if (_class_unload) {
@ -222,8 +222,8 @@ traceid JfrSymbolTable::mark(const char* str, bool leakp /* false*/) {
}
traceid JfrSymbolTable::mark(uintptr_t hash, const char* str, bool leakp) {
assert(str != NULL, "invariant");
assert(_strings != NULL, "invariant");
assert(str != nullptr, "invariant");
assert(_strings != nullptr, "invariant");
_string_query = str;
const StringEntry& entry = _strings->lookup_put(hash, str);
if (_class_unload) {
@ -244,20 +244,20 @@ traceid JfrSymbolTable::mark(uintptr_t hash, const char* str, bool leakp) {
*/
uintptr_t JfrSymbolTable::hidden_klass_name_hash(const InstanceKlass* ik) {
assert(ik != NULL, "invariant");
assert(ik != nullptr, "invariant");
assert(ik->is_hidden(), "invariant");
const oop mirror = ik->java_mirror_no_keepalive();
assert(mirror != NULL, "invariant");
assert(mirror != nullptr, "invariant");
return (uintptr_t)mirror->identity_hash();
}
static const char* create_hidden_klass_symbol(const InstanceKlass* ik, uintptr_t hash) {
assert(ik != NULL, "invariant");
assert(ik != nullptr, "invariant");
assert(ik->is_hidden(), "invariant");
assert(hash != 0, "invariant");
char* hidden_symbol = NULL;
char* hidden_symbol = nullptr;
const oop mirror = ik->java_mirror_no_keepalive();
assert(mirror != NULL, "invariant");
assert(mirror != nullptr, "invariant");
char hash_buf[40];
os::snprintf_checked(hash_buf, sizeof(hash_buf), "/" UINTX_FORMAT, hash);
const size_t hash_len = strlen(hash_buf);
@ -271,12 +271,12 @@ static const char* create_hidden_klass_symbol(const InstanceKlass* ik, uintptr_t
}
bool JfrSymbolTable::is_hidden_klass(const Klass* k) {
assert(k != NULL, "invariant");
assert(k != nullptr, "invariant");
return k->is_instance_klass() && ((const InstanceKlass*)k)->is_hidden();
}
traceid JfrSymbolTable::mark_hidden_klass_name(const InstanceKlass* ik, bool leakp) {
assert(ik != NULL, "invariant");
assert(ik != nullptr, "invariant");
assert(ik->is_hidden(), "invariant");
const uintptr_t hash = hidden_klass_name_hash(ik);
const char* const hidden_symbol = create_hidden_klass_symbol(ik, hash);
@ -284,14 +284,14 @@ traceid JfrSymbolTable::mark_hidden_klass_name(const InstanceKlass* ik, bool lea
}
traceid JfrSymbolTable::mark(const Klass* k, bool leakp) {
assert(k != NULL, "invariant");
assert(k != nullptr, "invariant");
traceid symbol_id = 0;
if (is_hidden_klass(k)) {
assert(k->is_instance_klass(), "invariant");
symbol_id = mark_hidden_klass_name((const InstanceKlass*)k, leakp);
} else {
Symbol* const sym = k->name();
if (sym != NULL) {
if (sym != nullptr) {
symbol_id = mark(sym, leakp);
}
}
@ -301,8 +301,8 @@ traceid JfrSymbolTable::mark(const Klass* k, bool leakp) {
template <typename T>
traceid JfrSymbolTable::add_impl(const T* sym) {
assert(sym != NULL, "invariant");
assert(_instance != NULL, "invariant");
assert(sym != nullptr, "invariant");
assert(_instance != nullptr, "invariant");
assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
return instance().mark(sym);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,10 +32,10 @@ template <typename T, typename IdType>
class ListEntry : public JfrHashtableEntry<T, IdType> {
public:
ListEntry(uintptr_t hash, const T& data) : JfrHashtableEntry<T, IdType>(hash, data),
_list_next(NULL), _serialized(false), _unloading(false), _leakp(false) {}
_list_next(nullptr), _serialized(false), _unloading(false), _leakp(false) {}
const ListEntry<T, IdType>* list_next() const { return _list_next; }
void reset() const {
_list_next = NULL; _serialized = false; _unloading = false; _leakp = false;
_list_next = nullptr; _serialized = false; _unloading = false; _leakp = false;
}
void set_list_next(const ListEntry<T, IdType>* next) const { _list_next = next; }
bool is_serialized() const { return _serialized; }
@ -103,8 +103,8 @@ class JfrSymbolTable : public JfrCHeapObj {
traceid bootstrap_name(bool leakp);
bool has_entries() const { return has_symbol_entries() || has_string_entries(); }
bool has_symbol_entries() const { return _symbol_list != NULL; }
bool has_string_entries() const { return _string_list != NULL; }
bool has_symbol_entries() const { return _symbol_list != nullptr; }
bool has_string_entries() const { return _string_list != nullptr; }
traceid mark_hidden_klass_name(const InstanceKlass* k, bool leakp);
bool is_hidden_klass(const Klass* k);
@ -137,7 +137,7 @@ class JfrSymbolTable : public JfrCHeapObj {
template <typename Functor, typename T>
void iterate(Functor& functor, const T* list) {
const T* symbol = list;
while (symbol != NULL) {
while (symbol != nullptr) {
const T* next = symbol->list_next();
functor(symbol);
symbol = next;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -47,15 +47,15 @@
#include "utilities/sizes.hpp"
JfrThreadLocal::JfrThreadLocal() :
_java_event_writer(NULL),
_java_buffer(NULL),
_native_buffer(NULL),
_shelved_buffer(NULL),
_load_barrier_buffer_epoch_0(NULL),
_load_barrier_buffer_epoch_1(NULL),
_checkpoint_buffer_epoch_0(NULL),
_checkpoint_buffer_epoch_1(NULL),
_stackframes(NULL),
_java_event_writer(nullptr),
_java_buffer(nullptr),
_native_buffer(nullptr),
_shelved_buffer(nullptr),
_load_barrier_buffer_epoch_0(nullptr),
_load_barrier_buffer_epoch_1(nullptr),
_checkpoint_buffer_epoch_0(nullptr),
_checkpoint_buffer_epoch_1(nullptr),
_stackframes(nullptr),
_dcmd_arena(nullptr),
_thread(),
_vthread_id(0),
@ -77,7 +77,7 @@ JfrThreadLocal::JfrThreadLocal() :
_vthread(false),
_dead(false) {
Thread* thread = Thread::current_or_null();
_parent_trace_id = thread != NULL ? jvm_thread_id(thread) : (traceid)0;
_parent_trace_id = thread != nullptr ? jvm_thread_id(thread) : (traceid)0;
}
u8 JfrThreadLocal::add_data_lost(u8 value) {
@ -99,7 +99,7 @@ const JfrBlobHandle& JfrThreadLocal::thread_blob() const {
}
static void send_java_thread_start_event(JavaThread* jt) {
assert(jt != NULL, "invariant");
assert(jt != nullptr, "invariant");
assert(Thread::current() == jt, "invariant");
if (!JfrJavaSupport::on_thread_start(jt)) {
// thread is excluded
@ -146,35 +146,35 @@ void JfrThreadLocal::release(Thread* t) {
if (has_java_event_writer()) {
assert(t->is_Java_thread(), "invariant");
JfrJavaSupport::destroy_global_jni_handle(java_event_writer());
_java_event_writer = NULL;
_java_event_writer = nullptr;
}
if (has_native_buffer()) {
JfrStorage::release_thread_local(native_buffer(), t);
_native_buffer = NULL;
_native_buffer = nullptr;
}
if (has_java_buffer()) {
JfrStorage::release_thread_local(java_buffer(), t);
_java_buffer = NULL;
_java_buffer = nullptr;
}
if (_stackframes != NULL) {
if (_stackframes != nullptr) {
FREE_C_HEAP_ARRAY(JfrStackFrame, _stackframes);
_stackframes = NULL;
_stackframes = nullptr;
}
if (_load_barrier_buffer_epoch_0 != NULL) {
if (_load_barrier_buffer_epoch_0 != nullptr) {
_load_barrier_buffer_epoch_0->set_retired();
_load_barrier_buffer_epoch_0 = NULL;
_load_barrier_buffer_epoch_0 = nullptr;
}
if (_load_barrier_buffer_epoch_1 != NULL) {
if (_load_barrier_buffer_epoch_1 != nullptr) {
_load_barrier_buffer_epoch_1->set_retired();
_load_barrier_buffer_epoch_1 = NULL;
_load_barrier_buffer_epoch_1 = nullptr;
}
if (_checkpoint_buffer_epoch_0 != NULL) {
if (_checkpoint_buffer_epoch_0 != nullptr) {
_checkpoint_buffer_epoch_0->set_retired();
_checkpoint_buffer_epoch_0 = NULL;
_checkpoint_buffer_epoch_0 = nullptr;
}
if (_checkpoint_buffer_epoch_1 != NULL) {
if (_checkpoint_buffer_epoch_1 != nullptr) {
_checkpoint_buffer_epoch_1->set_retired();
_checkpoint_buffer_epoch_1 = NULL;
_checkpoint_buffer_epoch_1 = nullptr;
}
if (_dcmd_arena != nullptr) {
delete _dcmd_arena;
@ -183,17 +183,17 @@ void JfrThreadLocal::release(Thread* t) {
}
void JfrThreadLocal::release(JfrThreadLocal* tl, Thread* t) {
assert(tl != NULL, "invariant");
assert(t != NULL, "invariant");
assert(tl != nullptr, "invariant");
assert(t != nullptr, "invariant");
assert(Thread::current() == t, "invariant");
assert(!tl->is_dead(), "invariant");
assert(tl->shelved_buffer() == NULL, "invariant");
assert(tl->shelved_buffer() == nullptr, "invariant");
tl->_dead = true;
tl->release(t);
}
static void send_java_thread_end_event(JavaThread* jt, traceid tid) {
assert(jt != NULL, "invariant");
assert(jt != nullptr, "invariant");
assert(Thread::current() == jt, "invariant");
assert(tid != 0, "invariant");
if (JfrRecorder::is_recording()) {
@ -205,7 +205,7 @@ static void send_java_thread_end_event(JavaThread* jt, traceid tid) {
}
void JfrThreadLocal::on_exit(Thread* t) {
assert(t != NULL, "invariant");
assert(t != nullptr, "invariant");
JfrThreadLocal * const tl = t->jfr_thread_local();
assert(!tl->is_dead(), "invariant");
if (JfrRecorder::is_recording()) {
@ -237,7 +237,7 @@ JfrBuffer* JfrThreadLocal::install_java_buffer() const {
}
JfrStackFrame* JfrThreadLocal::install_stackframes() const {
assert(_stackframes == NULL, "invariant");
assert(_stackframes == nullptr, "invariant");
_stackframes = NEW_C_HEAP_ARRAY(JfrStackFrame, stackdepth(), mtTracing);
return _stackframes;
}
@ -327,14 +327,14 @@ bool JfrThreadLocal::is_impersonating(const Thread* t) {
}
void JfrThreadLocal::impersonate(const Thread* t, traceid other_thread_id) {
assert(t != NULL, "invariant");
assert(t != nullptr, "invariant");
assert(other_thread_id != 0, "invariant");
JfrThreadLocal* const tl = t->jfr_thread_local();
tl->_thread_id_alias = other_thread_id;
}
void JfrThreadLocal::stop_impersonating(const Thread* t) {
assert(t != NULL, "invariant");
assert(t != nullptr, "invariant");
JfrThreadLocal* const tl = t->jfr_thread_local();
if (is_impersonating(t)) {
tl->_thread_id_alias = max_julong;
@ -380,7 +380,7 @@ u2 JfrThreadLocal::vthread_epoch(const JavaThread* jt) {
}
traceid JfrThreadLocal::thread_id(const Thread* t) {
assert(t != NULL, "invariant");
assert(t != nullptr, "invariant");
if (is_impersonating(t)) {
return t->jfr_thread_local()->_thread_id_alias;
}
@ -404,7 +404,7 @@ traceid JfrThreadLocal::thread_id(const Thread* t) {
// When not recording, there is no checkpoint system
// in place for writing vthread information.
traceid JfrThreadLocal::external_thread_id(const Thread* t) {
assert(t != NULL, "invariant");
assert(t != nullptr, "invariant");
return JfrRecorder::is_recording() ? thread_id(t) : jvm_thread_id(t);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -92,11 +92,11 @@ class JfrThreadLocal {
JfrThreadLocal();
JfrBuffer* native_buffer() const {
return _native_buffer != NULL ? _native_buffer : install_native_buffer();
return _native_buffer != nullptr ? _native_buffer : install_native_buffer();
}
bool has_native_buffer() const {
return _native_buffer != NULL;
return _native_buffer != nullptr;
}
void set_native_buffer(JfrBuffer* buffer) {
@ -104,11 +104,11 @@ class JfrThreadLocal {
}
JfrBuffer* java_buffer() const {
return _java_buffer != NULL ? _java_buffer : install_java_buffer();
return _java_buffer != nullptr ? _java_buffer : install_java_buffer();
}
bool has_java_buffer() const {
return _java_buffer != NULL;
return _java_buffer != nullptr;
}
void set_java_buffer(JfrBuffer* buffer) {
@ -124,7 +124,7 @@ class JfrThreadLocal {
}
bool has_java_event_writer() const {
return _java_event_writer != NULL;
return _java_event_writer != nullptr;
}
jobject java_event_writer() {
@ -136,7 +136,7 @@ class JfrThreadLocal {
}
JfrStackFrame* stackframes() const {
return _stackframes != NULL ? _stackframes : install_stackframes();
return _stackframes != nullptr ? _stackframes : install_stackframes();
}
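The accessors above install their backing resource on first use and pair each with a has_*() query that does not trigger installation. A generic sketch of that lazy-install pattern; the type and helper names below are placeholders, not JFR names, and the allocation strategy is assumed:
// Hedged sketch of the lazy-install accessor pattern used by JfrThreadLocal.
struct Buffer {};
class LazyHolder {
  mutable Buffer* _buffer = nullptr;
  Buffer* install_buffer() const { return _buffer = new Buffer(); } // assumed allocation
 public:
  Buffer* buffer() const { return _buffer != nullptr ? _buffer : install_buffer(); }
  bool has_buffer() const { return _buffer != nullptr; }
  ~LazyHolder() { delete _buffer; }
};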
void set_stackframes(JfrStackFrame* frames) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -77,7 +77,7 @@ static void hook_memory_deallocation(size_t dealloc_size) {
#endif // ASSERT
static void hook_memory_allocation(const char* allocation, size_t alloc_size) {
if (NULL == allocation) {
if (nullptr == allocation) {
if (!JfrRecorder::is_created()) {
log_warning(jfr, system)("Memory allocation failed for size [" SIZE_FORMAT "] bytes", alloc_size);
return;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -84,7 +84,7 @@ inline u8 JfrBigEndian::read_bytes(const address location) {
template <typename T>
inline T JfrBigEndian::read_unaligned(const address location) {
assert(location != NULL, "just checking");
assert(location != nullptr, "just checking");
switch (sizeof(T)) {
case sizeof(u1) :
return read_bytes<u1>(location);
@ -113,7 +113,7 @@ inline bool JfrBigEndian::platform_supports_unaligned_reads(void) {
template<typename T>
inline T JfrBigEndian::read(const void* location) {
assert(location != NULL, "just checking");
assert(location != nullptr, "just checking");
assert(sizeof(T) <= sizeof(u8), "no support for arbitrary sizes");
if (sizeof(T) == sizeof(u1)) {
return *(T*)location;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,7 +30,7 @@ JfrBlob::JfrBlob(const u1* checkpoint, size_t size) :
_next(),
_size(size),
_written(false) {
assert(_data != NULL, "invariant");
assert(_data != nullptr, "invariant");
memcpy(const_cast<u1*>(_data), checkpoint, size);
}
@ -62,6 +62,6 @@ void JfrBlob::set_next(const JfrBlobHandle& ref) {
JfrBlobHandle JfrBlob::make(const u1* data, size_t size) {
const JfrBlob* const blob = new JfrBlob(data, size);
assert(blob != NULL, "invariant");
assert(blob != nullptr, "invariant");
return JfrBlobReference::make(blob);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -89,7 +89,7 @@ class JfrConcurrentLinkedListHost : public AllocPolicy {
bool initialize();
void insert_head(NodePtr node, NodePtr head, ConstNodePtr tail) const;
void insert_tail(NodePtr node, NodePtr head, NodePtr last, ConstNodePtr tail) const;
NodePtr remove(NodePtr head, ConstNodePtr tail, NodePtr last = NULL, bool insert_is_head = true);
NodePtr remove(NodePtr head, ConstNodePtr tail, NodePtr last = nullptr, bool insert_is_head = true);
template <typename Callback>
void iterate(NodePtr head, ConstNodePtr tail, Callback& cb);
bool in_list(ConstNodePtr node, NodePtr head, ConstNodePtr tail) const;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,11 +38,11 @@
*/
template <typename Node>
inline Node* mark_for_removal(Node* node) {
assert(node != NULL, "invariant");
assert(node != nullptr, "invariant");
const Node* next = node->_next;
assert(next != NULL, "invariant");
assert(next != nullptr, "invariant");
Node* const unmasked_next = unmask(next);
return next == unmasked_next && cas(&node->_next, unmasked_next, set_excision_bit(unmasked_next)) ? unmasked_next : NULL;
return next == unmasked_next && cas(&node->_next, unmasked_next, set_excision_bit(unmasked_next)) ? unmasked_next : nullptr;
}
/*
@ -51,7 +51,7 @@ inline Node* mark_for_removal(Node* node) {
*/
template <typename Node>
inline bool mark_for_insertion(Node* node, const Node* tail) {
assert(node != NULL, "invariant");
assert(node != nullptr, "invariant");
return node->_next == tail && cas(&node->_next, const_cast<Node*>(tail), set_insertion_bit(tail));
}
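mark_for_removal and mark_for_insertion above tag a node's next pointer with a low bit to logically excise or reserve it before any physical unlinking happens. A standalone sketch of the excision-bit CAS, using std::atomic instead of HotSpot's Atomic/cas helpers; the names mirror the templates above, but this is not the HotSpot code:
#include <atomic>
#include <cstdint>
// Hedged sketch: the low bit of _next marks a node as logically removed.
struct Node {
  std::atomic<Node*> _next{nullptr};
};
inline Node* unmask(Node* p) {
  return reinterpret_cast<Node*>(reinterpret_cast<std::uintptr_t>(p) & ~std::uintptr_t(1));
}
inline Node* set_excision_bit(Node* p) {
  return reinterpret_cast<Node*>(reinterpret_cast<std::uintptr_t>(p) | std::uintptr_t(1));
}
// Succeeds only if the next pointer is currently unmarked; returns the successor
// on success, nullptr if another thread marked the node first.
inline Node* mark_for_removal_sketch(Node* node) {
  Node* const next = node->_next.load(std::memory_order_acquire);
  Node* const unmasked_next = unmask(next);
  if (next != unmasked_next) {
    return nullptr;              // already marked by a concurrent operation
  }
  Node* expected = unmasked_next;
  return node->_next.compare_exchange_strong(expected, set_excision_bit(unmasked_next))
             ? unmasked_next
             : nullptr;
}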
@ -60,16 +60,16 @@ inline bool mark_for_insertion(Node* node, const Node* tail) {
*/
template <typename Node, typename VersionHandle, template <typename> class SearchPolicy>
Node* find_adjacent(Node* head, const Node* tail, Node** predecessor, VersionHandle& version_handle, SearchPolicy<Node>& predicate) {
assert(head != NULL, "invariant");
assert(tail != NULL, "invariant");
assert(head != nullptr, "invariant");
assert(tail != nullptr, "invariant");
assert(head != tail, "invariant");
Node* predecessor_next = NULL;
Node* predecessor_next = nullptr;
while (true) {
Node* current = head;
version_handle->checkout();
Node* next = Atomic::load_acquire(&current->_next);
do {
assert(next != NULL, "invariant");
assert(next != nullptr, "invariant");
Node* const unmasked_next = unmask(next);
// 1A: Locate the first node to keep as predecessor.
if (!is_marked_for_removal(next)) {
@ -133,10 +133,10 @@ void JfrConcurrentLinkedListHost<Client, SearchPolicy, AllocPolicy>::insert_tail
typename Client::Node* head,
typename Client::Node* last,
const typename Client::Node* tail) const {
assert(node != NULL, "invariant");
assert(head != NULL, "invariant");
assert(last != NULL, "invarinat");
assert(tail != NULL, "invariant");
assert(node != nullptr, "invariant");
assert(head != nullptr, "invariant");
assert(last != nullptr, "invariant");
assert(tail != nullptr, "invariant");
// Mark the new node to be inserted with the insertion marker already.
node->_next = set_insertion_bit(const_cast<NodePtr>(tail));
// Invariant: [node]--> tail
@ -188,10 +188,10 @@ void JfrConcurrentLinkedListHost<Client, SearchPolicy, AllocPolicy>::insert_tail
template <typename Client, template <typename> class SearchPolicy, typename AllocPolicy>
typename Client::Node* JfrConcurrentLinkedListHost<Client, SearchPolicy, AllocPolicy>::remove(typename Client::Node* head,
const typename Client::Node* tail,
typename Client::Node* last /* NULL */,
typename Client::Node* last /* nullptr */,
bool insert_is_head /* true */) {
assert(head != NULL, "invariant");
assert(tail != NULL, "invariant");
assert(head != nullptr, "invariant");
assert(tail != nullptr, "invariant");
assert(head != tail, "invariant");
NodePtr predecessor;
NodePtr successor;
@ -202,14 +202,14 @@ typename Client::Node* JfrConcurrentLinkedListHost<Client, SearchPolicy, AllocPo
// Find an adjacent predecessor and successor node pair.
successor = find_adjacent<Node, VersionHandle, SearchPolicy>(head, tail, &predecessor, version_handle, predicate);
if (successor == tail) {
return NULL;
return nullptr;
}
// Invariant: predecessor --> successor
// Invariant (optional: key-based total order): predecessor->key() < key && key <= successor->key()
// It is the successor node that is to be removed.
// We first attempt to reserve (logically excise) the successor node.
successor_next = mark_for_removal(successor);
if (successor_next != NULL) {
if (successor_next != nullptr) {
break;
}
}
@ -225,7 +225,7 @@ typename Client::Node* JfrConcurrentLinkedListHost<Client, SearchPolicy, AllocPo
Identity<Node> excise(successor);
find_adjacent<Node, VersionHandle, Identity>(head, tail, &predecessor, version_handle, excise);
}
if (last != NULL && Atomic::load_acquire(&last->_next) == successor) {
if (last != nullptr && Atomic::load_acquire(&last->_next) == successor) {
guarantee(!insert_is_head, "invariant");
guarantee(successor_next == tail, "invariant");
LastNode<Node> excise;
@ -243,8 +243,8 @@ template <typename Client, template <typename> class SearchPolicy, typename Allo
bool JfrConcurrentLinkedListHost<Client, SearchPolicy, AllocPolicy>::in_list(const typename Client::Node* node,
typename Client::Node* head,
const typename Client::Node* tail) const {
assert(head != NULL, "invariant");
assert(tail != NULL, "invariant");
assert(head != nullptr, "invariant");
assert(tail != nullptr, "invariant");
assert(head != tail, "invariant");
VersionHandle version_handle = _client->get_version_handle();
const Node* current = head;
@ -268,8 +268,8 @@ template <typename Callback>
inline void JfrConcurrentLinkedListHost<Client, SearchPolicy, AllocPolicy>::iterate(typename Client::Node* head,
const typename Client::Node* tail,
Callback& cb) {
assert(head != NULL, "invariant");
assert(tail != NULL, "invariant");
assert(head != nullptr, "invariant");
assert(tail != nullptr, "invariant");
assert(head != tail, "invariant");
VersionHandle version_handle = _client->get_version_handle();
NodePtr current = head;

Some files were not shown because too many files have changed in this diff.