8283710: JVMTI: Use BitSet for object marking

Reviewed-by: stuefe, coleenp
Roman Kennke 2022-04-11 14:50:29 +00:00
parent 7edd186121
commit abfd2f98dc
10 changed files with 194 additions and 229 deletions
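This change replaces JVMTI's header-based ObjectMarker with a side bitmap. The JFR leak profiler's BitSet is generalized into a templated ObjectBitSet<MEMFLAGS> under share/utilities, and both JFR (as JFRBitSet) and the JVMTI tag map (as JVMTIBitSet) instantiate it with their own memory tag. Marking no longer mutates object headers, so the save/restore machinery for "interesting" headers and the whole-heap pass to reset mark bits both disappear. A simplified sketch of the two idioms (not the exact HotSpot code; save_header is a hypothetical helper):

// Before: mark by setting the mark bit in the object's header, saving
// headers that must be preserved (identity hash, locking state, ...)
// so they can be restored after the heap walk.
void mark_old(oop o) {
  markWord mark = o->mark();
  if (o->mark_must_be_preserved(mark)) {
    save_header(o, mark);                        // hypothetical helper
  }
  o->set_mark(markWord::prototype().set_marked());
}

// After: mark in a side bitmap; headers are never touched and nothing
// needs to be restored when the walk finishes.
JVMTIBitSet bitset;                              // ObjectBitSet<mtServiceability>
void mark_new(oop o)  { bitset.mark_obj(o); }
bool visited(oop o)   { return bitset.is_marked(o); }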

src/hotspot/share/jfr/leakprofiler/chains/bfsClosure.cpp

@@ -22,12 +22,12 @@
*
*/
#include "precompiled.hpp"
#include "jfr/leakprofiler/chains/bitset.inline.hpp"
#include "jfr/leakprofiler/chains/bfsClosure.hpp"
#include "jfr/leakprofiler/chains/dfsClosure.hpp"
#include "jfr/leakprofiler/chains/edge.hpp"
#include "jfr/leakprofiler/chains/edgeStore.hpp"
#include "jfr/leakprofiler/chains/edgeQueue.hpp"
#include "jfr/leakprofiler/chains/jfrbitset.hpp"
#include "jfr/leakprofiler/utilities/granularTimer.hpp"
#include "jfr/leakprofiler/utilities/unifiedOopRef.inline.hpp"
#include "logging/log.hpp"
@@ -37,7 +37,7 @@
#include "oops/oop.inline.hpp"
#include "utilities/align.hpp"
BFSClosure::BFSClosure(EdgeQueue* edge_queue, EdgeStore* edge_store, BitSet* mark_bits) :
BFSClosure::BFSClosure(EdgeQueue* edge_queue, EdgeStore* edge_store, JFRBitSet* mark_bits) :
_edge_queue(edge_queue),
_edge_store(edge_store),
_mark_bits(mark_bits),

src/hotspot/share/jfr/leakprofiler/chains/bfsClosure.hpp

@@ -25,10 +25,10 @@
#ifndef SHARE_JFR_LEAKPROFILER_CHAINS_BFSCLOSURE_HPP
#define SHARE_JFR_LEAKPROFILER_CHAINS_BFSCLOSURE_HPP
#include "jfr/leakprofiler/chains/jfrbitset.hpp"
#include "jfr/leakprofiler/utilities/unifiedOopRef.hpp"
#include "memory/iterator.hpp"
class BitSet;
class Edge;
class EdgeStore;
class EdgeQueue;
@@ -38,7 +38,7 @@ class BFSClosure : public BasicOopIterateClosure {
private:
EdgeQueue* _edge_queue;
EdgeStore* _edge_store;
BitSet* _mark_bits;
JFRBitSet* _mark_bits;
const Edge* _current_parent;
mutable size_t _current_frontier_level;
mutable size_t _next_frontier_idx;
@@ -65,7 +65,7 @@ class BFSClosure : public BasicOopIterateClosure {
public:
virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS_EXCEPT_REFERENT; }
BFSClosure(EdgeQueue* edge_queue, EdgeStore* edge_store, BitSet* mark_bits);
BFSClosure(EdgeQueue* edge_queue, EdgeStore* edge_store, JFRBitSet* mark_bits);
void process();
void do_root(UnifiedOopRef ref);

src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.cpp

@@ -23,10 +23,10 @@
*/
#include "precompiled.hpp"
#include "jfr/leakprofiler/chains/bitset.inline.hpp"
#include "jfr/leakprofiler/chains/dfsClosure.hpp"
#include "jfr/leakprofiler/chains/edge.hpp"
#include "jfr/leakprofiler/chains/edgeStore.hpp"
#include "jfr/leakprofiler/chains/jfrbitset.hpp"
#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
#include "jfr/leakprofiler/utilities/granularTimer.hpp"
#include "jfr/leakprofiler/utilities/rootType.hpp"
@@ -40,7 +40,7 @@
UnifiedOopRef DFSClosure::_reference_stack[max_dfs_depth];
void DFSClosure::find_leaks_from_edge(EdgeStore* edge_store,
BitSet* mark_bits,
JFRBitSet* mark_bits,
const Edge* start_edge) {
assert(edge_store != NULL, "invariant");
assert(mark_bits != NULL, "invariant");
@@ -52,7 +52,7 @@ void DFSClosure::find_leaks_from_edge(EdgeStore* edge_store,
}
void DFSClosure::find_leaks_from_root_set(EdgeStore* edge_store,
BitSet* mark_bits) {
JFRBitSet* mark_bits) {
assert(edge_store != NULL, "invariant");
assert(mark_bits != NULL, "invariant");
@@ -68,7 +68,7 @@ void DFSClosure::find_leaks_from_root_set(EdgeStore* edge_store,
rs.process();
}
DFSClosure::DFSClosure(EdgeStore* edge_store, BitSet* mark_bits, const Edge* start_edge)
DFSClosure::DFSClosure(EdgeStore* edge_store, JFRBitSet* mark_bits, const Edge* start_edge)
:_edge_store(edge_store), _mark_bits(mark_bits), _start_edge(start_edge),
_max_depth(max_dfs_depth), _depth(0), _ignore_root_set(false) {
}

src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.hpp

@@ -25,10 +25,10 @@
#ifndef SHARE_JFR_LEAKPROFILER_CHAINS_DFSCLOSURE_HPP
#define SHARE_JFR_LEAKPROFILER_CHAINS_DFSCLOSURE_HPP
#include "jfr/leakprofiler/chains/jfrbitset.hpp"
#include "jfr/leakprofiler/utilities/unifiedOopRef.hpp"
#include "memory/iterator.hpp"
class BitSet;
class Edge;
class EdgeStore;
class EdgeQueue;
@@ -41,13 +41,13 @@ class DFSClosure : public BasicOopIterateClosure {
static UnifiedOopRef _reference_stack[max_dfs_depth];
EdgeStore* _edge_store;
BitSet* _mark_bits;
JFRBitSet* _mark_bits;
const Edge* _start_edge;
size_t _max_depth;
size_t _depth;
bool _ignore_root_set;
DFSClosure(EdgeStore* edge_store, BitSet* mark_bits, const Edge* start_edge);
DFSClosure(EdgeStore* edge_store, JFRBitSet* mark_bits, const Edge* start_edge);
void add_chain();
void closure_impl(UnifiedOopRef reference, const oop pointee);
@@ -55,8 +55,8 @@ class DFSClosure : public BasicOopIterateClosure {
public:
virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS_EXCEPT_REFERENT; }
static void find_leaks_from_edge(EdgeStore* edge_store, BitSet* mark_bits, const Edge* start_edge);
static void find_leaks_from_root_set(EdgeStore* edge_store, BitSet* mark_bits);
static void find_leaks_from_edge(EdgeStore* edge_store, JFRBitSet* mark_bits, const Edge* start_edge);
static void find_leaks_from_root_set(EdgeStore* edge_store, JFRBitSet* mark_bits);
void do_root(UnifiedOopRef ref);
virtual void do_oop(oop* ref);

src/hotspot/share/jfr/leakprofiler/chains/bitset.cpp → src/hotspot/share/jfr/leakprofiler/chains/jfrbitset.hpp

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022 Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -21,37 +21,13 @@
* questions.
*
*/
#include "precompiled.hpp"
#include "jfr/leakprofiler/chains/bitset.inline.hpp"
BitSet::BitMapFragment::BitMapFragment(uintptr_t granule, BitMapFragment* next) :
_bits(_bitmap_granularity_size >> LogMinObjAlignmentInBytes, mtTracing, true /* clear */),
_next(next) {
}
#ifndef SHARE_JFR_LEAKPROFILER_JFRBITMAP_HPP
#define SHARE_JFR_LEAKPROFILER_JFRBITMAP_HPP
BitSet::BitMapFragmentTable::~BitMapFragmentTable() {
for (int index = 0; index < table_size(); index ++) {
Entry* e = bucket(index);
while (e != nullptr) {
Entry* tmp = e;
e = e->next();
free_entry(tmp);
}
}
}
#include "memory/allocation.hpp"
#include "utilities/objectBitSet.inline.hpp"
BitSet::BitSet() :
_bitmap_fragments(32),
_fragment_list(NULL),
_last_fragment_bits(NULL),
_last_fragment_granule(UINTPTR_MAX) {
}
typedef ObjectBitSet<mtTracing> JFRBitSet;
BitSet::~BitSet() {
BitMapFragment* current = _fragment_list;
while (current != NULL) {
BitMapFragment* next = current->next();
delete current;
current = next;
}
}
#endif // SHARE_JFR_LEAKPROFILER_JFRBITMAP_HPP
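With the fragment machinery moved to share/utilities, this header reduces to a subsystem alias: the template parameter only selects the NMT memory tag that allocations are charged to. The two instantiations introduced by this commit, as they appear in the diff:

typedef ObjectBitSet<mtTracing> JFRBitSet;            // JFR leak profiler
typedef ObjectBitSet<mtServiceability> JVMTIBitSet;   // JVMTI tag map (jvmtiTagMap.cpp)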

src/hotspot/share/jfr/leakprofiler/chains/pathToGcRootsOperation.cpp

@@ -27,11 +27,11 @@
#include "gc/shared/gc_globals.hpp"
#include "jfr/leakprofiler/leakProfiler.hpp"
#include "jfr/leakprofiler/chains/bfsClosure.hpp"
#include "jfr/leakprofiler/chains/bitset.inline.hpp"
#include "jfr/leakprofiler/chains/dfsClosure.hpp"
#include "jfr/leakprofiler/chains/edge.hpp"
#include "jfr/leakprofiler/chains/edgeQueue.hpp"
#include "jfr/leakprofiler/chains/edgeStore.hpp"
#include "jfr/leakprofiler/chains/jfrbitset.hpp"
#include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
#include "jfr/leakprofiler/chains/edgeStore.hpp"
@@ -84,7 +84,7 @@ void PathToGcRootsOperation::doit() {
assert(_cutoff_ticks > 0, "invariant");
// The bitset used for marking is dimensioned as a function of the heap size
BitSet mark_bits;
JFRBitSet mark_bits;
// The edge queue is dimensioned as a fraction of the heap size
const size_t edge_queue_reservation_size = edge_queue_memory_reservation();

src/hotspot/share/prims/jvmtiTagMap.cpp

@@ -65,8 +65,11 @@
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"
#include "utilities/objectBitSet.inline.hpp"
#include "utilities/macros.hpp"
typedef ObjectBitSet<mtServiceability> JVMTIBitSet;
bool JvmtiTagMap::_has_object_free_events = false;
// create a JvmtiTagMap
@@ -1331,134 +1334,6 @@ jvmtiError JvmtiTagMap::get_objects_with_tags(const jlong* tags,
return collector.result(count_ptr, object_result_ptr, tag_result_ptr);
}
// ObjectMarker is used to support the marking objects when walking the
// heap.
//
// This implementation uses the existing mark bits in an object for
// marking. Objects that are marked must later have their headers restored.
// As most objects are unlocked and don't have their identity hash computed
// we don't have to save their headers. Instead we save the headers that
// are "interesting". Later when the headers are restored this implementation
// restores all headers to their initial value and then restores the few
// objects that had interesting headers.
//
// Future work: This implementation currently uses growable arrays to save
// the oop and header of interesting objects. As an optimization we could
// use the same technique as the GC and make use of the unused area
// between top() and end().
//
// An ObjectClosure used to restore the mark bits of an object
class RestoreMarksClosure : public ObjectClosure {
public:
void do_object(oop o) {
if (o != NULL) {
markWord mark = o->mark();
if (mark.is_marked()) {
o->init_mark();
}
}
}
};
// ObjectMarker provides the mark and visited functions
class ObjectMarker : AllStatic {
private:
// saved headers
static GrowableArray<oop>* _saved_oop_stack;
static GrowableArray<markWord>* _saved_mark_stack;
static bool _needs_reset; // do we need to reset mark bits?
public:
static void init(); // initialize
static void done(); // clean-up
static inline void mark(oop o); // mark an object
static inline bool visited(oop o); // check if object has been visited
static inline bool needs_reset() { return _needs_reset; }
static inline void set_needs_reset(bool v) { _needs_reset = v; }
};
GrowableArray<oop>* ObjectMarker::_saved_oop_stack = NULL;
GrowableArray<markWord>* ObjectMarker::_saved_mark_stack = NULL;
bool ObjectMarker::_needs_reset = true; // need to reset mark bits by default
// initialize ObjectMarker - prepares for object marking
void ObjectMarker::init() {
assert(Thread::current()->is_VM_thread(), "must be VMThread");
assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
// prepare heap for iteration
Universe::heap()->ensure_parsability(false); // no need to retire TLABs
// create stacks for interesting headers
_saved_mark_stack = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<markWord>(4000, mtServiceability);
_saved_oop_stack = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<oop>(4000, mtServiceability);
}
// Object marking is done so restore object headers
void ObjectMarker::done() {
// iterate over all objects and restore the mark bits to
// their initial value
RestoreMarksClosure blk;
if (needs_reset()) {
Universe::heap()->object_iterate(&blk);
} else {
// We don't need to reset mark bits on this call, but reset the
// flag to the default for the next call.
set_needs_reset(true);
}
// now restore the interesting headers
for (int i = 0; i < _saved_oop_stack->length(); i++) {
oop o = _saved_oop_stack->at(i);
markWord mark = _saved_mark_stack->at(i);
o->set_mark(mark);
}
// free the stacks
delete _saved_oop_stack;
delete _saved_mark_stack;
}
// mark an object
inline void ObjectMarker::mark(oop o) {
assert(Universe::heap()->is_in(o), "sanity check");
assert(!o->mark().is_marked(), "should only mark an object once");
// object's mark word
markWord mark = o->mark();
if (o->mark_must_be_preserved(mark)) {
_saved_mark_stack->push(mark);
_saved_oop_stack->push(o);
}
// mark the object
o->set_mark(markWord::prototype().set_marked());
}
// return true if object is marked
inline bool ObjectMarker::visited(oop o) {
return o->mark().is_marked();
}
// Stack allocated class to help ensure that ObjectMarker is used
// correctly. Constructor initializes ObjectMarker, destructor calls
// ObjectMarker's done() function to restore object headers.
class ObjectMarkerController : public StackObj {
public:
ObjectMarkerController() {
ObjectMarker::init();
}
~ObjectMarkerController() {
ObjectMarker::done();
}
};
// helper to map a jvmtiHeapReferenceKind to an old style jvmtiHeapRootKind
// (not performance critical as only used for roots)
static jvmtiHeapRootKind toJvmtiHeapRootKind(jvmtiHeapReferenceKind kind) {
@@ -1591,6 +1466,7 @@ class CallbackInvoker : AllStatic {
static JvmtiTagMap* _tag_map;
static const void* _user_data;
static GrowableArray<oop>* _visit_stack;
static JVMTIBitSet* _bitset;
// accessors
static JvmtiTagMap* tag_map() { return _tag_map; }
@@ -1600,7 +1476,7 @@ class CallbackInvoker : AllStatic {
// if the object hasn't been visited then push it onto the visit stack
// so that it will be visited later
static inline bool check_for_visit(oop obj) {
if (!ObjectMarker::visited(obj)) visit_stack()->push(obj);
if (!_bitset->is_marked(obj)) visit_stack()->push(obj);
return true;
}
@@ -1631,13 +1507,15 @@
static void initialize_for_basic_heap_walk(JvmtiTagMap* tag_map,
GrowableArray<oop>* visit_stack,
const void* user_data,
BasicHeapWalkContext context);
BasicHeapWalkContext context,
JVMTIBitSet* bitset);
// initialize for advanced mode
static void initialize_for_advanced_heap_walk(JvmtiTagMap* tag_map,
GrowableArray<oop>* visit_stack,
const void* user_data,
AdvancedHeapWalkContext context);
AdvancedHeapWalkContext context,
JVMTIBitSet* bitset);
// functions to report roots
static inline bool report_simple_root(jvmtiHeapReferenceKind kind, oop o);
@@ -1670,31 +1548,36 @@ AdvancedHeapWalkContext CallbackInvoker::_advanced_context;
JvmtiTagMap* CallbackInvoker::_tag_map;
const void* CallbackInvoker::_user_data;
GrowableArray<oop>* CallbackInvoker::_visit_stack;
JVMTIBitSet* CallbackInvoker::_bitset;
// initialize for basic heap walk (IterateOverReachableObjects et al)
void CallbackInvoker::initialize_for_basic_heap_walk(JvmtiTagMap* tag_map,
GrowableArray<oop>* visit_stack,
const void* user_data,
BasicHeapWalkContext context) {
BasicHeapWalkContext context,
JVMTIBitSet* bitset) {
_tag_map = tag_map;
_visit_stack = visit_stack;
_user_data = user_data;
_basic_context = context;
_advanced_context.invalidate(); // will trigger assertion if used
_heap_walk_type = basic;
_bitset = bitset;
}
// initialize for advanced heap walk (FollowReferences)
void CallbackInvoker::initialize_for_advanced_heap_walk(JvmtiTagMap* tag_map,
GrowableArray<oop>* visit_stack,
const void* user_data,
AdvancedHeapWalkContext context) {
AdvancedHeapWalkContext context,
JVMTIBitSet* bitset) {
_tag_map = tag_map;
_visit_stack = visit_stack;
_user_data = user_data;
_advanced_context = context;
_basic_context.invalidate(); // will trigger assertion if used
_heap_walk_type = advanced;
_bitset = bitset;
}
@@ -2366,6 +2249,8 @@ class VM_HeapWalkOperation: public VM_Operation {
Handle _initial_object;
GrowableArray<oop>* _visit_stack; // the visit stack
JVMTIBitSet _bitset;
bool _following_object_refs; // are we following object references
bool _reporting_primitive_fields; // optional reporting
@@ -2434,8 +2319,7 @@ VM_HeapWalkOperation::VM_HeapWalkOperation(JvmtiTagMap* tag_map,
_reporting_string_values = false;
_visit_stack = create_visit_stack();
CallbackInvoker::initialize_for_basic_heap_walk(tag_map, _visit_stack, user_data, callbacks);
CallbackInvoker::initialize_for_basic_heap_walk(tag_map, _visit_stack, user_data, callbacks, &_bitset);
}
VM_HeapWalkOperation::VM_HeapWalkOperation(JvmtiTagMap* tag_map,
@@ -2450,8 +2334,7 @@ VM_HeapWalkOperation::VM_HeapWalkOperation(JvmtiTagMap* tag_map,
_reporting_primitive_array_values = (callbacks.array_primitive_value_callback() != NULL);;
_reporting_string_values = (callbacks.string_primitive_value_callback() != NULL);;
_visit_stack = create_visit_stack();
CallbackInvoker::initialize_for_advanced_heap_walk(tag_map, _visit_stack, user_data, callbacks);
CallbackInvoker::initialize_for_advanced_heap_walk(tag_map, _visit_stack, user_data, callbacks, &_bitset);
}
VM_HeapWalkOperation::~VM_HeapWalkOperation() {
@@ -2887,8 +2770,8 @@ inline bool VM_HeapWalkOperation::collect_stack_roots() {
//
bool VM_HeapWalkOperation::visit(oop o) {
// mark object as visited
assert(!ObjectMarker::visited(o), "can't visit same object more than once");
ObjectMarker::mark(o);
assert(!_bitset.is_marked(o), "can't visit same object more than once");
_bitset.mark_obj(o);
// instance
if (o->is_instance()) {
@@ -2917,7 +2800,6 @@ bool VM_HeapWalkOperation::visit(oop o) {
void VM_HeapWalkOperation::doit() {
ResourceMark rm;
ObjectMarkerController marker;
ClassFieldMapCacheMark cm;
JvmtiTagMap::check_hashmaps_for_heapwalk();
@@ -2926,20 +2808,11 @@ void VM_HeapWalkOperation::doit() {
// the heap walk starts with an initial object or the heap roots
if (initial_object().is_null()) {
// If either collect_stack_roots() or collect_simple_roots()
// returns false at this point, then there are no mark bits
// to reset.
ObjectMarker::set_needs_reset(false);
// Calling collect_stack_roots() before collect_simple_roots()
// can result in a big performance boost for an agent that is
// focused on analyzing references in the thread stacks.
if (!collect_stack_roots()) return;
if (!collect_simple_roots()) return;
// no early return so enable heap traversal to reset the mark bits
ObjectMarker::set_needs_reset(true);
} else {
visit_stack()->push(initial_object()());
}
@@ -2951,7 +2824,7 @@
// visited or the callback asked to terminate the iteration.
while (!visit_stack()->is_empty()) {
oop o = visit_stack()->pop();
if (!ObjectMarker::visited(o)) {
if (!_bitset.is_marked(o)) {
if (!visit(o)) {
break;
}
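Taken together, the jvmtiTagMap.cpp changes delete roughly 130 lines of ObjectMarker machinery: no RestoreMarksClosure, no ObjectMarkerController, and no needs_reset() bookkeeping, because there are no header mark bits to undo; the bitset is simply destroyed with the VM_HeapWalkOperation. A condensed view of the resulting walk loop, using the names from the diff (sketch only):

while (!visit_stack()->is_empty()) {
  oop o = visit_stack()->pop();
  if (!_bitset.is_marked(o)) {   // skip objects that were already reached
    if (!visit(o)) {             // visit() marks o and pushes its references
      break;                     // callback requested termination
    }
  }
}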

src/hotspot/share/jfr/leakprofiler/chains/bitset.hpp → src/hotspot/share/utilities/objectBitSet.hpp

@@ -22,8 +22,8 @@
*
*/
#ifndef SHARE_JFR_LEAKPROFILER_CHAINS_BITSET_HPP
#define SHARE_JFR_LEAKPROFILER_CHAINS_BITSET_HPP
#ifndef SHARE_UTILITIES_OBJECTBITSET_HPP
#define SHARE_UTILITIES_OBJECTBITSET_HPP
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
@@ -31,24 +31,30 @@
#include "utilities/bitMap.hpp"
#include "utilities/hashtable.hpp"
class JfrVirtualMemory;
class MemRegion;
class BitSet : public CHeapObj<mtTracing> {
/*
* ObjectBitSet is a sparse bitmap for marking objects in the Java heap.
* It holds one bit per ObjAlignmentInBytes-aligned address. Its underlying backing memory is
* allocated on-demand only, in fragments covering 64M heap ranges. Fragments are never deleted
* during the lifetime of the ObjectBitSet. The underlying memory is allocated from C-Heap.
*/
template<MEMFLAGS F>
class ObjectBitSet : public CHeapObj<F> {
const static size_t _bitmap_granularity_shift = 26; // 64M
const static size_t _bitmap_granularity_size = (size_t)1 << _bitmap_granularity_shift;
const static size_t _bitmap_granularity_mask = _bitmap_granularity_size - 1;
class BitMapFragment;
class BitMapFragmentTable : public BasicHashtable<mtTracing> {
class Entry : public BasicHashtableEntry<mtTracing> {
class BitMapFragmentTable : public BasicHashtable<F> {
class Entry : public BasicHashtableEntry<F> {
public:
uintptr_t _key;
CHeapBitMap* _value;
Entry* next() {
return (Entry*)BasicHashtableEntry<mtTracing>::next();
return (Entry*)BasicHashtableEntry<F>::next();
}
};
@@ -63,11 +69,11 @@ class BitSet : public CHeapObj<mtTracing> {
}
unsigned hash_to_index(unsigned hash) {
return hash & (BasicHashtable<mtTracing>::table_size() - 1);
return hash & (BasicHashtable<F>::table_size() - 1);
}
public:
BitMapFragmentTable(int table_size) : BasicHashtable<mtTracing>(table_size, sizeof(Entry)) {}
BitMapFragmentTable(int table_size) : BasicHashtable<F>(table_size, sizeof(Entry)) {}
~BitMapFragmentTable();
void add(uintptr_t key, CHeapBitMap* value);
CHeapBitMap** lookup(uintptr_t key);
@@ -81,8 +87,8 @@ class BitSet : public CHeapObj<mtTracing> {
uintptr_t _last_fragment_granule;
public:
BitSet();
~BitSet();
ObjectBitSet();
~ObjectBitSet();
BitMap::idx_t addr_to_bit(uintptr_t addr) const;
@@ -99,7 +105,8 @@
}
};
class BitSet::BitMapFragment : public CHeapObj<mtTracing> {
template<MEMFLAGS F>
class ObjectBitSet<F>::BitMapFragment : public CHeapObj<F> {
CHeapBitMap _bits;
BitMapFragment* _next;
@@ -115,4 +122,4 @@ public:
}
};
#endif // SHARE_JFR_LEAKPROFILER_CHAINS_BITSET_HPP
#endif // SHARE_UTILITIES_OBJECTBITSET_HPP
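To make the fragment geometry concrete: with the default 8-byte object alignment (LogMinObjAlignmentInBytes == 3), each 64 MiB fragment needs 2^26 >> 3 = 8M bits, i.e. 1 MiB of bitmap memory, allocated only for heap ranges that actually contain marked objects. A small standalone sketch of the index arithmetic (hypothetical address; mirrors addr_to_bit() above):

#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t granularity_shift = 26;                       // fragments cover 64 MiB
  const uintptr_t granularity_mask  = ((uintptr_t)1 << granularity_shift) - 1;
  const uintptr_t log_obj_align     = 3;                        // 8-byte alignment (default)
  uintptr_t addr = (uintptr_t)0x7f3a12345678;                   // hypothetical oop address
  printf("granule %#zx, bit %zu\n",
         (size_t)(addr >> granularity_shift),                   // hashtable key
         (size_t)((addr & granularity_mask) >> log_obj_align)); // bit within fragment
  return 0;
}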

src/hotspot/share/jfr/leakprofiler/chains/bitset.inline.hpp → src/hotspot/share/utilities/objectBitSet.inline.hpp

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,36 +22,75 @@
*
*/
#ifndef SHARE_JFR_LEAKPROFILER_CHAINS_BITSET_INLINE_HPP
#define SHARE_JFR_LEAKPROFILER_CHAINS_BITSET_INLINE_HPP
#ifndef SHARE_UTILITIES_OBJECTBITSET_INLINE_HPP
#define SHARE_UTILITIES_OBJECTBITSET_INLINE_HPP
#include "jfr/leakprofiler/chains/bitset.hpp"
#include "utilities/objectBitSet.hpp"
#include "jfr/recorder/storage/jfrVirtualMemory.hpp"
#include "memory/memRegion.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/hashtable.inline.hpp"
inline BitSet::BitMapFragmentTable::Entry* BitSet::BitMapFragmentTable::bucket(int i) const {
return (Entry*)BasicHashtable<mtTracing>::bucket(i);
template<MEMFLAGS F>
ObjectBitSet<F>::BitMapFragment::BitMapFragment(uintptr_t granule, BitMapFragment* next) :
_bits(_bitmap_granularity_size >> LogMinObjAlignmentInBytes, F, true /* clear */),
_next(next) {
}
inline BitSet::BitMapFragmentTable::Entry* BitSet::BitMapFragmentTable::new_entry(unsigned int hash,
uintptr_t key,
CHeapBitMap* value) {
Entry* entry = (Entry*)BasicHashtable<mtTracing>::new_entry(hash);
template<MEMFLAGS F>
ObjectBitSet<F>::ObjectBitSet() :
_bitmap_fragments(32),
_fragment_list(NULL),
_last_fragment_bits(NULL),
_last_fragment_granule(UINTPTR_MAX) {
}
template<MEMFLAGS F>
ObjectBitSet<F>::~ObjectBitSet() {
BitMapFragment* current = _fragment_list;
while (current != NULL) {
BitMapFragment* next = current->next();
delete current;
current = next;
}
}
template<MEMFLAGS F>
ObjectBitSet<F>::BitMapFragmentTable::~BitMapFragmentTable() {
for (int index = 0; index < BasicHashtable<F>::table_size(); index ++) {
Entry* e = bucket(index);
while (e != nullptr) {
Entry* tmp = e;
e = e->next();
BasicHashtable<F>::free_entry(tmp);
}
}
}
template<MEMFLAGS F>
inline typename ObjectBitSet<F>::BitMapFragmentTable::Entry* ObjectBitSet<F>::BitMapFragmentTable::bucket(int i) const {
return (Entry*)BasicHashtable<F>::bucket(i);
}
template<MEMFLAGS F>
inline typename ObjectBitSet<F>::BitMapFragmentTable::Entry*
ObjectBitSet<F>::BitMapFragmentTable::new_entry(unsigned int hash, uintptr_t key, CHeapBitMap* value) {
Entry* entry = (Entry*)BasicHashtable<F>::new_entry(hash);
entry->_key = key;
entry->_value = value;
return entry;
}
inline void BitSet::BitMapFragmentTable::add(uintptr_t key, CHeapBitMap* value) {
template<MEMFLAGS F>
inline void ObjectBitSet<F>::BitMapFragmentTable::add(uintptr_t key, CHeapBitMap* value) {
unsigned hash = hash_segment(key);
Entry* entry = new_entry(hash, key, value);
BasicHashtable<mtTracing>::add_entry(hash_to_index(hash), entry);
BasicHashtable<F>::add_entry(hash_to_index(hash), entry);
}
inline CHeapBitMap** BitSet::BitMapFragmentTable::lookup(uintptr_t key) {
template<MEMFLAGS F>
inline CHeapBitMap** ObjectBitSet<F>::BitMapFragmentTable::lookup(uintptr_t key) {
unsigned hash = hash_segment(key);
int index = hash_to_index(hash);
for (Entry* e = bucket(index); e != NULL; e = e->next()) {
@@ -62,11 +101,13 @@ inline CHeapBitMap** BitSet::BitMapFragmentTable::lookup(uintptr_t key) {
return NULL;
}
inline BitMap::idx_t BitSet::addr_to_bit(uintptr_t addr) const {
template<MEMFLAGS F>
inline BitMap::idx_t ObjectBitSet<F>::addr_to_bit(uintptr_t addr) const {
return (addr & _bitmap_granularity_mask) >> LogMinObjAlignmentInBytes;
}
inline CHeapBitMap* BitSet::get_fragment_bits(uintptr_t addr) {
template<MEMFLAGS F>
inline CHeapBitMap* ObjectBitSet<F>::get_fragment_bits(uintptr_t addr) {
uintptr_t granule = addr >> _bitmap_granularity_shift;
if (granule == _last_fragment_granule) {
return _last_fragment_bits;
@@ -92,16 +133,18 @@ inline CHeapBitMap* BitSet::get_fragment_bits(uintptr_t addr) {
return bits;
}
inline void BitSet::mark_obj(uintptr_t addr) {
template<MEMFLAGS F>
inline void ObjectBitSet<F>::mark_obj(uintptr_t addr) {
CHeapBitMap* bits = get_fragment_bits(addr);
const BitMap::idx_t bit = addr_to_bit(addr);
bits->set_bit(bit);
}
inline bool BitSet::is_marked(uintptr_t addr) {
template<MEMFLAGS F>
inline bool ObjectBitSet<F>::is_marked(uintptr_t addr) {
CHeapBitMap* bits = get_fragment_bits(addr);
const BitMap::idx_t bit = addr_to_bit(addr);
return bits->at(bit);
}
#endif // SHARE_JFR_LEAKPROFILER_CHAINS_BITSET_INLINE_HPP
#endif // SHARE_UTILITIES_OBJECTBITSET_INLINE_HPP
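Client code only needs mark_obj() and is_marked(); the oop overloads exercised by the gtest below forward to the uintptr_t versions above, and get_fragment_bits() caches the last-hit fragment so repeated marks in the same 64 MiB range skip the hashtable lookup. A minimal usage sketch (memory tag chosen arbitrarily):

ObjectBitSet<mtServiceability> bits;
oopDesc obj;                       // stack-allocated dummy, as in the gtest below
if (!bits.is_marked(&obj)) {
  bits.mark_obj(&obj);             // first touch allocates the covering fragment
}
assert(bits.is_marked(&obj), "invariant");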

test/hotspot/gtest/utilities/test_objectBitSet.cpp (new file)

@@ -0,0 +1,66 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "utilities/objectBitSet.inline.hpp"
#include "unittest.hpp"
TEST_VM(ObjectBitSet, empty) {
ObjectBitSet<mtTracing> obs;
oopDesc obj1;
ASSERT_FALSE(obs.is_marked(&obj1));
}
// NOTE: This is a little weird. NULL is not treated specially: ObjectBitSet will happily
// allocate a fragment for the memory range starting at 0 and mark the first bit when passed NULL.
// In the absence of any error handling, I am not sure what a reasonably better way to do it
// would be, though.
TEST_VM(ObjectBitSet, null) {
ObjectBitSet<mtTracing> obs;
ASSERT_FALSE(obs.is_marked((oop)NULL));
obs.mark_obj((oop) NULL);
ASSERT_TRUE(obs.is_marked((oop)NULL));
}
TEST_VM(ObjectBitSet, mark_single) {
ObjectBitSet<mtTracing> obs;
oopDesc obj1;
ASSERT_FALSE(obs.is_marked(&obj1));
obs.mark_obj(&obj1);
ASSERT_TRUE(obs.is_marked(&obj1));
}
TEST_VM(ObjectBitSet, mark_multi) {
ObjectBitSet<mtTracing> obs;
oopDesc obj1;
oopDesc obj2;
ASSERT_FALSE(obs.is_marked(&obj1));
ASSERT_FALSE(obs.is_marked(&obj2));
obs.mark_obj(&obj1);
ASSERT_TRUE(obs.is_marked(&obj1));
ASSERT_FALSE(obs.is_marked(&obj2));
obs.mark_obj(&obj2);
ASSERT_TRUE(obs.is_marked(&obj1));
ASSERT_TRUE(obs.is_marked(&obj2));
}
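Assuming a configured OpenJDK build, these tests can be run with the usual gtest selector, e.g. make test TEST=gtest:ObjectBitSet.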