8235174: ZGC: Add support for JFR leak profiler

Reviewed-by: stefank, mgronlun, egahlin
This commit is contained in:
Erik Österlund 2019-12-10 13:50:10 +00:00
parent 1142184922
commit c2bce5e902
26 changed files with 284 additions and 181 deletions

View File

@ -103,7 +103,7 @@ void Jfr::on_vm_shutdown(bool exception_handler) {
void Jfr::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
if (LeakProfiler::is_running()) {
LeakProfiler::oops_do(is_alive, f);
LeakProfiler::weak_oops_do(is_alive, f);
}
}

View File

@ -29,7 +29,7 @@
#include "jfr/leakprofiler/chains/edgeStore.hpp"
#include "jfr/leakprofiler/chains/edgeQueue.hpp"
#include "jfr/leakprofiler/utilities/granularTimer.hpp"
#include "jfr/leakprofiler/utilities/unifiedOop.hpp"
#include "jfr/leakprofiler/utilities/unifiedOopRef.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
@ -111,12 +111,12 @@ void BFSClosure::process_root_set() {
}
}
void BFSClosure::process(const oop* reference, const oop pointee) {
void BFSClosure::process(UnifiedOopRef reference, const oop pointee) {
closure_impl(reference, pointee);
}
void BFSClosure::closure_impl(const oop* reference, const oop pointee) {
assert(reference != NULL, "invariant");
assert(UnifiedOop::dereference(reference) == pointee, "invariant");
void BFSClosure::closure_impl(UnifiedOopRef reference, const oop pointee) {
assert(!reference.is_null(), "invariant");
assert(reference.dereference() == pointee, "invariant");
if (GranularTimer::is_finished()) {
return;
@ -146,7 +146,7 @@ void BFSClosure::closure_impl(const oop* reference, const oop pointee) {
}
}
void BFSClosure::add_chain(const oop* reference, const oop pointee) {
void BFSClosure::add_chain(UnifiedOopRef reference, const oop pointee) {
assert(pointee != NULL, "invariant");
assert(NULL == pointee->mark().to_pointer(), "invariant");
Edge leak_edge(_current_parent, reference);
@ -213,23 +213,23 @@ void BFSClosure::iterate(const Edge* parent) {
void BFSClosure::do_oop(oop* ref) {
assert(ref != NULL, "invariant");
assert(is_aligned(ref, HeapWordSize), "invariant");
const oop pointee = *ref;
const oop pointee = HeapAccess<AS_NO_KEEPALIVE>::oop_load(ref);
if (pointee != NULL) {
closure_impl(ref, pointee);
closure_impl(UnifiedOopRef::encode_in_heap(ref), pointee);
}
}
void BFSClosure::do_oop(narrowOop* ref) {
assert(ref != NULL, "invariant");
assert(is_aligned(ref, sizeof(narrowOop)), "invariant");
const oop pointee = RawAccess<>::oop_load(ref);
const oop pointee = HeapAccess<AS_NO_KEEPALIVE>::oop_load(ref);
if (pointee != NULL) {
closure_impl(UnifiedOop::encode(ref), pointee);
closure_impl(UnifiedOopRef::encode_in_heap(ref), pointee);
}
}
void BFSClosure::do_root(const oop* ref) {
assert(ref != NULL, "invariant");
void BFSClosure::do_root(UnifiedOopRef ref) {
assert(!ref.is_null(), "invariant");
if (!_edge_queue->is_full()) {
_edge_queue->add(NULL, ref);
}

View File

@ -25,6 +25,7 @@
#ifndef SHARE_JFR_LEAKPROFILER_CHAINS_BFSCLOSURE_HPP
#define SHARE_JFR_LEAKPROFILER_CHAINS_BFSCLOSURE_HPP
#include "jfr/leakprofiler/utilities/unifiedOopRef.hpp"
#include "memory/iterator.hpp"
class BitSet;
@ -51,20 +52,23 @@ class BFSClosure : public BasicOopIterateClosure {
bool is_complete() const;
void step_frontier() const;
void closure_impl(const oop* reference, const oop pointee);
void add_chain(const oop* reference, const oop pointee);
void closure_impl(UnifiedOopRef reference, const oop pointee);
void add_chain(UnifiedOopRef reference, const oop pointee);
void dfs_fallback();
void iterate(const Edge* parent);
void process(const oop* reference, const oop pointee);
void process(UnifiedOopRef reference, const oop pointee);
void process_root_set();
void process_queue();
public:
virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS_EXCEPT_REFERENT; }
virtual bool should_verify_oops() { return false; }
BFSClosure(EdgeQueue* edge_queue, EdgeStore* edge_store, BitSet* mark_bits);
void process();
void do_root(const oop* ref);
void do_root(UnifiedOopRef ref);
virtual void do_oop(oop* ref);
virtual void do_oop(narrowOop* ref);

View File

@ -30,7 +30,7 @@
#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
#include "jfr/leakprofiler/utilities/granularTimer.hpp"
#include "jfr/leakprofiler/utilities/rootType.hpp"
#include "jfr/leakprofiler/utilities/unifiedOop.hpp"
#include "jfr/leakprofiler/utilities/unifiedOopRef.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
@ -48,13 +48,13 @@ bool DFSClosure::_ignore_root_set = false;
DFSClosure::DFSClosure() :
_parent(NULL),
_reference(NULL),
_reference(UnifiedOopRef::encode_null()),
_depth(0) {
}
DFSClosure::DFSClosure(DFSClosure* parent, size_t depth) :
_parent(parent),
_reference(NULL),
_reference(UnifiedOopRef::encode_null()),
_depth(depth) {
}
@ -99,9 +99,9 @@ void DFSClosure::find_leaks_from_root_set(EdgeStore* edge_store,
rs.process();
}
void DFSClosure::closure_impl(const oop* reference, const oop pointee) {
void DFSClosure::closure_impl(UnifiedOopRef reference, const oop pointee) {
assert(pointee != NULL, "invariant");
assert(reference != NULL, "invariant");
assert(!reference.is_null(), "invariant");
if (GranularTimer::is_finished()) {
return;
@ -161,24 +161,24 @@ void DFSClosure::add_chain() {
void DFSClosure::do_oop(oop* ref) {
assert(ref != NULL, "invariant");
assert(is_aligned(ref, HeapWordSize), "invariant");
const oop pointee = *ref;
const oop pointee = HeapAccess<AS_NO_KEEPALIVE>::oop_load(ref);
if (pointee != NULL) {
closure_impl(ref, pointee);
closure_impl(UnifiedOopRef::encode_in_heap(ref), pointee);
}
}
void DFSClosure::do_oop(narrowOop* ref) {
assert(ref != NULL, "invariant");
assert(is_aligned(ref, sizeof(narrowOop)), "invariant");
const oop pointee = RawAccess<>::oop_load(ref);
const oop pointee = HeapAccess<AS_NO_KEEPALIVE>::oop_load(ref);
if (pointee != NULL) {
closure_impl(UnifiedOop::encode(ref), pointee);
closure_impl(UnifiedOopRef::encode_in_heap(ref), pointee);
}
}
void DFSClosure::do_root(const oop* ref) {
assert(ref != NULL, "invariant");
const oop pointee = UnifiedOop::dereference(ref);
void DFSClosure::do_root(UnifiedOopRef ref) {
assert(!ref.is_null(), "invariant");
const oop pointee = ref.dereference();
assert(pointee != NULL, "invariant");
closure_impl(ref, pointee);
}

View File

@ -25,6 +25,7 @@
#ifndef SHARE_JFR_LEAKPROFILER_CHAINS_DFSCLOSURE_HPP
#define SHARE_JFR_LEAKPROFILER_CHAINS_DFSCLOSURE_HPP
#include "jfr/leakprofiler/utilities/unifiedOopRef.hpp"
#include "memory/iterator.hpp"
class BitSet;
@ -41,22 +42,25 @@ class DFSClosure : public BasicOopIterateClosure {
static size_t _max_depth;
static bool _ignore_root_set;
DFSClosure* _parent;
const oop* _reference;
UnifiedOopRef _reference;
size_t _depth;
void add_chain();
void closure_impl(const oop* reference, const oop pointee);
void closure_impl(UnifiedOopRef reference, const oop pointee);
DFSClosure* parent() const { return _parent; }
const oop* reference() const { return _reference; }
UnifiedOopRef reference() const { return _reference; }
DFSClosure(DFSClosure* parent, size_t depth);
DFSClosure();
public:
virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS_EXCEPT_REFERENT; }
virtual bool should_verify_oops() { return false; }
static void find_leaks_from_edge(EdgeStore* edge_store, BitSet* mark_bits, const Edge* start_edge);
static void find_leaks_from_root_set(EdgeStore* edge_store, BitSet* mark_bits);
void do_root(const oop* ref);
void do_root(UnifiedOopRef ref);
virtual void do_oop(oop* ref);
virtual void do_oop(narrowOop* ref);

View File

@ -24,19 +24,17 @@
#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "jfr/leakprofiler/chains/edge.hpp"
#include "jfr/leakprofiler/utilities/unifiedOop.hpp"
#include "jfr/leakprofiler/utilities/unifiedOopRef.inline.hpp"
Edge::Edge() : _parent(NULL), _reference(NULL) {}
Edge::Edge(const Edge* parent, const oop* reference) : _parent(parent),
_reference(reference) {}
Edge::Edge(const Edge* parent, UnifiedOopRef reference) : _parent(parent),
_reference(reference) {}
const oop Edge::pointee() const {
return UnifiedOop::dereference(_reference);
return _reference.dereference();
}
const oop Edge::reference_owner() const {
return is_root() ? (oop)NULL : UnifiedOop::dereference(_parent->reference());
return is_root() ? (oop)NULL : _parent->reference().dereference();
}
static const Klass* resolve_klass(const oop obj) {

View File

@ -25,18 +25,18 @@
#ifndef SHARE_JFR_LEAKPROFILER_CHAINS_EDGE_HPP
#define SHARE_JFR_LEAKPROFILER_CHAINS_EDGE_HPP
#include "jfr/leakprofiler/utilities/unifiedOopRef.hpp"
#include "memory/allocation.hpp"
#include "oops/oopsHierarchy.hpp"
class Edge {
protected:
const Edge* _parent;
const oop* _reference;
UnifiedOopRef _reference;
public:
Edge();
Edge(const Edge* parent, const oop* reference);
Edge(const Edge* parent, UnifiedOopRef reference);
const oop* reference() const {
UnifiedOopRef reference() const {
return _reference;
}
const Edge* parent() const {

View File

@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "jfr/leakprofiler/chains/edgeQueue.hpp"
#include "jfr/leakprofiler/utilities/unifiedOopRef.inline.hpp"
#include "jfr/recorder/storage/jfrVirtualMemory.hpp"
EdgeQueue::EdgeQueue(size_t reservation_size_bytes, size_t commit_block_size_bytes) :
@ -45,8 +46,8 @@ EdgeQueue::~EdgeQueue() {
delete _vmm;
}
void EdgeQueue::add(const Edge* parent, const oop* ref) {
assert(ref != NULL, "Null objects not allowed in EdgeQueue");
void EdgeQueue::add(const Edge* parent, UnifiedOopRef ref) {
assert(!ref.is_null(), "Null objects not allowed in EdgeQueue");
assert(!is_full(), "EdgeQueue is full. Check is_full before adding another Edge");
assert(!_vmm->is_full(), "invariant");
void* const allocation = _vmm->new_datum();

View File

@ -27,6 +27,7 @@
#include "memory/allocation.hpp"
#include "jfr/leakprofiler/chains/edge.hpp"
#include "jfr/leakprofiler/utilities/unifiedOopRef.hpp"
class JfrVirtualMemory;
@ -43,7 +44,7 @@ class EdgeQueue : public CHeapObj<mtTracing> {
bool initialize();
void add(const Edge* parent, const oop* ref);
void add(const Edge* parent, UnifiedOopRef ref);
const Edge* remove() const;
const Edge* element_at(size_t index) const;

View File

@ -25,21 +25,15 @@
#include "precompiled.hpp"
#include "jfr/leakprofiler/chains/edgeStore.hpp"
#include "jfr/leakprofiler/chains/edgeUtils.hpp"
#include "jfr/leakprofiler/utilities/unifiedOopRef.inline.hpp"
#include "oops/oop.inline.hpp"
StoredEdge::StoredEdge() : Edge() {}
StoredEdge::StoredEdge(const Edge* parent, const oop* reference) : Edge(parent, reference), _gc_root_id(0), _skip_length(0) {}
StoredEdge::StoredEdge(const Edge* parent, UnifiedOopRef reference) : Edge(parent, reference), _gc_root_id(0), _skip_length(0) {}
StoredEdge::StoredEdge(const Edge& edge) : Edge(edge), _gc_root_id(0), _skip_length(0) {}
StoredEdge::StoredEdge(const StoredEdge& edge) : Edge(edge), _gc_root_id(edge._gc_root_id), _skip_length(edge._skip_length) {}
void StoredEdge::operator=(const StoredEdge& edge) {
Edge::operator=(edge);
_gc_root_id = edge._gc_root_id;
_skip_length = edge._skip_length;
}
traceid EdgeStore::_edge_id_counter = 0;
EdgeStore::EdgeStore() : _edges(NULL) {
@ -73,28 +67,28 @@ void EdgeStore::on_unlink(EdgeEntry* entry) {
}
#ifdef ASSERT
bool EdgeStore::contains(const oop* reference) const {
bool EdgeStore::contains(UnifiedOopRef reference) const {
return get(reference) != NULL;
}
#endif
StoredEdge* EdgeStore::get(const oop* reference) const {
assert(reference != NULL, "invariant");
EdgeEntry* const entry = _edges->lookup_only((uintptr_t)reference);
StoredEdge* EdgeStore::get(UnifiedOopRef reference) const {
assert(!reference.is_null(), "invariant");
EdgeEntry* const entry = _edges->lookup_only(reference.addr<uintptr_t>());
return entry != NULL ? entry->literal_addr() : NULL;
}
StoredEdge* EdgeStore::put(const oop* reference) {
assert(reference != NULL, "invariant");
StoredEdge* EdgeStore::put(UnifiedOopRef reference) {
assert(!reference.is_null(), "invariant");
const StoredEdge e(NULL, reference);
assert(NULL == _edges->lookup_only((uintptr_t)reference), "invariant");
EdgeEntry& entry = _edges->put((uintptr_t)reference, e);
assert(NULL == _edges->lookup_only(reference.addr<uintptr_t>()), "invariant");
EdgeEntry& entry = _edges->put(reference.addr<uintptr_t>(), e);
return entry.literal_addr();
}
traceid EdgeStore::get_id(const Edge* edge) const {
assert(edge != NULL, "invariant");
EdgeEntry* const entry = _edges->lookup_only((uintptr_t)edge->reference());
EdgeEntry* const entry = _edges->lookup_only(edge->reference().addr<uintptr_t>());
assert(entry != NULL, "invariant");
return entry->id();
}

View File

@ -26,6 +26,7 @@
#define SHARE_JFR_LEAKPROFILER_CHAINS_EDGESTORE_HPP
#include "jfr/leakprofiler/chains/edge.hpp"
#include "jfr/leakprofiler/utilities/unifiedOopRef.hpp"
#include "jfr/utilities/jfrHashtable.hpp"
#include "memory/allocation.hpp"
@ -38,10 +39,9 @@ class StoredEdge : public Edge {
public:
StoredEdge();
StoredEdge(const Edge* parent, const oop* reference);
StoredEdge(const Edge* parent, UnifiedOopRef reference);
StoredEdge(const Edge& edge);
StoredEdge(const StoredEdge& edge);
void operator=(const StoredEdge& edge);
traceid gc_root_id() const { return _gc_root_id; }
void set_gc_root_id(traceid root_id) const { _gc_root_id = root_id; }
@ -78,8 +78,8 @@ class EdgeStore : public CHeapObj<mtTracing> {
bool on_equals(uintptr_t hash, const EdgeEntry* entry);
void on_unlink(EdgeEntry* entry);
StoredEdge* get(const oop* reference) const;
StoredEdge* put(const oop* reference);
StoredEdge* get(UnifiedOopRef reference) const;
StoredEdge* put(UnifiedOopRef reference);
traceid gc_root_id(const Edge* edge) const;
bool put_edges(StoredEdge** previous, const Edge** current, size_t length);
@ -94,7 +94,7 @@ class EdgeStore : public CHeapObj<mtTracing> {
template <typename T>
void iterate(T& functor) const { _edges->iterate_value<T>(functor); }
DEBUG_ONLY(bool contains(const oop* reference) const;)
DEBUG_ONLY(bool contains(UnifiedOopRef reference) const;)
public:
EdgeStore();

View File

@ -27,7 +27,7 @@
#include "jfr/leakprofiler/chains/edge.hpp"
#include "jfr/leakprofiler/chains/edgeStore.hpp"
#include "jfr/leakprofiler/chains/edgeUtils.hpp"
#include "jfr/leakprofiler/utilities/unifiedOop.hpp"
#include "jfr/leakprofiler/utilities/unifiedOopRef.inline.hpp"
#include "oops/fieldStreams.inline.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
@ -42,12 +42,11 @@ static int field_offset(const StoredEdge& edge) {
assert(!edge.is_root(), "invariant");
const oop ref_owner = edge.reference_owner();
assert(ref_owner != NULL, "invariant");
const oop* reference = UnifiedOop::decode(edge.reference());
assert(reference != NULL, "invariant");
assert(!UnifiedOop::is_narrow(reference), "invariant");
UnifiedOopRef reference = edge.reference();
assert(!reference.is_null(), "invariant");
assert(!ref_owner->is_array(), "invariant");
assert(ref_owner->is_instance(), "invariant");
const int offset = (int)pointer_delta(reference, ref_owner, sizeof(char));
const int offset = (int)(reference.addr<uintptr_t>() - cast_from_oop<uintptr_t>(ref_owner));
assert(offset < (ref_owner->size() * HeapWordSize), "invariant");
return offset;
}
@ -103,12 +102,11 @@ static int array_offset(const Edge& edge) {
assert(!edge.is_root(), "invariant");
const oop ref_owner = edge.reference_owner();
assert(ref_owner != NULL, "invariant");
const oop* reference = UnifiedOop::decode(edge.reference());
assert(reference != NULL, "invariant");
assert(!UnifiedOop::is_narrow(reference), "invariant");
UnifiedOopRef reference = edge.reference();
assert(!reference.is_null(), "invariant");
assert(ref_owner->is_array(), "invariant");
const objArrayOop ref_owner_array = static_cast<const objArrayOop>(ref_owner);
const int offset = (int)pointer_delta(reference, ref_owner_array->base(), heapOopSize);
const int offset = (int)pointer_delta(reference.addr<HeapWord*>(), ref_owner_array->base(), heapOopSize);
assert(offset >= 0 && offset < ref_owner_array->length(), "invariant");
return offset;
}
@ -122,7 +120,7 @@ int EdgeUtils::array_size(const Edge& edge) {
const oop ref_owner = edge.reference_owner();
assert(ref_owner != NULL, "invariant");
assert(ref_owner->is_objArray(), "invariant");
return ((objArrayOop)(ref_owner))->length();
return ((objArrayOop)ref_owner)->length();
}
return 0;
}

View File

@ -32,7 +32,7 @@
#include "jfr/leakprofiler/chains/dfsClosure.hpp"
#include "jfr/leakprofiler/chains/edgeQueue.hpp"
#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
#include "jfr/leakprofiler/utilities/unifiedOop.hpp"
#include "jfr/leakprofiler/utilities/unifiedOopRef.inline.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
@ -49,20 +49,9 @@ RootSetClosure<Delegate>::RootSetClosure(Delegate* delegate) : _delegate(delegat
template <typename Delegate>
void RootSetClosure<Delegate>::do_oop(oop* ref) {
assert(ref != NULL, "invariant");
// We discard unaligned root references because
// our reference tagging scheme will use
// the lowest bit in a represented reference
// to indicate the reference is narrow.
// It is mainly roots delivered via nmethods::do_oops()
// that come in unaligned. It should be ok to duck these
// since they are supposedly weak.
if (!is_aligned(ref, HeapWordSize)) {
return;
}
assert(is_aligned(ref, HeapWordSize), "invariant");
if (*ref != NULL) {
_delegate->do_root(ref);
_delegate->do_root(UnifiedOopRef::encode_in_native(ref));
}
}
@ -70,9 +59,8 @@ template <typename Delegate>
void RootSetClosure<Delegate>::do_oop(narrowOop* ref) {
assert(ref != NULL, "invariant");
assert(is_aligned(ref, sizeof(narrowOop)), "invariant");
const oop pointee = RawAccess<>::oop_load(ref);
if (pointee != NULL) {
_delegate->do_root(UnifiedOop::encode(ref));
if (*ref != 0) {
_delegate->do_root(UnifiedOopRef::encode_in_native(ref));
}
}
@ -83,8 +71,8 @@ void RootSetClosure<Delegate>::process() {
RootSetClosureMarkScope mark_scope;
CLDToOopClosure cldt_closure(this, ClassLoaderData::_claim_none);
ClassLoaderDataGraph::always_strong_cld_do(&cldt_closure);
CodeBlobToOopClosure blobs(this, false);
Threads::oops_do(this, &blobs);
// We don't follow code blob oops, because they have misaligned oops.
Threads::oops_do(this, NULL);
ObjectSynchronizer::oops_do(this);
Universe::oops_do(this);
JNIHandles::oops_do(this);

View File

@ -30,6 +30,7 @@
#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
#include "jfr/leakprofiler/sampling/objectSample.hpp"
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
#include "jfr/leakprofiler/utilities/unifiedOopRef.inline.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markWord.hpp"
@ -109,17 +110,16 @@ void EventEmitter::write_event(const ObjectSample* sample, EdgeStore* edge_store
assert(edge_store != NULL, "invariant");
assert(_jfr_thread_local != NULL, "invariant");
const oop* object_addr = sample->object_addr();
traceid gc_root_id = 0;
const Edge* edge = NULL;
if (SafepointSynchronize::is_at_safepoint()) {
edge = (const Edge*)(*object_addr)->mark().to_pointer();
edge = (const Edge*)(sample->object())->mark().to_pointer();
}
if (edge == NULL) {
// In order to dump out a representation of the event
// even though it was not reachable / too long to reach,
// we need to register a top level edge for this object.
edge = edge_store->put(object_addr);
edge = edge_store->put(UnifiedOopRef::encode_in_native(sample->object_addr()));
} else {
gc_root_id = edge_store->gc_root_id(edge);
}

View File

@ -466,7 +466,7 @@ void ObjectSampleCheckpoint::on_type_set(JfrCheckpointWriter& writer) {
}
void ObjectSampleCheckpoint::on_type_set_unload(JfrCheckpointWriter& writer) {
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
assert(LeakProfiler::is_running(), "invariant");
if (writer.has_data() && ObjectSampler::sampler()->last() != NULL) {
save_type_set_blob(writer, true);

View File

@ -32,7 +32,7 @@
#include "jfr/leakprofiler/checkpoint/rootResolver.hpp"
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
#include "jfr/leakprofiler/utilities/rootType.hpp"
#include "jfr/leakprofiler/utilities/unifiedOop.hpp"
#include "jfr/leakprofiler/utilities/unifiedOopRef.inline.hpp"
#include "jfr/metadata/jfrSerializer.hpp"
#include "jfr/writers/jfrTypeWriterHost.hpp"
#include "oops/oop.inline.hpp"
@ -374,8 +374,8 @@ int _edge_reference_compare_(uintptr_t lhs, uintptr_t rhs) {
}
int _root_desc_compare_(const ObjectSampleRootDescriptionInfo*const & lhs, const ObjectSampleRootDescriptionInfo* const& rhs) {
const uintptr_t lhs_ref = (uintptr_t)lhs->_data._root_edge->reference();
const uintptr_t rhs_ref = (uintptr_t)rhs->_data._root_edge->reference();
const uintptr_t lhs_ref = lhs->_data._root_edge->reference().addr<uintptr_t>();
const uintptr_t rhs_ref = rhs->_data._root_edge->reference().addr<uintptr_t>();
return _edge_reference_compare_(lhs_ref, rhs_ref);
}
@ -393,7 +393,7 @@ static int find_sorted(const RootCallbackInfo& callback_info,
while (max >= min) {
const int mid = (int)(((uint)max + min) / 2);
int diff = _edge_reference_compare_((uintptr_t)callback_info._high,
(uintptr_t)arr->at(mid)->_data._root_edge->reference());
arr->at(mid)->_data._root_edge->reference().addr<uintptr_t>());
if (diff > 0) {
min = mid + 1;
} else if (diff < 0) {
@ -411,11 +411,11 @@ class RootResolutionSet : public ResourceObj, public RootCallback {
GrowableArray<const ObjectSampleRootDescriptionInfo*>* _unresolved_roots;
const uintptr_t high() const {
return (uintptr_t)_unresolved_roots->last()->_data._root_edge->reference();
return _unresolved_roots->last()->_data._root_edge->reference().addr<uintptr_t>();
}
const uintptr_t low() const {
return (uintptr_t)_unresolved_roots->first()->_data._root_edge->reference();
return _unresolved_roots->first()->_data._root_edge->reference().addr<uintptr_t>();
}
bool in_set_address_range(const RootCallbackInfo& callback_info) const {
@ -429,7 +429,7 @@ class RootResolutionSet : public ResourceObj, public RootCallback {
assert(callback_info._low != NULL, "invariant");
for (int i = 0; i < _unresolved_roots->length(); ++i) {
const uintptr_t ref_addr = (uintptr_t)_unresolved_roots->at(i)->_data._root_edge->reference();
const uintptr_t ref_addr = _unresolved_roots->at(i)->_data._root_edge->reference().addr<uintptr_t>();
if ((uintptr_t)callback_info._low <= ref_addr && (uintptr_t)callback_info._high >= ref_addr) {
return i;
}
@ -453,7 +453,7 @@ class RootResolutionSet : public ResourceObj, public RootCallback {
ObjectSampleRootDescriptionInfo* const desc =
const_cast<ObjectSampleRootDescriptionInfo*>(_unresolved_roots->at(idx));
assert(desc != NULL, "invariant");
assert((uintptr_t)callback_info._high == (uintptr_t)desc->_data._root_edge->reference(), "invariant");
assert((uintptr_t)callback_info._high == desc->_data._root_edge->reference().addr<uintptr_t>(), "invariant");
desc->_data._system = callback_info._system;
desc->_data._type = callback_info._type;
@ -499,7 +499,7 @@ class RootResolutionSet : public ResourceObj, public RootCallback {
return _unresolved_roots->length();
}
const void* at(int idx) const {
UnifiedOopRef at(int idx) const {
assert(idx >= 0, "invariant");
assert(idx < _unresolved_roots->length(), "invariant");
return _unresolved_roots->at(idx)->_data._root_edge->reference();

View File

@ -27,7 +27,7 @@
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "jfr/leakprofiler/utilities/unifiedOop.hpp"
#include "jfr/leakprofiler/utilities/unifiedOopRef.inline.hpp"
#include "jfr/leakprofiler/checkpoint/rootResolver.hpp"
#include "jfr/utilities/jfrThreadIterator.hpp"
#include "memory/iterator.hpp"
@ -47,7 +47,7 @@ class ReferenceLocateClosure : public OopClosure {
RootCallbackInfo _info;
bool _complete;
void do_oop_shared(const void* ref);
void do_oop_shared(UnifiedOopRef ref);
public:
ReferenceLocateClosure(RootCallback& callback,
@ -71,20 +71,20 @@ class ReferenceLocateClosure : public OopClosure {
}
};
void ReferenceLocateClosure::do_oop_shared(const void* ref) {
assert(ref != NULL, "invariant");
void ReferenceLocateClosure::do_oop_shared(UnifiedOopRef ref) {
assert(!ref.is_null(), "invariant");
if (!_complete) {
_info._high = ref;
_info._high = ref.addr<address>();
_complete = _callback.process(_info);
}
}
void ReferenceLocateClosure::do_oop(oop* ref) {
do_oop_shared(ref);
do_oop_shared(UnifiedOopRef::encode_in_native(ref));
}
void ReferenceLocateClosure::do_oop(narrowOop* ref) {
do_oop_shared(ref);
do_oop_shared(UnifiedOopRef::encode_in_native(ref));
}
class ReferenceToRootClosure : public StackObj {
@ -303,7 +303,7 @@ bool ReferenceToThreadRootClosure::do_thread_stack_fast(JavaThread* jt) {
info._type = OldObjectRoot::_stack_variable;
for (int i = 0; i < _callback.entries(); ++i) {
const address adr = (address)_callback.at(i);
const address adr = _callback.at(i).addr<address>();
if (jt->is_in_usable_stack(adr)) {
info._high = adr;
_complete = _callback.process(info);

View File

@ -26,12 +26,13 @@
#define SHARE_JFR_LEAKPROFILER_CHECKPOINT_ROOTRESOLVER_HPP
#include "jfr/leakprofiler/utilities/rootType.hpp"
#include "jfr/leakprofiler/utilities/unifiedOopRef.hpp"
#include "memory/allocation.hpp"
#include "oops/oopsHierarchy.hpp"
struct RootCallbackInfo {
const void* _high;
const void* _low;
address _high;
address _low;
const void* _context;
OldObjectRoot::System _system;
OldObjectRoot::Type _type;
@ -41,7 +42,7 @@ class RootCallback {
public:
virtual bool process(const RootCallbackInfo& info) = 0;
virtual int entries() const = 0;
virtual const void* at(int idx) const = 0;
virtual UnifiedOopRef at(int idx) const = 0;
};
class RootResolver : public AllStatic {

View File

@ -49,11 +49,6 @@ bool LeakProfiler::start(int sample_count) {
return false;
}
if (UseZGC) {
log_warning(jfr)("LeakProfiler is currently not supported in combination with ZGC");
return false;
}
if (UseShenandoahGC) {
log_warning(jfr)("LeakProfiler is currently not supported in combination with Shenandoah GC");
return false;
@ -101,11 +96,11 @@ void LeakProfiler::emit_events(int64_t cutoff_ticks, bool emit_all) {
ObjectSampler::release();
}
void LeakProfiler::oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
void LeakProfiler::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
assert(SafepointSynchronize::is_at_safepoint(),
"Leak Profiler::oops_do(...) may only be called during safepoint");
if (is_running()) {
ObjectSampler::oops_do(is_alive, f);
ObjectSampler::weak_oops_do(is_alive, f);
}
}

View File

@ -41,7 +41,7 @@ class LeakProfiler : public AllStatic {
static void sample(HeapWord* object, size_t size, JavaThread* thread);
// Called by GC
static void oops_do(BoolObjectClosure* is_alive, OopClosure* f);
static void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f);
};
#endif // SHARE_JFR_LEAKPROFILER_LEAKPROFILER_HPP

View File

@ -0,0 +1,34 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "jfr/leakprofiler/sampling/objectSample.hpp"
#include "oops/access.inline.hpp"
// Returns the sampled object, loaded through the Access API with
// ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE: the native reference is treated as
// phantom-weak and the load does not keep the referent alive.
// NOTE(review): presumably returns NULL once the GC has cleared the
// referent (is_dead() elsewhere in this commit tests object() == NULL) —
// confirm against the collector's phantom-reference semantics.
const oop ObjectSample::object() const {
return NativeAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(&_object);
}
// Installs a new sampled object into _object through the Access API with
// the ON_PHANTOM_OOP_REF decorator, so the store goes through whatever
// barrier machinery the active GC requires for a phantom-weak native
// reference (rather than a raw pointer store).
void ObjectSample::set_object(oop object) {
NativeAccess<ON_PHANTOM_OOP_REF>::oop_store(&_object, object);
}

View File

@ -57,11 +57,6 @@ class ObjectSample : public JfrCHeapObj {
size_t _allocated;
size_t _heap_used_at_last_gc;
unsigned int _stack_trace_hash;
bool _dead;
void set_dead() {
_dead = true;
}
void release_references() {
_stacktrace.~JfrBlobHandle();
@ -70,10 +65,10 @@ class ObjectSample : public JfrCHeapObj {
}
void reset() {
_object = NULL;
set_stack_trace_id(0);
set_stack_trace_hash(0);
release_references();
_dead = false;
}
public:
@ -90,8 +85,7 @@ class ObjectSample : public JfrCHeapObj {
_span(0),
_allocated(0),
_heap_used_at_last_gc(0),
_stack_trace_hash(0),
_dead(false) {}
_stack_trace_hash(0) {}
ObjectSample* next() const {
return _next;
@ -110,26 +104,16 @@ class ObjectSample : public JfrCHeapObj {
}
bool is_dead() const {
return _dead;
return object() == NULL;
}
const oop object() const {
return _object;
}
const oop object() const;
void set_object(oop object);
const oop* object_addr() const {
return &_object;
}
void set_object(oop object) {
_object = object;
}
const Klass* klass() const {
assert(_object != NULL, "invariant");
return _object->klass();
}
int index() const {
return _index;
}

View File

@ -206,7 +206,7 @@ void ObjectSampler::remove_dead(ObjectSample* sample) {
assert(sample != NULL, "invariant");
assert(sample->is_dead(), "invariant");
ObjectSample* const previous = sample->prev();
// push span on to previous
// push span onto previous
if (previous != NULL) {
_priority_queue->remove(previous);
previous->add_span(sample->span());
@ -216,23 +216,23 @@ void ObjectSampler::remove_dead(ObjectSample* sample) {
_list->release(sample);
}
void ObjectSampler::oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
void ObjectSampler::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
assert(is_created(), "invariant");
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
ObjectSampler& sampler = instance();
ObjectSample* current = sampler._list->last();
while (current != NULL) {
ObjectSample* next = current->next();
if (!current->is_dead()) {
if (current->_object != NULL) {
if (is_alive->do_object_b(current->object())) {
// The weakly referenced object is alive, update pointer
f->do_oop(const_cast<oop*>(current->object_addr()));
} else {
current->set_dead();
// clear existing field to assist GC barriers
current->_object = NULL;
sampler._dead_samples = true;
}
}
current = next;
current = current->next();
}
sampler._last_sweep = JfrTicks::now();
}

View File

@ -67,7 +67,7 @@ class ObjectSampler : public CHeapObj<mtTracing> {
void remove_dead(ObjectSample* sample);
// Called by GC
static void oops_do(BoolObjectClosure* is_alive, OopClosure* f);
static void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f);
const ObjectSample* item_at(int index) const;
ObjectSample* item_at(int index);

View File

@ -22,35 +22,29 @@
*
*/
#ifndef SHARE_JFR_LEAKPROFILER_UTILITIES_UNIFIEDOOP_HPP
#define SHARE_JFR_LEAKPROFILER_UTILITIES_UNIFIEDOOP_HPP
#ifndef SHARE_JFR_LEAKPROFILER_UTILITIES_UNIFIEDOOPREF_HPP
#define SHARE_JFR_LEAKPROFILER_UTILITIES_UNIFIEDOOPREF_HPP
#include "oops/oop.inline.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/globalDefinitions.hpp"
// NOTE(review): rendered diff — the removed static-only UnifiedOop class is
// interleaved with the added UnifiedOopRef value type below.
class UnifiedOop : public AllStatic {
public:
static const bool is_narrow(const oop* ref) {
assert(ref != NULL, "invariant");
return 1 == (((u8)ref) & 1);
}
// A tagged pointer wrapping either an oop* or a narrowOop*, located either
// in the heap or in native memory; the low bits of _value carry the tag.
struct UnifiedOopRef {
uintptr_t _value;
static const oop* decode(const oop* ref) {
assert(ref != NULL, "invariant");
return is_narrow(ref) ? (const oop*)(((u8)ref) & ~1) : ref;
}
// Untagged pointer, reinterpreted as T (a pointer type).
template <typename T>
T addr() const;
static const oop* encode(narrowOop* ref) {
assert(ref != NULL, "invariant");
return (const oop*)((u8)ref | 1);
}
// Tag-bit queries: narrow (bit 0), native i.e. off-heap (bit 1), null (zero).
bool is_narrow() const;
bool is_native() const;
bool is_null() const;
static oop dereference(const oop* ref) {
assert(ref != NULL, "invariant");
return is_narrow(ref) ?
(oop)RawAccess<>::oop_load((narrowOop*)decode(ref)) :
(oop)RawAccess<>::oop_load(const_cast<oop*>(ref));
// Load the referent through the appropriate GC access API.
oop dereference() const;
}
// Factories that tag a raw reference location with its kind.
static UnifiedOopRef encode_in_native(const narrowOop* ref);
static UnifiedOopRef encode_in_native(const oop* ref);
static UnifiedOopRef encode_in_heap(const oop* ref);
static UnifiedOopRef encode_in_heap(const narrowOop* ref);
static UnifiedOopRef encode_null();
};
#endif // SHARE_JFR_LEAKPROFILER_UTILITIES_UNIFIEDOOP_HPP
#endif // SHARE_JFR_LEAKPROFILER_UTILITIES_UNIFIEDOOPREF_HPP

View File

@ -0,0 +1,107 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_JFR_LEAKPROFILER_UTILITIES_UNIFIEDOOPREF_INLINE_HPP
#define SHARE_JFR_LEAKPROFILER_UTILITIES_UNIFIEDOOPREF_INLINE_HPP
#include "jfr/leakprofiler/utilities/unifiedOopRef.hpp"
#include "oops/access.inline.hpp"
#include "utilities/debug.hpp"
template <typename T>
inline T UnifiedOopRef::addr() const {
return reinterpret_cast<T>(_value & ~uintptr_t(3));
}
// Visual Studio 2019 and earlier reject reinterpret_cast when the
// destination type equals the expression type, e.g.:
//   reinterpret_cast<int>(1);
//   "error C2440: 'reinterpret_cast': cannot convert from 'int' to 'int'"
// This full specialization sidesteps the cast entirely.
template<>
inline uintptr_t UnifiedOopRef::addr<uintptr_t>() const {
  const uintptr_t tag_mask = 3;
  return _value & ~tag_mask;
}
// Bit 0 of the tagged value marks a reference to a narrowOop location.
inline bool UnifiedOopRef::is_narrow() const {
  return (_value & 1) != 0;
}
// Bit 1 of the tagged value marks a reference located outside the heap.
inline bool UnifiedOopRef::is_native() const {
  return (_value & 2) != 0;
}
// A tagged value of zero encodes the null reference (see encode_null()).
inline bool UnifiedOopRef::is_null() const {
  return 0 == _value;
}
// Tag a native (off-heap) narrowOop location: bit 0 (narrow) + bit 1 (native).
// Relies on the location being at least 4-byte aligned so the tag bits are free.
inline UnifiedOopRef UnifiedOopRef::encode_in_native(const narrowOop* ref) {
  assert(ref != NULL, "invariant");
  const uintptr_t tagged = reinterpret_cast<uintptr_t>(ref) | 3;
  UnifiedOopRef result = { tagged };
  assert(result.addr<narrowOop*>() == ref, "sanity");
  return result;
}
// Tag a native (off-heap) oop location: bit 1 (native) only, not narrow.
inline UnifiedOopRef UnifiedOopRef::encode_in_native(const oop* ref) {
  assert(ref != NULL, "invariant");
  const uintptr_t tagged = reinterpret_cast<uintptr_t>(ref) | 2;
  UnifiedOopRef result = { tagged };
  assert(result.addr<oop*>() == ref, "sanity");
  return result;
}
// Tag an in-heap narrowOop location: bit 0 (narrow) only, not native.
inline UnifiedOopRef UnifiedOopRef::encode_in_heap(const narrowOop* ref) {
  assert(ref != NULL, "invariant");
  const uintptr_t tagged = reinterpret_cast<uintptr_t>(ref) | 1;
  UnifiedOopRef result = { tagged };
  assert(result.addr<narrowOop*>() == ref, "sanity");
  return result;
}
// Tag an in-heap oop location: no tag bits set (the zero-tag case).
inline UnifiedOopRef UnifiedOopRef::encode_in_heap(const oop* ref) {
  assert(ref != NULL, "invariant");
  const uintptr_t tagged = reinterpret_cast<uintptr_t>(ref);
  UnifiedOopRef result = { tagged };
  assert(result.addr<oop*>() == ref, "sanity");
  return result;
}
// The null reference is a zero tagged value (is_null() tests for zero).
inline UnifiedOopRef UnifiedOopRef::encode_null() {
  UnifiedOopRef result = { 0 };
  return result;
}
// Load the oop referred to by this tagged reference, dispatching on the tag
// bits to the matching GC access API: NativeAccess for off-heap locations,
// HeapAccess for in-heap ones, each with a narrow and a wide variant.
// AS_NO_KEEPALIVE: the load does not keep the referent alive
// (NOTE(review): presumably so weakly-held leak-profiler samples can still
// die — confirm against callers).
inline oop UnifiedOopRef::dereference() const {
  if (is_narrow()) {
    if (is_native()) {
      return NativeAccess<AS_NO_KEEPALIVE>::oop_load(addr<narrowOop*>());
    } else {
      return HeapAccess<AS_NO_KEEPALIVE>::oop_load(addr<narrowOop*>());
    }
  } else {
    if (is_native()) {
      return NativeAccess<AS_NO_KEEPALIVE>::oop_load(addr<oop*>());
    } else {
      return HeapAccess<AS_NO_KEEPALIVE>::oop_load(addr<oop*>());
    }
  }
}
#endif // SHARE_JFR_LEAKPROFILER_UTILITIES_UNIFIEDOOPREF_INLINE_HPP