Mirror of https://github.com/openjdk/jdk.git (synced 2026-02-18 06:15:16 +00:00)
8307348: Parallelize heap walk for ObjectCount(AfterGC) JFR event collection
Reviewed-by: shade, ayang, tschatzl
commit 540c706bbc (parent d993432d44)
@@ -1731,10 +1731,10 @@ void G1ConcurrentMark::report_object_count(bool mark_completed) {
   // using either the bitmap or after the cycle using the scrubbing information.
   if (mark_completed) {
     G1ObjectCountIsAliveClosure is_alive(_g1h);
-    _gc_tracer_cm->report_object_count_after_gc(&is_alive);
+    _gc_tracer_cm->report_object_count_after_gc(&is_alive, _g1h->workers());
   } else {
     G1CMIsAliveClosure is_alive(_g1h);
-    _gc_tracer_cm->report_object_count_after_gc(&is_alive);
+    _gc_tracer_cm->report_object_count_after_gc(&is_alive, _g1h->workers());
   }
 }
@@ -327,7 +327,7 @@ void G1FullCollector::phase1_mark_live_objects() {
 
   {
     GCTraceTime(Debug, gc, phases) debug("Report Object Count", scope()->timer());
-    scope()->tracer()->report_object_count_after_gc(&_is_alive);
+    scope()->tracer()->report_object_count_after_gc(&_is_alive, _heap->workers());
   }
 #if TASKQUEUE_STATS
   oop_queue_set()->print_and_reset_taskqueue_stats("Oop Queue");
@@ -2069,7 +2069,7 @@ void PSParallelCompact::marking_phase(ParallelOldTracer *gc_tracer) {
 
   {
     GCTraceTime(Debug, gc, phases) tm("Report Object Count", &_gc_timer);
-    _gc_tracer.report_object_count_after_gc(is_alive_closure());
+    _gc_tracer.report_object_count_after_gc(is_alive_closure(), &ParallelScavengeHeap::heap()->workers());
   }
 #if TASKQUEUE_STATS
   ParCompactionManager::oop_task_queues()->print_and_reset_taskqueue_stats("Oop Queue");
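Note the leading '&' in the Parallel hunk that the G1 hunks above do not need: the change simply forwards whatever worker pool the heap already owns, and the two heaps expose it differently. A minimal sketch of that distinction, assuming (as the call sites suggest) that G1CollectedHeap::workers() hands out a pointer while ParallelScavengeHeap::workers() hands out a reference; the helper function itself is illustrative and not part of the commit:

#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/shared/workerThread.hpp"

// Illustrative helper, not JDK code: obtain the heap's worker pool as a pointer.
static WorkerThreads* worker_pool_of(bool use_g1) {
  if (use_g1) {
    return G1CollectedHeap::heap()->workers();        // already a WorkerThreads*
  }
  return &ParallelScavengeHeap::heap()->workers();    // a WorkerThreads&, so take its address
}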
@@ -212,7 +212,7 @@ void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
 
   {
     GCTraceTime(Debug, gc, phases) tm_m("Report Object Count", gc_timer());
-    gc_tracer()->report_object_count_after_gc(&is_alive);
+    gc_tracer()->report_object_count_after_gc(&is_alive, nullptr);
   }
 }
 
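GenMarkSweep passes nullptr here, presumably because the serial collector keeps no WorkerThreads pool to offer; as the heapInspection.cpp hunk below shows, populate_table() then skips the parallel branch and walks the heap on the calling thread. A rough sketch of what that fallback amounts to (the function name is illustrative; only Universe::heap() and object_iterate() are real JDK calls):

#include "gc/shared/collectedHeap.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"

// Illustrative only: the serial path visits every object with a single thread.
static void serial_object_walk(ObjectClosure* cl) {
  Universe::heap()->object_iterate(cl);   // one thread, whole heap
}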
@@ -100,7 +100,7 @@ class ObjectCountEventSenderClosure : public KlassInfoClosure {
   }
 };
 
-void GCTracer::report_object_count_after_gc(BoolObjectClosure* is_alive_cl) {
+void GCTracer::report_object_count_after_gc(BoolObjectClosure* is_alive_cl, WorkerThreads* workers) {
   assert(is_alive_cl != nullptr, "Must supply function to check liveness");
 
   if (ObjectCountEventSender::should_send_event()) {
@@ -109,7 +109,7 @@ void GCTracer::report_object_count_after_gc(BoolObjectClosure* is_alive_cl) {
     KlassInfoTable cit(false);
     if (!cit.allocation_failed()) {
       HeapInspection hi;
-      hi.populate_table(&cit, is_alive_cl);
+      hi.populate_table(&cit, is_alive_cl, workers);
       ObjectCountEventSenderClosure event_sender(cit.size_of_instances_in_words(), Ticks::now());
       cit.iterate(&event_sender);
     }
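For context, a minimal sketch of how a collector is now expected to call the widened API. The closure below is a stand-in for a real liveness filter such as G1ObjectCountIsAliveClosure in the first hunk; everything except report_object_count_after_gc() and safepoint_workers() is illustrative:

#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcTrace.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"

// Stand-in liveness filter: treats every object as live (illustrative only).
class AllLiveClosure : public BoolObjectClosure {
 public:
  bool do_object_b(oop obj) { return true; }
};

static void report_object_count_example(GCTracer* tracer) {
  AllLiveClosure is_alive;
  // A non-null WorkerThreads enables the parallel walk in populate_table();
  // nullptr (as in the GenMarkSweep hunk above) keeps the single-threaded walk.
  tracer->report_object_count_after_gc(&is_alive, Universe::heap()->safepoint_workers());
}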
@@ -30,6 +30,7 @@
 #include "gc/shared/gcId.hpp"
 #include "gc/shared/gcName.hpp"
 #include "gc/shared/gcWhen.hpp"
+#include "gc/shared/workerThread.hpp"
 #include "memory/metaspace.hpp"
 #include "memory/referenceType.hpp"
 #include "utilities/macros.hpp"
@@ -102,7 +103,7 @@ class GCTracer {
   void report_gc_heap_summary(GCWhen::Type when, const GCHeapSummary& heap_summary) const;
   void report_metaspace_summary(GCWhen::Type when, const MetaspaceSummary& metaspace_summary) const;
   void report_gc_reference_stats(const ReferenceProcessorStats& rp) const;
-  void report_object_count_after_gc(BoolObjectClosure* object_filter) NOT_SERVICES_RETURN;
+  void report_object_count_after_gc(BoolObjectClosure* object_filter, WorkerThreads* workers) NOT_SERVICES_RETURN;
   void report_cpu_time_event(double user_time, double system_time, double real_time) const;
 
  protected:
@@ -167,7 +167,16 @@ void VM_GC_HeapInspection::doit() {
     }
   }
   HeapInspection inspect;
-  inspect.heap_inspection(_out, _parallel_thread_num);
+  WorkerThreads* workers = Universe::heap()->safepoint_workers();
+  if (workers != nullptr) {
+    // The GC provided a WorkerThreads to be used during a safepoint.
+    // Can't run with more threads than provided by the WorkerThreads.
+    const uint capped_parallel_thread_num = MIN2(_parallel_thread_num, workers->max_workers());
+    WithActiveWorkers with_active_workers(workers, capped_parallel_thread_num);
+    inspect.heap_inspection(_out, workers);
+  } else {
+    inspect.heap_inspection(_out, nullptr);
+  }
 }
 
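The capping pattern above is the interesting part: the requested thread count from the heap-inspection operation is clamped to the pool's maximum and installed only for the lifetime of the scoped WithActiveWorkers object. A stripped-down sketch of just that pattern; the helper function and its name are not part of the commit:

#include "gc/shared/workerThread.hpp"
#include "utilities/globalDefinitions.hpp"   // MIN2

// Illustrative helper: run a task with at most 'requested' workers.
static void run_capped(WorkerThreads* workers, uint requested, WorkerTask* task) {
  const uint capped = MIN2(requested, workers->max_workers());
  WithActiveWorkers with_active_workers(workers, capped);   // restores the previous count at scope exit
  workers->run_task(task);                                  // dispatches task->work(id) to the active workers
}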
@@ -564,27 +564,16 @@ void ParHeapInspectTask::work(uint worker_id) {
   }
 }
 
-uintx HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter, uint parallel_thread_num) {
+uintx HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter, WorkerThreads* workers) {
   // Try parallel first.
-  if (parallel_thread_num > 1) {
+  if (workers != nullptr) {
     ResourceMark rm;
 
-    WorkerThreads* workers = Universe::heap()->safepoint_workers();
-    if (workers != nullptr) {
-      // The GC provided a WorkerThreads to be used during a safepoint.
-      // Can't run with more threads than provided by the WorkerThreads.
-      const uint capped_parallel_thread_num = MIN2(parallel_thread_num, workers->max_workers());
-      WithActiveWorkers with_active_workers(workers, capped_parallel_thread_num);
-
-      ParallelObjectIterator poi(workers->active_workers());
-      ParHeapInspectTask task(&poi, cit, filter);
-      // Run task with the active workers.
-      workers->run_task(&task);
-      if (task.success()) {
-        return task.missed_count();
-      }
-    }
+    ParallelObjectIterator poi(workers->active_workers());
+    ParHeapInspectTask task(&poi, cit, filter);
+    // Run task with the active workers.
+    workers->run_task(&task);
+    if (task.success()) {
+      return task.missed_count();
+    }
   }
 
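The task handed to run_task() here, ParHeapInspectTask, is untouched by this commit; it splits the heap walk across workers through a shared ParallelObjectIterator. A simplified sketch of that shape, with the bookkeeping reduced to a plain object count; the class below and its members are illustrative, not the real ParHeapInspectTask:

#include "gc/shared/workerThread.hpp"
#include "memory/iterator.hpp"
#include "runtime/atomic.hpp"

// Illustrative worker task: each worker claims chunks of the heap through the
// shared ParallelObjectIterator and counts the objects it visits.
class CountObjectsTask : public WorkerTask {
  ParallelObjectIterator* _poi;   // shared; hands each worker_id its portion of the heap
  volatile size_t _total;         // merged result across workers

  class CountClosure : public ObjectClosure {
   public:
    size_t _local = 0;
    void do_object(oop obj) { _local++; }
  };

 public:
  CountObjectsTask(ParallelObjectIterator* poi)
    : WorkerTask("Count Objects (sketch)"), _poi(poi), _total(0) {}

  void work(uint worker_id) {
    CountClosure cl;
    _poi->object_iterate(&cl, worker_id);   // walk this worker's share of the heap
    Atomic::add(&_total, cl._local);        // merge the per-worker count
  }

  size_t total() const { return _total; }
};

Calling workers->run_task(&task) with such a task is exactly what the hunk above does with the real ParHeapInspectTask.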
@@ -595,13 +584,13 @@ uintx HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *fil
   return ric.missed_count();
 }
 
-void HeapInspection::heap_inspection(outputStream* st, uint parallel_thread_num) {
+void HeapInspection::heap_inspection(outputStream* st, WorkerThreads* workers) {
   ResourceMark rm;
 
   KlassInfoTable cit(false);
   if (!cit.allocation_failed()) {
     // populate table with object allocation info
-    uintx missed_count = populate_table(&cit, nullptr, parallel_thread_num);
+    uintx missed_count = populate_table(&cit, nullptr, workers);
     if (missed_count != 0) {
       log_info(gc, classhisto)("WARNING: Ran out of C-heap; undercounted " UINTX_FORMAT
                                " total instances in data below",
@@ -200,8 +200,8 @@ class KlassInfoClosure;
 
 class HeapInspection : public StackObj {
  public:
-  void heap_inspection(outputStream* st, uint parallel_thread_num = 1) NOT_SERVICES_RETURN;
-  uintx populate_table(KlassInfoTable* cit, BoolObjectClosure* filter = nullptr, uint parallel_thread_num = 1) NOT_SERVICES_RETURN_(0);
+  void heap_inspection(outputStream* st, WorkerThreads* workers) NOT_SERVICES_RETURN;
+  uintx populate_table(KlassInfoTable* cit, BoolObjectClosure* filter, WorkerThreads* workers) NOT_SERVICES_RETURN_(0);
   static void find_instances_at_safepoint(Klass* k, GrowableArray<oop>* result) NOT_SERVICES_RETURN;
 };
 