8251570: JDK-8215624 causes assert(worker_id < _n_workers) failed: Invalid worker_id

Reviewed-by: kbarrett, sjohanss
Stefan Karlsson 2020-08-17 11:32:26 +02:00
parent ea73b5b0d9
commit 2631422bc5
16 changed files with 42 additions and 70 deletions


@@ -119,10 +119,6 @@ public:
   // No GC threads
   virtual void gc_threads_do(ThreadClosure* tc) const {}
 
-  // Runs the given AbstractGangTask with the current active workers
-  // No workGang for EpsilonHeap, work serially with thread 0
-  virtual void run_task(AbstractGangTask* task) { task->work(0); }
-
   // No nmethod handling
   virtual void register_nmethod(nmethod* nm) {}
   virtual void unregister_nmethod(nmethod* nm) {}


@@ -162,13 +162,9 @@ void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_region
   reset_from_card_cache(start_idx, num_regions);
 }
 
-void G1CollectedHeap::run_task(AbstractGangTask* task) {
-  workers()->run_task(task, workers()->active_workers());
-}
-
 Tickspan G1CollectedHeap::run_task_timed(AbstractGangTask* task) {
   Ticks start = Ticks::now();
-  run_task(task);
+  workers()->run_task(task);
   return Ticks::now() - start;
 }


@@ -551,9 +551,6 @@ public:
   WorkGang* workers() const { return _workers; }
 
-  // Runs the given AbstractGangTask with the current active workers.
-  virtual void run_task(AbstractGangTask* task);
-
   // Runs the given AbstractGangTask with the current active workers,
   // returning the total time taken.
   Tickspan run_task_timed(AbstractGangTask* task);


@@ -610,10 +610,6 @@ void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
   ParallelScavengeHeap::heap()->workers().threads_do(tc);
 }
 
-void ParallelScavengeHeap::run_task(AbstractGangTask* task) {
-  _workers.run_task(task);
-}
-
 void ParallelScavengeHeap::print_tracing_info() const {
   AdaptiveSizePolicyOutput::print();
   log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());


@@ -218,8 +218,6 @@ class ParallelScavengeHeap : public CollectedHeap {
   virtual void print_on(outputStream* st) const;
   virtual void print_on_error(outputStream* st) const;
   virtual void gc_threads_do(ThreadClosure* tc) const;
-  // Runs the given AbstractGangTask with the current active workers.
-  virtual void run_task(AbstractGangTask* task);
   virtual void print_tracing_info() const;
 
   virtual WorkGang* get_safepoint_workers() { return &_workers; }


@@ -87,8 +87,3 @@ GrowableArray<MemoryPool*> SerialHeap::memory_pools() {
   memory_pools.append(_old_pool);
   return memory_pools;
 }
-
-// No workGang for SerialHeap, work serially with thread 0.
-void SerialHeap::run_task(AbstractGangTask* task) {
-  task->work(0);
-}


@@ -75,10 +75,6 @@ public:
   template <typename OopClosureType1, typename OopClosureType2>
   void oop_since_save_marks_iterate(OopClosureType1* cur,
                                     OopClosureType2* older);
-
-  // Runs the given AbstractGangTask with the current active workers.
-  // No workGang for SerialHeap, work serially with thread 0.
-  virtual void run_task(AbstractGangTask* task);
 };
 
 #endif // SHARE_GC_SERIAL_SERIALHEAP_HPP


@@ -467,9 +467,6 @@ class CollectedHeap : public CHeapObj<mtInternal> {
   // Iterator for all GC threads (other than VM thread)
   virtual void gc_threads_do(ThreadClosure* tc) const = 0;
 
-  // Run given task. Possibly in parallel if the GC supports it.
-  virtual void run_task(AbstractGangTask* task) = 0;
-
   // Print any relevant tracing info that flags imply.
   // Default implementation does nothing.
   virtual void print_tracing_info() const = 0;


@@ -202,6 +202,27 @@ protected:
   virtual AbstractGangWorker* allocate_worker(uint which);
 };
 
+// Temporarily try to set the number of active workers.
+// It's not guaranteed that it succeeds, and users need to
+// query the number of active workers.
+class WithUpdatedActiveWorkers : public StackObj {
+private:
+  AbstractWorkGang* const _gang;
+  const uint _old_active_workers;
+
+public:
+  WithUpdatedActiveWorkers(AbstractWorkGang* gang, uint requested_num_workers) :
+      _gang(gang),
+      _old_active_workers(gang->active_workers()) {
+    uint capped_num_workers = MIN2(requested_num_workers, gang->total_workers());
+    gang->update_active_workers(capped_num_workers);
+  }
+
+  ~WithUpdatedActiveWorkers() {
+    _gang->update_active_workers(_old_active_workers);
+  }
+};
+
 // Several instances of this class run in parallel as workers for a gang.
 class AbstractGangWorker: public WorkerThread {
 public:

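For context, a minimal usage sketch of the new RAII helper (illustrative only; gang, task and requested_workers are placeholders, not code from this change). The request is just a hint: the scope caps it against total_workers(), so callers should re-read active_workers() before sizing any per-worker data.

  // Illustrative caller; 'gang' is a WorkGang*, 'task' an AbstractGangTask*.
  {
    WithUpdatedActiveWorkers scope(gang, requested_workers); // may cap the request
    uint n = gang->active_workers();                         // query what was actually granted
    // ... size any per-worker data structures with 'n' ...
    gang->run_task(task);                                    // runs with the capped worker count
  } // destructor restores the previous active worker count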

@@ -1195,10 +1195,6 @@ void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
   }
 }
 
-void ShenandoahHeap::run_task(AbstractGangTask* task) {
-  workers()->run_task(task, workers()->active_workers());
-}
-
 void ShenandoahHeap::print_tracing_info() const {
   LogTarget(Info, gc, stats) lt;
   if (lt.is_enabled()) {


@@ -198,8 +198,6 @@ public:
   WorkGang* get_safepoint_workers();
   void gc_threads_do(ThreadClosure* tcl) const;
 
-  // Runs the given AbstractGangTask with the current active workers.
-  virtual void run_task(AbstractGangTask* task);
-
 // ---------- Heap regions handling machinery
 //


@@ -253,10 +253,6 @@ void ZCollectedHeap::object_iterate(ObjectClosure* cl) {
   _heap.object_iterate(cl, true /* visit_weaks */);
 }
 
-void ZCollectedHeap::run_task(AbstractGangTask* task) {
-  return _heap.run_task(task);
-}
-
 void ZCollectedHeap::keep_alive(oop obj) {
   _heap.keep_alive(obj);
 }


@@ -98,8 +98,6 @@ public:
   virtual void object_iterate(ObjectClosure* cl);
 
-  virtual void run_task(AbstractGangTask* task);
-
   virtual void keep_alive(oop obj);
 
   virtual void register_nmethod(nmethod* nm);


@@ -186,26 +186,6 @@ void ZHeap::threads_do(ThreadClosure* tc) const {
   _workers.threads_do(tc);
 }
 
-// Adapter class from AbstractGangTask to Ztask
-class ZAbstractGangTaskAdapter : public ZTask {
-private:
-  AbstractGangTask* _task;
-
-public:
-  ZAbstractGangTaskAdapter(AbstractGangTask* task) :
-      ZTask(task->name()),
-      _task(task) { }
-
-  virtual void work() {
-    _task->work(ZThread::worker_id());
-  }
-};
-
-void ZHeap::run_task(AbstractGangTask* task) {
-  ZAbstractGangTaskAdapter ztask(task);
-  _workers.run_parallel(&ztask);
-}
-
 void ZHeap::out_of_memory() {
   ResourceMark rm;


@@ -98,7 +98,6 @@ public:
   uint nconcurrent_no_boost_worker_threads() const;
   void set_boost_worker_threads(bool boost);
   void threads_do(ThreadClosure* tc) const;
-  void run_task(AbstractGangTask* task);
 
   // Reference processing
   ReferenceDiscoverer* reference_discoverer();


@@ -575,13 +575,26 @@ uintx HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *fil
   // Try parallel first.
   if (parallel_thread_num > 1) {
     ResourceMark rm;
-    ParallelObjectIterator* poi = Universe::heap()->parallel_object_iterator(parallel_thread_num);
-    if (poi != NULL) {
-      ParHeapInspectTask task(poi, cit, filter);
-      Universe::heap()->run_task(&task);
-      delete poi;
-      if (task.success()) {
-        return task.missed_count();
+
+    WorkGang* gang = Universe::heap()->get_safepoint_workers();
+    if (gang != NULL) {
+      // The GC provided a WorkGang to be used during a safepoint.
+
+      // Can't run with more threads than provided by the WorkGang.
+      WithUpdatedActiveWorkers update_and_restore(gang, parallel_thread_num);
+
+      ParallelObjectIterator* poi = Universe::heap()->parallel_object_iterator(gang->active_workers());
+      if (poi != NULL) {
+        // The GC supports parallel object iteration.
+
+        ParHeapInspectTask task(poi, cit, filter);
+        // Run task with the active workers.
+        gang->run_task(&task);
+
+        delete poi;
+        if (task.success()) {
+          return task.missed_count();
+        }
       }
     }
   }
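For illustration only (not part of the patch), the invariant this ordering restores: per-worker state must be sized with the same worker count the gang actually dispatches, which is why the ParallelObjectIterator is now created from gang->active_workers() inside the WithUpdatedActiveWorkers scope rather than from parallel_thread_num. A hypothetical gang task showing the bound that the failing assert in the bug title checks:

  // Hypothetical task; names are illustrative, not from heapInspection.cpp.
  class CountObjectsTask : public AbstractGangTask {
    const uint _n_workers;        // must match the gang's active worker count
    size_t*    _per_worker_count; // one slot per expected worker
  public:
    CountObjectsTask(uint n_workers, size_t* counts) :
        AbstractGangTask("Count Objects (hypothetical)"),
        _n_workers(n_workers),
        _per_worker_count(counts) {}

    virtual void work(uint worker_id) {
      // Holds only if the gang was capped before _per_worker_count was sized.
      assert(worker_id < _n_workers, "Invalid worker_id");
      _per_worker_count[worker_id]++;
    }
  };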