8152312: ParNew: Restore preserved marks in parallel

Reviewed-by: tschatzl
This commit is contained in:
Antonios Printezis 2016-04-07 10:55:54 +02:00
parent e266e56356
commit f22c068365
4 changed files with 137 additions and 32 deletions

View File

@ -692,7 +692,7 @@ void DefNewGeneration::collect(bool full,
_promo_failure_scan_stack.clear(true); // Clear cached segments.
remove_forwarding_pointers();
log_debug(gc)("Promotion failed");
log_info(gc, promotion)("Promotion failed");
// Add to-space to the list of space to compact
// when a promotion failure has occurred. In that
// case there can be live objects in to-space
@ -739,8 +739,7 @@ void DefNewGeneration::remove_forwarding_pointers() {
eden()->object_iterate(&rspc);
from()->object_iterate(&rspc);
// Now restore saved marks, if any.
_preserved_marks_set.restore();
_preserved_marks_set.restore(GenCollectedHeap::heap()->workers());
}
void DefNewGeneration::handle_promotion_failure(oop old) {

View File

@ -24,24 +24,30 @@
#include "precompiled.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/workgroup.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
// Pop every (oop, mark word) pair off the stack and write the saved
// mark word back into its object's header, undoing the forwarding
// installed during a failed promotion. Leaves the stack empty.
// NOTE(review): the scraped diff interleaved the pre-change
// StackIterator-based loop with this replacement; only the pop-based
// loop belongs in the post-change code.
void PreservedMarks::restore() {
  while (!_stack.is_empty()) {
    const OopAndMarkOop elem = _stack.pop();
    elem.set_mark();
  }
  assert_empty();
}
#ifndef PRODUCT
void PreservedMarks::assert_empty() {
assert(_stack.is_empty(), "stack expected to be empty, size = "SIZE_FORMAT,
_stack.size());
assert(_stack.cache_size() == 0,
"stack expected to have no cached segments, cache size = "SIZE_FORMAT,
_stack.cache_size());
}
#endif // ndef PRODUCT
// Applied to heap objects after a promotion failure: any object that
// was forwarded gets its mark word re-initialized via the
// PreservedMarks helper. (Objects whose original mark word had to be
// saved are fixed up afterwards by PreservedMarksSet::restore().)
// NOTE(review): the stale pre-change line `obj->init_mark();` that the
// diff scrape left next to its replacement has been dropped.
void RemoveForwardedPointerClosure::do_object(oop obj) {
  if (obj->is_forwarded()) {
    PreservedMarks::init_forwarded_mark(obj);
  }
}
@ -61,15 +67,48 @@ void PreservedMarksSet::init(uint num) {
assert_empty();
}
// Gang task that restores the PreservedMarks stacks of a
// PreservedMarksSet in parallel. Each gang thread claims stack
// indexes through a SequentialSubTasksDone and restores whole stacks,
// accumulating the number of restored marks into *_total_size_addr.
// NOTE(review): the diff scrape had spliced the pre-change serial
// restore() body and a stray log_trace line into this class; those
// residue lines have been removed to restore a well-formed class.
class ParRestoreTask : public AbstractGangTask {
private:
  PreservedMarksSet* const _preserved_marks_set;
  SequentialSubTasksDone _sub_tasks;
  volatile size_t* const _total_size_addr;

public:
  virtual void work(uint worker_id) {
    uint task_id = 0;
    // Task ids map 1:1 to the stacks in the set.
    while (!_sub_tasks.is_task_claimed(/* reference */ task_id)) {
      PreservedMarks* const preserved_marks = _preserved_marks_set->get(task_id);
      const size_t size = preserved_marks->size();
      preserved_marks->restore();
      // Only do the atomic add if the size is > 0.
      if (size > 0) {
        Atomic::add(size, _total_size_addr);
      }
    }
    _sub_tasks.all_tasks_completed();
  }

  ParRestoreTask(uint worker_num,
                 PreservedMarksSet* preserved_marks_set,
                 volatile size_t* total_size_addr)
      : AbstractGangTask("Parallel Preserved Mark Restoration"),
        _preserved_marks_set(preserved_marks_set),
        _total_size_addr(total_size_addr) {
    _sub_tasks.set_n_threads(worker_num);
    _sub_tasks.set_n_tasks(preserved_marks_set->num());
  }
};
// Parallel flavor of restoration: hand the per-stack work to the
// given WorkGang via a ParRestoreTask sized to its active workers.
void PreservedMarksSet::restore_internal(WorkGang* workers,
                                         volatile size_t* total_size_addr) {
  assert(workers != NULL, "pre-condition");
  const uint n_workers = workers->active_workers();
  ParRestoreTask restore_task(n_workers, this, total_size_addr);
  workers->run_task(&restore_task);
}
// temporary, used by PS
void PreservedMarksSet::restore() {
restore<WorkGang>(NULL);
}
void PreservedMarksSet::reclaim() {
@ -92,7 +131,7 @@ void PreservedMarksSet::reclaim() {
// Verify every stack in the set is empty and caches no segments.
// NOTE(review): the stale pre-change line
// `assert(get(i)->is_empty(), ...)` left by the diff scrape has been
// dropped; its replacement get(i)->assert_empty() subsumes it.
void PreservedMarksSet::assert_empty() {
  assert(_stacks != NULL && _num > 0, "should have been initialized");
  for (uint i = 0; i < _num; i += 1) {
    get(i)->assert_empty();
  }
}
#endif // ndef PRODUCT

View File

@ -44,6 +44,8 @@ public:
};
typedef Stack<OopAndMarkOop, mtGC> OopAndMarkOopStack;
class WorkGang;
class PreservedMarks VALUE_OBJ_CLASS_SPEC {
private:
OopAndMarkOopStack _stack;
@ -52,13 +54,19 @@ private:
inline void push(oop obj, markOop m);
public:
bool is_empty() const { return _stack.is_empty(); }
size_t size() const { return _stack.size(); }
inline void push_if_necessary(oop obj, markOop m);
// Iterate over the stack, restore the preserved marks, then reclaim
// the memory taken up by stack chunks.
// Iterate over the stack, restore all preserved marks, and
// reclaim the memory taken up by the stack segments.
void restore();
~PreservedMarks() { assert(is_empty(), "should have been cleared"); }
inline static void init_forwarded_mark(oop obj);
// Assert the stack is empty and has no cached segments.
void assert_empty() PRODUCT_RETURN;
inline PreservedMarks();
~PreservedMarks() { assert_empty(); }
};
class RemoveForwardedPointerClosure: public ObjectClosure {
@ -82,7 +90,12 @@ private:
// or == NULL if they have not.
Padded<PreservedMarks>* _stacks;
// Internal version of restore() that uses a WorkGang for parallelism.
void restore_internal(WorkGang* workers, volatile size_t* total_size_addr);
public:
uint num() const { return _num; }
// Return the i'th stack.
PreservedMarks* get(uint i = 0) const {
assert(_num > 0 && _stacks != NULL, "stacks should have been initialized");
@ -92,13 +105,23 @@ public:
// Allocate stack array.
void init(uint num);
// Iterate over all stacks, restore all preserved marks, then
// reclaim the memory taken up by stack chunks.
// Iterate over all stacks, restore all preserved marks, and reclaim
// the memory taken up by the stack segments. If the executor is
// NULL, restoration will be done serially. If the executor is not
// NULL, restoration could be done in parallel (when it makes
// sense). Supported executors: WorkGang (Serial, CMS, G1)
template <class E>
inline void restore(E* executor);
// Do the restoration serially. Temporary, to be used by PS until we
// can support GCTaskManager in restore(E*).
void restore();
// Reclaim stack array.
void reclaim();
// Assert all the stacks are empty.
// Assert all the stacks are empty and have no cached segments.
void assert_empty() PRODUCT_RETURN;
PreservedMarksSet(bool in_c_heap)

View File

@ -22,13 +22,13 @@
*
*/
#include "gc/shared/preservedMarks.hpp"
#include "oops/markOop.inline.hpp"
#include "utilities/stack.inline.hpp"
#ifndef SHARE_VM_GC_SHARED_PRESERVEDMARKS_INLINE_HPP
#define SHARE_VM_GC_SHARED_PRESERVEDMARKS_INLINE_HPP
#include "gc/shared/preservedMarks.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/stack.inline.hpp"
// Returns whether the given mark word must be saved before obj is
// forwarded; delegates the decision to the mark word itself.
inline bool PreservedMarks::should_preserve_mark(oop obj, markOop m) const {
return m->must_be_preserved_for_promotion_failure(obj);
}
@ -45,4 +45,48 @@ inline void PreservedMarks::push_if_necessary(oop obj, markOop m) {
}
}
// Re-initialize the header of a previously-forwarded object to the
// default mark word (used when undoing a failed promotion).
inline void PreservedMarks::init_forwarded_mark(oop obj) {
obj->init_mark();
}
// Restore all preserved marks in the set. With a NULL executor the
// stacks are restored serially on the calling thread; otherwise the
// work is handed to restore_internal() for parallel execution.
// total_size is volatile because, on the parallel path, multiple
// workers accumulate into it via Atomic::add.
template <class E>
inline void PreservedMarksSet::restore(E* executor) {
volatile size_t total_size = 0;
#ifdef ASSERT
// This is to make sure the total_size we'll calculate below is correct.
size_t total_size_before = 0;
for (uint i = 0; i < _num; i += 1) {
total_size_before += get(i)->size();
}
#endif // def ASSERT
if (executor == NULL) {
for (uint i = 0; i < _num; i += 1) {
total_size += get(i)->size();
get(i)->restore();
}
} else {
// Right now, if the executor is not NULL we do the work in
// parallel. In the future we might want to do the restoration
// serially, if there's only a small number of marks per stack.
restore_internal(executor, &total_size);
}
assert_empty();
// total_size_before only exists in ASSERT builds; this is fine
// because assert() compiles away entirely in product builds.
assert(total_size == total_size_before,
"total_size = " SIZE_FORMAT " before = " SIZE_FORMAT,
total_size, total_size_before);
log_trace(gc)("Restored " SIZE_FORMAT " marks", total_size);
}
// Construct with a default-sized segment stack and segment caching
// disabled (see rationale below).
inline PreservedMarks::PreservedMarks()
: _stack(OopAndMarkOopStack::default_segment_size(),
// This stack should be used very infrequently so there's
// no point in caching stack segments (there will be a
// waste of space most of the time). So we set the max
// cache size to 0.
0 /* max_cache_size */) { }
#endif // SHARE_VM_GC_SHARED_PRESERVEDMARKS_INLINE_HPP