Mirror of https://github.com/openjdk/jdk.git, synced 2026-02-12 03:18:37 +00:00
Merge
This commit is contained in: commit 6572ca3124
@@ -270,7 +270,7 @@ psParallelCompact.cpp                   parallelScavengeHeap.inline.hpp
 psParallelCompact.cpp                   pcTasks.hpp
 psParallelCompact.cpp                   psMarkSweep.hpp
 psParallelCompact.cpp                   psMarkSweepDecorator.hpp
-psParallelCompact.cpp                   psCompactionManager.hpp
+psParallelCompact.cpp                   psCompactionManager.inline.hpp
 psParallelCompact.cpp                   psPromotionManager.inline.hpp
 psParallelCompact.cpp                   psOldGen.hpp
 psParallelCompact.cpp                   psParallelCompact.hpp
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2005, 2009, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -32,7 +32,7 @@ ParCompactionManager::ObjArrayTaskQueueSet*
|
||||
ParCompactionManager::_objarray_queues = NULL;
|
||||
ObjectStartArray* ParCompactionManager::_start_array = NULL;
|
||||
ParMarkBitMap* ParCompactionManager::_mark_bitmap = NULL;
|
||||
RegionTaskQueueSet* ParCompactionManager::_region_array = NULL;
|
||||
RegionTaskQueueSet* ParCompactionManager::_region_array = NULL;
|
||||
|
||||
ParCompactionManager::ParCompactionManager() :
|
||||
_action(CopyAndUpdate) {
|
||||
@ -43,25 +43,9 @@ ParCompactionManager::ParCompactionManager() :
|
||||
_old_gen = heap->old_gen();
|
||||
_start_array = old_gen()->start_array();
|
||||
|
||||
|
||||
marking_stack()->initialize();
|
||||
|
||||
// We want the overflow stack to be permanent
|
||||
_overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);
|
||||
|
||||
_objarray_queue.initialize();
|
||||
_objarray_overflow_stack =
|
||||
new (ResourceObj::C_HEAP) ObjArrayOverflowStack(10, true);
|
||||
|
||||
#ifdef USE_RegionTaskQueueWithOverflow
|
||||
_objarray_stack.initialize();
|
||||
region_stack()->initialize();
|
||||
#else
|
||||
region_stack()->initialize();
|
||||
|
||||
// We want the overflow stack to be permanent
|
||||
_region_overflow_stack =
|
||||
new (ResourceObj::C_HEAP) GrowableArray<size_t>(10, true);
|
||||
#endif
|
||||
|
||||
// Note that _revisit_klass_stack is allocated out of the
|
||||
// C heap (as opposed to out of ResourceArena).
|
||||
@ -71,12 +55,9 @@ ParCompactionManager::ParCompactionManager() :
|
||||
// From some experiments (#klass/k)^2 for k = 10 seems a better fit, but this will
|
||||
// have to do for now until we are able to investigate a more optimal setting.
|
||||
_revisit_mdo_stack = new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);
|
||||
|
||||
}
|
||||
|
||||
ParCompactionManager::~ParCompactionManager() {
|
||||
delete _overflow_stack;
|
||||
delete _objarray_overflow_stack;
|
||||
delete _revisit_klass_stack;
|
||||
delete _revisit_mdo_stack;
|
||||
// _manager_array and _stack_array are statics
|
||||
@ -108,12 +89,8 @@ void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
|
||||
_manager_array[i] = new ParCompactionManager();
|
||||
guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
|
||||
stack_array()->register_queue(i, _manager_array[i]->marking_stack());
|
||||
_objarray_queues->register_queue(i, &_manager_array[i]->_objarray_queue);
|
||||
#ifdef USE_RegionTaskQueueWithOverflow
|
||||
region_array()->register_queue(i, _manager_array[i]->region_stack()->task_queue());
|
||||
#else
|
||||
_objarray_queues->register_queue(i, &_manager_array[i]->_objarray_stack);
|
||||
region_array()->register_queue(i, _manager_array[i]->region_stack());
|
||||
#endif
|
||||
}
|
||||
|
||||
// The VMThread gets its own ParCompactionManager, which is not available
|
||||
@ -149,57 +126,6 @@ bool ParCompactionManager::should_reset_only() {
|
||||
return action() == ParCompactionManager::ResetObjects;
|
||||
}
|
||||
|
||||
// For now save on a stack
|
||||
void ParCompactionManager::save_for_scanning(oop m) {
|
||||
stack_push(m);
|
||||
}
|
||||
|
||||
void ParCompactionManager::stack_push(oop obj) {
|
||||
|
||||
if(!marking_stack()->push(obj)) {
|
||||
overflow_stack()->push(obj);
|
||||
}
|
||||
}
|
||||
|
||||
oop ParCompactionManager::retrieve_for_scanning() {
|
||||
|
||||
// Should not be used in the parallel case
|
||||
ShouldNotReachHere();
|
||||
return NULL;
|
||||
}
|
||||
|
||||
// Save region on a stack
|
||||
void ParCompactionManager::save_for_processing(size_t region_index) {
|
||||
#ifdef ASSERT
|
||||
const ParallelCompactData& sd = PSParallelCompact::summary_data();
|
||||
ParallelCompactData::RegionData* const region_ptr = sd.region(region_index);
|
||||
assert(region_ptr->claimed(), "must be claimed");
|
||||
assert(region_ptr->_pushed++ == 0, "should only be pushed once");
|
||||
#endif
|
||||
region_stack_push(region_index);
|
||||
}
|
||||
|
||||
void ParCompactionManager::region_stack_push(size_t region_index) {
|
||||
|
||||
#ifdef USE_RegionTaskQueueWithOverflow
|
||||
region_stack()->save(region_index);
|
||||
#else
|
||||
if(!region_stack()->push(region_index)) {
|
||||
region_overflow_stack()->push(region_index);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
bool ParCompactionManager::retrieve_for_processing(size_t& region_index) {
|
||||
#ifdef USE_RegionTaskQueueWithOverflow
|
||||
return region_stack()->retrieve(region_index);
|
||||
#else
|
||||
// Should not be used in the parallel case
|
||||
ShouldNotReachHere();
|
||||
return false;
|
||||
#endif
|
||||
}
|
||||
|
||||
ParCompactionManager*
|
||||
ParCompactionManager::gc_thread_compaction_manager(int index) {
|
||||
assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
|
||||
@ -218,8 +144,8 @@ void ParCompactionManager::follow_marking_stacks() {
|
||||
do {
|
||||
// Drain the overflow stack first, to allow stealing from the marking stack.
|
||||
oop obj;
|
||||
while (!overflow_stack()->is_empty()) {
|
||||
overflow_stack()->pop()->follow_contents(this);
|
||||
while (marking_stack()->pop_overflow(obj)) {
|
||||
obj->follow_contents(this);
|
||||
}
|
||||
while (marking_stack()->pop_local(obj)) {
|
||||
obj->follow_contents(this);
|
||||
@ -227,11 +153,10 @@ void ParCompactionManager::follow_marking_stacks() {
|
||||
|
||||
// Process ObjArrays one at a time to avoid marking stack bloat.
|
||||
ObjArrayTask task;
|
||||
if (!_objarray_overflow_stack->is_empty()) {
|
||||
task = _objarray_overflow_stack->pop();
|
||||
if (_objarray_stack.pop_overflow(task)) {
|
||||
objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
|
||||
k->oop_follow_contents(this, task.obj(), task.index());
|
||||
} else if (_objarray_queue.pop_local(task)) {
|
||||
} else if (_objarray_stack.pop_local(task)) {
|
||||
objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
|
||||
k->oop_follow_contents(this, task.obj(), task.index());
|
||||
}
|
||||
@ -240,68 +165,18 @@ void ParCompactionManager::follow_marking_stacks() {
|
||||
assert(marking_stacks_empty(), "Sanity");
|
||||
}
|
||||
|
||||
void ParCompactionManager::drain_region_overflow_stack() {
|
||||
size_t region_index = (size_t) -1;
|
||||
while(region_stack()->retrieve_from_overflow(region_index)) {
|
||||
PSParallelCompact::fill_and_update_region(this, region_index);
|
||||
}
|
||||
}
|
||||
|
||||
void ParCompactionManager::drain_region_stacks() {
|
||||
#ifdef ASSERT
|
||||
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
|
||||
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
|
||||
MutableSpace* to_space = heap->young_gen()->to_space();
|
||||
MutableSpace* old_space = heap->old_gen()->object_space();
|
||||
MutableSpace* perm_space = heap->perm_gen()->object_space();
|
||||
#endif /* ASSERT */
|
||||
|
||||
#if 1 // def DO_PARALLEL - the serial code hasn't been updated
|
||||
do {
|
||||
|
||||
#ifdef USE_RegionTaskQueueWithOverflow
|
||||
// Drain overflow stack first, so other threads can steal from
|
||||
// claimed stack while we work.
|
||||
size_t region_index = (size_t) -1;
|
||||
while(region_stack()->retrieve_from_overflow(region_index)) {
|
||||
// Drain overflow stack first so other threads can steal.
|
||||
size_t region_index;
|
||||
while (region_stack()->pop_overflow(region_index)) {
|
||||
PSParallelCompact::fill_and_update_region(this, region_index);
|
||||
}
|
||||
|
||||
while (region_stack()->retrieve_from_stealable_queue(region_index)) {
|
||||
while (region_stack()->pop_local(region_index)) {
|
||||
PSParallelCompact::fill_and_update_region(this, region_index);
|
||||
}
|
||||
} while (!region_stack()->is_empty());
|
||||
#else
|
||||
// Drain overflow stack first, so other threads can steal from
|
||||
// claimed stack while we work.
|
||||
while(!region_overflow_stack()->is_empty()) {
|
||||
size_t region_index = region_overflow_stack()->pop();
|
||||
PSParallelCompact::fill_and_update_region(this, region_index);
|
||||
}
|
||||
|
||||
size_t region_index = -1;
|
||||
// obj is a reference!!!
|
||||
while (region_stack()->pop_local(region_index)) {
|
||||
// It would be nice to assert about the type of objects we might
|
||||
// pop, but they can come from anywhere, unfortunately.
|
||||
PSParallelCompact::fill_and_update_region(this, region_index);
|
||||
}
|
||||
} while((region_stack()->size() != 0) ||
|
||||
(region_overflow_stack()->length() != 0));
|
||||
#endif
|
||||
|
||||
#ifdef USE_RegionTaskQueueWithOverflow
|
||||
assert(region_stack()->is_empty(), "Sanity");
|
||||
#else
|
||||
assert(region_stack()->size() == 0, "Sanity");
|
||||
assert(region_overflow_stack()->length() == 0, "Sanity");
|
||||
#endif
|
||||
#else
|
||||
oop obj;
|
||||
while (obj = retrieve_for_scanning()) {
|
||||
obj->follow_contents(this);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
|
||||
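The .cpp hunks above all converge on one drain discipline: pop the private overflow portion first, so the bounded, stealable portion stays visible to other GC threads, then drain the local portion, and loop because processing a task can push new ones. A minimal self-contained sketch of that pattern (simplified stand-in types, not HotSpot's OverflowTaskQueue):

// Illustrative sketch only -- simplified stand-ins, not HotSpot code.
#include <deque>
#include <vector>

template <class E>
struct OverflowQueueSketch {
  std::deque<E>  local;     // stands in for the bounded, stealable task queue
  std::vector<E> overflow;  // stands in for the unbounded private spill stack

  bool pop_overflow(E& e) {
    if (overflow.empty()) return false;
    e = overflow.back(); overflow.pop_back(); return true;
  }
  bool pop_local(E& e) {
    if (local.empty()) return false;
    e = local.back(); local.pop_back(); return true;
  }
  bool is_empty() const { return local.empty() && overflow.empty(); }
};

// Shape of the new drain_region_stacks()/follow_marking_stacks() loops:
// spill first (so thieves keep finding work in the local part), then the
// local part; repeat because process() may push more tasks.
template <class E, class Process>
void drain(OverflowQueueSketch<E>& q, Process process) {
  E e;
  do {
    while (q.pop_overflow(e)) process(e);
    while (q.pop_local(e))    process(e);
  } while (!q.is_empty());
}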
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -59,10 +59,10 @@ class ParCompactionManager : public CHeapObj {
 
  private:
   // 32-bit:  4K * 8 = 32KiB; 64-bit:  8K * 16 = 128KiB
-  #define OBJARRAY_QUEUE_SIZE (1 << NOT_LP64(12) LP64_ONLY(13))
-  typedef GenericTaskQueue<ObjArrayTask, OBJARRAY_QUEUE_SIZE> ObjArrayTaskQueue;
-  typedef GenericTaskQueueSet<ObjArrayTaskQueue> ObjArrayTaskQueueSet;
-  #undef OBJARRAY_QUEUE_SIZE
+  #define QUEUE_SIZE (1 << NOT_LP64(12) LP64_ONLY(13))
+  typedef OverflowTaskQueue<ObjArrayTask, QUEUE_SIZE> ObjArrayTaskQueue;
+  typedef GenericTaskQueueSet<ObjArrayTaskQueue> ObjArrayTaskQueueSet;
+  #undef QUEUE_SIZE
 
   static ParCompactionManager** _manager_array;
   static OopTaskQueueSet*       _stack_array;
@@ -72,23 +72,13 @@ class ParCompactionManager : public CHeapObj {
   static PSOldGen*              _old_gen;
 
  private:
-  OopTaskQueue                  _marking_stack;
-  GrowableArray<oop>*           _overflow_stack;
-
-  typedef GrowableArray<ObjArrayTask> ObjArrayOverflowStack;
-  ObjArrayTaskQueue             _objarray_queue;
-  ObjArrayOverflowStack*        _objarray_overflow_stack;
+  OverflowTaskQueue<oop>        _marking_stack;
+  ObjArrayTaskQueue             _objarray_stack;
 
   // Is there a way to reuse the _marking_stack for the
   // saving empty regions?  For now just create a different
   // type of TaskQueue.
-
-#ifdef USE_RegionTaskQueueWithOverflow
-  RegionTaskQueueWithOverflow   _region_stack;
-#else
   RegionTaskQueue               _region_stack;
-  GrowableArray<size_t>*        _region_overflow_stack;
-#endif
 
 #if 1  // does this happen enough to need a per thread stack?
   GrowableArray<Klass*>*        _revisit_klass_stack;
@@ -107,16 +97,8 @@ private:
  protected:
   // Array of tasks.  Needed by the ParallelTaskTerminator.
   static RegionTaskQueueSet* region_array()      { return _region_array; }
-  OopTaskQueue*  marking_stack()                 { return &_marking_stack; }
-  GrowableArray<oop>* overflow_stack()           { return _overflow_stack; }
-#ifdef USE_RegionTaskQueueWithOverflow
-  RegionTaskQueueWithOverflow* region_stack()    { return &_region_stack; }
-#else
-  RegionTaskQueue*  region_stack()               { return &_region_stack; }
-  GrowableArray<size_t>* region_overflow_stack() {
-    return _region_overflow_stack;
-  }
-#endif
+  OverflowTaskQueue<oop>* marking_stack()        { return &_marking_stack; }
+  RegionTaskQueue* region_stack()                { return &_region_stack; }
 
   // Pushes onto the marking stack.  If the marking stack is full,
   // pushes onto the overflow stack.
@@ -124,11 +106,7 @@ private:
   // Do not implement an equivalent stack_pop.  Deal with the
   // marking stack and overflow stack directly.
 
-  // Pushes onto the region stack.  If the region stack is full,
-  // pushes onto the region overflow stack.
-  void region_stack_push(size_t region_index);
-
-public:
+ public:
   Action action() { return _action; }
   void set_action(Action v) { _action = v; }
 
@@ -157,22 +135,15 @@ public:
   GrowableArray<DataLayout*>* revisit_mdo_stack() { return _revisit_mdo_stack; }
 #endif
 
-  // Save oop for later processing.  Must not fail.
-  void save_for_scanning(oop m);
-  // Get a oop for scanning.  If returns null, no oop were found.
-  oop retrieve_for_scanning();
-
-  inline void push_objarray(oop obj, size_t index);
-
-  // Save region for later processing.  Must not fail.
-  void save_for_processing(size_t region_index);
-  // Get a region for processing.  If returns null, no region were found.
-  bool retrieve_for_processing(size_t& region_index);
+  // Save for later processing.  Must not fail.
+  inline void push(oop obj) { _marking_stack.push(obj); }
+  inline void push_objarray(oop objarray, size_t index);
+  inline void push_region(size_t index);
 
   // Access function for compaction managers
   static ParCompactionManager* gc_thread_compaction_manager(int index);
 
-  static bool steal(int queue_num, int* seed, Task& t) {
+  static bool steal(int queue_num, int* seed, oop& t) {
     return stack_array()->steal(queue_num, seed, t);
   }
 
@@ -180,8 +151,8 @@ public:
     return _objarray_queues->steal(queue_num, seed, t);
   }
 
-  static bool steal(int queue_num, int* seed, RegionTask& t) {
-    return region_array()->steal(queue_num, seed, t);
+  static bool steal(int queue_num, int* seed, size_t& region) {
+    return region_array()->steal(queue_num, seed, region);
   }
 
   // Process tasks remaining on any marking stack
@@ -191,9 +162,6 @@ public:
   // Process tasks remaining on any stack
   void drain_region_stacks();
 
-  // Process tasks remaining on any stack
-  void drain_region_overflow_stack();
-
   // Debugging support
 #ifdef ASSERT
   bool stacks_have_been_allocated();
@@ -208,6 +176,5 @@ inline ParCompactionManager* ParCompactionManager::manager_array(int index) {
 }
 
 bool ParCompactionManager::marking_stacks_empty() const {
-  return _marking_stack.size() == 0 && _overflow_stack->is_empty() &&
-    _objarray_queue.size() == 0 && _objarray_overflow_stack->is_empty();
+  return _marking_stack.is_empty() && _objarray_stack.is_empty();
 }
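Read as one change, the header hunks above replace every {bounded task queue + GrowableArray overflow stack} pair with a single queue whose push cannot fail. A hedged sketch of the push side (hand-written stand-in, not the real OverflowTaskQueue template):

// Illustrative sketch only -- not the HotSpot template.
#include <cstddef>
#include <vector>

template <class E, std::size_t N>
class OverflowPushSketch {
  E              _elems[N];   // fixed-capacity, stealable region
  std::size_t    _size = 0;
  std::vector<E> _overflow;   // private spill area, grows as needed
public:
  void push(const E& e) {     // never fails, unlike a bare bounded push()
    if (_size < N) {
      _elems[_size++] = e;
    } else {
      _overflow.push_back(e); // spill instead of reporting failure
    }
  }
  bool is_empty() const { return _size == 0 && _overflow.empty(); }
};

// Usage analogous to the new typedef above (QUEUE_SIZE is 1<<12 or 1<<13):
//   typedef OverflowPushSketch<ObjArrayTask, 1u << 13> ObjArrayTaskQueue;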
@@ -26,7 +26,16 @@ void ParCompactionManager::push_objarray(oop obj, size_t index)
 {
   ObjArrayTask task(obj, index);
   assert(task.is_valid(), "bad ObjArrayTask");
-  if (!_objarray_queue.push(task)) {
-    _objarray_overflow_stack->push(task);
-  }
+  _objarray_stack.push(task);
 }
+
+void ParCompactionManager::push_region(size_t index)
+{
+#ifdef ASSERT
+  const ParallelCompactData& sd = PSParallelCompact::summary_data();
+  ParallelCompactData::RegionData* const region_ptr = sd.region(index);
+  assert(region_ptr->claimed(), "must be claimed");
+  assert(region_ptr->_pushed++ == 0, "should only be pushed once");
+#endif
+  region_stack()->push(index);
+}
@@ -2474,7 +2474,7 @@ void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
   for (size_t cur = end_region - 1; cur >= beg_region; --cur) {
     if (sd.region(cur)->claim_unsafe()) {
       ParCompactionManager* cm = ParCompactionManager::manager_array(which);
-      cm->save_for_processing(cur);
+      cm->push_region(cur);
 
       if (TraceParallelOldGCCompactionPhase && Verbose) {
         const size_t count_mod_8 = fillable_regions & 7;
@@ -3138,7 +3138,7 @@ void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
     assert(cur->data_size() > 0, "region must have live data");
     cur->decrement_destination_count();
     if (cur < enqueue_end && cur->available() && cur->claim()) {
-      cm->save_for_processing(sd.region(cur));
+      cm->push_region(sd.region(cur));
     }
   }
 }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1297,11 +1297,8 @@ inline void PSParallelCompact::mark_and_push(ParCompactionManager* cm, T* p) {
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop)) {
     oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    if (mark_bitmap()->is_unmarked(obj)) {
-      if (mark_obj(obj)) {
-        // This thread marked the object and owns the subsequent processing of it.
-        cm->save_for_scanning(obj);
-      }
+    if (mark_bitmap()->is_unmarked(obj) && mark_obj(obj)) {
+      cm->push(obj);
     }
   }
 }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -94,45 +94,13 @@ void PSPromotionManager::post_scavenge() {
   print_stats();
 #endif // PS_PM_STATS
 
-  for(uint i=0; i<ParallelGCThreads+1; i++) {
+  for (uint i = 0; i < ParallelGCThreads + 1; i++) {
     PSPromotionManager* manager = manager_array(i);
-
-    // the guarantees are a bit gratuitous but, if one fires, we'll
-    // have a better idea of what went wrong
-    if (i < ParallelGCThreads) {
-      guarantee((!UseDepthFirstScavengeOrder ||
-                 manager->overflow_stack_depth()->length() <= 0),
-                "promotion manager overflow stack must be empty");
-      guarantee((UseDepthFirstScavengeOrder ||
-                 manager->overflow_stack_breadth()->length() <= 0),
-                "promotion manager overflow stack must be empty");
-
-      guarantee((!UseDepthFirstScavengeOrder ||
-                 manager->claimed_stack_depth()->size() <= 0),
-                "promotion manager claimed stack must be empty");
-      guarantee((UseDepthFirstScavengeOrder ||
-                 manager->claimed_stack_breadth()->size() <= 0),
-                "promotion manager claimed stack must be empty");
+    if (UseDepthFirstScavengeOrder) {
+      assert(manager->claimed_stack_depth()->is_empty(), "should be empty");
     } else {
-      guarantee((!UseDepthFirstScavengeOrder ||
-                 manager->overflow_stack_depth()->length() <= 0),
-                "VM Thread promotion manager overflow stack "
-                "must be empty");
-      guarantee((UseDepthFirstScavengeOrder ||
-                 manager->overflow_stack_breadth()->length() <= 0),
-                "VM Thread promotion manager overflow stack "
-                "must be empty");
-
-      guarantee((!UseDepthFirstScavengeOrder ||
-                 manager->claimed_stack_depth()->size() <= 0),
-                "VM Thread promotion manager claimed stack "
-                "must be empty");
-      guarantee((UseDepthFirstScavengeOrder ||
-                 manager->claimed_stack_breadth()->size() <= 0),
-                "VM Thread promotion manager claimed stack "
-                "must be empty");
+      assert(manager->claimed_stack_breadth()->is_empty(), "should be empty");
     }
 
     manager->flush_labs();
   }
 }
@@ -181,15 +149,9 @@ PSPromotionManager::PSPromotionManager() {
   if (depth_first()) {
     claimed_stack_depth()->initialize();
     queue_size = claimed_stack_depth()->max_elems();
-    // We want the overflow stack to be permanent
-    _overflow_stack_depth = new (ResourceObj::C_HEAP) GrowableArray<StarTask>(10, true);
-    _overflow_stack_breadth = NULL;
   } else {
     claimed_stack_breadth()->initialize();
     queue_size = claimed_stack_breadth()->max_elems();
-    // We want the overflow stack to be permanent
-    _overflow_stack_breadth = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);
-    _overflow_stack_depth = NULL;
   }
 
   _totally_drain = (ParallelGCThreads == 1) || (GCDrainStackTargetSize == 0);
@@ -209,8 +171,7 @@ PSPromotionManager::PSPromotionManager() {
 }
 
 void PSPromotionManager::reset() {
-  assert(claimed_stack_empty(), "reset of non-empty claimed stack");
-  assert(overflow_stack_empty(), "reset of non-empty overflow stack");
+  assert(stacks_empty(), "reset of non-empty stack");
 
   // We need to get an assert in here to make sure the labs are always flushed.
 
@@ -243,7 +204,7 @@ void PSPromotionManager::reset() {
 
 void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
   assert(depth_first(), "invariant");
-  assert(overflow_stack_depth() != NULL, "invariant");
+  assert(claimed_stack_depth()->overflow_stack() != NULL, "invariant");
   totally_drain = totally_drain || _totally_drain;
 
 #ifdef ASSERT
@@ -254,41 +215,35 @@ void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
   MutableSpace* perm_space = heap->perm_gen()->object_space();
 #endif /* ASSERT */
 
+  OopStarTaskQueue* const tq = claimed_stack_depth();
   do {
     StarTask p;
 
     // Drain overflow stack first, so other threads can steal from
     // claimed stack while we work.
-    while(!overflow_stack_depth()->is_empty()) {
-      // linux compiler wants different overloaded operator= in taskqueue to
-      // assign to p that the other compilers don't like.
-      StarTask ptr = overflow_stack_depth()->pop();
-      process_popped_location_depth(ptr);
+    while (tq->pop_overflow(p)) {
+      process_popped_location_depth(p);
     }
 
     if (totally_drain) {
-      while (claimed_stack_depth()->pop_local(p)) {
+      while (tq->pop_local(p)) {
         process_popped_location_depth(p);
       }
     } else {
-      while (claimed_stack_depth()->size() > _target_stack_size &&
-             claimed_stack_depth()->pop_local(p)) {
+      while (tq->size() > _target_stack_size && tq->pop_local(p)) {
         process_popped_location_depth(p);
       }
     }
-  } while( (totally_drain && claimed_stack_depth()->size() > 0) ||
-           (overflow_stack_depth()->length() > 0) );
+  } while (totally_drain && !tq->taskqueue_empty() || !tq->overflow_empty());
 
-  assert(!totally_drain || claimed_stack_empty(), "Sanity");
-  assert(totally_drain ||
-         claimed_stack_depth()->size() <= _target_stack_size,
-         "Sanity");
-  assert(overflow_stack_empty(), "Sanity");
+  assert(!totally_drain || tq->taskqueue_empty(), "Sanity");
+  assert(totally_drain || tq->size() <= _target_stack_size, "Sanity");
+  assert(tq->overflow_empty(), "Sanity");
 }
 
 void PSPromotionManager::drain_stacks_breadth(bool totally_drain) {
   assert(!depth_first(), "invariant");
-  assert(overflow_stack_breadth() != NULL, "invariant");
+  assert(claimed_stack_breadth()->overflow_stack() != NULL, "invariant");
   totally_drain = totally_drain || _totally_drain;
 
 #ifdef ASSERT
@@ -299,51 +254,39 @@ void PSPromotionManager::drain_stacks_breadth(bool totally_drain) {
   MutableSpace* perm_space = heap->perm_gen()->object_space();
 #endif /* ASSERT */
 
+  OverflowTaskQueue<oop>* const tq = claimed_stack_breadth();
   do {
     oop obj;
 
     // Drain overflow stack first, so other threads can steal from
     // claimed stack while we work.
-    while(!overflow_stack_breadth()->is_empty()) {
-      obj = overflow_stack_breadth()->pop();
+    while (tq->pop_overflow(obj)) {
      obj->copy_contents(this);
    }
 
     if (totally_drain) {
-      // obj is a reference!!!
-      while (claimed_stack_breadth()->pop_local(obj)) {
-        // It would be nice to assert about the type of objects we might
-        // pop, but they can come from anywhere, unfortunately.
+      while (tq->pop_local(obj)) {
        obj->copy_contents(this);
      }
    } else {
-      // obj is a reference!!!
-      while (claimed_stack_breadth()->size() > _target_stack_size &&
-             claimed_stack_breadth()->pop_local(obj)) {
-        // It would be nice to assert about the type of objects we might
-        // pop, but they can come from anywhere, unfortunately.
+      while (tq->size() > _target_stack_size && tq->pop_local(obj)) {
         obj->copy_contents(this);
       }
     }
 
     // If we could not find any other work, flush the prefetch queue
-    if (claimed_stack_breadth()->size() == 0 &&
-        (overflow_stack_breadth()->length() == 0)) {
+    if (tq->is_empty()) {
       flush_prefetch_queue();
     }
-  } while((totally_drain && claimed_stack_breadth()->size() > 0) ||
-          (overflow_stack_breadth()->length() > 0));
+  } while (totally_drain && !tq->taskqueue_empty() || !tq->overflow_empty());
 
-  assert(!totally_drain || claimed_stack_empty(), "Sanity");
-  assert(totally_drain ||
-         claimed_stack_breadth()->size() <= _target_stack_size,
-         "Sanity");
-  assert(overflow_stack_empty(), "Sanity");
+  assert(!totally_drain || tq->taskqueue_empty(), "Sanity");
+  assert(totally_drain || tq->size() <= _target_stack_size, "Sanity");
+  assert(tq->overflow_empty(), "Sanity");
 }
 
 void PSPromotionManager::flush_labs() {
-  assert(claimed_stack_empty(), "Attempt to flush lab with live stack");
-  assert(overflow_stack_empty(), "Attempt to flush lab with live overflow stack");
+  assert(stacks_empty(), "Attempt to flush lab with live stack");
 
   // If either promotion lab fills up, we can flush the
   // lab but not refill it, so check first.
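Both rewritten drain loops share one scheduling idea: unless a total drain was requested, stop popping once the local queue is back down to a target size, so a stealable remainder is left for other threads. A sketch with an assumed queue interface mirroring the calls in the hunks (note the loop condition relies on && binding tighter than ||, exactly as in the diff):

// Illustrative sketch only; Queue is assumed to expose the same calls the
// hunks above use (pop_overflow, pop_local, size, taskqueue_empty,
// overflow_empty).
#include <cstddef>

template <class Queue, class Task, class Process>
void drain_to_target(Queue* tq, bool totally_drain, std::size_t target,
                     Process process) {
  Task p;
  do {
    // Private overflow first, so other threads can steal meanwhile.
    while (tq->pop_overflow(p)) process(p);
    if (totally_drain) {
      while (tq->pop_local(p)) process(p);
    } else {
      // Leave ~target tasks behind for thieves.
      while (tq->size() > target && tq->pop_local(p)) process(p);
    }
  } while ((totally_drain && !tq->taskqueue_empty()) || !tq->overflow_empty());
}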
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -78,9 +78,7 @@ class PSPromotionManager : public CHeapObj {
   PrefetchQueue                       _prefetch_queue;
 
   OopStarTaskQueue                    _claimed_stack_depth;
-  GrowableArray<StarTask>*            _overflow_stack_depth;
-  OopTaskQueue                        _claimed_stack_breadth;
-  GrowableArray<oop>*                 _overflow_stack_breadth;
+  OverflowTaskQueue<oop>              _claimed_stack_breadth;
 
   bool                                _depth_first;
   bool                                _totally_drain;
@@ -97,9 +95,6 @@ class PSPromotionManager : public CHeapObj {
   template <class T> inline void claim_or_forward_internal_depth(T* p);
   template <class T> inline void claim_or_forward_internal_breadth(T* p);
 
-  GrowableArray<StarTask>* overflow_stack_depth() { return _overflow_stack_depth; }
-  GrowableArray<oop>* overflow_stack_breadth()    { return _overflow_stack_breadth; }
-
   // On the task queues we push reference locations as well as
   // partially-scanned arrays (in the latter case, we push an oop to
   // the from-space image of the array and the length on the
@@ -151,18 +146,19 @@ class PSPromotionManager : public CHeapObj {
 
 #if PS_PM_STATS
     ++_total_pushes;
+    int stack_length = claimed_stack_depth()->overflow_stack()->length();
 #endif // PS_PM_STATS
 
-    if (!claimed_stack_depth()->push(p)) {
-      overflow_stack_depth()->push(p);
+    claimed_stack_depth()->push(p);
+
 #if PS_PM_STATS
+    if (claimed_stack_depth()->overflow_stack()->length() != stack_length) {
       ++_overflow_pushes;
-      uint stack_length = (uint) overflow_stack_depth()->length();
-      if (stack_length > _max_overflow_length) {
-        _max_overflow_length = stack_length;
+      if ((uint)stack_length + 1 > _max_overflow_length) {
+        _max_overflow_length = (uint)stack_length + 1;
       }
-#endif // PS_PM_STATS
     }
+#endif // PS_PM_STATS
   }
 
   void push_breadth(oop o) {
@@ -170,18 +166,19 @@ class PSPromotionManager : public CHeapObj {
 
 #if PS_PM_STATS
     ++_total_pushes;
+    int stack_length = claimed_stack_breadth()->overflow_stack()->length();
 #endif // PS_PM_STATS
 
-    if(!claimed_stack_breadth()->push(o)) {
-      overflow_stack_breadth()->push(o);
+    claimed_stack_breadth()->push(o);
+
 #if PS_PM_STATS
+    if (claimed_stack_breadth()->overflow_stack()->length() != stack_length) {
       ++_overflow_pushes;
-      uint stack_length = (uint) overflow_stack_breadth()->length();
-      if (stack_length > _max_overflow_length) {
-        _max_overflow_length = stack_length;
+      if ((uint)stack_length + 1 > _max_overflow_length) {
+        _max_overflow_length = (uint)stack_length + 1;
      }
-#endif // PS_PM_STATS
    }
+#endif // PS_PM_STATS
  }
 
  protected:
@@ -199,12 +196,10 @@ class PSPromotionManager : public CHeapObj {
   static PSPromotionManager* vm_thread_promotion_manager();
 
   static bool steal_depth(int queue_num, int* seed, StarTask& t) {
-    assert(stack_array_depth() != NULL, "invariant");
     return stack_array_depth()->steal(queue_num, seed, t);
   }
 
-  static bool steal_breadth(int queue_num, int* seed, Task& t) {
-    assert(stack_array_breadth() != NULL, "invariant");
+  static bool steal_breadth(int queue_num, int* seed, oop& t) {
     return stack_array_breadth()->steal(queue_num, seed, t);
   }
 
@@ -214,7 +209,7 @@ class PSPromotionManager : public CHeapObj {
   OopStarTaskQueue* claimed_stack_depth() {
     return &_claimed_stack_depth;
   }
-  OopTaskQueue* claimed_stack_breadth() {
+  OverflowTaskQueue<oop>* claimed_stack_breadth() {
     return &_claimed_stack_breadth;
   }
 
@@ -246,25 +241,13 @@ class PSPromotionManager : public CHeapObj {
   void drain_stacks_depth(bool totally_drain);
   void drain_stacks_breadth(bool totally_drain);
 
-  bool claimed_stack_empty() {
-    if (depth_first()) {
-      return claimed_stack_depth()->size() <= 0;
-    } else {
-      return claimed_stack_breadth()->size() <= 0;
-    }
-  }
-  bool overflow_stack_empty() {
-    if (depth_first()) {
-      return overflow_stack_depth()->length() <= 0;
-    } else {
-      return overflow_stack_breadth()->length() <= 0;
-    }
+  bool depth_first() const {
+    return _depth_first;
   }
   bool stacks_empty() {
-    return claimed_stack_empty() && overflow_stack_empty();
-  }
-  bool depth_first() {
-    return _depth_first;
+    return depth_first() ?
+      claimed_stack_depth()->is_empty() :
+      claimed_stack_breadth()->is_empty();
   }
 
   inline void process_popped_location_depth(StarTask p);
@@ -414,7 +414,6 @@ bool PSScavenge::invoke_no_policy() {
   }
 
   // Finally, flush the promotion_manager's labs, and deallocate its stacks.
-  assert(promotion_manager->claimed_stack_empty(), "Sanity");
   PSPromotionManager::post_scavenge();
 
   promotion_failure_occurred = promotion_failed();
 
@@ -1524,7 +1524,7 @@ Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
     ConnectionGraph *cgr = phase->C->congraph();
     PointsToNode::EscapeState es = PointsToNode::GlobalEscape;
     if (cgr != NULL)
-      es = cgr->escape_state(obj_node(), phase);
+      es = cgr->escape_state(obj_node());
     if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) {
       // Mark it eliminated to update any counters
       this->set_eliminated();
@@ -1627,7 +1627,7 @@ Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
     ConnectionGraph *cgr = phase->C->congraph();
     PointsToNode::EscapeState es = PointsToNode::GlobalEscape;
     if (cgr != NULL)
-      es = cgr->escape_state(obj_node(), phase);
+      es = cgr->escape_state(obj_node());
     if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) {
       // Mark it eliminated to update any counters
       this->set_eliminated();
@@ -637,34 +637,6 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
   if (failing())  return;
   NOT_PRODUCT( verify_graph_edges(); )
 
-  // Perform escape analysis
-  if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
-    TracePhase t2("escapeAnalysis", &_t_escapeAnalysis, true);
-    // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction.
-    PhaseGVN* igvn = initial_gvn();
-    Node* oop_null = igvn->zerocon(T_OBJECT);
-    Node* noop_null = igvn->zerocon(T_NARROWOOP);
-
-    _congraph = new(comp_arena()) ConnectionGraph(this);
-    bool has_non_escaping_obj = _congraph->compute_escape();
-
-#ifndef PRODUCT
-    if (PrintEscapeAnalysis) {
-      _congraph->dump();
-    }
-#endif
-    // Cleanup.
-    if (oop_null->outcnt() == 0)
-      igvn->hash_delete(oop_null);
-    if (noop_null->outcnt() == 0)
-      igvn->hash_delete(noop_null);
-
-    if (!has_non_escaping_obj) {
-      _congraph = NULL;
-    }
-
-    if (failing())  return;
-  }
   // Now optimize
   Optimize();
   if (failing())  return;
@@ -1601,6 +1573,20 @@ void Compile::Optimize() {
 
   if (failing())  return;
 
+  // Perform escape analysis
+  if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
+    TracePhase t2("escapeAnalysis", &_t_escapeAnalysis, true);
+    ConnectionGraph::do_analysis(this, &igvn);
+
+    if (failing())  return;
+
+    igvn.optimize();
+    print_method("Iter GVN 3", 2);
+
+    if (failing())  return;
+
+  }
+
   // Loop transforms on the ideal graph.  Range Check Elimination,
   // peeling, unrolling, etc.
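Net effect of the two compile.cpp hunks: escape analysis moves out of the Compile constructor and into Optimize(), where an iterative-GVN phase already exists, so the analysis can feed the IGVN worklist and be followed by another optimize() round. Roughly, as a hypothetical standalone driver (simplified names, not Compile::Optimize()):

// Illustrative sketch only -- hypothetical driver types.
struct IterGVNSketch {
  void optimize() { /* iterative value numbering over the worklist */ }
};

struct EscapeAnalysisSketch {
  static bool has_candidates() { return true; }         // placeholder predicate
  static void do_analysis(IterGVNSketch* igvn) { (void)igvn; }
};

void optimize_sketch(bool do_escape_analysis, IterGVNSketch& igvn) {
  // ...earlier IGVN rounds run here...
  if (do_escape_analysis && EscapeAnalysisSketch::has_candidates()) {
    EscapeAnalysisSketch::do_analysis(&igvn);  // may queue rewritten nodes
    igvn.optimize();                           // re-run GVN over EA's rewrites
  }
  // ...loop transforms follow...
}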
@@ -362,6 +362,7 @@ class Compile : public Phase {
   Node* macro_node(int idx)             { return _macro_nodes->at(idx); }
   Node* predicate_opaque1_node(int idx) { return _predicate_opaqs->at(idx);}
   ConnectionGraph* congraph()           { return _congraph;}
+  void set_congraph(ConnectionGraph* congraph) { _congraph = congraph;}
   void add_macro_node(Node * n) {
     //assert(n->is_macro(), "must be a macro node");
     assert(!_macro_nodes->contains(n), " duplicate entry in expand list");
@@ -81,18 +81,18 @@ void PointsToNode::dump(bool print_state) const {
 }
 #endif
 
-ConnectionGraph::ConnectionGraph(Compile * C) :
+ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
   _nodes(C->comp_arena(), C->unique(), C->unique(), PointsToNode()),
   _processed(C->comp_arena()),
   _collecting(true),
   _compile(C),
+  _igvn(igvn),
   _node_map(C->comp_arena()) {
 
   _phantom_object = C->top()->_idx,
   add_node(C->top(), PointsToNode::JavaObject, PointsToNode::GlobalEscape,true);
 
   // Add ConP(#NULL) and ConN(#NULL) nodes.
-  PhaseGVN* igvn = C->initial_gvn();
   Node* oop_null = igvn->zerocon(T_OBJECT);
   _oop_null = oop_null->_idx;
   assert(_oop_null < C->unique(), "should be created already");
@@ -182,7 +182,7 @@ void ConnectionGraph::add_node(Node *n, PointsToNode::NodeType nt,
   _processed.set(n->_idx);
 }
 
-PointsToNode::EscapeState ConnectionGraph::escape_state(Node *n, PhaseTransform *phase) {
+PointsToNode::EscapeState ConnectionGraph::escape_state(Node *n) {
   uint idx = n->_idx;
   PointsToNode::EscapeState es;
 
@@ -207,22 +207,26 @@ PointsToNode::EscapeState ConnectionGraph::escape_state(Node *n, PhaseTransform
   if (n->uncast()->_idx >= nodes_size())
     return PointsToNode::UnknownEscape;
 
+  PointsToNode::EscapeState orig_es = es;
+
   // compute max escape state of anything this node could point to
   VectorSet ptset(Thread::current()->resource_area());
-  PointsTo(ptset, n, phase);
+  PointsTo(ptset, n);
   for(VectorSetI i(&ptset); i.test() && es != PointsToNode::GlobalEscape; ++i) {
     uint pt = i.elem;
     PointsToNode::EscapeState pes = ptnode_adr(pt)->escape_state();
     if (pes > es)
       es = pes;
   }
-  // cache the computed escape state
-  assert(es != PointsToNode::UnknownEscape, "should have computed an escape state");
-  ptnode_adr(idx)->set_escape_state(es);
+  if (orig_es != es) {
+    // cache the computed escape state
+    assert(es != PointsToNode::UnknownEscape, "should have computed an escape state");
+    ptnode_adr(idx)->set_escape_state(es);
+  } // orig_es could be PointsToNode::UnknownEscape
   return es;
 }
 
-void ConnectionGraph::PointsTo(VectorSet &ptset, Node * n, PhaseTransform *phase) {
+void ConnectionGraph::PointsTo(VectorSet &ptset, Node * n) {
   VectorSet visited(Thread::current()->resource_area());
   GrowableArray<uint>  worklist;
 
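The escape_state() hunk is a small memoization change: the state is still computed as the maximum over everything the node may point to, but it is written back to the per-node cache only when it actually changed, since the stored value may legitimately remain UnknownEscape. A compilable toy version (toy graph, not C2's data structures):

// Illustrative sketch only.
#include <algorithm>
#include <vector>

enum EscapeState { UnknownEscape = 0, NoEscape, ArgEscape, GlobalEscape };

struct ToyNode {
  EscapeState           state = UnknownEscape;  // cached summary
  std::vector<ToyNode*> points_to;              // nodes this one may reference
};

EscapeState escape_state(ToyNode* n) {
  EscapeState es = n->state;
  if (es != UnknownEscape) return es;           // cached fast path (simplified)
  EscapeState orig_es = es;
  for (ToyNode* m : n->points_to)               // max over the points-to set
    es = std::max(es, m->state);
  if (orig_es != es)
    n->state = es;                              // cache only when it changed
  return es;
}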
@@ -990,7 +994,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *>  &alloc_worklist)
   GrowableArray<Node *>  memnode_worklist;
   GrowableArray<PhiNode *>  orig_phis;
 
-  PhaseGVN  *igvn = _compile->initial_gvn();
+  PhaseGVN  *igvn = _igvn;
   uint new_index_start = (uint) _compile->num_alias_types();
   Arena* arena = Thread::current()->resource_area();
   VectorSet visited(arena);
@@ -1012,7 +1016,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *>  &alloc_worklist)
       CallNode *alloc = n->as_Call();
       // copy escape information to call node
       PointsToNode* ptn = ptnode_adr(alloc->_idx);
-      PointsToNode::EscapeState es = escape_state(alloc, igvn);
+      PointsToNode::EscapeState es = escape_state(alloc);
       // We have an allocation or call which returns a Java object,
       // see if it is unescaped.
       if (es != PointsToNode::NoEscape || !ptn->_scalar_replaceable)
@@ -1123,7 +1127,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *>  &alloc_worklist)
       }
     } else if (n->is_AddP()) {
       ptset.Clear();
-      PointsTo(ptset, get_addp_base(n), igvn);
+      PointsTo(ptset, get_addp_base(n));
       assert(ptset.Size() == 1, "AddP address is unique");
       uint elem = ptset.getelem(); // Allocation node's index
       if (elem == _phantom_object) {
@@ -1143,7 +1147,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *>  &alloc_worklist)
         continue;  // already processed
       }
       ptset.Clear();
-      PointsTo(ptset, n, igvn);
+      PointsTo(ptset, n);
       if (ptset.Size() == 1) {
         uint elem = ptset.getelem(); // Allocation node's index
         if (elem == _phantom_object) {
@@ -1478,6 +1482,26 @@ bool ConnectionGraph::has_candidates(Compile *C) {
   return false;
 }
 
+void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
+  // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction
+  // to create space for them in ConnectionGraph::_nodes[].
+  Node* oop_null = igvn->zerocon(T_OBJECT);
+  Node* noop_null = igvn->zerocon(T_NARROWOOP);
+
+  ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn);
+  // Perform escape analysis
+  if (congraph->compute_escape()) {
+    // There are non escaping objects.
+    C->set_congraph(congraph);
+  }
+
+  // Cleanup.
+  if (oop_null->outcnt() == 0)
+    igvn->hash_delete(oop_null);
+  if (noop_null->outcnt() == 0)
+    igvn->hash_delete(noop_null);
+}
+
 bool ConnectionGraph::compute_escape() {
   Compile* C = _compile;
 
@@ -1492,7 +1516,7 @@ bool ConnectionGraph::compute_escape() {
   }
 
   GrowableArray<int> cg_worklist;
-  PhaseGVN* igvn = C->initial_gvn();
+  PhaseGVN* igvn = _igvn;
   bool has_allocations = false;
 
   // Push all useful nodes onto CG list and set their type.
@@ -1661,6 +1685,12 @@ bool ConnectionGraph::compute_escape() {
   _collecting = false;
   assert(C->unique() == nodes_size(), "there should be no new ideal nodes during ConnectionGraph build");
 
+#ifndef PRODUCT
+  if (PrintEscapeAnalysis) {
+    dump(); // Dump ConnectionGraph
+  }
+#endif
+
   bool has_scalar_replaceable_candidates = alloc_worklist.length() > 0;
   if ( has_scalar_replaceable_candidates &&
        C->AliasLevel() >= 3 && EliminateAllocations ) {
@@ -1671,10 +1701,6 @@ bool ConnectionGraph::compute_escape() {
 
     if (C->failing())  return false;
 
-    // Clean up after split unique types.
-    ResourceMark rm;
-    PhaseRemoveUseless pru(C->initial_gvn(), C->for_igvn());
-
     C->print_method("After Escape Analysis", 2);
 
 #ifdef ASSERT
@@ -1711,7 +1737,7 @@ void ConnectionGraph::verify_escape_state(int nidx, VectorSet& ptset, PhaseTrans
     int offset = ptn->offset();
     Node* base = get_addp_base(n);
     ptset.Clear();
-    PointsTo(ptset, base, phase);
+    PointsTo(ptset, base);
     int ptset_size = ptset.Size();
 
     // Check if a oop field's initializing value is recorded and add
@@ -1889,7 +1915,7 @@ void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *pha
           arg = get_addp_base(arg);
         }
         ptset.Clear();
-        PointsTo(ptset, arg, phase);
+        PointsTo(ptset, arg);
         for( VectorSetI j(&ptset); j.test(); ++j ) {
           uint pt = j.elem;
           set_escape_state(pt, PointsToNode::ArgEscape);
@@ -1934,7 +1960,7 @@ void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *pha
           }
 
           ptset.Clear();
-          PointsTo(ptset, arg, phase);
+          PointsTo(ptset, arg);
           for( VectorSetI j(&ptset); j.test(); ++j ) {
             uint pt = j.elem;
             if (global_escapes) {
@@ -1970,7 +1996,7 @@ void ConnectionGraph::process_call_arguments(CallNode *call, PhaseTransform *pha
       Node *arg = call->in(i)->uncast();
       set_escape_state(arg->_idx, PointsToNode::GlobalEscape);
       ptset.Clear();
-      PointsTo(ptset, arg, phase);
+      PointsTo(ptset, arg);
       for( VectorSetI j(&ptset); j.test(); ++j ) {
         uint pt = j.elem;
         set_escape_state(pt, PointsToNode::GlobalEscape);
@@ -2433,7 +2459,7 @@ void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
       Node *base = get_addp_base(n);
       // Create a field edge to this node from everything base could point to.
      VectorSet ptset(Thread::current()->resource_area());
-      PointsTo(ptset, base, phase);
+      PointsTo(ptset, base);
       for( VectorSetI i(&ptset); i.test(); ++i ) {
         uint pt = i.elem;
         add_field_edge(pt, n_idx, address_offset(n, phase));
@@ -2501,7 +2527,7 @@ void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
       // For everything "adr_base" could point to, create a deferred edge from
       // this node to each field with the same offset.
       VectorSet ptset(Thread::current()->resource_area());
-      PointsTo(ptset, adr_base, phase);
+      PointsTo(ptset, adr_base);
       int offset = address_offset(adr, phase);
       for( VectorSetI i(&ptset); i.test(); ++i ) {
         uint pt = i.elem;
@@ -2594,7 +2620,7 @@ void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
       // For everything "adr_base" could point to, create a deferred edge
       // to "val" from each field with the same offset.
       VectorSet ptset(Thread::current()->resource_area());
-      PointsTo(ptset, adr_base, phase);
+      PointsTo(ptset, adr_base);
       for( VectorSetI i(&ptset); i.test(); ++i ) {
         uint pt = i.elem;
         add_edge_from_fields(pt, val->_idx, address_offset(adr, phase));
@@ -2638,7 +2664,6 @@ void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
 
 #ifndef PRODUCT
 void ConnectionGraph::dump() {
-  PhaseGVN  *igvn = _compile->initial_gvn();
   bool first = true;
 
   uint size = nodes_size();
@@ -2648,7 +2673,7 @@ void ConnectionGraph::dump() {
 
     if (ptn_type != PointsToNode::JavaObject || ptn->_node == NULL)
       continue;
-    PointsToNode::EscapeState es = escape_state(ptn->_node, igvn);
+    PointsToNode::EscapeState es = escape_state(ptn->_node);
     if (ptn->_node->is_Allocate() && (es == PointsToNode::NoEscape || Verbose)) {
       if (first) {
         tty->cr();
@@ -227,6 +227,7 @@ private:
   uint                      _noop_null; // ConN(#NULL)
 
   Compile *                 _compile;   // Compile object for current compilation
+  PhaseIterGVN *            _igvn;      // Value numbering
 
   // Address of an element in _nodes.  Used when the element is to be modified
   PointsToNode *ptnode_adr(uint idx) const {
@@ -257,7 +258,7 @@ private:
   // walk the connection graph starting at the node corresponding to "n" and
   // add the index of everything it could point to, to "ptset".  This may cause
   // Phi's encountered to get (re)processed  (which requires "phase".)
-  void PointsTo(VectorSet &ptset, Node * n, PhaseTransform *phase);
+  void PointsTo(VectorSet &ptset, Node * n);
 
   // Edge manipulation.  The "from_i" and "to_i" arguments are the
   // node indices of the source and destination of the edge
@@ -310,7 +311,7 @@ private:
   // Node:  This assumes that escape analysis is run before
   //        PhaseIterGVN creation
   void record_for_optimizer(Node *n) {
-    _compile->record_for_igvn(n);
+    _igvn->_worklist.push(n);
   }
 
   // Set the escape state of a node
@@ -320,16 +321,20 @@ private:
   void verify_escape_state(int nidx, VectorSet& ptset, PhaseTransform* phase);
 
  public:
-  ConnectionGraph(Compile *C);
+  ConnectionGraph(Compile *C, PhaseIterGVN *igvn);
 
   // Check for non-escaping candidates
   static bool has_candidates(Compile *C);
 
+  // Perform escape analysis
+  static void do_analysis(Compile *C, PhaseIterGVN *igvn);
+
   // Compute the escape information
   bool compute_escape();
 
   // escape state of a node
-  PointsToNode::EscapeState escape_state(Node *n, PhaseTransform *phase);
+  PointsToNode::EscapeState escape_state(Node *n);
 
   // other information we have collected
   bool is_scalar_replaceable(Node *n) {
     if (_collecting || (n->_idx >= nodes_size()))
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -118,34 +118,13 @@ void CodeBlobCollector::do_blob(CodeBlob* cb) {
   for (int i=0; i<_global_code_blobs->length(); i++) {
     JvmtiCodeBlobDesc* scb = _global_code_blobs->at(i);
     if (addr == scb->code_begin()) {
       ShouldNotReachHere();
       return;
     }
   }
 
-  // we must name the CodeBlob - some CodeBlobs already have names :-
-  // - stubs used by compiled code to call a (static) C++ runtime routine
-  // - non-relocatable machine code such as the interpreter, stubroutines, etc.
-  // - various singleton blobs
-  //
-  // others are unnamed so we create a name :-
-  // - OSR adapter (interpreter frame that has been on-stack replaced)
-  // - I2C and C2I adapters
-  const char* name = NULL;
-  if (cb->is_runtime_stub()) {
-    name = ((RuntimeStub*)cb)->name();
-  }
-  if (cb->is_buffer_blob()) {
-    name = ((BufferBlob*)cb)->name();
-  }
-  if (cb->is_deoptimization_stub() || cb->is_safepoint_stub()) {
-    name = ((SingletonBlob*)cb)->name();
-  }
-  if (cb->is_uncommon_trap_stub() || cb->is_exception_stub()) {
-    name = ((SingletonBlob*)cb)->name();
-  }
-
   // record the CodeBlob details as a JvmtiCodeBlobDesc
-  JvmtiCodeBlobDesc* scb = new JvmtiCodeBlobDesc(name, cb->instructions_begin(),
+  JvmtiCodeBlobDesc* scb = new JvmtiCodeBlobDesc(cb->name(), cb->instructions_begin(),
                                                  cb->instructions_end());
   _global_code_blobs->append(scb);
 }
@@ -197,7 +176,10 @@ void CodeBlobCollector::collect() {
 jvmtiError JvmtiCodeBlobEvents::generate_dynamic_code_events(JvmtiEnv* env) {
   CodeBlobCollector collector;
 
-  // first collect all the code blobs
+  // First collect all the code blobs.  This has to be done in a
+  // single pass over the code cache with CodeCache_lock held because
+  // there isn't any safe way to iterate over regular CodeBlobs since
+  // they can be freed at any point.
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
     collector.collect();
@ -213,166 +195,28 @@ jvmtiError JvmtiCodeBlobEvents::generate_dynamic_code_events(JvmtiEnv* env) {
|
||||
}
|
||||
|
||||
|
||||
// Support class to describe a nmethod in the CodeCache
|
||||
|
||||
class nmethodDesc: public CHeapObj {
|
||||
private:
|
||||
jmethodID _jmethod_id;
|
||||
address _code_begin;
|
||||
address _code_end;
|
||||
jvmtiAddrLocationMap* _map;
|
||||
jint _map_length;
|
||||
public:
|
||||
nmethodDesc(jmethodID jmethod_id, address code_begin, address code_end,
|
||||
jvmtiAddrLocationMap* map, jint map_length) {
|
||||
_jmethod_id = jmethod_id;
|
||||
_code_begin = code_begin;
|
||||
_code_end = code_end;
|
||||
_map = map;
|
||||
_map_length = map_length;
|
||||
}
|
||||
jmethodID jmethod_id() const { return _jmethod_id; }
|
||||
address code_begin() const { return _code_begin; }
|
||||
address code_end() const { return _code_end; }
|
||||
jvmtiAddrLocationMap* map() const { return _map; }
|
||||
jint map_length() const { return _map_length; }
|
||||
};
|
||||
|
||||
|
||||
// Support class to collect a list of the nmethod CodeBlobs in
|
||||
// the CodeCache.
|
||||
//
|
||||
// Usage :-
|
||||
//
|
||||
// nmethodCollector collector;
|
||||
//
|
||||
// collector.collect();
|
||||
// JvmtiCodeBlobDesc* blob = collector.first();
|
||||
// while (blob != NULL) {
|
||||
// :
|
||||
// blob = collector.next();
|
||||
// }
|
||||
//
|
||||
class nmethodCollector : StackObj {
|
||||
private:
|
||||
GrowableArray<nmethodDesc*>* _nmethods; // collect nmethods
|
||||
int _pos; // iteration support
|
||||
|
||||
// used during a collection
|
||||
static GrowableArray<nmethodDesc*>* _global_nmethods;
|
||||
static void do_nmethod(nmethod* nm);
|
||||
public:
|
||||
nmethodCollector() {
|
||||
_nmethods = NULL;
|
||||
_pos = -1;
|
||||
}
|
||||
~nmethodCollector() {
|
||||
if (_nmethods != NULL) {
|
||||
for (int i=0; i<_nmethods->length(); i++) {
|
||||
nmethodDesc* blob = _nmethods->at(i);
|
||||
if (blob->map()!= NULL) {
|
||||
FREE_C_HEAP_ARRAY(jvmtiAddrLocationMap, blob->map());
|
||||
}
|
||||
}
|
||||
delete _nmethods;
|
||||
}
|
||||
}
|
||||
|
||||
// collect list of nmethods in the cache
|
||||
void collect();
|
||||
|
||||
// iteration support - return first code blob
|
||||
nmethodDesc* first() {
|
||||
assert(_nmethods != NULL, "not collected");
|
||||
if (_nmethods->length() == 0) {
|
||||
return NULL;
|
||||
}
|
||||
_pos = 0;
|
||||
return _nmethods->at(0);
|
||||
}
|
||||
|
||||
// iteration support - return next code blob
|
||||
nmethodDesc* next() {
|
||||
assert(_pos >= 0, "iteration not started");
|
||||
if (_pos+1 >= _nmethods->length()) {
|
||||
return NULL;
|
||||
}
|
||||
return _nmethods->at(++_pos);
|
||||
}
|
||||
};
|
||||
|
||||
// used during collection
|
||||
GrowableArray<nmethodDesc*>* nmethodCollector::_global_nmethods;
|
||||
|
||||
|
||||
// called for each nmethod in the CodeCache
|
||||
//
|
||||
// This function simply adds a descriptor for each nmethod to the global list.
|
||||
|
||||
void nmethodCollector::do_nmethod(nmethod* nm) {
|
||||
// ignore zombies
|
||||
if (!nm->is_alive()) {
|
||||
return;
|
||||
}
|
||||
|
||||
assert(nm->method() != NULL, "checking");
|
||||
|
||||
// create the location map for the nmethod.
|
||||
jvmtiAddrLocationMap* map;
|
||||
jint map_length;
|
||||
JvmtiCodeBlobEvents::build_jvmti_addr_location_map(nm, &map, &map_length);
|
||||
|
||||
// record the nmethod details
|
||||
nmethodDesc* snm = new nmethodDesc(nm->get_and_cache_jmethod_id(),
|
||||
nm->code_begin(),
|
||||
nm->code_end(),
|
||||
map,
|
||||
map_length);
|
||||
_global_nmethods->append(snm);
|
||||
}
|
||||
|
||||
// collects a list of nmethod in the CodeCache.
|
||||
//
|
||||
// The created list is growable array of nmethodDesc - each one describes
|
||||
// a nmethod and includs its JVMTI address location map.
|
||||
|
||||
void nmethodCollector::collect() {
|
||||
assert_locked_or_safepoint(CodeCache_lock);
|
||||
assert(_global_nmethods == NULL, "checking");
|
||||
|
||||
// create the list
|
||||
_global_nmethods = new (ResourceObj::C_HEAP) GrowableArray<nmethodDesc*>(100,true);
|
||||
|
||||
// any a descriptor for each nmethod to the list.
|
||||
CodeCache::nmethods_do(do_nmethod);
|
||||
|
||||
// make the list the instance list
|
||||
_nmethods = _global_nmethods;
|
||||
_global_nmethods = NULL;
|
||||
}
|
||||
|
||||
// Generate a COMPILED_METHOD_LOAD event for each nnmethod
|
||||
|
||||
jvmtiError JvmtiCodeBlobEvents::generate_compiled_method_load_events(JvmtiEnv* env) {
  HandleMark hm;
  nmethodCollector collector;

  // first collect all nmethods
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    collector.collect();
  }
  // Walk the CodeCache notifying for live nmethods.  The code cache
  // may be changing while this is happening which is ok since newly
  // created nmethods will notify normally and nmethods which are freed
  // can be safely skipped.
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  nmethod* current = CodeCache::first_nmethod();
  while (current != NULL) {
    // Lock the nmethod so it can't be freed
    nmethodLocker nml(current);

    // iterate over the list and post an event for each nmethod
    nmethodDesc* nm_desc = collector.first();
    while (nm_desc != NULL) {
      jmethodID mid = nm_desc->jmethod_id();
      assert(mid != NULL, "checking");
      JvmtiExport::post_compiled_method_load(env, mid,
                                             (jint)(nm_desc->code_end() - nm_desc->code_begin()),
                                             nm_desc->code_begin(), nm_desc->map_length(),
                                             nm_desc->map());
      nm_desc = collector.next();
    // Only notify for live nmethods
    if (current->is_alive()) {
      // Don't hold the lock over the notify or jmethodID creation
      MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      current->get_and_cache_jmethod_id();
      JvmtiExport::post_compiled_method_load(current);
    }
    current = CodeCache::next_nmethod(current);
  }
  return JVMTI_ERROR_NONE;
}
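
For context, an agent consumes the COMPILED_METHOD_LOAD events posted above through the standard JVMTI callback; the signature below is from the JVMTI specification, not part of this change:

void JNICALL
CompiledMethodLoad(jvmtiEnv* jvmti_env,
                   jmethodID method,
                   jint code_size,                   // code_end() - code_begin()
                   const void* code_addr,            // code_begin()
                   jint map_length,
                   const jvmtiAddrLocationMap* map,  // may be NULL
                   const void* compile_info);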

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -182,73 +182,3 @@ bool ObjArrayTask::is_valid() const {
    _index < objArrayOop(_obj)->length();
}
#endif // ASSERT

bool RegionTaskQueueWithOverflow::is_empty() {
  return (_region_queue.size() == 0) &&
         (_overflow_stack->length() == 0);
}

bool RegionTaskQueueWithOverflow::stealable_is_empty() {
  return _region_queue.size() == 0;
}

bool RegionTaskQueueWithOverflow::overflow_is_empty() {
  return _overflow_stack->length() == 0;
}

void RegionTaskQueueWithOverflow::initialize() {
  _region_queue.initialize();
  assert(_overflow_stack == 0, "Creating memory leak");
  _overflow_stack =
    new (ResourceObj::C_HEAP) GrowableArray<RegionTask>(10, true);
}

void RegionTaskQueueWithOverflow::save(RegionTask t) {
  if (TraceRegionTasksQueuing && Verbose) {
    gclog_or_tty->print_cr("CTQ: save " PTR_FORMAT, t);
  }
  if (!_region_queue.push(t)) {
    _overflow_stack->push(t);
  }
}

// Note that this method will retrieve all regions that have been saved, but
// it always checks the overflow stack first.  It may be more efficient to
// check the stealable queue and the overflow stack separately.
bool RegionTaskQueueWithOverflow::retrieve(RegionTask& region_task) {
  bool result = retrieve_from_overflow(region_task);
  if (!result) {
    result = retrieve_from_stealable_queue(region_task);
  }
  if (TraceRegionTasksQueuing && Verbose && result) {
    gclog_or_tty->print_cr("  CTQ: retrieve " PTR_FORMAT, result);
  }
  return result;
}

bool RegionTaskQueueWithOverflow::retrieve_from_stealable_queue(
                                                 RegionTask& region_task) {
  bool result = _region_queue.pop_local(region_task);
  if (TraceRegionTasksQueuing && Verbose) {
    gclog_or_tty->print_cr("CTQ: retrieve_stealable " PTR_FORMAT, region_task);
  }
  return result;
}

bool
RegionTaskQueueWithOverflow::retrieve_from_overflow(RegionTask& region_task) {
  bool result;
  if (!_overflow_stack->is_empty()) {
    region_task = _overflow_stack->pop();
    result = true;
  } else {
    region_task = (RegionTask) NULL;
    result = false;
  }
  if (TraceRegionTasksQueuing && Verbose) {
    gclog_or_tty->print_cr("CTQ: retrieve_overflow " PTR_FORMAT, region_task);
  }
  return result;
}
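
As a minimal usage sketch of the retrieve() protocol above (the driver function and the per-region work are illustrative, not from this file): save() never fails, and retrieve() empties the overflow stack before falling back to the stealable queue.

static void drain_all_regions(RegionTaskQueueWithOverflow* q) {
  RegionTask region_index;
  while (q->retrieve(region_index)) {
    // ... process the region at region_index (hypothetical work) ...
  }
}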

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -109,8 +109,9 @@ protected:
public:
  TaskQueueSuper() : _bottom(0), _age() {}

  // Return true if the TaskQueue contains any tasks.
  bool peek() { return _bottom != _age.top(); }
  // Return true if the TaskQueue contains/does not contain any tasks.
  bool peek() const { return _bottom != _age.top(); }
  bool is_empty() const { return size() == 0; }

  // Return an estimate of the number of elements in the queue.
  // The "careful" version admits the possibility of pop_local/pop_global
@ -165,18 +166,16 @@ public:

  void initialize();

  // Push the task "t" on the queue.  Returns "false" iff the queue is
  // full.
  // Push the task "t" on the queue.  Returns "false" iff the queue is full.
  inline bool push(E t);

  // If succeeds in claiming a task (from the 'local' end, that is, the
  // most recently pushed task), returns "true" and sets "t" to that task.
  // Otherwise, the queue is empty and returns false.
  // Attempts to claim a task from the "local" end of the queue (the most
  // recently pushed).  If successful, returns true and sets t to the task;
  // otherwise, returns false (the queue is empty).
  inline bool pop_local(E& t);

  // If succeeds in claiming a task (from the 'global' end, that is, the
  // least recently pushed task), returns "true" and sets "t" to that task.
  // Otherwise, the queue is empty and returns false.
  // Like pop_local(), but uses the "global" end of the queue (the least
  // recently pushed).
  bool pop_global(E& t);

  // Delete any resource associated with the queue.
@ -198,7 +197,6 @@ GenericTaskQueue<E, N>::GenericTaskQueue() {
template<class E, unsigned int N>
void GenericTaskQueue<E, N>::initialize() {
  _elems = NEW_C_HEAP_ARRAY(E, N);
  guarantee(_elems != NULL, "Allocation failed.");
}

template<class E, unsigned int N>
@ -289,7 +287,87 @@ GenericTaskQueue<E, N>::~GenericTaskQueue() {
  FREE_C_HEAP_ARRAY(E, _elems);
}

// Inherits the typedef of "Task" from above.
// OverflowTaskQueue is a TaskQueue that also includes an overflow stack for
// elements that do not fit in the TaskQueue.
//
// Three methods from super classes are overridden:
//
// initialize() - initialize the super classes and create the overflow stack
// push() - push onto the task queue or, if that fails, onto the overflow stack
// is_empty() - return true if both the TaskQueue and overflow stack are empty
//
// Note that size() is not overridden--it returns the number of elements in the
// TaskQueue, and does not include the size of the overflow stack.  This
// simplifies replacement of GenericTaskQueues with OverflowTaskQueues.
template<class E, unsigned int N = TASKQUEUE_SIZE>
class OverflowTaskQueue: public GenericTaskQueue<E, N>
{
public:
  typedef GrowableArray<E>       overflow_t;
  typedef GenericTaskQueue<E, N> taskqueue_t;

  OverflowTaskQueue();
  ~OverflowTaskQueue();
  void initialize();

  inline overflow_t* overflow_stack() const { return _overflow_stack; }

  // Push task t onto the queue or onto the overflow stack.  Return true.
  inline bool push(E t);

  // Attempt to pop from the overflow stack; return true if anything was popped.
  inline bool pop_overflow(E& t);

  inline bool taskqueue_empty() const { return taskqueue_t::is_empty(); }
  inline bool overflow_empty()  const { return overflow_stack()->is_empty(); }
  inline bool is_empty() const {
    return taskqueue_empty() && overflow_empty();
  }

private:
  overflow_t* _overflow_stack;
};

template <class E, unsigned int N>
OverflowTaskQueue<E, N>::OverflowTaskQueue()
{
  _overflow_stack = NULL;
}

template <class E, unsigned int N>
OverflowTaskQueue<E, N>::~OverflowTaskQueue()
{
  if (_overflow_stack != NULL) {
    delete _overflow_stack;
    _overflow_stack = NULL;
  }
}

template <class E, unsigned int N>
void OverflowTaskQueue<E, N>::initialize()
{
  taskqueue_t::initialize();
  assert(_overflow_stack == NULL, "memory leak");
  _overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<E>(10, true);
}

template <class E, unsigned int N>
bool OverflowTaskQueue<E, N>::push(E t)
{
  if (!taskqueue_t::push(t)) {
    overflow_stack()->push(t);
  }
  return true;
}

template <class E, unsigned int N>
bool OverflowTaskQueue<E, N>::pop_overflow(E& t)
{
  if (overflow_empty()) return false;
  t = overflow_stack()->pop();
  return true;
}
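
A short usage sketch of the new class (the driver function is illustrative, not from the patch): push() always returns true because overflowing elements spill to the GrowableArray, so a consumer drains the overflow stack alongside the fixed-size queue.

static void drain(OverflowTaskQueue<oop>* q) {
  oop task;
  while (q->pop_overflow(task) || q->pop_local(task)) {
    // ... process task ...
  }
}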

class TaskQueueSetSuper: public CHeapObj {
protected:
  static int randomParkAndMiller(int* seed0);
@ -323,11 +401,11 @@ public:

  T* queue(uint n);

  // The thread with queue number "queue_num" (and whose random number seed
  // is at "seed") is trying to steal a task from some other queue. (It
  // may try several queues, according to some configuration parameter.)
  // If some steal succeeds, returns "true" and sets "t" the stolen task,
  // otherwise returns false.
  // The thread with queue number "queue_num" (and whose random number seed is
  // at "seed") is trying to steal a task from some other queue. (It may try
  // several queues, according to some configuration parameter.)  If some steal
  // succeeds, returns "true" and sets "t" to the stolen task, otherwise returns
  // false.
  bool steal(uint queue_num, int* seed, E& t);

  bool peek();
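
To make the steal() contract concrete, here is a hedged sketch of the usual worker loop (the function and its termination handling are simplified placeholders, not part of this file):

static void worker_scan(uint queue_num, int* seed,
                        OopTaskQueueSet* set, OopTaskQueue* my_queue) {
  oop task;
  for (;;) {
    if (my_queue->pop_local(task) || set->steal(queue_num, seed, task)) {
      // ... process task ...
    } else {
      return;  // real workers run a termination protocol before exiting
    }
  }
}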

@ -507,7 +585,7 @@ GenericTaskQueue<E, N>::pop_local(E& t) {
  uint localBot = _bottom;
  // This value cannot be N-1.  That can only occur as a result of
  // the assignment to bottom in this method.  If it does, this method
  // resets the size( to 0 before the next call (which is sequential,
  // resets the size to 0 before the next call (which is sequential,
  // since this is pop_local.)
  uint dirty_n_elems = dirty_size(localBot, _age.top());
  assert(dirty_n_elems != N - 1, "Shouldn't be possible...");
@ -533,8 +611,7 @@ GenericTaskQueue<E, N>::pop_local(E& t) {
  }
}

typedef oop Task;
typedef GenericTaskQueue<Task> OopTaskQueue;
typedef GenericTaskQueue<oop> OopTaskQueue;
typedef GenericTaskQueueSet<OopTaskQueue> OopTaskQueueSet;

#ifdef _MSC_VER
@ -615,35 +692,8 @@ private:
#pragma warning(pop)
#endif

typedef GenericTaskQueue<StarTask> OopStarTaskQueue;
typedef OverflowTaskQueue<StarTask> OopStarTaskQueue;
typedef GenericTaskQueueSet<OopStarTaskQueue> OopStarTaskQueueSet;

typedef size_t RegionTask;  // index for region
typedef GenericTaskQueue<RegionTask> RegionTaskQueue;
typedef GenericTaskQueueSet<RegionTaskQueue> RegionTaskQueueSet;

class RegionTaskQueueWithOverflow: public CHeapObj {
protected:
  RegionTaskQueue            _region_queue;
  GrowableArray<RegionTask>* _overflow_stack;

public:
  RegionTaskQueueWithOverflow() : _overflow_stack(NULL) {}
  // Initialize both stealable queue and overflow
  void initialize();
  // Save first to stealable queue and then to overflow
  void save(RegionTask t);
  // Retrieve first from overflow and then from stealable queue
  bool retrieve(RegionTask& region_index);
  // Retrieve from stealable queue
  bool retrieve_from_stealable_queue(RegionTask& region_index);
  // Retrieve from overflow
  bool retrieve_from_overflow(RegionTask& region_index);
  bool is_empty();
  bool stealable_is_empty();
  bool overflow_is_empty();
  uint stealable_size() { return _region_queue.size(); }
  RegionTaskQueue* task_queue() { return &_region_queue; }
};

#define USE_RegionTaskQueueWithOverflow
typedef OverflowTaskQueue<size_t> RegionTaskQueue;
typedef GenericTaskQueueSet<RegionTaskQueue> RegionTaskQueueSet;
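
Call sites migrating from the removed RegionTaskQueueWithOverflow map onto the new typedef roughly as sketched below (illustrative only): save() becomes push(), which always succeeds, and retrieve() becomes a pop_overflow()/pop_local() pair.

static void migrate_example(RegionTaskQueue* q, size_t region_index) {
  q->push(region_index);  // formerly save(region_index)
  size_t region;
  if (q->pop_overflow(region) || q->pop_local(region)) {
    // formerly retrieve(region): overflow first, then the stealable queue
  }
}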