Jesper Wilhelmsson 2014-09-22 16:22:21 +02:00
commit bc3ffaa29f
26 changed files with 109 additions and 68 deletions


@@ -234,10 +234,10 @@ JVM_OBJ_FILES = $(Obj_Files)
 vm_version.o: $(filter-out vm_version.o,$(JVM_OBJ_FILES))
-mapfile : $(MAPFILE) vm.def
+mapfile : $(MAPFILE) vm.def mapfile_ext
     rm -f $@
     awk '{ if ($$0 ~ "INSERT VTABLE SYMBOLS HERE") \
-             { system ("cat vm.def"); } \
+             { system ("cat mapfile_ext"); system ("cat vm.def"); } \
           else \
             { print $$0 } \
         }' > $@ < $(MAPFILE)
@@ -249,6 +249,13 @@ mapfile_reorder : mapfile $(REORDERFILE)
 vm.def: $(Res_Files) $(Obj_Files)
     sh $(GAMMADIR)/make/bsd/makefiles/build_vm_def.sh *.o > $@
+mapfile_ext:
+    rm -f $@
+    touch $@
+    if [ -f $(HS_ALT_MAKE)/bsd/makefiles/mapfile-ext ]; then \
+      cat $(HS_ALT_MAKE)/bsd/makefiles/mapfile-ext > $@; \
+    fi
 STATIC_CXX = false
 ifeq ($(LINK_INTO),AOUT)
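
This change, and the matching linux and solaris makefile changes later in the commit, share one mechanism: when the linker mapfile is generated, the awk filter splices the contents of mapfile_ext (and, as before, vm.def) into the template at the "INSERT VTABLE SYMBOLS HERE" marker, and mapfile_ext stays empty unless $(HS_ALT_MAKE) supplies a mapfile-ext. A standalone C++ analogue of the awk recipe, as an illustration only (not part of this commit):

// Illustration of the awk recipe above, not part of the commit: copy a
// mapfile template from stdin to stdout, splicing in mapfile_ext and
// vm.def where the marker line appears.
#include <fstream>
#include <iostream>
#include <string>

static void cat(const char* path) {
  std::ifstream in(path);        // copies nothing if the file is absent or empty
  std::string line;
  while (std::getline(in, line)) std::cout << line << '\n';
}

int main() {
  std::string line;
  while (std::getline(std::cin, line)) {
    if (line.find("INSERT VTABLE SYMBOLS HERE") != std::string::npos) {
      cat("mapfile_ext");        // new in this change: extension symbols
      cat("vm.def");             // as before: the generated vtable symbols
    } else {
      std::cout << line << '\n';
    }
  }
  return 0;
}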


@@ -227,10 +227,10 @@ JVM_OBJ_FILES = $(Obj_Files)
 vm_version.o: $(filter-out vm_version.o,$(JVM_OBJ_FILES))
-mapfile : $(MAPFILE) vm.def
+mapfile : $(MAPFILE) vm.def mapfile_ext
     rm -f $@
     awk '{ if ($$0 ~ "INSERT VTABLE SYMBOLS HERE") \
-             { system ("cat vm.def"); } \
+             { system ("cat mapfile_ext"); system ("cat vm.def"); } \
          else \
             { print $$0 } \
         }' > $@ < $(MAPFILE)
@@ -242,6 +242,13 @@ mapfile_reorder : mapfile $(REORDERFILE)
 vm.def: $(Res_Files) $(Obj_Files)
     sh $(GAMMADIR)/make/linux/makefiles/build_vm_def.sh *.o > $@
+mapfile_ext:
+    rm -f $@
+    touch $@
+    if [ -f $(HS_ALT_MAKE)/linux/makefiles/mapfile-ext ]; then \
+      cat $(HS_ALT_MAKE)/linux/makefiles/mapfile-ext > $@; \
+    fi
 ifeq ($(JVM_VARIANT_ZEROSHARK), true)
   STATIC_CXX = false
 else


@@ -258,6 +258,8 @@ flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
     echo && echo "ZIP_DEBUGINFO_FILES = $(ZIP_DEBUGINFO_FILES)"; \
     [ -n "$(ZIPEXE)" ] && \
     echo && echo "ZIPEXE = $(ZIPEXE)"; \
+    [ -n "$(HS_ALT_MAKE)" ] && \
+    echo && echo "HS_ALT_MAKE = $(HS_ALT_MAKE)"; \
     [ -n "$(HOTSPOT_EXTRA_SYSDEFS)" ] && \
     echo && \
     echo "HOTSPOT_EXTRA_SYSDEFS\$$(HOTSPOT_EXTRA_SYSDEFS) = $(HOTSPOT_EXTRA_SYSDEFS)" && \


@@ -249,11 +249,12 @@ JVM_OBJ_FILES = $(Obj_Files) $(DTRACE_OBJS)
 vm_version.o: $(filter-out vm_version.o,$(JVM_OBJ_FILES))
-mapfile : $(MAPFILE) $(MAPFILE_DTRACE_OPT) vm.def
+mapfile : $(MAPFILE) $(MAPFILE_DTRACE_OPT) vm.def mapfile_ext
     rm -f $@
     cat $(MAPFILE) $(MAPFILE_DTRACE_OPT) \
         | $(NAWK) '{ \
           if ($$0 ~ "INSERT VTABLE SYMBOLS HERE") { \
+            system ("cat mapfile_ext"); \
             system ("cat vm.def"); \
           } else { \
             print $$0; \
@@ -267,6 +268,13 @@ mapfile_extended : mapfile $(MAPFILE_DTRACE_OPT)
 vm.def: $(Obj_Files)
     sh $(GAMMADIR)/make/solaris/makefiles/build_vm_def.sh *.o > $@
+mapfile_ext:
+    rm -f $@
+    touch $@
+    if [ -f $(HS_ALT_MAKE)/solaris/makefiles/mapfile-ext ]; then \
+      cat $(HS_ALT_MAKE)/solaris/makefiles/mapfile-ext > $@; \
+    fi
 ifeq ($(LINK_INTO),AOUT)
   LIBJVM.o =
   LIBJVM_MAPFILE =


@@ -3129,8 +3129,7 @@ bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
   return true;
 }
-char* os::reserve_memory_special(size_t size, size_t alignment, char* addr,
-                                 bool exec) {
+char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
   fatal("os::reserve_memory_special should not be called on Solaris.");
   return NULL;
 }


@@ -4167,7 +4167,7 @@ class Par_ConcMarkingClosure: public MetadataAwareOopClosure {
 // been published), so we do not need to check for
 // uninitialized objects before pushing here.
 void Par_ConcMarkingClosure::do_oop(oop obj) {
-  assert(obj->is_oop_or_null(true), "expected an oop or NULL");
+  assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
   HeapWord* addr = (HeapWord*)obj;
   // Check if oop points into the CMS generation
   // and is not marked
@@ -7226,7 +7226,7 @@ void SurvivorSpacePrecleanClosure::do_yield_work() {
 // isMarked() query is "safe".
 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
   // Ignore mark word because we are running concurrent with mutators
-  assert(p->is_oop_or_null(true), "expected an oop or null");
+  assert(p->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(p)));
   HeapWord* addr = (HeapWord*)p;
   assert(_span.contains(addr), "we are scanning the CMS generation");
   bool is_obj_array = false;
@@ -7666,7 +7666,7 @@ void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
 }
 void PushAndMarkVerifyClosure::do_oop(oop obj) {
-  assert(obj->is_oop_or_null(), "expected an oop or NULL");
+  assert(obj->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
   HeapWord* addr = (HeapWord*)obj;
   if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
     // Oop lies in _span and isn't yet grey or black
@@ -7764,7 +7764,7 @@ void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
 void PushOrMarkClosure::do_oop(oop obj) {
   // Ignore mark word because we are running concurrent with mutators.
-  assert(obj->is_oop_or_null(true), "expected an oop or NULL");
+  assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
   HeapWord* addr = (HeapWord*)obj;
   if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
     // Oop lies in _span and isn't yet grey or black
@@ -7802,7 +7802,7 @@ void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p)
 void Par_PushOrMarkClosure::do_oop(oop obj) {
   // Ignore mark word because we are running concurrent with mutators.
-  assert(obj->is_oop_or_null(true), "expected an oop or NULL");
+  assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
   HeapWord* addr = (HeapWord*)obj;
   if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
     // Oop lies in _span and isn't yet grey or black
@@ -7879,7 +7879,7 @@ void PushAndMarkClosure::do_oop(oop obj) {
   // path and may be at the end of the global overflow list (so
   // the mark word may be NULL).
   assert(obj->is_oop_or_null(true /* ignore mark word */),
-         "expected an oop or NULL");
+         err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
   HeapWord* addr = (HeapWord*)obj;
   // Check if oop points into the CMS generation
   // and is not marked
@@ -7959,7 +7959,7 @@ void Par_PushAndMarkClosure::do_oop(oop obj) {
   // the debugger, is_oop_or_null(false) may subsequently start
   // to hold.
   assert(obj->is_oop_or_null(true),
-         "expected an oop or NULL");
+         err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
   HeapWord* addr = (HeapWord*)obj;
   // Check if oop points into the CMS generation
   // and is not marked
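
All of the assertion changes in this file follow one pattern: a fixed message string is upgraded to a formatted one that includes the failing pointer, built from err_msg, PTR_FORMAT, and p2i. A minimal standalone sketch of that idiom (the names mimic HotSpot's, but the implementation here is an assumed analogue, not HotSpot's FormatBuffer):

// Sketch of the err_msg idiom (assumed analogue): the message is only
// formatted on the failing branch, so passing assertions do no string work.
#include <cinttypes>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

#define PTR_FORMAT "0x%016" PRIxPTR            // pointer-width hex, as on 64-bit
static inline uintptr_t p2i(const void* p) { return (uintptr_t)p; }

#define my_assert(cond, ...)                                  \
  do {                                                        \
    if (!(cond)) {                                            \
      fprintf(stderr, "assert failed: " __VA_ARGS__);         \
      fprintf(stderr, "\n");                                  \
      abort();                                                \
    }                                                         \
  } while (0)

int main() {
  void* obj = nullptr;
  my_assert(obj == nullptr, "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
  return 0;
}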


@@ -73,7 +73,7 @@ class PromotedObject VALUE_OBJ_CLASS_SPEC {
   } else {
     res = (PromotedObject*)(_next & next_mask);
   }
-  assert(oop(res)->is_oop_or_null(true /* ignore mark word */), "Not an oop?");
+  assert(oop(res)->is_oop_or_null(true /* ignore mark word */), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(oop(res))));
   return res;
 }
 inline void setNext(PromotedObject* x) {


@@ -277,7 +277,7 @@ inline void CMTask::deal_with_reference(oop obj) {
   ++_refs_reached;
   HeapWord* objAddr = (HeapWord*) obj;
-  assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
+  assert(obj->is_oop_or_null(true /* ignore mark word */), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
   if (_g1h->is_in_g1_reserved(objAddr)) {
     assert(obj != NULL, "null check is implicit");
     if (!_nextMarkBitMap->isMarked(objAddr)) {


@@ -1960,15 +1960,10 @@ jint G1CollectedHeap::initialize() {
   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
                                                  heap_alignment);
-  // It is important to do this in a way such that concurrent readers can't
-  // temporarily think something is in the heap. (I've actually seen this
-  // happen in asserts: DLD.)
-  _reserved.set_word_size(0);
-  _reserved.set_start((HeapWord*)heap_rs.base());
-  _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
+  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
   // Create the gen rem set (and barrier set) for the entire reserved region.
-  _rem_set = collector_policy()->create_rem_set(_reserved, 2);
+  _rem_set = collector_policy()->create_rem_set(reserved_region(), 2);
   set_barrier_set(rem_set()->bs());
   if (!barrier_set()->is_a(BarrierSet::G1SATBCTLogging)) {
     vm_exit_during_initialization("G1 requires a G1SATBLoggingCardTableModRefBS");
@@ -2052,7 +2047,7 @@ jint G1CollectedHeap::initialize() {
   FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
-  _bot_shared = new G1BlockOffsetSharedArray(_reserved, bot_storage);
+  _bot_shared = new G1BlockOffsetSharedArray(reserved_region(), bot_storage);
   _g1h = this;


@@ -43,8 +43,8 @@ inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at
 inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
   assert(is_in_reserved(addr),
          err_msg("Cannot calculate region index for address "PTR_FORMAT" that is outside of the heap ["PTR_FORMAT", "PTR_FORMAT")",
-                 p2i(addr), p2i(_reserved.start()), p2i(_reserved.end())));
-  return (uint)(pointer_delta(addr, _reserved.start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
+                 p2i(addr), p2i(reserved_region().start()), p2i(reserved_region().end())));
+  return (uint)(pointer_delta(addr, reserved_region().start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
 }
 inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
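
The arithmetic above maps an address to its region index by shifting the byte offset from the start of the reserved region. A worked example with illustrative numbers (the base address and the 1 MB region size are assumptions; the real shift is HeapRegion::LogOfHRGrainBytes):

// Worked example of the index computation above; all constants are
// assumptions for illustration.
#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  const unsigned log_region_bytes = 20;          // 1 MB regions (assumed)
  const uintptr_t heap_base = 0x80000000u;       // reserved_region().start()
  const uintptr_t addr      = 0x80350000u;       // some heap address

  assert(addr >= heap_base);                     // mirrors is_in_reserved(addr)
  unsigned index = (unsigned)((addr - heap_base) >> log_region_bytes);
  printf("region index = %u\n", index);          // 0x350000 >> 20 == 3
  return 0;
}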


@@ -43,9 +43,7 @@ void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
   _hot_cache_idx = 0;
   // For refining the cards in the hot cache in parallel
-  uint n_workers = (ParallelGCThreads > 0 ?
-                     _g1h->workers()->total_workers() : 1);
-  _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / (int)n_workers);
+  _hot_cache_par_chunk_size = (ParallelGCThreads > 0 ? ClaimChunkSize : _hot_cache_size);
   _hot_cache_par_claimed_idx = 0;
   _card_counts.initialize(card_counts_storage);


@@ -70,6 +70,9 @@ class G1HotCardCache: public CHeapObj<mtGC> {
   G1CardCounts _card_counts;
 
+  // The number of cached cards a thread claims when flushing the cache
+  static const int ClaimChunkSize = 32;
+
   bool default_use_cache() const {
     return (G1ConcRSLogCacheSize > 0);
   }
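
Taken together, the two G1HotCardCache hunks replace a per-worker division of the cache with fixed-size chunks of ClaimChunkSize = 32 cards claimed from a shared index, so the chunk size no longer depends on the worker count. A sketch of that claiming scheme (illustrative types and names, not HotSpot's code):

// Sketch of chunked work claiming: each worker grabs the next
// ClaimChunkSize entries with one atomic add on a shared index.
#include <atomic>

static const int ClaimChunkSize = 32;            // mirrors the new constant
static std::atomic<int> claimed_idx{0};          // shared claim index

void drain(const int* cache, int len) {
  for (;;) {
    int start = claimed_idx.fetch_add(ClaimChunkSize);
    if (start >= len) return;                    // cache fully claimed
    int end = start + ClaimChunkSize < len ? start + ClaimChunkSize : len;
    for (int i = start; i < end; i++) {
      // refine cache[i] ...
    }
  }
}

int main() {
  int cache[100] = {};
  drain(cache, 100);                             // single-threaded demo
  return 0;
}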


@@ -213,7 +213,7 @@ void HeapRegion::reset_after_compaction() {
 void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
   assert(_humongous_start_region == NULL,
          "we should have already filtered out humongous regions");
-  assert(_end == _orig_end,
+  assert(_end == orig_end(),
          "we should have already filtered out humongous regions");
   _in_collection_set = false;
@@ -266,7 +266,7 @@ void HeapRegion::calc_gc_efficiency() {
 void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
   assert(!isHumongous(), "sanity / pre-condition");
-  assert(end() == _orig_end,
+  assert(end() == orig_end(),
          "Should be normal before the humongous object allocation");
   assert(top() == bottom(), "should be empty");
   assert(bottom() <= new_top && new_top <= new_end, "pre-condition");
@@ -280,7 +280,7 @@ void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
 void HeapRegion::set_continuesHumongous(HeapRegion* first_hr) {
   assert(!isHumongous(), "sanity / pre-condition");
-  assert(end() == _orig_end,
+  assert(end() == orig_end(),
          "Should be normal before the humongous object allocation");
   assert(top() == bottom(), "should be empty");
   assert(first_hr->startsHumongous(), "pre-condition");
@@ -294,14 +294,14 @@ void HeapRegion::clear_humongous() {
   if (startsHumongous()) {
     assert(top() <= end(), "pre-condition");
-    set_end(_orig_end);
+    set_end(orig_end());
     if (top() > end()) {
       // at least one "continues humongous" region after it
       set_top(end());
     }
   } else {
     // continues humongous
-    assert(end() == _orig_end, "sanity");
+    assert(end() == orig_end(), "sanity");
   }
   assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
@@ -326,7 +326,7 @@ HeapRegion::HeapRegion(uint hrm_index,
   _hrm_index(hrm_index),
   _humongous_start_region(NULL),
   _in_collection_set(false),
-  _next_in_special_set(NULL), _orig_end(NULL),
+  _next_in_special_set(NULL),
   _claimed(InitialClaimValue), _evacuation_failed(false),
   _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
   _next_young_region(NULL),
@@ -349,10 +349,14 @@ void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
   G1OffsetTableContigSpace::initialize(mr, clear_space, mangle_space);
-  _orig_end = mr.end();
   hr_clear(false /*par*/, false /*clear_space*/);
   set_top(bottom());
   record_top_and_timestamp();
+
+  assert(mr.end() == orig_end(),
+         err_msg("Given region end address " PTR_FORMAT " should match exactly "
+                 "bottom plus one region size, i.e. " PTR_FORMAT,
+                 p2i(mr.end()), p2i(orig_end())));
 }
 CompactibleSpace* HeapRegion::next_compaction_space() const {
CompactibleSpace* HeapRegion::next_compaction_space() const {


@@ -226,9 +226,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
   // For a humongous region, region in which it starts.
   HeapRegion* _humongous_start_region;
 
-  // For the start region of a humongous sequence, it's original end().
-  HeapWord* _orig_end;
-
   // True iff the region is in current collection_set.
   bool _in_collection_set;
@@ -452,7 +449,7 @@ class HeapRegion: public G1OffsetTableContigSpace {
   // their _end set up to be the end of the last continues region of the
   // corresponding humongous object.
   bool is_in_reserved_raw(const void* p) const {
-    return _bottom <= p && p < _orig_end;
+    return _bottom <= p && p < orig_end();
   }
   // Makes the current region be a "starts humongous" region, i.e.,
@@ -556,7 +553,8 @@ class HeapRegion: public G1OffsetTableContigSpace {
   void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
   bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }
 
-  HeapWord* orig_end() const { return _orig_end; }
+  // For the start region of a humongous sequence, it's original end().
+  HeapWord* orig_end() const { return _bottom + GrainWords; }
 
   // Reset HR stuff to default values.
   void hr_clear(bool par, bool clear_space, bool locked = false);
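
The net effect of the HeapRegion hunks: the stored _orig_end field disappears and orig_end() is derived from the invariant that every region spans exactly GrainWords, which the new assert in HeapRegion::initialize() now checks. A reduced sketch of the resulting shape (illustrative stand-ins, with a tiny grain size for the demo):

// Reduced sketch: a value that is always _bottom + GrainWords can be
// computed on demand instead of stored and kept in sync.
#include <cstddef>
#include <cstdio>

typedef unsigned long HeapWord;

class Region {
 public:
  static const size_t GrainWords = 8;   // tiny stand-in; real regions are far larger
  explicit Region(HeapWord* bottom) : _bottom(bottom) {}
  HeapWord* bottom()   const { return _bottom; }
  HeapWord* orig_end() const { return _bottom + GrainWords; }  // derived, not stored
 private:
  HeapWord* _bottom;
};

int main() {
  HeapWord storage[Region::GrainWords];
  Region r(storage);
  printf("span = %zu words\n", (size_t)(r.orig_end() - r.bottom()));
  return 0;
}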


@@ -288,7 +288,7 @@ void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_arra
       while (p < to) {
         Prefetch::write(p, interval);
         oop m = oop(p);
-        assert(m->is_oop_or_null(), "check for header");
+        assert(m->is_oop_or_null(), err_msg("Expected an oop or NULL for header field at " PTR_FORMAT, p2i(m)));
         m->push_contents(pm);
         p += m->size();
       }
@@ -296,7 +296,7 @@ void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_arra
     } else {
       while (p < to) {
         oop m = oop(p);
-        assert(m->is_oop_or_null(), "check for header");
+        assert(m->is_oop_or_null(), err_msg("Expected an oop or NULL for header field at " PTR_FORMAT, p2i(m)));
         m->push_contents(pm);
         p += m->size();
       }


@@ -74,10 +74,9 @@ jint ParallelScavengeHeap::initialize() {
     return JNI_ENOMEM;
   }
-  _reserved = MemRegion((HeapWord*)heap_rs.base(),
-                        (HeapWord*)(heap_rs.base() + heap_rs.size()));
+  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
-  CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
+  CardTableExtension* const barrier_set = new CardTableExtension(reserved_region(), 3);
   barrier_set->initialize();
   _barrier_set = barrier_set;
   oopDesc::set_bs(_barrier_set);


@@ -2882,7 +2882,7 @@ void PSParallelCompact::update_deferred_objects(ParCompactionManager* cm,
         start_array->allocate_block(addr);
       }
       oop(addr)->update_contents(cm);
-      assert(oop(addr)->is_oop_or_null(), "should be an oop now");
+      assert(oop(addr)->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(oop(addr))));
     }
   }
 }
@@ -3366,7 +3366,7 @@ MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
   oop moved_oop = (oop) destination();
   moved_oop->update_contents(compaction_manager());
-  assert(moved_oop->is_oop_or_null(), "Object should be whole at this point");
+  assert(moved_oop->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(moved_oop)));
   update_state(words);
   assert(destination() == (HeapWord*)moved_oop + moved_oop->size(), "sanity");


@@ -582,6 +582,14 @@ void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
   }
 }
+void CollectedHeap::initialize_reserved_region(HeapWord *start, HeapWord *end) {
+  // It is important to do this in a way such that concurrent readers can't
+  // temporarily think something is in the heap. (Seen this happen in asserts.)
+  _reserved.set_word_size(0);
+  _reserved.set_start(start);
+  _reserved.set_end(end);
+}
 /////////////// Unit tests ///////////////
 #ifndef PRODUCT
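
The store order in the new initialize_reserved_region() is deliberate, per the comment: shrinking the region to zero words first means any intermediate state a concurrent reader observes is an empty region, never one with mismatched bounds. A simplified model of why that works (MemRegion here is a stand-in for HotSpot's start-plus-word-size representation; a real publication would also need ordered or atomic stores, which this sketch does not show):

// Simplified model: while _word_size is zero, contains() is false
// regardless of _start, so a reader racing with the three stores in
// publish() never sees a region that wrongly appears to contain p.
#include <cstddef>

typedef unsigned long HeapWord;

struct MemRegion {
  const HeapWord* _start;
  size_t _word_size;
  bool contains(const HeapWord* p) const {
    return _start <= p && p < _start + _word_size;  // empty => always false
  }
  void set_word_size(size_t s)      { _word_size = s; }
  void set_start(const HeapWord* s) { _start = s; }
  void set_end(const HeapWord* e)   { _word_size = (size_t)(e - _start); }
};

void publish(MemRegion& r, const HeapWord* start, const HeapWord* end) {
  r.set_word_size(0);   // 1. make the region empty first
  r.set_start(start);   // 2. _start may move; the region is still empty
  r.set_end(end);       // 3. finally grow to the new bounds
}

int main() {
  static HeapWord heap[64];
  MemRegion r = { nullptr, 0 };
  publish(r, heap, heap + 64);
  return r.contains(heap + 10) ? 0 : 1;
}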


@@ -85,6 +85,7 @@ class CollectedHeap : public CHeapObj<mtInternal> {
   friend class VMStructs;
   friend class IsGCActiveMark; // Block structured external access to _is_gc_active
+ private:
 #ifdef ASSERT
   static int _fire_out_of_memory_count;
 #endif
@@ -97,8 +98,9 @@ class CollectedHeap : public CHeapObj<mtInternal> {
   // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2 is being used
   bool _defer_initial_card_mark;
- protected:
   MemRegion _reserved;
+
+ protected:
   BarrierSet* _barrier_set;
   bool _is_gc_active;
   uint _n_par_threads;
@@ -211,6 +213,7 @@ class CollectedHeap : public CHeapObj<mtInternal> {
   // Stop any onging concurrent work and prepare for exit.
   virtual void stop() {}
+  void initialize_reserved_region(HeapWord *start, HeapWord *end);
   MemRegion reserved_region() const { return _reserved; }
   address base() const { return (address)reserved_region().start(); }


@@ -35,7 +35,7 @@
 #ifdef ASSERT
 #define VERIFY_OOP(o_) \
   if (VerifyOops) { \
-    assert((oop(o_))->is_oop_or_null(), "Not an oop!"); \
+    assert((oop(o_))->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(oop(o_)))); \
     StubRoutines::_verify_oop_count++; \
   }
 #else


@@ -123,17 +123,9 @@ jint GenCollectedHeap::initialize() {
     return JNI_ENOMEM;
   }
-  _reserved = MemRegion((HeapWord*)heap_rs.base(),
-                        (HeapWord*)(heap_rs.base() + heap_rs.size()));
+  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
-  // It is important to do this in a way such that concurrent readers can't
-  // temporarily think something is in the heap. (Seen this happen in asserts.)
-  _reserved.set_word_size(0);
-  _reserved.set_start((HeapWord*)heap_rs.base());
-  size_t actual_heap_size = heap_rs.size();
-  _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));
-  _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
+  _rem_set = collector_policy()->create_rem_set(reserved_region(), n_covered_regions);
   set_barrier_set(rem_set()->bs());
   _gch = this;


@@ -473,7 +473,7 @@ void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
   _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
   oop discovered = java_lang_ref_Reference::discovered(_ref);
   assert(_discovered_addr && discovered->is_oop_or_null(),
-         "discovered field is bad");
+         err_msg("Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered)));
   _next = discovered;
   _referent_addr = java_lang_ref_Reference::referent_addr(_ref);
   _referent = java_lang_ref_Reference::referent(_ref);
@@ -482,7 +482,9 @@ void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
   assert(allow_null_referent ?
          _referent->is_oop_or_null()
        : _referent->is_oop(),
-         "bad referent");
+         err_msg("Expected an oop%s for referent field at " PTR_FORMAT,
+                 (allow_null_referent ? " or NULL" : ""),
+                 p2i(_referent)));
 }
 void DiscoveredListIterator::remove() {
@@ -630,7 +632,7 @@ ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList& refs_list,
     oop next = java_lang_ref_Reference::next(iter.obj());
     if ((iter.referent() == NULL || iter.is_referent_alive() ||
          next != NULL)) {
-      assert(next->is_oop_or_null(), "bad next field");
+      assert(next->is_oop_or_null(), err_msg("Expected an oop or NULL for next field at " PTR_FORMAT, p2i(next)));
       // Remove Reference object from list
       iter.remove();
      // Trace the cohorts
@@ -979,7 +981,7 @@ void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list)
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
     oop next = java_lang_ref_Reference::next(iter.obj());
-    assert(next->is_oop_or_null(), "bad next field");
+    assert(next->is_oop_or_null(), err_msg("Expected an oop or NULL for next field at " PTR_FORMAT, p2i(next)));
     // If referent has been cleared or Reference is not active,
     // drop it.
     if (iter.referent() == NULL || next != NULL) {
@@ -1172,7 +1174,7 @@ bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
   HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
   const oop discovered = java_lang_ref_Reference::discovered(obj);
-  assert(discovered->is_oop_or_null(), "bad discovered field");
+  assert(discovered->is_oop_or_null(), err_msg("Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered)));
   if (discovered != NULL) {
     // The reference has already been discovered...
     if (TraceReferenceGC) {
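
One detail worth noting in the load_ptrs() hunk: a single format string covers both the nullable and non-nullable referent cases by feeding an optional " or NULL" through %s. The same trick in isolation (illustrative):

// The conditional "%s" trick from the referent assertion, standalone.
#include <cstdio>

int main() {
  for (int allow_null = 0; allow_null <= 1; allow_null++) {
    printf("Expected an oop%s for referent field\n",
           allow_null ? " or NULL" : "");
  }
  return 0;
}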


@@ -88,6 +88,8 @@ const char* Arguments::_gc_log_filename = NULL;
 bool Arguments::_has_profile = false;
 size_t Arguments::_conservative_max_heap_alignment = 0;
 uintx Arguments::_min_heap_size = 0;
+uintx Arguments::_min_heap_free_ratio = 0;
+uintx Arguments::_max_heap_free_ratio = 0;
 Arguments::Mode Arguments::_mode = _mixed;
 bool Arguments::_java_compiler = false;
 bool Arguments::_xdebug_mode = false;
@@ -1630,9 +1632,11 @@ void Arguments::set_parallel_gc_flags() {
   // unless the user actually sets these flags.
   if (FLAG_IS_DEFAULT(MinHeapFreeRatio)) {
     FLAG_SET_DEFAULT(MinHeapFreeRatio, 0);
+    _min_heap_free_ratio = MinHeapFreeRatio;
   }
   if (FLAG_IS_DEFAULT(MaxHeapFreeRatio)) {
     FLAG_SET_DEFAULT(MaxHeapFreeRatio, 100);
+    _max_heap_free_ratio = MaxHeapFreeRatio;
   }
 }
@@ -2025,6 +2029,8 @@ bool Arguments::verify_MinHeapFreeRatio(FormatBuffer<80>& err_msg, uintx min_hea
             MaxHeapFreeRatio);
     return false;
   }
+  // This does not set the flag itself, but stores the value in a safe place for later usage.
+  _min_heap_free_ratio = min_heap_free_ratio;
   return true;
 }
@@ -2039,6 +2045,8 @@ bool Arguments::verify_MaxHeapFreeRatio(FormatBuffer<80>& err_msg, uintx max_hea
             MinHeapFreeRatio);
     return false;
   }
+  // This does not set the flag itself, but stores the value in a safe place for later usage.
+  _max_heap_free_ratio = max_heap_free_ratio;
   return true;
 }


@@ -285,7 +285,11 @@ class Arguments : AllStatic {
   // Value of the conservative maximum heap alignment needed
   static size_t _conservative_max_heap_alignment;
   static uintx _min_heap_size;
+
+  // Used to store original flag values
+  static uintx _min_heap_free_ratio;
+  static uintx _max_heap_free_ratio;
 
   // -Xrun arguments
   static AgentLibraryList _libraryList;
@@ -516,6 +520,10 @@ class Arguments : AllStatic {
   static uintx min_heap_size() { return _min_heap_size; }
   static void set_min_heap_size(uintx v) { _min_heap_size = v; }
 
+  // Returns the original values of -XX:MinHeapFreeRatio and -XX:MaxHeapFreeRatio
+  static uintx min_heap_free_ratio() { return _min_heap_free_ratio; }
+  static uintx max_heap_free_ratio() { return _max_heap_free_ratio; }
+
   // -Xrun
   static AgentLibrary* libraries() { return _libraryList.first(); }
   static bool init_libraries_at_startup() { return !_libraryList.is_empty(); }
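
The Arguments changes save the post-parsing values of MinHeapFreeRatio and MaxHeapFreeRatio so code running later can still see what was verified at startup even if ergonomics rewrites the live flag. A standalone analogue (illustrative names, not HotSpot's code):

// Standalone analogue: copy a flag aside at verification time so the
// original survives later rewrites of the live value.
#include <cstdio>

static unsigned MinHeapFreeRatio = 40;           // the live, mutable "flag"

struct Args {
  static unsigned _min_heap_free_ratio;          // saved original
  static unsigned min_heap_free_ratio() { return _min_heap_free_ratio; }
};
unsigned Args::_min_heap_free_ratio = 0;

int main() {
  Args::_min_heap_free_ratio = MinHeapFreeRatio; // as in verify_MinHeapFreeRatio
  MinHeapFreeRatio = 0;                          // ergonomics overwrites the flag
  printf("original=%u live=%u\n", Args::min_heap_free_ratio(), MinHeapFreeRatio);
  return 0;
}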


@@ -722,7 +722,7 @@ void DumperSupport::dump_field_value(DumpWriter* writer, char type, address addr
       // reflection and sun.misc.Unsafe classes may have a reference to a
       // Klass* so filter it out.
-      assert(o->is_oop_or_null(), "should always be an oop");
+      assert(o->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(o)));
       writer->write_objectID(o);
       break;
     }


@@ -331,7 +331,7 @@ void GenericTaskQueue<E, F, N>::oops_do(OopClosure* f) {
     // index, &_elems[index], _elems[index]);
     E* t = (E*)&_elems[index]; // cast away volatility
     oop* p = (oop*)t;
-    assert((*t)->is_oop_or_null(), "Not an oop or null");
+    assert((*t)->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(*t)));
     f->do_oop(p);
   }
   // tty->print_cr("END OopTaskQueue::oops_do");