Mirror of https://github.com/openjdk/jdk.git (synced 2026-02-14 20:35:09 +00:00)
8040722: G1: Clean up usages of heap_region_containing
Reviewed-by: tschatzl, jmasa
parent 69c3c31317
commit 5d967f0aea
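The cleanup below changes heap_region_containing() so that it may no longer return NULL (the address passed in must be a valid heap address), which lets callers drop their NULL checks on the returned region. A minimal standalone sketch of the calling pattern this moves toward, using hypothetical mock types rather than the real HotSpot classes:

    // Hypothetical standalone mock (not HotSpot code) illustrating the pattern.
    #include <cassert>
    #include <cstddef>

    struct HeapRegion {
      bool young;
      bool is_young() const { return young; }
    };

    struct MockG1Heap {
      HeapRegion region;

      // After the cleanup the lookup asserts on a bad address instead of
      // returning NULL, so callers no longer test the region for NULL.
      HeapRegion* heap_region_containing(const void* addr) {
        assert(addr != NULL && "addr must not be NULL");
        return &region;  // the real code maps addr to its containing region
      }

      // Caller style after the cleanup: check the oop itself, not the region.
      bool is_in_young(const void* obj) {
        if (obj == NULL) {
          return false;
        }
        return heap_region_containing(obj)->is_young();
      }
    };

    int main() {
      MockG1Heap heap;
      heap.region.young = true;
      int dummy = 0;  // stand-in for an object address inside the heap
      return heap.is_in_young(&dummy) ? 0 : 1;
    }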
@@ -2655,7 +2655,6 @@ public:
str = " O";
} else {
HeapRegion* hr = _g1h->heap_region_containing(obj);
guarantee(hr != NULL, "invariant");
bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo);
bool marked = _g1h->is_marked(obj, _vo);

@@ -3413,9 +3412,8 @@ G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
}

void CMTask::setup_for_region(HeapRegion* hr) {
// Separated the asserts so that we know which one fires.
assert(hr != NULL,
"claim_region() should have filtered out continues humongous regions");
"claim_region() should have filtered out NULL regions");
assert(!hr->continuesHumongous(),
"claim_region() should have filtered out continues humongous regions");

@@ -445,24 +445,18 @@ void G1CollectedHeap::stop_conc_gc_threads() {
// implementation of is_scavengable() for G1 will indicate that
// all nmethods must be scanned during a partial collection.
bool G1CollectedHeap::is_in_partial_collection(const void* p) {
HeapRegion* hr = heap_region_containing(p);
return hr != NULL && hr->in_collection_set();
if (p == NULL) {
return false;
}
return heap_region_containing(p)->in_collection_set();
}
#endif

// Returns true if the reference points to an object that
// can move in an incremental collection.
bool G1CollectedHeap::is_scavengable(const void* p) {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
G1CollectorPolicy* g1p = g1h->g1_policy();
HeapRegion* hr = heap_region_containing(p);
if (hr == NULL) {
// null
assert(p == NULL, err_msg("Not NULL " PTR_FORMAT ,p));
return false;
} else {
return !hr->isHumongous();
}
return !hr->isHumongous();
}

void G1CollectedHeap::check_ct_logs_at_safepoint() {

@@ -2952,21 +2946,16 @@ CompactibleSpace* G1CollectedHeap::first_compactible_space() {

Space* G1CollectedHeap::space_containing(const void* addr) const {
Space* res = heap_region_containing(addr);
return res;
return heap_region_containing(addr);
}

HeapWord* G1CollectedHeap::block_start(const void* addr) const {
Space* sp = space_containing(addr);
if (sp != NULL) {
return sp->block_start(addr);
}
return NULL;
return sp->block_start(addr);
}

size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
Space* sp = space_containing(addr);
assert(sp != NULL, "block_size of address outside of heap");
return sp->block_size(addr);
}

@@ -4680,30 +4669,19 @@ G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1,
_worker_id(par_scan_state->queue_num()) { }

void G1ParCopyHelper::mark_object(oop obj) {
#ifdef ASSERT
HeapRegion* hr = _g1->heap_region_containing(obj);
assert(hr != NULL, "sanity");
assert(!hr->in_collection_set(), "should not mark objects in the CSet");
#endif // ASSERT
assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");

// We know that the object is not moving so it's safe to read its size.
_cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
}

void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
#ifdef ASSERT
assert(from_obj->is_forwarded(), "from obj should be forwarded");
assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
assert(from_obj != to_obj, "should not be self-forwarded");

HeapRegion* from_hr = _g1->heap_region_containing(from_obj);
assert(from_hr != NULL, "sanity");
assert(from_hr->in_collection_set(), "from obj should be in the CSet");

HeapRegion* to_hr = _g1->heap_region_containing(to_obj);
assert(to_hr != NULL, "sanity");
assert(!to_hr->in_collection_set(), "should not mark objects in the CSet");
#endif // ASSERT
assert(_g1->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
assert(!_g1->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");

// The object might be in the process of being copied by another
// worker so we cannot trust that its to-space image is

@@ -6461,11 +6439,7 @@ void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {

bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
HeapRegion* hr = heap_region_containing(p);
if (hr == NULL) {
return false;
} else {
return hr->is_in(p);
}
return hr->is_in(p);
}

// Methods for the mutator alloc region
@@ -1380,17 +1380,15 @@ public:
// space containing a given address, or else returns NULL.
virtual Space* space_containing(const void* addr) const;

// A G1CollectedHeap will contain some number of heap regions. This
// finds the region containing a given address, or else returns NULL.
template <class T>
inline HeapRegion* heap_region_containing(const T addr) const;

// Like the above, but requires "addr" to be in the heap (to avoid a
// null-check), and unlike the above, may return an continuing humongous
// region.
// Returns the HeapRegion that contains addr. addr must not be NULL.
template <class T>
inline HeapRegion* heap_region_containing_raw(const T addr) const;

// Returns the HeapRegion that contains addr. addr must not be NULL.
// If addr is within a humongous continues region, it returns its humongous start region.
template <class T>
inline HeapRegion* heap_region_containing(const T addr) const;

// A CollectedHeap is divided into a dense sequence of "blocks"; that is,
// each address in the (reserved) heap is a member of exactly
// one block. The defining characteristic of a block is that it is
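The declarations above distinguish two lookups: heap_region_containing_raw() returns the region table entry directly (and may hand back a "continues humongous" region), while heap_region_containing() additionally resolves such a region to its humongous start region. A standalone sketch of that distinction, using a fabricated region table and mock types rather than the real G1 classes:

    // Hypothetical standalone mock (not HotSpot code) of the two lookups.
    #include <cassert>
    #include <cstddef>

    struct HeapRegion {
      HeapRegion* humongous_start;  // non-NULL only for "continues humongous" regions
      bool continuesHumongous() const { return humongous_start != NULL; }
      HeapRegion* humongous_start_region() const { return humongous_start; }
    };

    struct MockRegionTable {
      HeapRegion regions[4];
      size_t region_size_bytes;
      const char* heap_bottom;

      // Raw variant: addr must be a valid heap address; the result may be a
      // continues-humongous region.
      HeapRegion* heap_region_containing_raw(const void* addr) {
        assert(addr != NULL && "addr must not be NULL");
        size_t index = (size_t)((const char*)addr - heap_bottom) / region_size_bytes;
        assert(index < 4 && "addr is outside the mock heap");
        return &regions[index];
      }

      // Non-raw variant: same precondition, but a continues-humongous region
      // is resolved to its humongous start region.
      HeapRegion* heap_region_containing(const void* addr) {
        HeapRegion* hr = heap_region_containing_raw(addr);
        if (hr->continuesHumongous()) {
          return hr->humongous_start_region();
        }
        return hr;
      }
    };

    int main() {
      char heap[4 * 64];  // four fake 64-byte regions
      MockRegionTable table = {};
      table.region_size_bytes = 64;
      table.heap_bottom = heap;
      table.regions[2].humongous_start = &table.regions[1];  // region 2 continues region 1
      HeapRegion* hr = table.heap_region_containing(heap + 2 * 64 + 8);
      return hr == &table.regions[1] ? 0 : 1;
    }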
@@ -1532,7 +1530,6 @@ public:
// the region to which the object belongs. An object is dead
// iff a) it was not allocated since the last mark and b) it
// is not marked.

bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
return
!hr->obj_allocated_since_prev_marking(obj) &&

@@ -1542,7 +1539,6 @@ public:
// This function returns true when an object has been
// around since the previous marking and hasn't yet
// been marked during this marking.

bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
return
!hr->obj_allocated_since_next_marking(obj) &&

@@ -42,21 +42,22 @@ inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrs.at

template <class T>
inline HeapRegion*
G1CollectedHeap::heap_region_containing(const T addr) const {
HeapRegion* hr = _hrs.addr_to_region((HeapWord*) addr);
// hr can be null if addr in perm_gen
if (hr != NULL && hr->continuesHumongous()) {
hr = hr->humongous_start_region();
}
return hr;
G1CollectedHeap::heap_region_containing_raw(const T addr) const {
assert(addr != NULL, "invariant");
assert(_g1_reserved.contains((const void*) addr),
err_msg("Address "PTR_FORMAT" is outside of the heap ranging from ["PTR_FORMAT" to "PTR_FORMAT")",
(void*)addr, _g1_reserved.start(), _g1_reserved.end()));
return _hrs.addr_to_region((HeapWord*) addr);
}

template <class T>
inline HeapRegion*
G1CollectedHeap::heap_region_containing_raw(const T addr) const {
assert(_g1_reserved.contains((const void*) addr), "invariant");
HeapRegion* res = _hrs.addr_to_region_unsafe((HeapWord*) addr);
return res;
G1CollectedHeap::heap_region_containing(const T addr) const {
HeapRegion* hr = heap_region_containing_raw(addr);
if (hr->continuesHumongous()) {
return hr->humongous_start_region();
}
return hr;
}

inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
@@ -134,8 +135,7 @@ G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
// have to keep calling heap_region_containing_raw() in the
// asserts below.
DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
assert(containing_hr != NULL && start != NULL && word_size > 0,
"pre-condition");
assert(word_size > 0, "pre-condition");
assert(containing_hr->is_in(start), "it should contain start");
assert(containing_hr->is_young(), "it should be young");
assert(!containing_hr->isHumongous(), "it should not be humongous");

@@ -246,8 +246,10 @@ inline void G1CollectedHeap::reset_evacuation_should_fail() {
#endif // #ifndef PRODUCT

inline bool G1CollectedHeap::is_in_young(const oop obj) {
HeapRegion* hr = heap_region_containing(obj);
return hr != NULL && hr->is_young();
if (obj == NULL) {
return false;
}
return heap_region_containing(obj)->is_young();
}

// We don't need barriers for initializing stores to objects

@@ -260,21 +262,17 @@ inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
}

inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
const HeapRegion* hr = heap_region_containing(obj);
if (hr == NULL) {
if (obj == NULL) return false;
else return true;
if (obj == NULL) {
return false;
}
else return is_obj_dead(obj, hr);
return is_obj_dead(obj, heap_region_containing(obj));
}

inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
const HeapRegion* hr = heap_region_containing(obj);
if (hr == NULL) {
if (obj == NULL) return false;
else return true;
if (obj == NULL) {
return false;
}
else return is_obj_ill(obj, hr);
return is_obj_ill(obj, heap_region_containing(obj));
}

template <class T> inline void G1ParScanThreadState::immediate_rs_update(HeapRegion* from, T* p, int tid) {

@@ -125,9 +125,7 @@ inline void G1RootRegionScanClosure::do_oop_nv(T* p) {
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
HeapRegion* hr = _g1h->heap_region_containing((HeapWord*) obj);
if (hr != NULL) {
_cm->grayRoot(obj, obj->size(), _worker_id, hr);
}
_cm->grayRoot(obj, obj->size(), _worker_id, hr);
}
}
@@ -154,57 +152,63 @@ inline void G1InvokeIfNotTriggeredClosure::do_oop_nv(T* p) {
template <class T>
inline void G1UpdateRSOrPushRefOopClosure::do_oop_nv(T* p) {
oop obj = oopDesc::load_decode_heap_oop(p);
if (obj == NULL) {
return;
}

#ifdef ASSERT
// can't do because of races
// assert(obj == NULL || obj->is_oop(), "expected an oop");

// Do the safe subset of is_oop
if (obj != NULL) {
#ifdef CHECK_UNHANDLED_OOPS
oopDesc* o = obj.obj();
oopDesc* o = obj.obj();
#else
oopDesc* o = obj;
oopDesc* o = obj;
#endif // CHECK_UNHANDLED_OOPS
assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
}
assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
#endif // ASSERT

assert(_from != NULL, "from region must be non-NULL");
assert(_from->is_in_reserved(p), "p is not in from");

HeapRegion* to = _g1->heap_region_containing(obj);
if (to != NULL && _from != to) {
// The _record_refs_into_cset flag is true during the RSet
// updating part of an evacuation pause. It is false at all
// other times:
// * rebuilding the remembered sets after a full GC
// * during concurrent refinement.
// * updating the remembered sets of regions in the collection
// set in the event of an evacuation failure (when deferred
// updates are enabled).
if (_from == to) {
// Normally this closure should only be called with cross-region references.
// But since Java threads are manipulating the references concurrently and we
// reload the values things may have changed.
return;
}

if (_record_refs_into_cset && to->in_collection_set()) {
// We are recording references that point into the collection
// set and this particular reference does exactly that...
// If the referenced object has already been forwarded
// to itself, we are handling an evacuation failure and
// we have already visited/tried to copy this object
// there is no need to retry.
if (!self_forwarded(obj)) {
assert(_push_ref_cl != NULL, "should not be null");
// Push the reference in the refs queue of the G1ParScanThreadState
// instance for this worker thread.
_push_ref_cl->do_oop(p);
}
// The _record_refs_into_cset flag is true during the RSet
// updating part of an evacuation pause. It is false at all
// other times:
// * rebuilding the remembered sets after a full GC
// * during concurrent refinement.
// * updating the remembered sets of regions in the collection
// set in the event of an evacuation failure (when deferred
// updates are enabled).

// Deferred updates to the CSet are either discarded (in the normal case),
// or processed (if an evacuation failure occurs) at the end
// of the collection.
// See G1RemSet::cleanup_after_oops_into_collection_set_do().
return;
if (_record_refs_into_cset && to->in_collection_set()) {
// We are recording references that point into the collection
// set and this particular reference does exactly that...
// If the referenced object has already been forwarded
// to itself, we are handling an evacuation failure and
// we have already visited/tried to copy this object
// there is no need to retry.
if (!self_forwarded(obj)) {
assert(_push_ref_cl != NULL, "should not be null");
// Push the reference in the refs queue of the G1ParScanThreadState
// instance for this worker thread.
_push_ref_cl->do_oop(p);
}

// Deferred updates to the CSet are either discarded (in the normal case),
// or processed (if an evacuation failure occurs) at the end
// of the collection.
// See G1RemSet::cleanup_after_oops_into_collection_set_do().
} else {
// We either don't care about pushing references that point into the
// collection set (i.e. we're not during an evacuation pause) _or_
// the reference doesn't point into the collection set. Either way
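The closure above is restructured from nested if-blocks into guard clauses: bail out on a NULL oop, bail out on a same-region reference, then handle the into-CSet case. A small standalone sketch of that control-flow shape, using hypothetical placeholder types and hooks rather than the real closure:

    // Hypothetical sketch (not HotSpot code) of the guard-clause shape used above.
    #include <cstddef>

    struct Ref { void* obj; void* from_region; void* to_region; bool into_cset; };

    // Placeholder hooks standing in for the real closure's work.
    static void push_reference(Ref&) {}
    static void add_to_remembered_set(Ref&) {}

    void process_reference(Ref& ref, bool record_refs_into_cset) {
      if (ref.obj == NULL) {
        return;                    // nothing to do for a NULL reference
      }
      if (ref.from_region == ref.to_region) {
        return;                    // only cross-region references are interesting
      }
      if (record_refs_into_cset && ref.into_cset) {
        push_reference(ref);       // deferred: handled at the end of the pause
        return;
      }
      add_to_remembered_set(ref);  // ordinary cross-region reference
    }

    int main() {
      int a, b;
      Ref ref = { &a, &a, &b, false };
      process_reference(ref, true);  // cross-region, not into the CSet
      return 0;
    }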
@@ -210,7 +210,6 @@ public:
#endif

HeapRegion* card_region = _g1h->heap_region_containing(card_start);
assert(card_region != NULL, "Yielding cards not in the heap?");
_cards++;

if (!card_region->is_on_dirty_cards_region_list()) {

@@ -405,7 +404,6 @@ public:
HeapWord* start = _ct_bs->addr_for(card_ptr);
// And find the region containing it.
HeapRegion* r = _g1->heap_region_containing(start);
assert(r != NULL, "unexpected null");

// Scan oops in the card looking for references into the collection set
// Don't use addr_for(card_ptr + 1) which can ask for

@@ -567,11 +565,6 @@ bool G1RemSet::refine_card(jbyte* card_ptr, uint worker_i,
HeapWord* start = _ct_bs->addr_for(card_ptr);
// And find the region containing it.
HeapRegion* r = _g1->heap_region_containing(start);
if (r == NULL) {
// Again no need to return that this card contains refs that
// point into the collection set.
return false; // Not in the G1 heap (might be in perm, for example.)
}

// Why do we have to check here whether a card is on a young region,
// given that we dirty young regions and, as a result, the

@@ -624,10 +617,6 @@ bool G1RemSet::refine_card(jbyte* card_ptr, uint worker_i,

start = _ct_bs->addr_for(card_ptr);
r = _g1->heap_region_containing(start);
if (r == NULL) {
// Not in the G1 heap
return false;
}

// Checking whether the region we got back from the cache
// is young here is inappropriate. The region could have been

@@ -45,26 +45,28 @@ inline void G1RemSet::write_ref(HeapRegion* from, T* p) {
template <class T>
inline void G1RemSet::par_write_ref(HeapRegion* from, T* p, int tid) {
oop obj = oopDesc::load_decode_heap_oop(p);
if (obj == NULL) {
return;
}

#ifdef ASSERT
// can't do because of races
// assert(obj == NULL || obj->is_oop(), "expected an oop");

// Do the safe subset of is_oop
if (obj != NULL) {
#ifdef CHECK_UNHANDLED_OOPS
oopDesc* o = obj.obj();
oopDesc* o = obj.obj();
#else
oopDesc* o = obj;
oopDesc* o = obj;
#endif // CHECK_UNHANDLED_OOPS
assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
}
assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
#endif // ASSERT

assert(from == NULL || from->is_in_reserved(p), "p is not in from");

HeapRegion* to = _g1->heap_region_containing(obj);
if (to != NULL && from != to) {
if (from != to) {
assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
to->rem_set()->add_reference(p, tid);
}

@@ -797,7 +797,6 @@ bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {

bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
HeapRegion* hr = _g1h->heap_region_containing_raw(from);
if (hr == NULL) return false;
RegionIdx_t hr_ind = (RegionIdx_t) hr->hrs_index();
// Is this region in the coarse map?
if (_coarse_map.at(hr_ind)) return true;

@@ -240,7 +240,6 @@ void HeapRegionSeq::verify_optional() {
// Asserts will fire if i is >= _length
HeapWord* addr = hr->bottom();
guarantee(addr_to_region(addr) == hr, "sanity");
guarantee(addr_to_region_unsafe(addr) == hr, "sanity");
} else {
guarantee(hr->is_empty(), "sanity");
guarantee(!hr->isHumongous(), "sanity");

@@ -110,10 +110,6 @@ class HeapRegionSeq: public CHeapObj<mtGC> {
// HeapRegion, otherwise return NULL.
inline HeapRegion* addr_to_region(HeapWord* addr) const;

// Return the HeapRegion that corresponds to the given
// address. Assume the address is valid.
inline HeapRegion* addr_to_region_unsafe(HeapWord* addr) const;

// Return the number of regions that have been committed in the heap.
uint length() const { return _committed_length; }

@@ -28,21 +28,17 @@
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp"

inline HeapRegion* HeapRegionSeq::addr_to_region_unsafe(HeapWord* addr) const {
inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
assert(addr < heap_end(),
err_msg("addr: "PTR_FORMAT" end: "PTR_FORMAT, addr, heap_end()));
assert(addr >= heap_bottom(),
err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, addr, heap_bottom()));

HeapRegion* hr = _regions.get_by_address(addr);
assert(hr != NULL, "invariant");
return hr;
}

inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
if (addr != NULL && addr < heap_end()) {
assert(addr >= heap_bottom(),
err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, addr, heap_bottom()));
return addr_to_region_unsafe(addr);
}
return NULL;
}

inline HeapRegion* HeapRegionSeq::at(uint index) const {
assert(index < length(), "pre-condition");
HeapRegion* hr = _regions.get_by_index(index);