8290357: Drop HeapRegion::marked_bytes()
Reviewed-by: sangheki, iwalulya
commit 459193710f
parent 7676be8a99
@@ -73,28 +73,16 @@ class G1RebuildRSAndScrubTask : public WorkerTask {
 
     const bool _should_rebuild_remset;
 
-    size_t _marked_words;
     size_t _processed_words;
 
    const size_t ProcessingYieldLimitInWords = G1RebuildRemSetChunkSize / HeapWordSize;
 
-    void reset_marked_words() {
-      _marked_words = 0;
-    }
-
     void reset_processed_words() {
       _processed_words = 0;
     }
 
-    void assert_marked_words(HeapRegion* hr) {
-      assert((_marked_words * HeapWordSize) == hr->marked_bytes(),
-             "Mismatch between marking and re-calculation for region %u, %zu != %zu",
-             hr->hrm_index(), (_marked_words * HeapWordSize), hr->marked_bytes());
-    }
-
     void add_processed_words(size_t processed) {
       _processed_words += processed;
-      _marked_words += processed;
     }
 
     // Yield if enough has been processed; returns if the concurrent marking cycle
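Aside: the dropped _marked_words counter existed only to feed the verification asserts removed below; the surviving _processed_words counter is what drives the periodic yield check against ProcessingYieldLimitInWords. A minimal standalone sketch of that chunked-processing pattern, with illustrative names (ChunkedWorker, kYieldLimitWords) that are not HotSpot code:

    #include <cstddef>

    // Sketch only: accumulate work in words and signal a yield once a
    // chunk-sized threshold is crossed, mirroring how add_processed_words()
    // feeds the yield check above. The threshold value is made up.
    struct ChunkedWorker {
      static const size_t kYieldLimitWords = 4096; // stand-in for ProcessingYieldLimitInWords
      size_t processed_words = 0;

      // Returns true when the caller should pause and check for abort/safepoint.
      bool add_and_check(size_t words) {
        processed_words += words;
        if (processed_words >= kYieldLimitWords) {
          processed_words = 0; // like reset_processed_words()
          return true;
        }
        return false;
      }
    };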
@@ -228,7 +216,6 @@ class G1RebuildRSAndScrubTask : public WorkerTask {
     bool scan_and_scrub_region(HeapRegion* hr, HeapWord* const pb) {
       assert(should_rebuild_or_scrub(hr), "must be");
 
-      reset_marked_words();
       log_trace(gc, marking)("Scrub and rebuild region: " HR_FORMAT " pb: " PTR_FORMAT " TARS: " PTR_FORMAT,
                              HR_FORMAT_PARAMS(hr), p2i(pb), p2i(_cm->top_at_rebuild_start(hr->hrm_index())));
 
@@ -240,9 +227,6 @@ class G1RebuildRSAndScrubTask : public WorkerTask {
       // Scrubbing completed for this region - notify that we are done with it, resetting
       // pb to bottom.
       hr->note_end_of_scrubbing();
-      // Assert that the size of marked objects from the marking matches
-      // the size of the objects which we scanned to rebuild remembered sets.
-      assert_marked_words(hr);
 
       // Rebuild from TAMS (= parsable_bottom) to TARS.
       if (scan_from_pb_to_tars(hr, pb, _cm->top_at_rebuild_start(hr->hrm_index()))) {
@@ -270,7 +254,6 @@ class G1RebuildRSAndScrubTask : public WorkerTask {
       assert(_bitmap->is_marked(humongous) || pb == hr->bottom(),
              "Humongous object not live");
 
-      reset_marked_words();
       log_trace(gc, marking)("Rebuild for humongous region: " HR_FORMAT " pb: " PTR_FORMAT " TARS: " PTR_FORMAT,
                              HR_FORMAT_PARAMS(hr), p2i(pb), p2i(_cm->top_at_rebuild_start(hr->hrm_index())));
 
@@ -282,13 +265,6 @@ class G1RebuildRSAndScrubTask : public WorkerTask {
       if (mark_aborted) {
         log_trace(gc, marking)("Rebuild aborted for region: %u (%s)", hr->hrm_index(), hr->get_short_type_str());
         return true;
-      } else if (_bitmap->is_marked(humongous) && should_rebuild_or_scrub(hr)) {
-        // Only verify that the marked size matches the rebuilt size if this object was marked
-        // and the object should still be handled. The should_rebuild_or_scrub() state can
-        // change during rebuild for humongous objects that are eagerly reclaimed so we need to
-        // check this.
-        // If the object has not been marked the size from marking will be 0.
-        assert_marked_words(hr);
       }
       return false;
     }
@@ -299,7 +275,6 @@ class G1RebuildRSAndScrubTask : public WorkerTask {
       _bitmap(_cm->mark_bitmap()),
       _rebuild_closure(G1CollectedHeap::heap(), worker_id),
       _should_rebuild_remset(should_rebuild_remset),
-      _marked_words(0),
       _processed_words(0) { }
 
     bool do_heap_region(HeapRegion* hr) {
@@ -66,14 +66,12 @@ static void print_before_rebuild(HeapRegion* r, bool selected_for_rebuild, size_
                                   "total_live_bytes %zu "
                                   "selected %s "
                                   "(live_bytes %zu "
-                                  "marked %zu "
                                   "type %s)",
                                   r->hrm_index(),
                                   p2i(r->top_at_mark_start()),
                                   total_live_bytes,
                                   BOOL_TO_STR(selected_for_rebuild),
                                   live_bytes,
-                                  r->marked_bytes(),
                                   r->get_type_str());
 }
 
@@ -102,7 +100,7 @@ bool G1RemSetTrackingPolicy::update_humongous_before_rebuild(HeapRegion* r, bool
   return selected_for_rebuild;
 }
 
-bool G1RemSetTrackingPolicy::update_before_rebuild(HeapRegion* r, size_t live_bytes) {
+bool G1RemSetTrackingPolicy::update_before_rebuild(HeapRegion* r, size_t live_bytes_below_tams) {
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(!r->is_humongous(), "Region %u is humongous", r->hrm_index());
 
@@ -114,8 +112,8 @@ bool G1RemSetTrackingPolicy::update_before_rebuild(HeapRegion* r, size_t live_by
 
   assert(!r->rem_set()->is_updating(), "Remembered set of region %u is updating before rebuild", r->hrm_index());
 
-  size_t between_tams_and_top = (r->top() - r->top_at_mark_start()) * HeapWordSize;
-  size_t total_live_bytes = live_bytes + between_tams_and_top;
+  size_t live_bytes_above_tams = (r->top() - r->top_at_mark_start()) * HeapWordSize;
+  size_t total_live_bytes = live_bytes_below_tams + live_bytes_above_tams;
 
   bool selected_for_rebuild = false;
   // For old regions, to be of interest for rebuilding the remembered set the following must apply:
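The renamed locals spell out the accounting: marking reports live bytes only for the range below TAMS, while everything allocated between TAMS and top during marking is implicitly live. A small worked example of that sum, with assumed numbers that are not taken from the patch:

    #include <cstdio>
    #include <cstddef>

    int main() {
      const size_t HeapWordSize = 8;                  // 64-bit heap word
      size_t live_bytes_below_tams = 512 * 1024;      // marking result (assumed)
      size_t words_between_tams_and_top = 16 * 1024;  // allocated during marking (assumed)

      // Objects above TAMS were allocated during marking and count as live.
      size_t live_bytes_above_tams = words_between_tams_and_top * HeapWordSize;
      size_t total_live_bytes = live_bytes_below_tams + live_bytes_above_tams;

      printf("total_live_bytes = %zu\n", total_live_bytes); // 524288 + 131072 = 655360
      return 0;
    }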
@@ -131,7 +129,7 @@ bool G1RemSetTrackingPolicy::update_before_rebuild(HeapRegion* r, size_t live_by
     selected_for_rebuild = true;
   }
 
-  print_before_rebuild(r, selected_for_rebuild, total_live_bytes, live_bytes);
+  print_before_rebuild(r, selected_for_rebuild, total_live_bytes, live_bytes_below_tams);
 
   return selected_for_rebuild;
 }
@@ -46,7 +46,7 @@ public:
   bool update_humongous_before_rebuild(HeapRegion* r, bool is_live);
   // Update remembered set tracking state before we are going to rebuild remembered
   // sets. Called at safepoint in the remark pause.
-  bool update_before_rebuild(HeapRegion* r, size_t live_bytes);
+  bool update_before_rebuild(HeapRegion* r, size_t live_bytes_below_tams);
   // Update remembered set tracking state after rebuild is complete, i.e. the cleanup
   // pause. Called at safepoint.
   void update_after_rebuild(HeapRegion* r);
@@ -395,7 +395,7 @@ public:
 
 // Helper class to keep statistics for the collection set freeing
 class FreeCSetStats {
-  size_t _before_used_bytes;   // Usage in regions successfully evacutate
+  size_t _before_used_bytes;   // Usage in regions successfully evacuate
   size_t _after_used_bytes;    // Usage in regions failing evacuation
   size_t _bytes_allocated_in_old_since_last_gc; // Size of young regions turned into old
   size_t _failure_used_words;  // Live size in failed regions
@@ -438,7 +438,7 @@ public:
   }
 
   void account_failed_region(HeapRegion* r) {
-    size_t used_words = r->marked_bytes() / HeapWordSize;
+    size_t used_words = r->live_bytes() / HeapWordSize;
     _failure_used_words += used_words;
     _failure_waste_words += HeapRegion::GrainWords - used_words;
     _after_used_bytes += r->used();
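Switching account_failed_region() from marked_bytes() to live_bytes() keeps the same waste arithmetic: live data counts as used, and the remainder of the fixed-size region counts as waste. A sketch of that bookkeeping under assumed region parameters (the 4 MB grain size is illustrative only):

    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t HeapWordSize = 8;
      const size_t GrainWords = (4u * 1024 * 1024) / HeapWordSize; // assume 4 MB regions
      size_t live_bytes = 3u * 1024 * 1024;                        // assumed live data

      size_t used_words  = live_bytes / HeapWordSize;
      size_t waste_words = GrainWords - used_words; // dead space retained by the failed region
      assert(used_words + waste_words == GrainWords);
      return 0;
    }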
@@ -237,7 +237,6 @@ HeapRegion::HeapRegion(uint hrm_index,
   _top_at_mark_start(NULL),
   _parsable_bottom(NULL),
   _garbage_bytes(0),
-  _marked_bytes(0),
   _young_index_in_cset(-1),
   _surv_rate_group(NULL), _age_index(G1SurvRateGroup::InvalidAgeIndex), _gc_efficiency(-1.0),
   _node_index(G1NUMA::UnknownNodeIndex)
@@ -271,9 +270,7 @@ void HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) {
 
 void HeapRegion::note_self_forwarding_removal_start(bool during_concurrent_start) {
   // We always scrub the region to make sure the entire region is
-  // parsable after the self-forwarding point removal, and update _marked_bytes
-  // at the end.
-  _marked_bytes = 0;
+  // parsable after the self-forwarding point removal.
   _garbage_bytes = 0;
 
   if (during_concurrent_start) {
@@ -291,7 +288,6 @@ void HeapRegion::note_self_forwarding_removal_start(bool during_concurrent_start
 void HeapRegion::note_self_forwarding_removal_end(size_t marked_bytes) {
   assert(marked_bytes <= used(),
          "marked: " SIZE_FORMAT " used: " SIZE_FORMAT, marked_bytes, used());
-  _marked_bytes = marked_bytes;
   _garbage_bytes = used() - marked_bytes;
 }
 
@@ -246,15 +246,11 @@ private:
 
   // Amount of dead data in the region.
   size_t _garbage_bytes;
-  // We use concurrent marking to determine the amount of live data
-  // in each heap region.
-  size_t _marked_bytes; // Bytes known to be live via last completed marking.
 
   void init_top_at_mark_start() {
     set_top_at_mark_start(bottom());
     _parsable_bottom = bottom();
     _garbage_bytes = 0;
-    _marked_bytes = 0;
   }
 
   // Data for young region survivor prediction.
@@ -339,8 +335,6 @@ public:
   // up once during initialization time.
   static void setup_heap_region_size(size_t max_heap_size);
 
-  // The number of bytes marked live in the region in the last marking phase.
-  size_t marked_bytes() const { return _marked_bytes; }
   // An upper bound on the number of live bytes in the region.
   size_t live_bytes() const {
     return used() - garbage_bytes();
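This hunk is the heart of the change: callers now rely on live_bytes(), which is derived from used() and _garbage_bytes and is an upper bound on what marking actually found, since allocation above TAMS is never counted as garbage. A sketch of that invariant with illustrative numbers (none taken from the patch):

    #include <cassert>
    #include <cstddef>

    int main() {
      size_t used_bytes    = 8u * 1024 * 1024; // region usage (assumed)
      size_t marked_bytes  = 5u * 1024 * 1024; // live below TAMS per marking (assumed)
      size_t above_tams    = 1u * 1024 * 1024; // allocated during marking (assumed)
      size_t garbage_bytes = used_bytes - marked_bytes - above_tams;

      size_t live_bytes = used_bytes - garbage_bytes; // as in HeapRegion::live_bytes()
      assert(live_bytes == marked_bytes + above_tams);
      assert(live_bytes >= marked_bytes);             // upper bound on the marking result
      return 0;
    }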
@@ -204,7 +204,6 @@ inline void HeapRegion::reset_compacted_after_full_gc(HeapWord* new_top) {
 inline void HeapRegion::reset_skip_compacting_after_full_gc() {
   assert(!is_free(), "must be");
 
-  _marked_bytes = used();
   _garbage_bytes = 0;
 
   set_top_at_mark_start(bottom());
@@ -317,8 +316,7 @@ inline void HeapRegion::note_start_of_marking() {
 inline void HeapRegion::note_end_of_marking(size_t marked_bytes) {
   assert_at_safepoint();
 
-  _marked_bytes = marked_bytes;
-  _garbage_bytes = byte_size(bottom(), top_at_mark_start()) - _marked_bytes;
+  _garbage_bytes = byte_size(bottom(), top_at_mark_start()) - marked_bytes;
 
   if (needs_scrubbing()) {
     _parsable_bottom = top_at_mark_start();
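With _marked_bytes gone, note_end_of_marking() folds the marking result straight into _garbage_bytes: marking only inspects the range from bottom to TAMS, so garbage is that range's size minus the marked bytes. A worked example with assumed values:

    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t HeapWordSize = 8;
      size_t words_bottom_to_tams = 256 * 1024;  // region words below TAMS (assumed)
      size_t bytes_bottom_to_tams = words_bottom_to_tams * HeapWordSize; // like byte_size(bottom, tams)
      size_t marked_bytes = 1536u * 1024;        // marking result (assumed)

      size_t garbage_bytes = bytes_bottom_to_tams - marked_bytes; // 2 MB - 1.5 MB
      assert(garbage_bytes == 512u * 1024);
      return 0;
    }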
@@ -601,7 +601,6 @@ class OldRegionsLivenessClosure: public HeapRegionClosure {
 
   bool do_heap_region(HeapRegion* r) {
     if (r->is_old()) {
-      size_t prev_live = r->marked_bytes();
       size_t live = r->live_bytes();
       size_t size = r->used();
       size_t reg_size = HeapRegion::GrainBytes;
@@ -609,9 +608,9 @@ class OldRegionsLivenessClosure: public HeapRegionClosure {
       _total_memory += size;
       ++_total_count;
       if (size == reg_size) {
-        // we don't include non-full regions since they are unlikely included in mixed gc
-        // for testing purposes it's enough to have lowest estimation of total memory that is expected to be freed
-        _total_memory_to_free += size - prev_live;
+        // We don't include non-full regions since they are unlikely included in mixed gc
+        // for testing purposes it's enough to have lowest estimation of total memory that is expected to be freed
+        _total_memory_to_free += size - live;
       }
     }
   }