Mirror of https://github.com/openjdk/jdk.git (synced 2026-02-08 09:35:16 +00:00)
Commit f74e57df15: Merge
hotspot/src/share/vm/gc/g1/concurrentMarkThread.cpp:

@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1Analytics.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1MMUTracker.hpp"
@@ -79,10 +80,11 @@ public:

// Marking pauses can be scheduled flexibly, so we might delay marking to meet MMU.
void ConcurrentMarkThread::delay_to_keep_mmu(G1CollectorPolicy* g1_policy, bool remark) {
  const G1Analytics* analytics = g1_policy->analytics();
  if (g1_policy->adaptive_young_list_length()) {
    double now = os::elapsedTime();
    double prediction_ms = remark ? g1_policy->predict_remark_time_ms()
                                  : g1_policy->predict_cleanup_time_ms();
    double prediction_ms = remark ? analytics->predict_remark_time_ms()
                                  : analytics->predict_cleanup_time_ms();
    G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker();
    jlong sleep_time_ms = mmu_tracker->when_ms(now, prediction_ms);
    os::sleep(this, sleep_time_ms, false);

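The hunk above only asks the MMU tracker how long to sleep; the arithmetic behind that answer is not part of this change. The sketch below illustrates the idea for the simplest case of a single previous pause. It is not G1MMUTracker::when_ms() itself; all names and numbers are illustrative. The constraint is that within any window of `slice` milliseconds, total GC time must stay at or below `max_gc` milliseconds, so a predicted pause may have to wait until enough of the previous pause has slid out of the window.

// Minimal sketch, single previous pause only; all names and numbers are illustrative.
static double delay_for_single_pause_ms(double now,        // current time, ms
                                        double prev_start, // previous pause start, ms
                                        double prev_len,   // previous pause length, ms
                                        double next_len,   // predicted next pause, ms
                                        double slice,      // MMU time slice, ms
                                        double max_gc) {   // GC budget per slice, ms
  double budget_left = max_gc - next_len;   // how much the old pause may still occupy
  // Earliest start s such that the window [s + next_len - slice, s + next_len]
  // overlaps the previous pause by no more than budget_left.
  double earliest = prev_start + prev_len - budget_left - next_len + slice;
  return earliest > now ? earliest - now : 0.0;
}
// Example: slice = 200, max_gc = 50, a 45 ms pause started at t = 0, now = 55,
// predicted remark = 30 ms: earliest = 45 - 20 - 30 + 200 = 195, so sleep 140 ms.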
hotspot/src/share/vm/gc/g1/g1Analytics.cpp (new file, 329 lines)
@@ -0,0 +1,329 @@
/*
 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Analytics.hpp"
#include "gc/g1/g1Predictions.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
#include "utilities/numberSeq.hpp"

// Different defaults for different number of GC threads
// They were chosen by running GCOld and SPECjbb on debris with different
// numbers of GC threads and choosing them based on the results

// all the same
static double rs_length_diff_defaults[] = {
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

static double cost_per_card_ms_defaults[] = {
  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};

// all the same
static double young_cards_per_entry_ratio_defaults[] = {
  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};

static double cost_per_entry_ms_defaults[] = {
  0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};

static double cost_per_byte_ms_defaults[] = {
  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};

// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};


static double young_other_cost_per_region_ms_defaults[] = {
  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};

static double non_young_other_cost_per_region_ms_defaults[] = {
  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};

G1Analytics::G1Analytics(const G1Predictions* predictor) :
  _predictor(predictor),
  _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _prev_collection_pause_end_ms(0.0),
  _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_scan_hcc_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
  _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _non_young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
  _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)) {

  // Seed sequences with initial values.
  _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
  _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;

  int index = MIN2(ParallelGCThreads - 1, 7u);

  _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
  _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
  _cost_scan_hcc_seq->add(0.0);
  _young_cards_per_entry_ratio_seq->add(young_cards_per_entry_ratio_defaults[index]);
  _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
  _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
  _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
  _young_other_cost_per_region_ms_seq->add(young_other_cost_per_region_ms_defaults[index]);
  _non_young_other_cost_per_region_ms_seq->add(non_young_other_cost_per_region_ms_defaults[index]);

  // start conservatively (around 50ms is about right)
  _concurrent_mark_remark_times_ms->add(0.05);
  _concurrent_mark_cleanup_times_ms->add(0.20);
}

double G1Analytics::get_new_prediction(TruncatedSeq const* seq) const {
  return _predictor->get_new_prediction(seq);
}

size_t G1Analytics::get_new_size_prediction(TruncatedSeq const* seq) const {
  return (size_t)get_new_prediction(seq);
}

int G1Analytics::num_alloc_rate_ms() const {
  return _alloc_rate_ms_seq->num();
}

void G1Analytics::report_concurrent_mark_remark_times_ms(double ms) {
  _concurrent_mark_remark_times_ms->add(ms);
}

void G1Analytics::report_alloc_rate_ms(double alloc_rate) {
  _alloc_rate_ms_seq->add(alloc_rate);
}

void G1Analytics::compute_pause_time_ratio(double interval_ms, double pause_time_ms) {
  _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum() / interval_ms;
  if (_recent_avg_pause_time_ratio < 0.0 ||
      (_recent_avg_pause_time_ratio - 1.0 > 0.0)) {
    // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
    // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
    if (_recent_avg_pause_time_ratio < 0.0) {
      _recent_avg_pause_time_ratio = 0.0;
    } else {
      assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
      _recent_avg_pause_time_ratio = 1.0;
    }
  }

  // Compute the ratio of just this last pause time to the entire time range stored
  // in the vectors. Comparing this pause to the entire range, rather than only the
  // most recent interval, has the effect of smoothing over a possible transient 'burst'
  // of more frequent pauses that don't really reflect a change in heap occupancy.
  // This reduces the likelihood of a needless heap expansion being triggered.
  _last_pause_time_ratio =
    (pause_time_ms * _recent_prev_end_times_for_all_gcs_sec->num()) / interval_ms;
}

void G1Analytics::report_cost_per_card_ms(double cost_per_card_ms) {
  _cost_per_card_ms_seq->add(cost_per_card_ms);
}

void G1Analytics::report_cost_scan_hcc(double cost_scan_hcc) {
  _cost_scan_hcc_seq->add(cost_scan_hcc);
}

void G1Analytics::report_cost_per_entry_ms(double cost_per_entry_ms, bool last_gc_was_young) {
  if (last_gc_was_young) {
    _cost_per_entry_ms_seq->add(cost_per_entry_ms);
  } else {
    _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
  }
}

void G1Analytics::report_cards_per_entry_ratio(double cards_per_entry_ratio, bool last_gc_was_young) {
  if (last_gc_was_young) {
    _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
  } else {
    _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
  }
}

void G1Analytics::report_rs_length_diff(double rs_length_diff) {
  _rs_length_diff_seq->add(rs_length_diff);
}

void G1Analytics::report_cost_per_byte_ms(double cost_per_byte_ms, bool in_marking_window) {
  if (in_marking_window) {
    _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
  } else {
    _cost_per_byte_ms_seq->add(cost_per_byte_ms);
  }
}

void G1Analytics::report_young_other_cost_per_region_ms(double other_cost_per_region_ms) {
  _young_other_cost_per_region_ms_seq->add(other_cost_per_region_ms);
}

void G1Analytics::report_non_young_other_cost_per_region_ms(double other_cost_per_region_ms) {
  _non_young_other_cost_per_region_ms_seq->add(other_cost_per_region_ms);
}

void G1Analytics::report_constant_other_time_ms(double constant_other_time_ms) {
  _constant_other_time_ms_seq->add(constant_other_time_ms);
}

void G1Analytics::report_pending_cards(double pending_cards) {
  _pending_cards_seq->add(pending_cards);
}

void G1Analytics::report_rs_lengths(double rs_lengths) {
  _rs_lengths_seq->add(rs_lengths);
}

size_t G1Analytics::predict_rs_length_diff() const {
  return get_new_size_prediction(_rs_length_diff_seq);
}

double G1Analytics::predict_alloc_rate_ms() const {
  return get_new_prediction(_alloc_rate_ms_seq);
}

double G1Analytics::predict_cost_per_card_ms() const {
  return get_new_prediction(_cost_per_card_ms_seq);
}

double G1Analytics::predict_scan_hcc_ms() const {
  return get_new_prediction(_cost_scan_hcc_seq);
}

double G1Analytics::predict_rs_update_time_ms(size_t pending_cards) const {
  return pending_cards * predict_cost_per_card_ms() + predict_scan_hcc_ms();
}

double G1Analytics::predict_young_cards_per_entry_ratio() const {
  return get_new_prediction(_young_cards_per_entry_ratio_seq);
}

double G1Analytics::predict_mixed_cards_per_entry_ratio() const {
  if (_mixed_cards_per_entry_ratio_seq->num() < 2) {
    return predict_young_cards_per_entry_ratio();
  } else {
    return get_new_prediction(_mixed_cards_per_entry_ratio_seq);
  }
}

size_t G1Analytics::predict_card_num(size_t rs_length, bool gcs_are_young) const {
  if (gcs_are_young) {
    return (size_t) (rs_length * predict_young_cards_per_entry_ratio());
  } else {
    return (size_t) (rs_length * predict_mixed_cards_per_entry_ratio());
  }
}

double G1Analytics::predict_rs_scan_time_ms(size_t card_num, bool gcs_are_young) const {
  if (gcs_are_young) {
    return card_num * get_new_prediction(_cost_per_entry_ms_seq);
  } else {
    return predict_mixed_rs_scan_time_ms(card_num);
  }
}

double G1Analytics::predict_mixed_rs_scan_time_ms(size_t card_num) const {
  if (_mixed_cost_per_entry_ms_seq->num() < 3) {
    return card_num * get_new_prediction(_cost_per_entry_ms_seq);
  } else {
    return card_num * get_new_prediction(_mixed_cost_per_entry_ms_seq);
  }
}

double G1Analytics::predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const {
  if (_cost_per_byte_ms_during_cm_seq->num() < 3) {
    return (1.1 * bytes_to_copy) * get_new_prediction(_cost_per_byte_ms_seq);
  } else {
    return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_during_cm_seq);
  }
}

double G1Analytics::predict_object_copy_time_ms(size_t bytes_to_copy, bool during_concurrent_mark) const {
  if (during_concurrent_mark) {
    return predict_object_copy_time_ms_during_cm(bytes_to_copy);
  } else {
    return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_seq);
  }
}

double G1Analytics::predict_constant_other_time_ms() const {
  return get_new_prediction(_constant_other_time_ms_seq);
}

double G1Analytics::predict_young_other_time_ms(size_t young_num) const {
  return young_num * get_new_prediction(_young_other_cost_per_region_ms_seq);
}

double G1Analytics::predict_non_young_other_time_ms(size_t non_young_num) const {
  return non_young_num * get_new_prediction(_non_young_other_cost_per_region_ms_seq);
}

double G1Analytics::predict_remark_time_ms() const {
  return get_new_prediction(_concurrent_mark_remark_times_ms);
}

double G1Analytics::predict_cleanup_time_ms() const {
  return get_new_prediction(_concurrent_mark_cleanup_times_ms);
}

size_t G1Analytics::predict_rs_lengths() const {
  return get_new_size_prediction(_rs_lengths_seq);
}

size_t G1Analytics::predict_pending_cards() const {
  return get_new_size_prediction(_pending_cards_seq);
}

double G1Analytics::last_known_gc_end_time_sec() const {
  return _recent_prev_end_times_for_all_gcs_sec->oldest();
}

void G1Analytics::update_recent_gc_times(double end_time_sec,
                                         double pause_time_ms) {
  _recent_gc_times_ms->add(pause_time_ms);
  _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
  _prev_collection_pause_end_ms = end_time_sec * 1000.0;
}

void G1Analytics::report_concurrent_mark_cleanup_times_ms(double ms) {
  _concurrent_mark_cleanup_times_ms->add(ms);
}

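The two ratios computed in compute_pause_time_ratio() above feed the heap-expansion decision in G1CollectorPolicy::expansion_amount() further down in this change. A quick worked example with made-up numbers (not taken from any real log):

// Hypothetical numbers, for illustration only:
//   ten recorded pauses summing to 480 ms, with 12000 ms since the oldest
//   recorded GC end time, give
//     _recent_avg_pause_time_ratio = 480 / 12000       = 0.04  (4% GC overhead)
//   if the last pause alone took 60 ms and ten end times are recorded,
//     _last_pause_time_ratio       = (60 * 10) / 12000 = 0.05
//   both values are later multiplied by 100.0 and compared against the
//   _gc_overhead_perc threshold in G1CollectorPolicy::expansion_amount().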
hotspot/src/share/vm/gc/g1/g1Analytics.hpp (new file, 155 lines)
@@ -0,0 +1,155 @@
/*
 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_G1MEASUREMENTS_HPP
#define SHARE_VM_GC_G1_G1MEASUREMENTS_HPP

#include "memory/allocation.hpp"
#include "utilities/globalDefinitions.hpp"

class TruncatedSeq;
class G1Predictions;

class G1Analytics: public CHeapObj<mtGC> {
  const static int TruncatedSeqLength = 10;
  const static int NumPrevPausesForHeuristics = 10;
  const G1Predictions* _predictor;

  // These exclude marking times.
  TruncatedSeq* _recent_gc_times_ms;

  TruncatedSeq* _concurrent_mark_remark_times_ms;
  TruncatedSeq* _concurrent_mark_cleanup_times_ms;

  TruncatedSeq* _alloc_rate_ms_seq;
  double _prev_collection_pause_end_ms;

  TruncatedSeq* _rs_length_diff_seq;
  TruncatedSeq* _cost_per_card_ms_seq;
  TruncatedSeq* _cost_scan_hcc_seq;
  TruncatedSeq* _young_cards_per_entry_ratio_seq;
  TruncatedSeq* _mixed_cards_per_entry_ratio_seq;
  TruncatedSeq* _cost_per_entry_ms_seq;
  TruncatedSeq* _mixed_cost_per_entry_ms_seq;
  TruncatedSeq* _cost_per_byte_ms_seq;
  TruncatedSeq* _constant_other_time_ms_seq;
  TruncatedSeq* _young_other_cost_per_region_ms_seq;
  TruncatedSeq* _non_young_other_cost_per_region_ms_seq;

  TruncatedSeq* _pending_cards_seq;
  TruncatedSeq* _rs_lengths_seq;

  TruncatedSeq* _cost_per_byte_ms_during_cm_seq;

  // Statistics kept per GC stoppage, pause or full.
  TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;

  // The ratio of gc time to elapsed time, computed over recent pauses,
  // and the ratio for just the last pause.
  double _recent_avg_pause_time_ratio;
  double _last_pause_time_ratio;

  double get_new_prediction(TruncatedSeq const* seq) const;
  size_t get_new_size_prediction(TruncatedSeq const* seq) const;

public:
  G1Analytics(const G1Predictions* predictor);

  double prev_collection_pause_end_ms() const {
    return _prev_collection_pause_end_ms;
  }

  double recent_avg_pause_time_ratio() const {
    return _recent_avg_pause_time_ratio;
  }

  double last_pause_time_ratio() const {
    return _last_pause_time_ratio;
  }

  void append_prev_collection_pause_end_ms(double ms) {
    _prev_collection_pause_end_ms += ms;
  }

  void report_concurrent_mark_remark_times_ms(double ms);
  void report_concurrent_mark_cleanup_times_ms(double ms);
  void report_alloc_rate_ms(double alloc_rate);
  void report_cost_per_card_ms(double cost_per_card_ms);
  void report_cost_scan_hcc(double cost_scan_hcc);
  void report_cost_per_entry_ms(double cost_per_entry_ms, bool last_gc_was_young);
  void report_cards_per_entry_ratio(double cards_per_entry_ratio, bool last_gc_was_young);
  void report_rs_length_diff(double rs_length_diff);
  void report_cost_per_byte_ms(double cost_per_byte_ms, bool in_marking_window);
  void report_young_other_cost_per_region_ms(double other_cost_per_region_ms);
  void report_non_young_other_cost_per_region_ms(double other_cost_per_region_ms);
  void report_constant_other_time_ms(double constant_other_time_ms);
  void report_pending_cards(double pending_cards);
  void report_rs_lengths(double rs_lengths);

  size_t predict_rs_length_diff() const;

  double predict_alloc_rate_ms() const;
  int num_alloc_rate_ms() const;

  double predict_cost_per_card_ms() const;

  double predict_scan_hcc_ms() const;

  double predict_rs_update_time_ms(size_t pending_cards) const;

  double predict_young_cards_per_entry_ratio() const;

  double predict_mixed_cards_per_entry_ratio() const;

  size_t predict_card_num(size_t rs_length, bool gcs_are_young) const;

  double predict_rs_scan_time_ms(size_t card_num, bool gcs_are_young) const;

  double predict_mixed_rs_scan_time_ms(size_t card_num) const;

  double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const;

  double predict_object_copy_time_ms(size_t bytes_to_copy, bool during_concurrent_mark) const;

  double predict_constant_other_time_ms() const;

  double predict_young_other_time_ms(size_t young_num) const;

  double predict_non_young_other_time_ms(size_t non_young_num) const;

  double predict_remark_time_ms() const;

  double predict_cleanup_time_ms() const;

  size_t predict_rs_lengths() const;
  size_t predict_pending_cards() const;

  // Add a new GC of the given duration and end time to the record.
  void update_recent_gc_times(double end_time_sec, double elapsed_ms);
  void compute_pause_time_ratio(double interval_ms, double pause_time_ms);

  double last_known_gc_end_time_sec() const;
};

#endif // SHARE_VM_GC_G1_G1MEASUREMENTS_HPP
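The G1CollectorPolicy changes that follow all apply the same pattern: measurements that used to be added directly to the policy's own TruncatedSeq fields are now reported to the new _analytics member, and the policy's predict_*() helpers are forwarded to it. A minimal sketch of that shape, with a hypothetical PolicySketch class standing in for G1CollectorPolicy (this is not the HotSpot code, just an illustration of the calling pattern visible in the diff):

class PolicySketch {                       // illustrative stand-in, not G1CollectorPolicy
  G1Predictions _predictor;
  G1Analytics*  _analytics;
public:
  PolicySketch()
    : _predictor(G1ConfidencePercent / 100.0),
      _analytics(new G1Analytics(&_predictor)) { }

  // Observation path: after a pause, hand the raw measurement to the analytics object.
  void record_alloc_rate(double eden_regions_allocated, double app_time_ms) {
    _analytics->report_alloc_rate_ms(eden_regions_allocated / app_time_ms);
  }

  // Prediction path: sizing decisions read smoothed predictions back out.
  size_t predicted_scanned_cards(size_t rs_lengths, bool gcs_are_young) const {
    size_t adj_rs_lengths = rs_lengths + _analytics->predict_rs_length_diff();
    return _analytics->predict_card_num(adj_rs_lengths, gcs_are_young);
  }
};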
hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp:

@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "gc/g1/concurrentG1Refine.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1Analytics.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
@@ -41,85 +42,13 @@
#include "utilities/debug.hpp"
#include "utilities/pair.hpp"

// Different defaults for different number of GC threads
// They were chosen by running GCOld and SPECjbb on debris with different
// numbers of GC threads and choosing them based on the results

// all the same
static double rs_length_diff_defaults[] = {
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

static double cost_per_card_ms_defaults[] = {
  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};

// all the same
static double young_cards_per_entry_ratio_defaults[] = {
  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};

static double cost_per_entry_ms_defaults[] = {
  0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};

static double cost_per_byte_ms_defaults[] = {
  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};

// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};


static double young_other_cost_per_region_ms_defaults[] = {
  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};

static double non_young_other_cost_per_region_ms_defaults[] = {
  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};

G1CollectorPolicy::G1CollectorPolicy() :
  _predictor(G1ConfidencePercent / 100.0),

  _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _prev_collection_pause_end_ms(0.0),
  _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_scan_hcc_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
  _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _non_young_other_cost_per_region_ms_seq(
                                          new TruncatedSeq(TruncatedSeqLength)),

  _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),

  _analytics(new G1Analytics(&_predictor)),
  _pause_time_target_ms((double) MaxGCPauseMillis),

  _recent_prev_end_times_for_all_gcs_sec(
                                         new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_avg_pause_time_ratio(0.0),
  _rs_lengths_prediction(0),
  _max_survivor_regions(0),

  // add here any more surv rate groups
  _survivors_age_table(true),

  _gc_overhead_perc(0.0),

  _bytes_allocated_in_old_since_last_gc(0),
@@ -147,27 +76,10 @@ G1CollectorPolicy::G1CollectorPolicy() :
  HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
  HeapRegionRemSet::setup_remset_size();

  _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
  _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
  clear_ratio_check_data();

  _phase_times = new G1GCPhaseTimes(ParallelGCThreads);

  int index = MIN2(ParallelGCThreads - 1, 7u);

  _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
  _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
  _cost_scan_hcc_seq->add(0.0);
  _young_cards_per_entry_ratio_seq->add(
                                        young_cards_per_entry_ratio_defaults[index]);
  _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
  _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
  _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
  _young_other_cost_per_region_ms_seq->add(
                                           young_other_cost_per_region_ms_defaults[index]);
  _non_young_other_cost_per_region_ms_seq->add(
                                               non_young_other_cost_per_region_ms_defaults[index]);

  // Below, we might need to calculate the pause time target based on
  // the pause interval. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
@@ -209,9 +121,6 @@ G1CollectorPolicy::G1CollectorPolicy() :
  double time_slice = (double) GCPauseIntervalMillis / 1000.0;
  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);

  // start conservatively (around 50ms is about right)
  _concurrent_mark_remark_times_ms->add(0.05);
  _concurrent_mark_cleanup_times_ms->add(0.20);
  _tenuring_threshold = MaxTenuringThreshold;

  assert(GCTimeRatio > 0,
@@ -232,14 +141,6 @@ G1CollectorPolicy::~G1CollectorPolicy() {
  delete _ihop_control;
}

double G1CollectorPolicy::get_new_prediction(TruncatedSeq const* seq) const {
  return _predictor.get_new_prediction(seq);
}

size_t G1CollectorPolicy::get_new_size_prediction(TruncatedSeq const* seq) const {
  return (size_t)get_new_prediction(seq);
}

void G1CollectorPolicy::initialize_alignments() {
  _space_alignment = HeapRegion::GrainBytes;
  size_t card_table_alignment = CardTableRS::ct_max_alignment_constraint();
@@ -313,8 +214,9 @@ bool G1CollectorPolicy::predict_will_fit(uint young_length,
  double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
  size_t bytes_to_copy =
    (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
  double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
  double young_other_time_ms = predict_young_other_time_ms(young_length);
  double copy_time_ms = _analytics->predict_object_copy_time_ms(bytes_to_copy,
                                                                collector_state()->during_concurrent_mark());
  double young_other_time_ms = _analytics->predict_young_other_time_ms(young_length);
  double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
  if (pause_time_ms > target_pause_time_ms) {
    // end condition 2: prediction is over the target pause time
@@ -358,10 +260,10 @@ uint G1CollectorPolicy::calculate_young_list_desired_min_length(
                                                                 uint base_min_length) const {
  uint desired_min_length = 0;
  if (adaptive_young_list_length()) {
    if (_alloc_rate_ms_seq->num() > 3) {
    if (_analytics->num_alloc_rate_ms() > 3) {
      double now_sec = os::elapsedTime();
      double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
      double alloc_rate_ms = predict_alloc_rate_ms();
      double alloc_rate_ms = _analytics->predict_alloc_rate_ms();
      desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
    } else {
      // otherwise we don't have enough info to make the prediction
@@ -380,7 +282,7 @@ uint G1CollectorPolicy::calculate_young_list_desired_max_length() const {
}

uint G1CollectorPolicy::update_young_list_max_and_target_length() {
  return update_young_list_max_and_target_length(predict_rs_lengths());
  return update_young_list_max_and_target_length(_analytics->predict_rs_lengths());
}

uint G1CollectorPolicy::update_young_list_max_and_target_length(size_t rs_lengths) {
@@ -485,9 +387,9 @@ G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,

  double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
  double survivor_regions_evac_time = predict_survivor_regions_evac_time();
  size_t pending_cards = get_new_size_prediction(_pending_cards_seq);
  size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
  size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
  size_t pending_cards = _analytics->predict_pending_cards();
  size_t adj_rs_lengths = rs_lengths + _analytics->predict_rs_length_diff();
  size_t scanned_cards = _analytics->predict_card_num(adj_rs_lengths, /* gcs_are_young */ true);
  double base_time_ms =
    predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
    survivor_regions_evac_time;
@@ -587,7 +489,7 @@ void G1CollectorPolicy::revise_young_list_target_length_if_necessary(size_t rs_l
}

void G1CollectorPolicy::update_rs_lengths_prediction() {
  update_rs_lengths_prediction(predict_rs_lengths());
  update_rs_lengths_prediction(_analytics->predict_rs_lengths());
}

void G1CollectorPolicy::update_rs_lengths_prediction(size_t prediction) {
@@ -655,7 +557,7 @@ void G1CollectorPolicy::record_full_collection_end() {
  double full_gc_time_sec = end_sec - _full_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  update_recent_gc_times(end_sec, full_gc_time_ms);
  _analytics->update_recent_gc_times(end_sec, full_gc_time_ms);

  collector_state()->set_full_collection(false);

@@ -723,8 +625,8 @@ void G1CollectorPolicy::record_concurrent_mark_remark_start() {
void G1CollectorPolicy::record_concurrent_mark_remark_end() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
  _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
  _prev_collection_pause_end_ms += elapsed_time_ms;
  _analytics->report_concurrent_mark_remark_times_ms(elapsed_time_ms);
  _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);

  record_pause(Remark, _mark_remark_start_sec, end_time_sec);
}
@@ -823,7 +725,7 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t
    maybe_start_marking();
  }

  double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
  double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _analytics->prev_collection_pause_end_ms());
  if (app_time_ms < MIN_TIMER_GRANULARITY) {
    // This usually happens due to the timer not having the required
    // granularity. Some Linuxes are the usual culprits.
@@ -842,31 +744,12 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t
    // place we can safely ignore them here.
    uint regions_allocated = _collection_set->eden_region_length();
    double alloc_rate_ms = (double) regions_allocated / app_time_ms;
    _alloc_rate_ms_seq->add(alloc_rate_ms);
    _analytics->report_alloc_rate_ms(alloc_rate_ms);

    double interval_ms =
      (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
    update_recent_gc_times(end_time_sec, pause_time_ms);
    _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
    if (recent_avg_pause_time_ratio() < 0.0 ||
        (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
      // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
      // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
      if (_recent_avg_pause_time_ratio < 0.0) {
        _recent_avg_pause_time_ratio = 0.0;
      } else {
        assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
        _recent_avg_pause_time_ratio = 1.0;
      }
    }

    // Compute the ratio of just this last pause time to the entire time range stored
    // in the vectors. Comparing this pause to the entire range, rather than only the
    // most recent interval, has the effect of smoothing over a possible transient 'burst'
    // of more frequent pauses that don't really reflect a change in heap occupancy.
    // This reduces the likelihood of a needless heap expansion being triggered.
    _last_pause_time_ratio =
      (pause_time_ms * _recent_prev_end_times_for_all_gcs_sec->num()) / interval_ms;
      (end_time_sec - _analytics->last_known_gc_end_time_sec()) * 1000.0;
    _analytics->update_recent_gc_times(end_time_sec, pause_time_ms);
    _analytics->compute_pause_time_ratio(interval_ms, pause_time_ms);
  }

  bool new_in_marking_window = collector_state()->in_marking_window();
@@ -912,28 +795,20 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t
    double cost_per_card_ms = 0.0;
    if (_pending_cards > 0) {
      cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms) / (double) _pending_cards;
      _cost_per_card_ms_seq->add(cost_per_card_ms);
      _analytics->report_cost_per_card_ms(cost_per_card_ms);
    }
    _cost_scan_hcc_seq->add(scan_hcc_time_ms);
    _analytics->report_cost_scan_hcc(scan_hcc_time_ms);

    double cost_per_entry_ms = 0.0;
    if (cards_scanned > 10) {
      cost_per_entry_ms = average_time_ms(G1GCPhaseTimes::ScanRS) / (double) cards_scanned;
      if (collector_state()->last_gc_was_young()) {
        _cost_per_entry_ms_seq->add(cost_per_entry_ms);
      } else {
        _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
      }
      _analytics->report_cost_per_entry_ms(cost_per_entry_ms, collector_state()->last_gc_was_young());
    }

    if (_max_rs_lengths > 0) {
      double cards_per_entry_ratio =
        (double) cards_scanned / (double) _max_rs_lengths;
      if (collector_state()->last_gc_was_young()) {
        _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
      } else {
        _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
      }
      _analytics->report_cards_per_entry_ratio(cards_per_entry_ratio, collector_state()->last_gc_was_young());
    }

    // This is defensive. For a while _max_rs_lengths could get
@@ -954,7 +829,7 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t
    if (_max_rs_lengths > recorded_rs_lengths) {
      rs_length_diff = _max_rs_lengths - recorded_rs_lengths;
    }
    _rs_length_diff_seq->add((double) rs_length_diff);
    _analytics->report_rs_length_diff((double) rs_length_diff);

    size_t freed_bytes = heap_used_bytes_before_gc - cur_used_bytes;
    size_t copied_bytes = _collection_set->bytes_used_before() - freed_bytes;
@@ -962,27 +837,23 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t

    if (copied_bytes > 0) {
      cost_per_byte_ms = average_time_ms(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes;
      if (collector_state()->in_marking_window()) {
        _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
      } else {
        _cost_per_byte_ms_seq->add(cost_per_byte_ms);
      }
      _analytics->report_cost_per_byte_ms(cost_per_byte_ms, collector_state()->in_marking_window());
    }

    if (_collection_set->young_region_length() > 0) {
      _young_other_cost_per_region_ms_seq->add(young_other_time_ms() /
                                               _collection_set->young_region_length());
      _analytics->report_young_other_cost_per_region_ms(young_other_time_ms() /
                                                        _collection_set->young_region_length());
    }

    if (_collection_set->old_region_length() > 0) {
      _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms() /
                                                   _collection_set->old_region_length());
      _analytics->report_non_young_other_cost_per_region_ms(non_young_other_time_ms() /
                                                            _collection_set->old_region_length());
    }

    _constant_other_time_ms_seq->add(constant_other_time_ms(pause_time_ms));
    _analytics->report_constant_other_time_ms(constant_other_time_ms(pause_time_ms));

    _pending_cards_seq->add((double) _pending_cards);
    _rs_lengths_seq->add((double) _max_rs_lengths);
    _analytics->report_pending_cards((double) _pending_cards);
    _analytics->report_rs_lengths((double) _max_rs_lengths);
  }

  collector_state()->set_in_marking_window(new_in_marking_window);
@@ -1119,106 +990,10 @@ void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
  dcqs.notify_if_necessary();
}

size_t G1CollectorPolicy::predict_rs_lengths() const {
  return get_new_size_prediction(_rs_lengths_seq);
}

size_t G1CollectorPolicy::predict_rs_length_diff() const {
  return get_new_size_prediction(_rs_length_diff_seq);
}

double G1CollectorPolicy::predict_alloc_rate_ms() const {
  return get_new_prediction(_alloc_rate_ms_seq);
}

double G1CollectorPolicy::predict_cost_per_card_ms() const {
  return get_new_prediction(_cost_per_card_ms_seq);
}

double G1CollectorPolicy::predict_scan_hcc_ms() const {
  return get_new_prediction(_cost_scan_hcc_seq);
}

double G1CollectorPolicy::predict_rs_update_time_ms(size_t pending_cards) const {
  return pending_cards * predict_cost_per_card_ms() + predict_scan_hcc_ms();
}

double G1CollectorPolicy::predict_young_cards_per_entry_ratio() const {
  return get_new_prediction(_young_cards_per_entry_ratio_seq);
}

double G1CollectorPolicy::predict_mixed_cards_per_entry_ratio() const {
  if (_mixed_cards_per_entry_ratio_seq->num() < 2) {
    return predict_young_cards_per_entry_ratio();
  } else {
    return get_new_prediction(_mixed_cards_per_entry_ratio_seq);
  }
}

size_t G1CollectorPolicy::predict_young_card_num(size_t rs_length) const {
  return (size_t) (rs_length * predict_young_cards_per_entry_ratio());
}

size_t G1CollectorPolicy::predict_non_young_card_num(size_t rs_length) const {
  return (size_t)(rs_length * predict_mixed_cards_per_entry_ratio());
}

double G1CollectorPolicy::predict_rs_scan_time_ms(size_t card_num) const {
  if (collector_state()->gcs_are_young()) {
    return card_num * get_new_prediction(_cost_per_entry_ms_seq);
  } else {
    return predict_mixed_rs_scan_time_ms(card_num);
  }
}

double G1CollectorPolicy::predict_mixed_rs_scan_time_ms(size_t card_num) const {
  if (_mixed_cost_per_entry_ms_seq->num() < 3) {
    return card_num * get_new_prediction(_cost_per_entry_ms_seq);
  } else {
    return card_num * get_new_prediction(_mixed_cost_per_entry_ms_seq);
  }
}

double G1CollectorPolicy::predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const {
  if (_cost_per_byte_ms_during_cm_seq->num() < 3) {
    return (1.1 * bytes_to_copy) * get_new_prediction(_cost_per_byte_ms_seq);
  } else {
    return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_during_cm_seq);
  }
}

double G1CollectorPolicy::predict_object_copy_time_ms(size_t bytes_to_copy) const {
  if (collector_state()->during_concurrent_mark()) {
    return predict_object_copy_time_ms_during_cm(bytes_to_copy);
  } else {
    return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_seq);
  }
}

double G1CollectorPolicy::predict_constant_other_time_ms() const {
  return get_new_prediction(_constant_other_time_ms_seq);
}

double G1CollectorPolicy::predict_young_other_time_ms(size_t young_num) const {
  return young_num * get_new_prediction(_young_other_cost_per_region_ms_seq);
}

double G1CollectorPolicy::predict_non_young_other_time_ms(size_t non_young_num) const {
  return non_young_num * get_new_prediction(_non_young_other_cost_per_region_ms_seq);
}

double G1CollectorPolicy::predict_remark_time_ms() const {
  return get_new_prediction(_concurrent_mark_remark_times_ms);
}

double G1CollectorPolicy::predict_cleanup_time_ms() const {
  return get_new_prediction(_concurrent_mark_cleanup_times_ms);
}

double G1CollectorPolicy::predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const {
  TruncatedSeq* seq = surv_rate_group->get_seq(age);
  guarantee(seq->num() > 0, "There should be some young gen survivor samples available. Tried to access with age %d", age);
  double pred = get_new_prediction(seq);
  double pred = _predictor.get_new_prediction(seq);
  if (pred > 1.0) {
    pred = 1.0;
  }
@@ -1236,19 +1011,14 @@ double G1CollectorPolicy::accum_yg_surv_rate_pred(int age) const {
double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
                                                       size_t scanned_cards) const {
  return
    predict_rs_update_time_ms(pending_cards) +
    predict_rs_scan_time_ms(scanned_cards) +
    predict_constant_other_time_ms();
    _analytics->predict_rs_update_time_ms(pending_cards) +
    _analytics->predict_rs_scan_time_ms(scanned_cards, collector_state()->gcs_are_young()) +
    _analytics->predict_constant_other_time_ms();
}

double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) const {
  size_t rs_length = predict_rs_lengths() + predict_rs_length_diff();
  size_t card_num;
  if (collector_state()->gcs_are_young()) {
    card_num = predict_young_card_num(rs_length);
  } else {
    card_num = predict_non_young_card_num(rs_length);
  }
  size_t rs_length = _analytics->predict_rs_lengths() + _analytics->predict_rs_length_diff();
  size_t card_num = _analytics->predict_card_num(rs_length, collector_state()->gcs_are_young());
  return predict_base_elapsed_time_ms(pending_cards, card_num);
}

@@ -1268,38 +1038,25 @@ size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) const {
double G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
                                                         bool for_young_gc) const {
  size_t rs_length = hr->rem_set()->occupied();
  size_t card_num;

  // Predicting the number of cards is based on which type of GC
  // we're predicting for.
  if (for_young_gc) {
    card_num = predict_young_card_num(rs_length);
  } else {
    card_num = predict_non_young_card_num(rs_length);
  }
  size_t card_num = _analytics->predict_card_num(rs_length, for_young_gc);
  size_t bytes_to_copy = predict_bytes_to_copy(hr);

  double region_elapsed_time_ms =
    predict_rs_scan_time_ms(card_num) +
    predict_object_copy_time_ms(bytes_to_copy);
    _analytics->predict_rs_scan_time_ms(card_num, collector_state()->gcs_are_young()) +
    _analytics->predict_object_copy_time_ms(bytes_to_copy, collector_state()->during_concurrent_mark());

  // The prediction of the "other" time for this region is based
  // upon the region type and NOT the GC type.
  if (hr->is_young()) {
    region_elapsed_time_ms += predict_young_other_time_ms(1);
    region_elapsed_time_ms += _analytics->predict_young_other_time_ms(1);
  } else {
    region_elapsed_time_ms += predict_non_young_other_time_ms(1);
    region_elapsed_time_ms += _analytics->predict_non_young_other_time_ms(1);
  }
  return region_elapsed_time_ms;
}

void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
                                               double elapsed_ms) {
  _recent_gc_times_ms->add(elapsed_ms);
  _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
  _prev_collection_pause_end_ms = end_time_sec * 1000.0;
}

void G1CollectorPolicy::clear_ratio_check_data() {
  _ratio_over_threshold_count = 0;
  _ratio_over_threshold_sum = 0.0;
@@ -1307,8 +1064,8 @@ void G1CollectorPolicy::clear_ratio_check_data() {
}

size_t G1CollectorPolicy::expansion_amount() {
  double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
  double last_gc_overhead = _last_pause_time_ratio * 100.0;
  double recent_gc_overhead = _analytics->recent_avg_pause_time_ratio() * 100.0;
  double last_gc_overhead = _analytics->last_pause_time_ratio() * 100.0;
  double threshold = _gc_overhead_perc;
  size_t expand_bytes = 0;

@@ -1593,8 +1350,8 @@ void G1CollectorPolicy::record_concurrent_mark_cleanup_end() {

  double end_sec = os::elapsedTime();
  double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
  _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
  _prev_collection_pause_end_ms += elapsed_time_ms;
  _analytics->report_concurrent_mark_cleanup_times_ms(elapsed_time_ms);
  _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);

  record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
}
@@ -1732,4 +1489,3 @@ void G1CollectorPolicy::finalize_collection_set(double target_pause_time_ms) {
  double time_remaining_ms = _collection_set->finalize_young_part(target_pause_time_ms);
  _collection_set->finalize_old_part(time_remaining_ms);
}

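To make the prediction chain above concrete: predict_base_elapsed_time_ms() is simply the sum of the remembered-set update, remembered-set scan and constant-other predictions that G1Analytics maintains. With hypothetical predicted costs (0.005 ms per pending card, 0.2 ms of hot card cache scanning, 0.01 ms per scanned card, 5 ms of constant other time) the numbers compose like this:

// Hypothetical values, for illustration only:
//   predict_rs_update_time_ms(1000)              = 1000 * 0.005 + 0.2 =  5.2 ms
//   predict_rs_scan_time_ms(2000, gcs_are_young) = 2000 * 0.01        = 20.0 ms
//   predict_constant_other_time_ms()             =                       5.0 ms
//   predict_base_elapsed_time_ms(1000, 2000)     = 5.2 + 20.0 + 5.0   = 30.2 ms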
hotspot/src/share/vm/gc/g1/g1CollectorPolicy.hpp:

@@ -43,6 +43,7 @@ class HeapRegion;
class G1CollectionSet;
class CollectionSetChooser;
class G1IHOPControl;
class G1Analytics;
class G1YoungGenSizer;

class G1CollectorPolicy: public CollectorPolicy {
@@ -57,9 +58,7 @@ class G1CollectorPolicy: public CollectorPolicy {
  void report_ihop_statistics();

  G1Predictions _predictor;

  double get_new_prediction(TruncatedSeq const* seq) const;
  size_t get_new_size_prediction(TruncatedSeq const* seq) const;
  G1Analytics* _analytics;

  G1MMUTracker* _mmu_tracker;

@@ -68,12 +67,6 @@ class G1CollectorPolicy: public CollectorPolicy {

  double _full_collection_start_sec;

  // These exclude marking times.
  TruncatedSeq* _recent_gc_times_ms;

  TruncatedSeq* _concurrent_mark_remark_times_ms;
  TruncatedSeq* _concurrent_mark_cleanup_times_ms;

  // Ratio check data for determining if heap growth is necessary.
  uint _ratio_over_threshold_count;
  double _ratio_over_threshold_sum;
@@ -88,7 +81,6 @@ class G1CollectorPolicy: public CollectorPolicy {

  SurvRateGroup* _short_lived_surv_rate_group;
  SurvRateGroup* _survivor_surv_rate_group;
  // add here any more surv rate groups

  double _gc_overhead_perc;

@@ -96,34 +88,12 @@ class G1CollectorPolicy: public CollectorPolicy {
  uint _reserve_regions;

  enum PredictionConstants {
    TruncatedSeqLength = 10,
    NumPrevPausesForHeuristics = 10,
    // MinOverThresholdForGrowth must be less than NumPrevPausesForHeuristics,
    // representing the minimum number of pause time ratios that exceed
    // GCTimeRatio before a heap expansion will be triggered.
    MinOverThresholdForGrowth = 4
  };

  TruncatedSeq* _alloc_rate_ms_seq;
  double _prev_collection_pause_end_ms;

  TruncatedSeq* _rs_length_diff_seq;
  TruncatedSeq* _cost_per_card_ms_seq;
  TruncatedSeq* _cost_scan_hcc_seq;
  TruncatedSeq* _young_cards_per_entry_ratio_seq;
  TruncatedSeq* _mixed_cards_per_entry_ratio_seq;
  TruncatedSeq* _cost_per_entry_ms_seq;
  TruncatedSeq* _mixed_cost_per_entry_ms_seq;
  TruncatedSeq* _cost_per_byte_ms_seq;
  TruncatedSeq* _constant_other_time_ms_seq;
  TruncatedSeq* _young_other_cost_per_region_ms_seq;
  TruncatedSeq* _non_young_other_cost_per_region_ms_seq;

  TruncatedSeq* _pending_cards_seq;
  TruncatedSeq* _rs_lengths_seq;

  TruncatedSeq* _cost_per_byte_ms_during_cm_seq;

  G1YoungGenSizer* _young_gen_sizer;

  uint _free_regions_at_end_of_collection;
@@ -151,6 +121,7 @@ class G1CollectorPolicy: public CollectorPolicy {
  G1InitialMarkToMixedTimeTracker _initial_mark_to_mixed;
public:
  const G1Predictions& predictor() const { return _predictor; }
  const G1Analytics* analytics() const { return const_cast<const G1Analytics*>(_analytics); }

  // Add the given number of bytes to the total number of allocated bytes in the old gen.
  void add_bytes_allocated_in_old_since_last_gc(size_t bytes) { _bytes_allocated_in_old_since_last_gc += bytes; }
@@ -177,39 +148,6 @@ public:
    _max_rs_lengths = rs_lengths;
  }

  size_t predict_rs_lengths() const;

  size_t predict_rs_length_diff() const;

  double predict_alloc_rate_ms() const;

  double predict_cost_per_card_ms() const;

  double predict_scan_hcc_ms() const;

  double predict_rs_update_time_ms(size_t pending_cards) const;

  double predict_young_cards_per_entry_ratio() const;

  double predict_mixed_cards_per_entry_ratio() const;

  size_t predict_young_card_num(size_t rs_length) const;

  size_t predict_non_young_card_num(size_t rs_length) const;

  double predict_rs_scan_time_ms(size_t card_num) const;

  double predict_mixed_rs_scan_time_ms(size_t card_num) const;

  double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const;

  double predict_object_copy_time_ms(size_t bytes_to_copy) const;

  double predict_constant_other_time_ms() const;

  double predict_young_other_time_ms(size_t young_num) const;

  double predict_non_young_other_time_ms(size_t non_young_num) const;

  double predict_base_elapsed_time_ms(size_t pending_cards) const;
  double predict_base_elapsed_time_ms(size_t pending_cards,
@@ -242,10 +180,6 @@ public:
    return _mmu_tracker->max_gc_time() * 1000.0;
  }

  double predict_remark_time_ms() const;

  double predict_cleanup_time_ms() const;

  // Returns an estimate of the survival rate of the region at yg-age
  // "yg_age".
  double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const;
@@ -265,11 +199,6 @@ protected:

  CollectionSetChooser* cset_chooser() const;
private:
  // Statistics kept per GC stoppage, pause or full.
  TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;

  // Add a new GC of the given duration and end time to the record.
  void update_recent_gc_times(double end_time_sec, double elapsed_ms);

  // The number of bytes copied during the GC.
  size_t _bytes_copied_during_gc;
@@ -279,15 +208,6 @@ private:

  G1GCPhaseTimes* _phase_times;

  // The ratio of gc time to elapsed time, computed over recent pauses,
  // and the ratio for just the last pause.
  double _recent_avg_pause_time_ratio;
  double _last_pause_time_ratio;

  double recent_avg_pause_time_ratio() const {
    return _recent_avg_pause_time_ratio;
  }

  // This set of variables tracks the collector efficiency, in order to
  // determine whether we should initiate a new marking.
  double _mark_remark_start_sec;
@@ -491,7 +411,6 @@ public:
    } else {
      _short_lived_surv_rate_group->finished_recalculating_age_indexes();
    }
    // do that for any other surv rate groups
  }

  size_t young_list_target_length() const { return _young_list_target_length; }