8356716: ZGC: Cleanup Uncommit Logic
Reviewed-by: eosterlund, jsikstro
commit def7355cc9
parent 457d9de81d
@@ -405,7 +405,7 @@ ZVirtualMemory ZMappedCache::remove_vmem(ZMappedCacheEntry* const entry, size_t

   // Update statistics
   _size -= to_remove;
-  _min = MIN2(_size, _min);
+  _min_size_watermark = MIN2(_size, _min_size_watermark);

   postcond(to_remove == vmem.size());
   return vmem;
@@ -558,7 +558,7 @@ ZMappedCache::ZMappedCache()
   : _tree(),
     _size_class_lists(),
     _size(0),
-    _min(_size) {}
+    _min_size_watermark(_size) {}

 void ZMappedCache::insert(const ZVirtualMemory& vmem) {
   _size += vmem.size();
@@ -688,17 +688,15 @@ size_t ZMappedCache::remove_discontiguous(size_t size, ZArray<ZVirtualMemory>* out) {
   return remove_discontiguous_with_strategy<RemovalStrategy::SizeClasses>(size, out);
 }

-size_t ZMappedCache::reset_min() {
-  const size_t old_min = _min;
-
-  _min = _size;
-
-  return old_min;
+void ZMappedCache::reset_min_size_watermark() {
+  _min_size_watermark = _size;
 }

-size_t ZMappedCache::remove_from_min(size_t max_size, ZArray<ZVirtualMemory>* out) {
-  const size_t size = MIN2(_min, max_size);
+size_t ZMappedCache::min_size_watermark() {
+  return _min_size_watermark;
+}
+
+size_t ZMappedCache::remove_for_uncommit(size_t size, ZArray<ZVirtualMemory>* out) {
+  if (size == 0) {
+    return 0;
+  }
@@ -92,7 +92,7 @@ private:
   Tree _tree;
   SizeClassList _size_class_lists[NumSizeClasses];
   size_t _size;
-  size_t _min;
+  size_t _min_size_watermark;

   static int size_class_index(size_t size);
   static int guaranteed_size_class_index(size_t size);
@@ -132,8 +132,10 @@ public:
   ZVirtualMemory remove_contiguous_power_of_2(size_t min_size, size_t max_size);
   size_t remove_discontiguous(size_t size, ZArray<ZVirtualMemory>* out);

-  size_t reset_min();
-  size_t remove_from_min(size_t max_size, ZArray<ZVirtualMemory>* out);
+  // ZUncommitter support
+  void reset_min_size_watermark();
+  size_t min_size_watermark();
+  size_t remove_for_uncommit(size_t size, ZArray<ZVirtualMemory>* out);

   void print_on(outputStream* st) const;
   void print_extended_on(outputStream* st) const;
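The zMappedCache changes above replace the generic _min/reset_min()/remove_from_min() trio with an explicitly named watermark API: _min_size_watermark records the smallest size the cache has had since the last reset, i.e. memory that stayed unused for the whole observation window and is therefore a safe uncommit budget. A standalone sketch of that accounting (illustrative only, not the ZGC sources; the class name is made up):

#include <algorithm>
#include <cstddef>

// Illustrative model of a min-size watermark: every removal records the low
// point, so a later reader learns how small the cache got over the whole
// interval, not just how much is in it right now.
class WatermarkedSize {
  size_t _size = 0;
  size_t _min_size_watermark = 0;

public:
  void insert(size_t bytes) {
    _size += bytes;  // Growing never lowers the watermark
  }

  void remove(size_t bytes) {
    _size -= bytes;
    _min_size_watermark = std::min(_size, _min_size_watermark);
  }

  size_t min_size_watermark() const {
    return _min_size_watermark;
  }

  void reset_min_size_watermark() {
    _min_size_watermark = _size;  // Start a new observation interval
  }
};

Only removals can lower the watermark, so a cache that merely churns above its low point still reports the same uncommit budget.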
@@ -630,9 +630,6 @@ ZPartition::ZPartition(uint32_t numa_id, ZPageAllocator* page_allocator)
     _capacity(0),
     _claimed(0),
     _used(0),
-    _last_commit(0.0),
-    _last_uncommit(0.0),
-    _to_uncommit(0),
     _numa_id(numa_id) {}

 uint32_t ZPartition::numa_id() const {
@@ -650,9 +647,7 @@ size_t ZPartition::increase_capacity(size_t size) {
     // Update atomically since we have concurrent readers
     Atomic::add(&_capacity, increased);

-    _last_commit = os::elapsedTime();
-    _last_uncommit = 0;
-    _cache.reset_min();
+    _uncommitter.cancel_uncommit_cycle();
   }

   return increased;
@@ -787,101 +782,6 @@ bool ZPartition::claim_capacity_fast_medium(ZMemoryAllocation* allocation) {
   return true;
 }

-size_t ZPartition::uncommit(uint64_t* timeout) {
-  ZArray<ZVirtualMemory> flushed_vmems;
-  size_t flushed = 0;
-
-  {
-    // We need to join the suspendible thread set while manipulating capacity
-    // and used, to make sure GC safepoints will have a consistent view.
-    SuspendibleThreadSetJoiner sts_joiner;
-    ZLocker<ZLock> locker(&_page_allocator->_lock);
-
-    const double now = os::elapsedTime();
-    const double time_since_last_commit = std::floor(now - _last_commit);
-    const double time_since_last_uncommit = std::floor(now - _last_uncommit);
-
-    if (time_since_last_commit < double(ZUncommitDelay)) {
-      // We have committed within the delay, stop uncommitting.
-      *timeout = uint64_t(double(ZUncommitDelay) - time_since_last_commit);
-      return 0;
-    }
-
-    // We flush out and uncommit chunks at a time (~0.8% of the max capacity,
-    // but at least one granule and at most 256M), in case demand for memory
-    // increases while we are uncommitting.
-    const size_t limit_upper_bound = MAX2(ZGranuleSize, align_down(256 * M / ZNUMA::count(), ZGranuleSize));
-    const size_t limit = MIN2(align_up(_current_max_capacity >> 7, ZGranuleSize), limit_upper_bound);
-
-    if (limit == 0) {
-      // This may occur if the current max capacity for this partition is 0
-
-      // Set timeout to ZUncommitDelay
-      *timeout = ZUncommitDelay;
-      return 0;
-    }
-
-    if (time_since_last_uncommit < double(ZUncommitDelay)) {
-      // We are in the uncommit phase
-      const size_t num_uncommits_left = _to_uncommit / limit;
-      const double time_left = double(ZUncommitDelay) - time_since_last_uncommit;
-      if (time_left < *timeout * num_uncommits_left) {
-        // Running out of time, speed up.
-        uint64_t new_timeout = uint64_t(std::floor(time_left / double(num_uncommits_left + 1)));
-        *timeout = new_timeout;
-      }
-    } else {
-      // We are about to start uncommitting
-      _to_uncommit = _cache.reset_min();
-      _last_uncommit = now;
-
-      const size_t split = _to_uncommit / limit + 1;
-      uint64_t new_timeout = ZUncommitDelay / split;
-      *timeout = new_timeout;
-    }
-
-    // Never uncommit below min capacity.
-    const size_t retain = MAX2(_used, _min_capacity);
-    const size_t release = _capacity - retain;
-    const size_t flush = MIN3(release, limit, _to_uncommit);
-
-    if (flush == 0) {
-      // Nothing to flush
-      return 0;
-    }
-
-    // Flush memory from the mapped cache to uncommit
-    flushed = _cache.remove_from_min(flush, &flushed_vmems);
-    if (flushed == 0) {
-      // Nothing flushed
-      return 0;
-    }
-
-    // Record flushed memory as claimed and how much we've flushed for this partition
-    Atomic::add(&_claimed, flushed);
-    _to_uncommit -= flushed;
-  }
-
-  // Unmap and uncommit flushed memory
-  for (const ZVirtualMemory vmem : flushed_vmems) {
-    unmap_virtual(vmem);
-    uncommit_physical(vmem);
-    free_physical(vmem);
-    free_virtual(vmem);
-  }
-
-  {
-    SuspendibleThreadSetJoiner sts_joiner;
-    ZLocker<ZLock> locker(&_page_allocator->_lock);
-
-    // Adjust claimed and capacity to reflect the uncommit
-    Atomic::sub(&_claimed, flushed);
-    decrease_capacity(flushed, false /* set_max_capacity */);
-  }
-
-  return flushed;
-}
-
 void ZPartition::sort_segments_physical(const ZVirtualMemory& vmem) {
   verify_virtual_memory_association(vmem, true /* check_multi_partition */);
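The removed ZPartition::uncommit() carried the chunking policy that survives, nearly verbatim, in the new ZUncommitter::uncommit() further down: flush ~0.8% of max capacity per step, at least one granule, at most 256M split across NUMA nodes. Roughly how the numbers work out (a worked example with assumed values, compiled stand-alone rather than against HotSpot):

#include <algorithm>
#include <cstdint>

int main() {
  // Assumed values: 2M granules, one NUMA node, 16G current max capacity.
  const uint64_t M = 1024 * 1024;
  const uint64_t G = 1024 * M;
  const uint64_t granule = 2 * M;
  const uint64_t numa_count = 1;
  const uint64_t current_max_capacity = 16 * G;

  // At most 256M per NUMA node, rounded down to a granule, but never less
  // than one granule.
  const uint64_t upper_bound = std::max(granule, (256 * M / numa_count) / granule * granule);

  // ~0.8% of max capacity: >> 7 divides by 128. Round up to a granule.
  const uint64_t chunk = (current_max_capacity >> 7) + granule - 1;
  const uint64_t limit = std::min(chunk / granule * granule, upper_bound);

  // 16G / 128 = 128M, below the 256M cap, so memory is flushed and
  // uncommitted in 128M chunks.
  return limit == 128 * M ? 0 : 1;
}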
@@ -57,6 +57,7 @@ class ZWorkers;
 class ZPartition {
   friend class VMStructs;
   friend class ZPageAllocator;
+  friend class ZUncommitter;

 private:
   ZPageAllocator* const _page_allocator;
@@ -68,9 +69,6 @@ private:
   volatile size_t _capacity;
   volatile size_t _claimed;
   size_t _used;
-  double _last_commit;
-  double _last_uncommit;
-  size_t _to_uncommit;
   const uint32_t _numa_id;

   const ZVirtualMemoryManager& virtual_memory_manager() const;
@@ -103,8 +101,6 @@ public:
   bool claim_capacity(ZMemoryAllocation* allocation);
   bool claim_capacity_fast_medium(ZMemoryAllocation* allocation);

-  size_t uncommit(uint64_t* timeout);
-
   void sort_segments_physical(const ZVirtualMemory& vmem);

   void claim_physical(const ZVirtualMemory& vmem);
@@ -113,6 +113,11 @@ void ZPhysicalMemoryManager::try_enable_uncommit(size_t min_capacity, size_t max_capacity) {
     return;
   }

+  const size_t max_delay_without_overflow = std::numeric_limits<uint64_t>::max() / MILLIUNITS;
+  if (ZUncommitDelay > max_delay_without_overflow) {
+    FLAG_SET_ERGO(ZUncommitDelay, max_delay_without_overflow);
+  }
+
   log_info_p(gc, init)("Uncommit: Enabled");
   log_info_p(gc, init)("Uncommit Delay: %zus", ZUncommitDelay);
 }
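The new clamp exists because the uncommitter now converts ZUncommitDelay (seconds) to milliseconds with 64-bit arithmetic; without it a pathological flag value could wrap. A minimal stand-alone illustration (plain C++ stand-ins for the HotSpot flag machinery are assumed):

#include <cstdint>
#include <limits>

int main() {
  const uint64_t MILLIUNITS = 1000;  // Milliseconds per second, as in HotSpot
  const uint64_t max_delay_without_overflow = std::numeric_limits<uint64_t>::max() / MILLIUNITS;

  // A delay above the clamp would wrap around once converted to milliseconds.
  uint64_t delay = max_delay_without_overflow + 1;
  if (delay > max_delay_without_overflow) {
    delay = max_delay_without_overflow;  // Mirrors the FLAG_SET_ERGO clamp
  }

  // Now delay * MILLIUNITS fits in a uint64_t.
  const uint64_t delay_ms = delay * MILLIUNITS;
  return delay_ms != 0 ? 0 : 1;
}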
@@ -22,12 +22,21 @@
 */

#include "gc/shared/gc_globals.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zMappedCache.hpp"
#include "gc/z/zNUMA.inline.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zUncommitter.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ticks.hpp"

#include <cmath>

static const ZStatCounter ZCounterUncommit("Memory", "Uncommit", ZStatUnitBytesPerSecond);
@@ -35,7 +44,13 @@ ZUncommitter::ZUncommitter(uint32_t id, ZPartition* partition)
   : _id(id),
     _partition(partition),
     _lock(),
-    _stop(false) {
+    _stop(false),
+    _cancel_time(0.0),
+    _next_cycle_timeout(0),
+    _next_uncommit_timeout(0),
+    _cycle_start(0.0),
+    _to_uncommit(0),
+    _uncommitted(0) {
   set_name("ZUncommitter#%u", id);
   create_and_start();
 }
@@ -47,8 +62,27 @@ bool ZUncommitter::wait(uint64_t timeout) const {
   }

   if (!_stop && timeout > 0) {
-    log_debug(gc, heap)("Uncommitter (%u) Timeout: " UINT64_FORMAT "s", _id, timeout);
-    _lock.wait(timeout * MILLIUNITS);
+    if (!uncommit_cycle_is_finished()) {
+      log_trace(gc, heap)("Uncommitter (%u) Timeout: " UINT64_FORMAT "ms left to uncommit: "
+                          EXACTFMT, _id, timeout, EXACTFMTARGS(_to_uncommit));
+    } else {
+      log_debug(gc, heap)("Uncommitter (%u) Timeout: " UINT64_FORMAT "ms", _id, timeout);
+    }
+
+    double now = os::elapsedTime();
+    const double wait_until = now + double(timeout) / MILLIUNITS;
+    do {
+      const uint64_t remaining_timeout_ms = to_millis(wait_until - now);
+      if (remaining_timeout_ms == 0) {
+        // Less than a millisecond left to wait, just return early
+        break;
+      }
+
+      // Wait
+      _lock.wait(remaining_timeout_ms);
+
+      now = os::elapsedTime();
+    } while (!_stop && now < wait_until);
   }

   return !_stop;
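The rewritten wait() slices one logical timeout into millisecond-bounded waits on the condition lock, so a stop or cancel notification is observed promptly and a sub-millisecond remainder ends the wait early. A stand-alone sketch of the same pattern using std::condition_variable (illustrative, not the HotSpot ZConditionLock API):

#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <mutex>

// Sketch of the sliced wait: instead of one long sleep, each wake-up is
// bounded by the remaining whole milliseconds, so a notify is seen promptly
// and sub-millisecond remainders end the wait.
static std::mutex lock;
static std::condition_variable cv;
static bool stop = false;

void bounded_wait(uint64_t timeout_ms) {
  using clock = std::chrono::steady_clock;
  auto now = clock::now();
  const auto wait_until = now + std::chrono::milliseconds(timeout_ms);
  std::unique_lock<std::mutex> guard(lock);
  do {
    const auto remaining = std::chrono::duration_cast<std::chrono::milliseconds>(wait_until - now);
    if (remaining.count() == 0) {
      break;  // Less than a millisecond left, return early
    }
    cv.wait_for(guard, remaining);
    now = clock::now();
  } while (!stop && now < wait_until);
}

int main() {
  bounded_wait(5);  // Waits ~5ms unless notified
  return 0;
}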
@@ -59,33 +93,78 @@ bool ZUncommitter::should_continue() const {
   return !_stop;
 }

-void ZUncommitter::run_thread() {
-  uint64_t timeout = 0;
+void ZUncommitter::update_statistics(size_t uncommitted, Ticks start, Tickspan* accumulated_time) const {
+  // Update counter
+  ZStatInc(ZCounterUncommit, uncommitted);

-  while (wait(timeout)) {
-    EventZUncommit event;
-    size_t total_uncommitted = 0;
+  Ticks end = Ticks::now();
+
+  // Send event
+  EventZUncommit::commit(start, end, uncommitted);
+
+  // Track accumulated time
+  *accumulated_time += end - start;
+}
+
+void ZUncommitter::run_thread() {
+  // Initialize first cycle timeout
+  _next_cycle_timeout = to_millis(ZUncommitDelay);
+
+  while (wait(_next_cycle_timeout)) {
+    // Counters for event and statistics
+    Ticks start = Ticks::now();
+    size_t uncommitted_since_last_timeout = 0;
+    Tickspan accumulated_time;
+
+    if (!activate_uncommit_cycle()) {
+      // We failed activating a new cycle, continue until next cycle
+      continue;
+    }

     while (should_continue()) {
       // Uncommit chunk
-      const size_t uncommitted = _partition->uncommit(&timeout);
-      if (uncommitted == 0) {
+      const size_t uncommitted = uncommit();
+
+      // Update uncommitted counter
+      uncommitted_since_last_timeout += uncommitted;
+
+      // 'uncommitted == 0' is a proxy for uncommit_cycle_is_canceled() without
+      // having to take the page allocator lock
+      if (uncommitted == 0 || uncommit_cycle_is_finished()) {
        // Done
        break;
      }

-      total_uncommitted += uncommitted;
+      if (_next_uncommit_timeout != 0) {
+        // Update statistics
+        update_statistics(uncommitted_since_last_timeout, start, &accumulated_time);
+
+        // Wait until next uncommit
+        wait(_next_uncommit_timeout);
+
+        // Reset event and statistics counters
+        start = Ticks::now();
+        uncommitted_since_last_timeout = 0;
+      }
     }

-    if (total_uncommitted > 0) {
-      // Update statistics
-      ZStatInc(ZCounterUncommit, total_uncommitted);
-      log_info(gc, heap)("Uncommitter (%u) Uncommitted: %zuM(%.0f%%)",
-                         _id, total_uncommitted / M, percent_of(total_uncommitted, ZHeap::heap()->max_capacity()));
+    if (_uncommitted > 0) {
+      if (uncommitted_since_last_timeout > 0) {
+        // Update statistics
+        update_statistics(uncommitted_since_last_timeout, start, &accumulated_time);
+      }

-      // Send event
-      event.commit(total_uncommitted);
+      log_info(gc, heap)("Uncommitter (%u) Uncommitted: %zuM(%.0f%%) in %.3fms",
+                         _id, _uncommitted / M, percent_of(_uncommitted, ZHeap::heap()->max_capacity()),
+                         accumulated_time.seconds() * MILLIUNITS);
     }
+
+    if (!should_continue()) {
+      // We are terminating
+      return;
+    }
+
+    deactivate_uncommit_cycle();
   }
 }
@@ -94,3 +173,253 @@ void ZUncommitter::terminate() {
   _stop = true;
   _lock.notify_all();
 }
+
+void ZUncommitter::reset_uncommit_cycle() {
+  _to_uncommit = 0;
+  _uncommitted = 0;
+  _cycle_start = 0.0;
+  _cancel_time = 0.0;
+
+  postcond(uncommit_cycle_is_finished());
+  postcond(!uncommit_cycle_is_canceled());
+  postcond(!uncommit_cycle_is_active());
+}
+
+void ZUncommitter::deactivate_uncommit_cycle() {
+  ZLocker<ZLock> locker(&_partition->_page_allocator->_lock);
+
+  precond(uncommit_cycle_is_active());
+  precond(uncommit_cycle_is_finished() || uncommit_cycle_is_canceled());
+
+  // Update the next timeout
+  if (uncommit_cycle_is_canceled()) {
+    update_next_cycle_timeout_on_cancel();
+  } else {
+    update_next_cycle_timeout_on_finish();
+  }
+
+  // Reset the cycle
+  reset_uncommit_cycle();
+}
+
+bool ZUncommitter::activate_uncommit_cycle() {
+  ZLocker<ZLock> locker(&_partition->_page_allocator->_lock);
+
+  precond(uncommit_cycle_is_finished());
+  precond(!uncommit_cycle_is_active());
+
+  if (uncommit_cycle_is_canceled()) {
+    // We were canceled before we managed to activate, update the timeout
+    update_next_cycle_timeout_on_cancel();
+
+    // Reset the cycle
+    reset_uncommit_cycle();
+
+    return false;
+  }
+
+  ZMappedCache* const cache = &_partition->_cache;
+
+  // Claim and reset the cache cycle tracking and register the cycle start time.
+  _cycle_start = os::elapsedTime();
+
+  // Read watermark from cache
+  const size_t uncommit_watermark = cache->min_size_watermark();
+
+  // Keep 10% as a headroom
+  const size_t to_uncommit = align_up(size_t(double(uncommit_watermark) * 0.9), ZGranuleSize);
+
+  // Never uncommit below min capacity
+  const size_t uncommit_limit = _partition->_capacity - _partition->_min_capacity;
+
+  _to_uncommit = MIN2(uncommit_limit, to_uncommit);
+  _uncommitted = 0;
+
+  // Reset watermark for next uncommit cycle
+  cache->reset_min_size_watermark();
+
+  postcond(is_aligned(_to_uncommit, ZGranuleSize));
+
+  return true;
+}
+
+uint64_t ZUncommitter::to_millis(double seconds) const {
+  return uint64_t(std::floor(seconds * double(MILLIUNITS)));
+}
+
+void ZUncommitter::update_next_cycle_timeout(double from_time) {
+  const double now = os::elapsedTime();
+
+  if (now < from_time + double(ZUncommitDelay)) {
+    _next_cycle_timeout = to_millis(ZUncommitDelay) - to_millis(now - from_time);
+  } else {
+    // ZUncommitDelay has already expired
+    _next_cycle_timeout = 0;
+  }
+}
+
+void ZUncommitter::update_next_cycle_timeout_on_cancel() {
+  precond(uncommit_cycle_is_canceled());
+
+  update_next_cycle_timeout(_cancel_time);
+
+  // Skip logging if there is no delay
+  if (ZUncommitDelay > 0) {
+    log_debug(gc, heap)("Uncommitter (%u) Cancel Next Cycle Timeout: " UINT64_FORMAT "ms",
+                        _id, _next_cycle_timeout);
+  }
+}
+
+void ZUncommitter::update_next_cycle_timeout_on_finish() {
+  precond(uncommit_cycle_is_active());
+  precond(uncommit_cycle_is_finished());
+
+  update_next_cycle_timeout(_cycle_start);
+
+  // Skip logging if there is no delay
+  if (ZUncommitDelay > 0) {
+    log_debug(gc, heap)("Uncommitter (%u) Finish Next Cycle Timeout: " UINT64_FORMAT "ms",
+                        _id, _next_cycle_timeout);
+  }
+}
+
+void ZUncommitter::cancel_uncommit_cycle() {
+  // Reset the cache cycle tracking and register the cancel time.
+  _partition->_cache.reset_min_size_watermark();
+  _cancel_time = os::elapsedTime();
+}
+void ZUncommitter::register_uncommit(size_t size) {
+  precond(uncommit_cycle_is_active());
+  precond(size > 0);
+  precond(size <= _to_uncommit);
+  precond(is_aligned(size, ZGranuleSize));
+
+  _to_uncommit -= size;
+  _uncommitted += size;
+
+  if (uncommit_cycle_is_canceled()) {
+    // Uncommit cycle got canceled while uncommitting.
+    return;
+  }
+
+  if (uncommit_cycle_is_finished()) {
+    // Everything has been uncommitted.
+    return;
+  }
+
+  const double now = os::elapsedTime();
+  const double time_since_start = now - _cycle_start;
+
+  if (time_since_start == 0.0) {
+    // Handle degenerate case where no time has elapsed.
+    _next_uncommit_timeout = 0;
+    return;
+  }
+
+  const double uncommit_rate = double(_uncommitted) / time_since_start;
+  const double time_to_complete = double(_to_uncommit) / uncommit_rate;
+  const double time_left = double(ZUncommitDelay) - time_since_start;
+
+  if (time_left < time_to_complete) {
+    // Too slow, work as fast as we can.
+    _next_uncommit_timeout = 0;
+    return;
+  }
+
+  const size_t uncommits_remaining_estimate = _to_uncommit / size + 1;
+  const uint64_t millis_left_rounded_down = to_millis(time_left);
+
+  if (uncommits_remaining_estimate < millis_left_rounded_down) {
+    // We have at least one millisecond per uncommit, spread them out.
+    _next_uncommit_timeout = millis_left_rounded_down / uncommits_remaining_estimate;
+    return;
+  }
+
+  // Randomly distribute the extra time, one millisecond at a time.
+  const double extra_time = time_left - time_to_complete;
+  const double random = double(uint32_t(os::random())) / double(std::numeric_limits<uint32_t>::max());
+
+  _next_uncommit_timeout = random < (extra_time / time_left) ? 1 : 0;
+}
+
+bool ZUncommitter::uncommit_cycle_is_finished() const {
+  return _to_uncommit == 0;
+}
+
+bool ZUncommitter::uncommit_cycle_is_active() const {
+  return _cycle_start != 0.0;
+}
+
+bool ZUncommitter::uncommit_cycle_is_canceled() const {
+  return _cancel_time != 0.0;
+}
+
+size_t ZUncommitter::uncommit() {
+  precond(uncommit_cycle_is_active());
+
+  ZArray<ZVirtualMemory> flushed_vmems;
+  size_t flushed = 0;
+
+  {
+    // We need to join the suspendible thread set while manipulating capacity
+    // and used, to make sure GC safepoints will have a consistent view.
+    SuspendibleThreadSetJoiner sts_joiner;
+    ZLocker<ZLock> locker(&_partition->_page_allocator->_lock);
+
+    if (uncommit_cycle_is_canceled()) {
+      // We have committed within the delay, stop uncommitting.
+      return 0;
+    }
+
+    // We flush out and uncommit chunks at a time (~0.8% of the max capacity,
+    // but at least one granule and at most 256M), in case demand for memory
+    // increases while we are uncommitting.
+    const size_t current_max_capacity = _partition->_current_max_capacity;
+    const size_t limit_upper_bound = MAX2(ZGranuleSize, align_down(256 * M / ZNUMA::count(), ZGranuleSize));
+    const size_t limit = MIN2(align_up(current_max_capacity >> 7, ZGranuleSize), limit_upper_bound);
+
+    ZMappedCache& cache = _partition->_cache;
+
+    // Never uncommit more than the current uncommit watermark,
+    // (adjusted by what has already been uncommitted).
+    const size_t allowed_to_uncommit = MAX2(cache.min_size_watermark(), _uncommitted) - _uncommitted;
+    const size_t to_uncommit = MIN2(_to_uncommit, allowed_to_uncommit);
+
+    // Never uncommit below min capacity.
+    const size_t retain = MAX2(_partition->_used, _partition->_min_capacity);
+    const size_t release = _partition->_capacity - retain;
+    const size_t flush = MIN3(release, limit, to_uncommit);
+
+    // Flush memory from the mapped cache for uncommit
+    flushed = cache.remove_for_uncommit(flush, &flushed_vmems);
+    if (flushed == 0) {
+      // Nothing flushed
+      cancel_uncommit_cycle();
+      return 0;
+    }
+
+    // Record flushed memory as claimed and how much we've flushed for this partition
+    Atomic::add(&_partition->_claimed, flushed);
+  }
+
+  // Unmap and uncommit flushed memory
+  for (const ZVirtualMemory vmem : flushed_vmems) {
+    _partition->unmap_virtual(vmem);
+    _partition->uncommit_physical(vmem);
+    _partition->free_physical(vmem);
+    _partition->free_virtual(vmem);
+  }
+
+  {
+    SuspendibleThreadSetJoiner sts_joiner;
+    ZLocker<ZLock> locker(&_partition->_page_allocator->_lock);
+
+    // Adjust claimed and capacity to reflect the uncommit
+    Atomic::sub(&_partition->_claimed, flushed);
+    _partition->decrease_capacity(flushed, false /* set_max_capacity */);
+    register_uncommit(flushed);
+  }
+
+  return flushed;
+}
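register_uncommit() above derives the pause before the next chunk from the observed uncommit rate. With assumed numbers the arithmetic looks like this (a worked example, not HotSpot code; all values are hypothetical):

#include <cstdint>
#include <cstdio>

int main() {
  // Assumed snapshot, 10s into a cycle with ZUncommitDelay = 300s:
  const double ZUncommitDelay = 300.0;             // seconds
  const double time_since_start = 10.0;            // seconds elapsed in cycle
  const double uncommitted = 2048.0;               // MB uncommitted so far
  const double to_uncommit = 6144.0;               // MB still to go
  const double chunk = 128.0;                      // MB per uncommit step

  const double uncommit_rate = uncommitted / time_since_start;         // 204.8 MB/s
  const double time_to_complete = to_uncommit / uncommit_rate;         // 30 s
  const double time_left = ZUncommitDelay - time_since_start;          // 290 s

  // Plenty of time left, so spread the remaining steps out evenly.
  const uint64_t steps_remaining = uint64_t(to_uncommit / chunk) + 1;  // 49
  const uint64_t millis_left = uint64_t(time_left * 1000.0);           // 290000
  const uint64_t next_timeout = millis_left / steps_remaining;         // ~5918 ms

  printf("sleep %llu ms between chunks (completion needs %.0f s of %.0f s left)\n",
         (unsigned long long)next_timeout, time_to_complete, time_left);
  return 0;
}

If the rate estimate ever predicts the cycle cannot finish within the delay, the timeout drops to zero and the thread uncommits as fast as it can.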
@@ -26,6 +26,7 @@

 #include "gc/z/zLock.hpp"
 #include "gc/z/zThread.hpp"
+#include "utilities/ticks.hpp"

 class ZPartition;
@@ -35,16 +36,43 @@ private:
   ZPartition* const _partition;
   mutable ZConditionLock _lock;
   bool _stop;
+  double _cancel_time;
+  uint64_t _next_cycle_timeout;
+  uint64_t _next_uncommit_timeout;
+  double _cycle_start;
+  size_t _to_uncommit;
+  size_t _uncommitted;

   bool wait(uint64_t timeout) const;
   bool should_continue() const;
+
+  uint64_t to_millis(double seconds) const;
+
+  void update_next_cycle_timeout(double from_time);
+  void update_next_cycle_timeout_on_cancel();
+  void update_next_cycle_timeout_on_finish();
+
+  void reset_uncommit_cycle();
+  void deactivate_uncommit_cycle();
+  bool activate_uncommit_cycle();
+  void register_uncommit(size_t size);
+
+  bool uncommit_cycle_is_finished() const;
+  bool uncommit_cycle_is_active() const;
+  bool uncommit_cycle_is_canceled() const;
+
+  size_t uncommit();
+
+  void update_statistics(size_t uncommitted, Ticks start, Tickspan* accumulated_time) const;

 protected:
   virtual void run_thread();
   virtual void terminate();

 public:
   ZUncommitter(uint32_t id, ZPartition* partition);

+  void cancel_uncommit_cycle();
 };

 #endif // SHARE_GC_Z_ZUNCOMMITTER_HPP
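The header shows that a cycle's state is encoded in three fields rather than an explicit state machine: _cycle_start != 0.0 means active, _cancel_time != 0.0 means canceled, and _to_uncommit == 0 means finished. A runnable toy model of those predicates and the cancel/reset flow (field names mirror the header; the logic is a simplification):

#include <cassert>
#include <cstddef>

// Minimal model of ZUncommitter's cycle-state predicates. Not the real class;
// just the three fields and the transitions exercised by a canceled cycle.
struct CycleState {
  double _cycle_start = 0.0;
  double _cancel_time = 0.0;
  size_t _to_uncommit = 0;

  bool is_active()   const { return _cycle_start != 0.0; }
  bool is_canceled() const { return _cancel_time != 0.0; }
  bool is_finished() const { return _to_uncommit == 0; }

  void activate(double now, size_t quota) { _cycle_start = now; _to_uncommit = quota; }
  void cancel(double now)                 { _cancel_time = now; }
  void reset() { _cycle_start = 0.0; _cancel_time = 0.0; _to_uncommit = 0; }
};

int main() {
  CycleState cycle;
  cycle.activate(1.0, 256);   // A cycle with a toy 256-byte quota
  assert(cycle.is_active() && !cycle.is_finished());

  cycle.cancel(2.0);          // A commit arrived: cancel_uncommit_cycle()
  assert(cycle.is_canceled());  // uncommit() observes this and returns 0

  cycle.reset();              // reset_uncommit_cycle() postconditions hold
  assert(cycle.is_finished() && !cycle.is_canceled() && !cycle.is_active());
  return 0;
}

Keeping cancel_uncommit_cycle() as the only public mutator means a commit on the allocation path can interrupt a cycle without knowing anything else about the uncommitter's schedule.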
@@ -27,12 +27,10 @@ package gc.z;
 * @test TestUncommit
 * @requires vm.gc.Z
 * @summary Test ZGC uncommit unused memory
 * @library /test/lib
 * @run main/othervm -XX:+UseZGC -Xlog:gc*,gc+heap=debug,gc+stats=off -Xms128M -Xmx512M -XX:ZUncommitDelay=5 gc.z.TestUncommit
 */

import java.util.ArrayList;
import jdk.test.lib.Utils;

public class TestUncommit {
  private static final int delay = 5 * 1000; // milliseconds
@@ -110,7 +108,7 @@ public class TestUncommit {
       throw new Exception("Uncommitted too fast");
     }

-    if (actualDelay > delay * 2 * Utils.TIMEOUT_FACTOR) {
+    if (actualDelay > delay * 3) {
       throw new Exception("Uncommitted too slow");
     }