8310675: Fix -Wconversion warnings in ZGC code

Reviewed-by: stefank, ayang
This commit is contained in:
Joel Sikström 2024-08-07 14:16:01 +00:00 committed by Albert Mingkun Yang
parent 88a05a853e
commit 21f710e7f6
33 changed files with 120 additions and 115 deletions

View File

@ -327,7 +327,7 @@ int ZBarrierSetC2::estimate_stub_size() const {
int size = 0;
for (int i = 0; i < stubs->length(); i++) {
CodeBuffer cb(blob->content_begin(), (address)C->output()->scratch_locs_memory() - blob->content_begin());
CodeBuffer cb(blob->content_begin(), checked_cast<CodeBuffer::csize_t>((address)C->output()->scratch_locs_memory() - blob->content_begin()));
MacroAssembler masm(&cb);
stubs->at(i)->emit_code(masm);
size += cb.insts_size();

View File

@ -146,7 +146,7 @@ void ZArguments::initialize() {
ZHeuristics::set_medium_page_size();
if (!FLAG_IS_DEFAULT(ZTenuringThreshold) && ZTenuringThreshold != -1) {
FLAG_SET_ERGO_IF_DEFAULT(MaxTenuringThreshold, ZTenuringThreshold);
FLAG_SET_ERGO_IF_DEFAULT(MaxTenuringThreshold, (uint)ZTenuringThreshold);
if (MaxTenuringThreshold == 0) {
FLAG_SET_ERGO_IF_DEFAULT(AlwaysTenure, true);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -61,7 +61,7 @@ inline ZArrayIteratorImpl<T, Parallel>::ZArrayIteratorImpl(const T* array, size_
template <typename T, bool Parallel>
inline ZArrayIteratorImpl<T, Parallel>::ZArrayIteratorImpl(const ZArray<T>* array)
: ZArrayIteratorImpl<T, Parallel>(array->is_empty() ? nullptr : array->adr_at(0), array->length()) {}
: ZArrayIteratorImpl<T, Parallel>(array->is_empty() ? nullptr : array->adr_at(0), (size_t)array->length()) {}
template <typename T, bool Parallel>
inline bool ZArrayIteratorImpl<T, Parallel>::next(T* elem) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -291,7 +291,7 @@ zaddress ZBarrier::keep_alive_slow_path(zaddress addr) {
// ON_WEAK barriers should only ever be applied to j.l.r.Reference.referents.
void ZBarrier::verify_on_weak(volatile zpointer* referent_addr) {
if (referent_addr != nullptr) {
const uintptr_t base = (uintptr_t)referent_addr - java_lang_ref_Reference::referent_offset();
const uintptr_t base = (uintptr_t)referent_addr - (size_t)java_lang_ref_Reference::referent_offset();
const oop obj = cast_to_oop(base);
assert(oopDesc::is_oop(obj), "Verification failed for: ref " PTR_FORMAT " obj: " PTR_FORMAT, (uintptr_t)referent_addr, base);
assert(java_lang_ref_Reference::is_referent_field(obj, java_lang_ref_Reference::referent_offset()), "Sanity");

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -79,7 +79,7 @@ bool ZBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) {
ZNMethod::nmethod_oops_do_inner(nm, &cl);
const uintptr_t prev_color = ZNMethod::color(nm);
const uintptr_t new_color = *(int*)ZPointerStoreGoodMaskLowOrderBitsAddr;
const uintptr_t new_color = *ZPointerStoreGoodMaskLowOrderBitsAddr;
log_develop_trace(gc, nmethod)("nmethod: " PTR_FORMAT " visited by entry (complete) [" PTR_FORMAT " -> " PTR_FORMAT "]", p2i(nm), prev_color, new_color);
// CodeCache unloading support

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,7 +30,7 @@
#include "utilities/debug.hpp"
inline uint32_t ZCPU::count() {
return os::processor_count();
return (uint32_t)os::processor_count();
}
inline uint32_t ZCPU::id() {

View File

@ -102,7 +102,7 @@ static double estimated_gc_workers(double serial_gc_time, double parallelizable_
}
static uint discrete_young_gc_workers(double gc_workers) {
return clamp<uint>(ceil(gc_workers), 1, ZYoungGCThreads);
return clamp<uint>((uint)ceil(gc_workers), 1, ZYoungGCThreads);
}
static double select_young_gc_workers(const ZDirectorStats& stats, double serial_gc_time, double parallelizable_gc_time, double alloc_rate_sd_percent, double time_until_oom) {
@ -426,7 +426,7 @@ static bool rule_major_warmup(const ZDirectorStats& stats) {
const size_t soft_max_capacity = stats._heap._soft_max_heap_size;
const size_t used = stats._heap._used;
const double used_threshold_percent = (stats._old_stats._cycle._nwarmup_cycles + 1) * 0.1;
const size_t used_threshold = soft_max_capacity * used_threshold_percent;
const size_t used_threshold = (size_t)(soft_max_capacity * used_threshold_percent);
log_debug(gc, director)("Rule Major: Warmup %.0f%%, Used: " SIZE_FORMAT "MB, UsedThreshold: " SIZE_FORMAT "MB",
used_threshold_percent * 100, used / M, used_threshold / M);
@ -497,13 +497,13 @@ static bool rule_major_allocation_rate(const ZDirectorStats& stats) {
// Doing an old collection makes subsequent young collections more efficient.
// Calculate the number of young collections ahead that we will try to amortize
// the cost of doing an old collection for.
const int lookahead = stats._heap._total_collections - stats._old_stats._general._total_collections_at_start;
const uint lookahead = stats._heap._total_collections - stats._old_stats._general._total_collections_at_start;
// Calculate extra young collection overhead predicted for a number of future
// young collections, due to not freeing up memory in the old generation.
const double extra_young_gc_time_for_lookahead = extra_young_gc_time * lookahead;
log_debug(gc, director)("Rule Major: Allocation Rate, ExtraYoungGCTime: %.3fs, OldGCTime: %.3fs, Lookahead: %d, ExtraYoungGCTimeForLookahead: %.3fs",
log_debug(gc, director)("Rule Major: Allocation Rate, ExtraYoungGCTime: %.3fs, OldGCTime: %.3fs, Lookahead: %u, ExtraYoungGCTimeForLookahead: %.3fs",
extra_young_gc_time, old_gc_time, lookahead, extra_young_gc_time_for_lookahead);
// If we continue doing as many minor collections as we already did since the
@ -565,7 +565,7 @@ static bool rule_major_proactive(const ZDirectorStats& stats) {
// passed since the previous GC. This helps avoid superfluous GCs when running
// applications with very low allocation rate.
const size_t used_after_last_gc = stats._old_stats._stat_heap._used_at_relocate_end;
const size_t used_increase_threshold = stats._heap._soft_max_heap_size * 0.10; // 10%
const size_t used_increase_threshold = (size_t)(stats._heap._soft_max_heap_size * 0.10); // 10%
const size_t used_threshold = used_after_last_gc + used_increase_threshold;
const size_t used = stats._heap._used;
const double time_since_last_gc = stats._old_stats._cycle._time_since_last;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,7 +30,7 @@
#include "utilities/debug.hpp"
inline size_t ZForwardingAllocator::size() const {
return _end - _start;
return (size_t)(_end - _start);
}
inline bool ZForwardingAllocator::is_full() const {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -299,7 +299,7 @@ void ZGeneration::reset_statistics() {
_page_allocator->reset_statistics(_id);
}
ssize_t ZGeneration::freed() const {
size_t ZGeneration::freed() const {
return _freed;
}
@ -448,7 +448,7 @@ public:
_success = do_operation();
// Update statistics
ZStatSample(ZSamplerJavaThreads, Threads::number_of_threads());
ZStatSample(ZSamplerJavaThreads, (uint64_t)Threads::number_of_threads());
}
virtual void doit_epilogue() {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -122,7 +122,7 @@ public:
// Statistics
void reset_statistics();
virtual bool should_record_stats() = 0;
ssize_t freed() const;
size_t freed() const;
void increase_freed(size_t size);
size_t promoted() const;
void increase_promoted(size_t size);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -204,7 +204,7 @@ private:
assert(ZCollectedHeap::heap()->is_in(p), "Should be in heap");
if (VisitReferents) {
return HeapAccess<AS_NO_KEEPALIVE | ON_UNKNOWN_OOP_REF>::oop_load_at(_base, _base->field_offset(p));
return HeapAccess<AS_NO_KEEPALIVE | ON_UNKNOWN_OOP_REF>::oop_load_at(_base, (ptrdiff_t)_base->field_offset(p));
}
return HeapAccess<AS_NO_KEEPALIVE>::oop_load(p);
@ -447,7 +447,7 @@ void ZHeapIterator::follow_array_chunk(const ZHeapIteratorContext& context, cons
const objArrayOop obj = objArrayOop(array.obj());
const int length = obj->length();
const int start = array.index();
const int stride = MIN2<int>(length - start, ObjArrayMarkingStride);
const int stride = MIN2<int>(length - start, (int)ObjArrayMarkingStride);
const int end = start + stride;
// Push remaining array chunk first

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -39,14 +39,14 @@ void ZHeuristics::set_medium_page_size() {
// becomes larger than ZPageSizeSmall.
const size_t min = ZGranuleSize;
const size_t max = ZGranuleSize * 16;
const size_t unclamped = MaxHeapSize * 0.03125;
const size_t unclamped = (size_t)(MaxHeapSize * 0.03125);
const size_t clamped = clamp(unclamped, min, max);
const size_t size = round_down_power_of_2(clamped);
if (size > ZPageSizeSmall) {
// Enable medium pages
ZPageSizeMedium = size;
ZPageSizeMediumShift = log2i_exact(ZPageSizeMedium);
ZPageSizeMediumShift = (size_t)log2i_exact(ZPageSizeMedium);
ZObjectSizeLimitMedium = ZPageSizeMedium / 8;
ZObjectAlignmentMediumShift = (int)ZPageSizeMediumShift - 13;
ZObjectAlignmentMedium = 1 << ZObjectAlignmentMediumShift;
@ -68,11 +68,11 @@ bool ZHeuristics::use_per_cpu_shared_small_pages() {
}
static uint nworkers_based_on_ncpus(double cpu_share_in_percent) {
return ceil(os::initial_active_processor_count() * cpu_share_in_percent / 100.0);
return (uint)ceil(os::initial_active_processor_count() * cpu_share_in_percent / 100.0);
}
static uint nworkers_based_on_heap_size(double heap_share_in_percent) {
return (MaxHeapSize * (heap_share_in_percent / 100.0)) / ZPageSizeSmall;
return (uint)(MaxHeapSize * (heap_share_in_percent / 100.0) / ZPageSizeSmall);
}
static uint nworkers(double cpu_share_in_percent) {
@ -101,9 +101,9 @@ uint ZHeuristics::nconcurrent_workers() {
}
size_t ZHeuristics::significant_heap_overhead() {
return MaxHeapSize * (ZFragmentationLimit / 100);
return (size_t)(MaxHeapSize * (ZFragmentationLimit / 100));
}
size_t ZHeuristics::significant_young_overhead() {
return MaxHeapSize * (ZYoungCompactionLimit / 100);
return (size_t)(MaxHeapSize * (ZYoungCompactionLimit / 100));
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -47,7 +47,7 @@ class ZIndexDistributorStriped : public CHeapObj<mtGC> {
}
volatile int* claim_addr(int index) {
return (volatile int*)(align_up(_mem, ZCacheLineSize) + index * ZCacheLineSize);
return (volatile int*)(align_up(_mem, ZCacheLineSize) + (size_t)index * ZCacheLineSize);
}
public:
@ -136,7 +136,7 @@ private:
// Total size used to hold all claim variables
static size_t claim_variables_size() {
return sizeof(int) * claim_level_end_index(ClaimLevels);
return sizeof(int) * (size_t)claim_level_end_index(ClaimLevels);
}
// Returns the index of the start of the current segment of the current level

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -47,7 +47,7 @@ ZLiveMap::ZLiveMap(uint32_t size)
_segment_live_bits(0),
_segment_claim_bits(0),
_bitmap(bitmap_size(size, nsegments)),
_segment_shift(exact_log2(segment_size())) {}
_segment_shift(log2i_exact(segment_size())) {}
void ZLiveMap::reset(ZGenerationId id) {
ZGeneration* const generation = ZGeneration::generation(id);
@ -130,6 +130,6 @@ void ZLiveMap::resize(uint32_t size) {
const size_t new_bitmap_size = bitmap_size(size, nsegments);
if (_bitmap.size() != new_bitmap_size) {
_bitmap.reinitialize(new_bitmap_size, false /* clear */);
_segment_shift = exact_log2(segment_size());
_segment_shift = log2i_exact(segment_size());
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -43,7 +43,7 @@ private:
BitMap::bm_word_t _segment_live_bits;
BitMap::bm_word_t _segment_claim_bits;
ZBitMap _bitmap;
size_t _segment_shift;
int _segment_shift;
const BitMapView segment_live_bits() const;
const BitMapView segment_claim_bits() const;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,7 +27,7 @@
#include "utilities/powerOfTwo.hpp"
static size_t shift_for_stripes(size_t nstripes) {
return ZMarkStripeShift + exact_log2(nstripes);
return ZMarkStripeShift + (size_t)log2i_exact(nstripes);
}
ZMarkCacheEntry::ZMarkCacheEntry()

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -86,7 +86,7 @@ ZMarkStripe* ZMarkStripeSet::stripe_for_worker(uint nworkers, uint worker_id) {
const size_t spillover_nworkers = nworkers - spillover_limit;
const size_t spillover_worker_id = worker_id - spillover_limit;
const double spillover_chunk = (double)nstripes / (double)spillover_nworkers;
index = spillover_worker_id * spillover_chunk;
index = (size_t)(spillover_worker_id * spillover_chunk);
}
assert(index < nstripes, "Invalid index");

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,7 +38,7 @@ bool ZMetronome::wait_for_tick() {
if (_nticks++ == 0) {
// First tick, set start time
const Ticks now = Ticks::now();
_start_ms = TimeHelper::counter_to_millis(now.value());
_start_ms = (uint64_t)TimeHelper::counter_to_millis(now.value());
}
MonitorLocker ml(&_monitor, Monitor::_no_safepoint_check_flag);
@ -47,9 +47,9 @@ bool ZMetronome::wait_for_tick() {
// We might wake up spuriously from wait, so always recalculate
// the timeout after a wakeup to see if we need to wait again.
const Ticks now = Ticks::now();
const uint64_t now_ms = TimeHelper::counter_to_millis(now.value());
const uint64_t now_ms = (uint64_t)TimeHelper::counter_to_millis(now.value());
const uint64_t next_ms = _start_ms + (_interval_ms * _nticks);
const int64_t timeout_ms = next_ms - now_ms;
const int64_t timeout_ms = (int64_t)(next_ms - now_ms);
if (timeout_ms > 0) {
// Wait
@ -57,7 +57,7 @@ bool ZMetronome::wait_for_tick() {
} else {
// Tick
if (timeout_ms < 0) {
const uint64_t overslept = -timeout_ms;
const uint64_t overslept = (uint64_t)-timeout_ms;
if (overslept > _interval_ms) {
// Missed one or more ticks. Bump _nticks accordingly to
// avoid firing a string of immediate ticks to make up

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -302,8 +302,8 @@ void ZNMethod::nmethods_do(bool secondary, NMethodClosure* cl) {
uintptr_t ZNMethod::color(nmethod* nm) {
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
// color is stored at low order bits of int; implicit conversion to uintptr_t is fine
return bs_nm->guard_value(nm);
// color is stored at low order bits of int; conversion to uintptr_t is fine
return (uintptr_t)bs_nm->guard_value(nm);
}
oop ZNMethod::load_oop(oop* p, DecoratorSet decorators) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -144,9 +144,9 @@ void ZNMethodTable::rebuild_if_needed() {
// grows/shrinks by doubling/halving its size. Pruning of unregistered
// entries is done by rebuilding the table with or without resizing it.
const size_t min_size = 1024;
const size_t shrink_threshold = _size * 0.30;
const size_t prune_threshold = _size * 0.65;
const size_t grow_threshold = _size * 0.70;
const size_t shrink_threshold = (size_t)(_size * 0.30);
const size_t prune_threshold = (size_t)(_size * 0.65);
const size_t grow_threshold = (size_t)(_size * 0.70);
if (_size == 0) {
// Initialize table

View File

@ -76,8 +76,8 @@ oop ZObjArrayAllocator::initialize(HeapWord* mem) const {
ZThreadLocalData::set_invisible_root(_thread, (zaddress_unsafe*)&mem);
const BasicType element_type = ArrayKlass::cast(_klass)->element_type();
const size_t base_offset_in_bytes = arrayOopDesc::base_offset_in_bytes(element_type);
const size_t process_start_offset_in_bytes = align_up(base_offset_in_bytes, BytesPerWord);
const size_t base_offset_in_bytes = (size_t)arrayOopDesc::base_offset_in_bytes(element_type);
const size_t process_start_offset_in_bytes = align_up(base_offset_in_bytes, (size_t)BytesPerWord);
if (process_start_offset_in_bytes != base_offset_in_bytes) {
// initialize_memory can only fill word aligned memory,

View File

@ -83,13 +83,13 @@ inline uint32_t ZPage::object_max_count() const {
inline size_t ZPage::object_alignment_shift() const {
switch (type()) {
case ZPageType::small:
return ZObjectAlignmentSmallShift;
return (size_t)ZObjectAlignmentSmallShift;
case ZPageType::medium:
return ZObjectAlignmentMediumShift;
return (size_t)ZObjectAlignmentMediumShift;
case ZPageType::large:
return ZObjectAlignmentLargeShift;
return (size_t)ZObjectAlignmentLargeShift;
default:
fatal("Unexpected page type");
@ -100,13 +100,13 @@ inline size_t ZPage::object_alignment_shift() const {
inline size_t ZPage::object_alignment() const {
switch (type()) {
case ZPageType::small:
return ZObjectAlignmentSmall;
return (size_t)ZObjectAlignmentSmall;
case ZPageType::medium:
return ZObjectAlignmentMedium;
return (size_t)ZObjectAlignmentMedium;
case ZPageType::large:
return ZObjectAlignmentLarge;
return (size_t)ZObjectAlignmentLarge;
default:
fatal("Unexpected page type");

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -308,7 +308,7 @@ public:
};
size_t ZPageCache::flush_for_uncommit(size_t requested, ZList<ZPage>* to, uint64_t* timeout) {
const uint64_t now = os::elapsedTime();
const uint64_t now = (uint64_t)os::elapsedTime();
const uint64_t expires = _last_commit + ZUncommitDelay;
if (expires > now) {
// Delay uncommit, set next timeout
@ -329,5 +329,5 @@ size_t ZPageCache::flush_for_uncommit(size_t requested, ZList<ZPage>* to, uint64
}
void ZPageCache::set_last_commit() {
_last_commit = ceil(os::elapsedTime());
_last_commit = (uint64_t)ceil(os::elapsedTime());
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -69,7 +69,7 @@ inline ZPageTableParallelIterator::ZPageTableParallelIterator(const ZPageTable*
template <typename Function>
inline void ZPageTableParallelIterator::do_pages(Function function) {
_index_distributor.do_indices([&](int index) {
ZPage* const page = _table->at(index);
ZPage* const page = _table->at(size_t(index));
if (page != nullptr) {
const size_t start_index = untype(page->start()) >> ZGranuleSizeShift;
if (size_t(index) == start_index) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -84,7 +84,7 @@ public:
: ZTask("ZRelocationSetInstallTask"),
_allocator(allocator),
_forwardings(nullptr),
_nforwardings(selector->selected_small()->length() + selector->selected_medium()->length()),
_nforwardings((size_t)selector->selected_small()->length() + (size_t)selector->selected_medium()->length()),
_small(selector->selected_small()),
_medium(selector->selected_medium()),
_small_iter(selector->selected_small()),
@ -113,7 +113,7 @@ public:
for (size_t page_index; _small_iter.next_index(&page_index);) {
ZPage* page = _small->at(int(page_index));
ZForwarding* const forwarding = ZForwarding::alloc(_allocator, page, to_age(page));
install_small(forwarding, _medium->length() + page_index);
install_small(forwarding, (size_t)_medium->length() + page_index);
SuspendibleThreadSet::yield();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -51,7 +51,7 @@ ZRelocationSetSelectorGroup::ZRelocationSetSelectorGroup(const char* name,
_page_size(page_size),
_object_size_limit(object_size_limit),
_fragmentation_limit(fragmentation_limit),
_page_fragmentation_limit(page_size * (fragmentation_limit / 100)),
_page_fragmentation_limit((size_t)(page_size * (fragmentation_limit / 100))),
_live_pages(),
_not_selected_pages(),
_forwarding_entries(0),
@ -72,7 +72,7 @@ void ZRelocationSetSelectorGroup::semi_sort() {
const size_t npartitions_shift = 11;
const size_t npartitions = (size_t)1 << npartitions_shift;
const size_t partition_size = _page_size >> npartitions_shift;
const size_t partition_size_shift = exact_log2(partition_size);
const int partition_size_shift = log2i_exact(partition_size);
// Partition slots/fingers
int partitions[npartitions] = { /* zero initialize */ };
@ -135,7 +135,7 @@ void ZRelocationSetSelectorGroup::select_inner() {
// By subtracting the object size limit from the pages size we get the maximum
// number of pages that the relocation set is guaranteed to fit in, regardless
// of in which order the objects are relocated.
const int to = ceil((double)(from_live_bytes) / (double)(_page_size - _object_size_limit));
const int to = (int)ceil(from_live_bytes / (double)(_page_size - _object_size_limit));
// Calculate the relative difference in reclaimable space compared to our
// currently selected final relocation set. If this number is larger than the

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -250,14 +250,14 @@ void ZStatUnitTime(LogTargetHandle log, const ZStatSampler& sampler, const ZStat
"%9.3f / %-9.3f ms",
sampler.group(),
sampler.name(),
TimeHelper::counter_to_millis(history.avg_10_seconds()),
TimeHelper::counter_to_millis(history.max_10_seconds()),
TimeHelper::counter_to_millis(history.avg_10_minutes()),
TimeHelper::counter_to_millis(history.max_10_minutes()),
TimeHelper::counter_to_millis(history.avg_10_hours()),
TimeHelper::counter_to_millis(history.max_10_hours()),
TimeHelper::counter_to_millis(history.avg_total()),
TimeHelper::counter_to_millis(history.max_total()));
TimeHelper::counter_to_millis((jlong)history.avg_10_seconds()),
TimeHelper::counter_to_millis((jlong)history.max_10_seconds()),
TimeHelper::counter_to_millis((jlong)history.avg_10_minutes()),
TimeHelper::counter_to_millis((jlong)history.max_10_minutes()),
TimeHelper::counter_to_millis((jlong)history.avg_10_hours()),
TimeHelper::counter_to_millis((jlong)history.max_10_hours()),
TimeHelper::counter_to_millis((jlong)history.avg_total()),
TimeHelper::counter_to_millis((jlong)history.max_total()));
}
void ZStatUnitBytes(LogTargetHandle log, const ZStatSampler& sampler, const ZStatSamplerHistory& history) {
@ -677,7 +677,7 @@ void ZStatPhaseCollection::register_end(ConcurrentGCTimer* timer, const Ticks& s
ZCollectedHeap::heap()->trace_heap_after_gc(jfr_tracer());
const Tickspan duration = end - start;
ZStatSample(_sampler, duration.value());
ZStatDurationSample(_sampler, duration);
const size_t used_at_end = ZHeap::heap()->used();
@ -718,7 +718,7 @@ void ZStatPhaseGeneration::register_end(ConcurrentGCTimer* timer, const Ticks& s
ZCollectedHeap::heap()->print_heap_after_gc();
const Tickspan duration = end - start;
ZStatSample(_sampler, duration.value());
ZStatDurationSample(_sampler, duration);
ZGeneration* const generation = ZGeneration::generation(_id);
@ -766,7 +766,7 @@ void ZStatPhasePause::register_end(ConcurrentGCTimer* timer, const Ticks& start,
timer->register_gc_pause_end(end);
const Tickspan duration = end - start;
ZStatSample(_sampler, duration.value());
ZStatDurationSample(_sampler, duration);
// Track max pause time
if (_max < duration) {
@ -798,7 +798,7 @@ void ZStatPhaseConcurrent::register_end(ConcurrentGCTimer* timer, const Ticks& s
timer->register_gc_concurrent_end(end);
const Tickspan duration = end - start;
ZStatSample(_sampler, duration.value());
ZStatDurationSample(_sampler, duration);
LogTarget(Info, gc, phases) log;
log_end(log, duration);
@ -835,7 +835,7 @@ void ZStatSubPhase::register_end(ConcurrentGCTimer* timer, const Ticks& start, c
ZTracer::report_thread_phase(name(), start, end);
const Tickspan duration = end - start;
ZStatSample(_sampler, duration.value());
ZStatDurationSample(_sampler, duration);
if (Thread::current()->is_Worker_thread()) {
LogTarget(Trace, gc, phases) log;
@ -862,7 +862,7 @@ void ZStatCriticalPhase::register_end(ConcurrentGCTimer* timer, const Ticks& sta
ZTracer::report_thread_phase(name(), start, end);
const Tickspan duration = end - start;
ZStatSample(_sampler, duration.value());
ZStatDurationSample(_sampler, duration);
ZStatInc(_counter);
if (_verbose) {
@ -914,6 +914,10 @@ void ZStatSample(const ZStatSampler& sampler, uint64_t value) {
ZTracer::report_stat_sampler(sampler, value);
}
void ZStatDurationSample(const ZStatSampler& sampler, const Tickspan& duration) {
ZStatSample(sampler, (uint64_t)duration.value());
}
void ZStatInc(const ZStatCounter& counter, uint64_t increment) {
ZStatCounterData* const cpu_data = counter.get();
const uint64_t value = Atomic::add(&cpu_data->_counter, increment);
@ -1036,7 +1040,7 @@ void ZStat::sample_and_collect(ZStatSamplerHistory* history) const {
bool ZStat::should_print(LogTargetHandle log) const {
static uint64_t print_at = ZStatisticsInterval;
const uint64_t now = os::elapsedTime();
const uint64_t now = (uint64_t)os::elapsedTime();
if (now < print_at) {
return false;
@ -1846,7 +1850,7 @@ void ZStatHeap::at_relocate_end(const ZPageAllocatorStats& stats, bool record_st
}
size_t ZStatHeap::reclaimed_avg() {
return _reclaimed_bytes.davg();
return (size_t)_reclaimed_bytes.davg();
}
size_t ZStatHeap::max_capacity() {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -345,6 +345,7 @@ public:
// Stat sample/increment
//
void ZStatSample(const ZStatSampler& sampler, uint64_t value);
void ZStatDurationSample(const ZStatSampler& sampler, const Tickspan& duration);
void ZStatInc(const ZStatCounter& counter, uint64_t increment = 1);
void ZStatInc(const ZStatUnsampledCounter& counter, uint64_t increment = 1);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -79,7 +79,7 @@ void ZStoreBarrierBuffer::install_base_pointers_inner() {
(ZPointer::remap_bits(_last_processed_color) & ZPointerRemappedOldMask) == 0,
"Should not have double bit errors");
for (int i = current(); i < (int)_buffer_length; ++i) {
for (size_t i = current(); i < _buffer_length; ++i) {
const ZStoreBarrierEntry& entry = _buffer[i];
volatile zpointer* const p = entry._p;
const zaddress_unsafe p_unsafe = to_zaddress_unsafe((uintptr_t)p);
@ -141,7 +141,7 @@ static volatile zpointer* make_load_good(volatile zpointer* p, zaddress_unsafe p
return (volatile zpointer*)p_remapped;
}
void ZStoreBarrierBuffer::on_new_phase_relocate(int i) {
void ZStoreBarrierBuffer::on_new_phase_relocate(size_t i) {
const uintptr_t last_remap_bits = ZPointer::remap_bits(_last_processed_color);
if (last_remap_bits == ZPointerRemapped) {
// All pointers are already remapped
@ -160,7 +160,7 @@ void ZStoreBarrierBuffer::on_new_phase_relocate(int i) {
entry._p = make_load_good(entry._p, p_base, _last_processed_color);
}
void ZStoreBarrierBuffer::on_new_phase_remember(int i) {
void ZStoreBarrierBuffer::on_new_phase_remember(size_t i) {
volatile zpointer* const p = _buffer[i]._p;
if (ZHeap::heap()->is_young(p)) {
@ -197,7 +197,7 @@ bool ZStoreBarrierBuffer::stored_during_old_mark() const {
return last_mark_old_bits == ZPointerMarkedOld;
}
void ZStoreBarrierBuffer::on_new_phase_mark(int i) {
void ZStoreBarrierBuffer::on_new_phase_mark(size_t i) {
const ZStoreBarrierEntry& entry = _buffer[i];
const zpointer prev = entry._prev;
@ -229,7 +229,7 @@ void ZStoreBarrierBuffer::on_new_phase() {
// Install all base pointers for relocation
install_base_pointers();
for (int i = current(); i < (int)_buffer_length; ++i) {
for (size_t i = current(); i < _buffer_length; ++i) {
on_new_phase_relocate(i);
on_new_phase_remember(i);
on_new_phase_mark(i);
@ -259,8 +259,8 @@ void ZStoreBarrierBuffer::on_error(outputStream* st) {
st->print_cr(" _last_processed_color: " PTR_FORMAT, _last_processed_color);
st->print_cr(" _last_installed_color: " PTR_FORMAT, _last_installed_color);
for (int i = current(); i < (int)_buffer_length; ++i) {
st->print_cr(" [%2d]: base: " PTR_FORMAT " p: " PTR_FORMAT " prev: " PTR_FORMAT,
for (size_t i = current(); i < _buffer_length; ++i) {
st->print_cr(" [%2zu]: base: " PTR_FORMAT " p: " PTR_FORMAT " prev: " PTR_FORMAT,
i,
untype(_base_pointers[i]),
p2i(_buffer[i]._p),
@ -276,7 +276,7 @@ void ZStoreBarrierBuffer::flush() {
OnError on_error(this);
VMErrorCallbackMark mark(&on_error);
for (int i = current(); i < (int)_buffer_length; ++i) {
for (size_t i = current(); i < _buffer_length; ++i) {
const ZStoreBarrierEntry& entry = _buffer[i];
const zaddress addr = ZBarrier::make_load_good(entry._prev);
ZBarrier::mark_and_remember(entry._p, addr);
@ -296,7 +296,7 @@ bool ZStoreBarrierBuffer::is_in(volatile zpointer* p) {
const uintptr_t last_remap_bits = ZPointer::remap_bits(buffer->_last_processed_color) & ZPointerRemappedMask;
const bool needs_remap = last_remap_bits != ZPointerRemapped;
for (int i = buffer->current(); i < (int)_buffer_length; ++i) {
for (size_t i = buffer->current(); i < _buffer_length; ++i) {
const ZStoreBarrierEntry& entry = buffer->_buffer[i];
volatile zpointer* entry_p = entry._p;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -59,16 +59,16 @@ private:
// sizeof(ZStoreBarrierEntry) scaled index growing downwards
size_t _current;
void on_new_phase_relocate(int i);
void on_new_phase_remember(int i);
void on_new_phase_mark(int i);
void on_new_phase_relocate(size_t i);
void on_new_phase_remember(size_t i);
void on_new_phase_mark(size_t i);
void clear();
bool is_old_mark() const;
bool stored_during_old_mark() const;
bool is_empty() const;
intptr_t current() const;
size_t current() const;
void install_base_pointers_inner();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,7 +30,7 @@
#include "gc/z/zThreadLocalData.hpp"
#include "runtime/thread.hpp"
inline intptr_t ZStoreBarrierBuffer::current() const {
inline size_t ZStoreBarrierBuffer::current() const {
return _current / sizeof(ZStoreBarrierEntry);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -85,7 +85,7 @@ bool ZUnmapper::try_enqueue(ZPage* page) {
}
// Capacity of the asynchronous unmapping queue, in bytes:
// ZAsyncUnmappingLimit percent of the maximum heap capacity,
// aligned up to a whole granule. The percentage is computed in
// floating point, hence the explicit cast back to size_t before
// aligning (keeps align_up's template deduction on size_t).
size_t ZUnmapper::queue_capacity() const {
  return align_up((size_t)(_page_allocator->max_capacity() * ZAsyncUnmappingLimit / 100.0), ZGranuleSize);
}
bool ZUnmapper::is_saturated() const {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -583,7 +583,7 @@ void ZVerify::on_color_flip() {
for (JavaThreadIteratorWithHandle jtiwh; JavaThread* const jt = jtiwh.next(); ) {
const ZStoreBarrierBuffer* const buffer = ZThreadLocalData::store_barrier_buffer(jt);
for (int i = buffer->current(); i < (int)ZStoreBarrierBuffer::_buffer_length; ++i) {
for (size_t i = buffer->current(); i < ZStoreBarrierBuffer::_buffer_length; ++i) {
volatile zpointer* const p = buffer->_buffer[i]._p;
bool created = false;
z_verify_store_barrier_buffer_table->put_if_absent(p, true, &created);