diff --git a/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp b/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp index dbec45fd96e..f72e84eaf59 100644 --- a/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp +++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp @@ -327,7 +327,7 @@ int ZBarrierSetC2::estimate_stub_size() const { int size = 0; for (int i = 0; i < stubs->length(); i++) { - CodeBuffer cb(blob->content_begin(), (address)C->output()->scratch_locs_memory() - blob->content_begin()); + CodeBuffer cb(blob->content_begin(), checked_cast<int>((address)C->output()->scratch_locs_memory() - blob->content_begin())); MacroAssembler masm(&cb); stubs->at(i)->emit_code(masm); size += cb.insts_size(); diff --git a/src/hotspot/share/gc/z/zArguments.cpp b/src/hotspot/share/gc/z/zArguments.cpp index c71e59944f0..f3ff568c64d 100644 --- a/src/hotspot/share/gc/z/zArguments.cpp +++ b/src/hotspot/share/gc/z/zArguments.cpp @@ -146,7 +146,7 @@ void ZArguments::initialize() { ZHeuristics::set_medium_page_size(); if (!FLAG_IS_DEFAULT(ZTenuringThreshold) && ZTenuringThreshold != -1) { - FLAG_SET_ERGO_IF_DEFAULT(MaxTenuringThreshold, ZTenuringThreshold); + FLAG_SET_ERGO_IF_DEFAULT(MaxTenuringThreshold, (uint)ZTenuringThreshold); if (MaxTenuringThreshold == 0) { FLAG_SET_ERGO_IF_DEFAULT(AlwaysTenure, true); } diff --git a/src/hotspot/share/gc/z/zArray.inline.hpp b/src/hotspot/share/gc/z/zArray.inline.hpp index 2ec87a76156..ec7feda8d63 100644 --- a/src/hotspot/share/gc/z/zArray.inline.hpp +++ b/src/hotspot/share/gc/z/zArray.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -61,7 +61,7 @@ inline ZArrayIteratorImpl::ZArrayIteratorImpl(const T* array, size_ template inline ZArrayIteratorImpl::ZArrayIteratorImpl(const ZArray* array) - : ZArrayIteratorImpl(array->is_empty() ? nullptr : array->adr_at(0), array->length()) {} + : ZArrayIteratorImpl(array->is_empty() ? nullptr : array->adr_at(0), (size_t)array->length()) {} template inline bool ZArrayIteratorImpl::next(T* elem) { diff --git a/src/hotspot/share/gc/z/zBarrier.cpp b/src/hotspot/share/gc/z/zBarrier.cpp index c4b6e0f8239..7d7f1284bdf 100644 --- a/src/hotspot/share/gc/z/zBarrier.cpp +++ b/src/hotspot/share/gc/z/zBarrier.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -291,7 +291,7 @@ zaddress ZBarrier::keep_alive_slow_path(zaddress addr) { // ON_WEAK barriers should only ever be applied to j.l.r.Reference.referents. 
void ZBarrier::verify_on_weak(volatile zpointer* referent_addr) { if (referent_addr != nullptr) { - const uintptr_t base = (uintptr_t)referent_addr - java_lang_ref_Reference::referent_offset(); + const uintptr_t base = (uintptr_t)referent_addr - (size_t)java_lang_ref_Reference::referent_offset(); const oop obj = cast_to_oop(base); assert(oopDesc::is_oop(obj), "Verification failed for: ref " PTR_FORMAT " obj: " PTR_FORMAT, (uintptr_t)referent_addr, base); assert(java_lang_ref_Reference::is_referent_field(obj, java_lang_ref_Reference::referent_offset()), "Sanity"); diff --git a/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp b/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp index b8ecc3eddd3..33894f166a3 100644 --- a/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp +++ b/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -79,7 +79,7 @@ bool ZBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) { ZNMethod::nmethod_oops_do_inner(nm, &cl); const uintptr_t prev_color = ZNMethod::color(nm); - const uintptr_t new_color = *(int*)ZPointerStoreGoodMaskLowOrderBitsAddr; + const uintptr_t new_color = *ZPointerStoreGoodMaskLowOrderBitsAddr; log_develop_trace(gc, nmethod)("nmethod: " PTR_FORMAT " visited by entry (complete) [" PTR_FORMAT " -> " PTR_FORMAT "]", p2i(nm), prev_color, new_color); // CodeCache unloading support diff --git a/src/hotspot/share/gc/z/zCPU.inline.hpp b/src/hotspot/share/gc/z/zCPU.inline.hpp index 67d26f4c2e1..b3712dc5ac9 100644 --- a/src/hotspot/share/gc/z/zCPU.inline.hpp +++ b/src/hotspot/share/gc/z/zCPU.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,7 +30,7 @@ #include "utilities/debug.hpp" inline uint32_t ZCPU::count() { - return os::processor_count(); + return (uint32_t)os::processor_count(); } inline uint32_t ZCPU::id() { diff --git a/src/hotspot/share/gc/z/zDirector.cpp b/src/hotspot/share/gc/z/zDirector.cpp index 162e05f2c07..3ba8698bd8e 100644 --- a/src/hotspot/share/gc/z/zDirector.cpp +++ b/src/hotspot/share/gc/z/zDirector.cpp @@ -102,7 +102,7 @@ static double estimated_gc_workers(double serial_gc_time, double parallelizable_ } static uint discrete_young_gc_workers(double gc_workers) { - return clamp(ceil(gc_workers), 1, ZYoungGCThreads); + return clamp((uint)ceil(gc_workers), 1, ZYoungGCThreads); } static double select_young_gc_workers(const ZDirectorStats& stats, double serial_gc_time, double parallelizable_gc_time, double alloc_rate_sd_percent, double time_until_oom) { @@ -426,7 +426,7 @@ static bool rule_major_warmup(const ZDirectorStats& stats) { const size_t soft_max_capacity = stats._heap._soft_max_heap_size; const size_t used = stats._heap._used; const double used_threshold_percent = (stats._old_stats._cycle._nwarmup_cycles + 1) * 0.1; - const size_t used_threshold = soft_max_capacity * used_threshold_percent; + const size_t used_threshold = (size_t)(soft_max_capacity * used_threshold_percent); log_debug(gc, director)("Rule Major: Warmup %.0f%%, Used: " SIZE_FORMAT "MB, UsedThreshold: " SIZE_FORMAT "MB", used_threshold_percent * 100, used / M, used_threshold / M); @@ -497,13 +497,13 @@ static bool rule_major_allocation_rate(const ZDirectorStats& stats) { // Doing an old collection makes subsequent young collections more efficient. // Calculate the number of young collections ahead that we will try to amortize // the cost of doing an old collection for. 
- const int lookahead = stats._heap._total_collections - stats._old_stats._general._total_collections_at_start; + const uint lookahead = stats._heap._total_collections - stats._old_stats._general._total_collections_at_start; // Calculate extra young collection overhead predicted for a number of future // young collections, due to not freeing up memory in the old generation. const double extra_young_gc_time_for_lookahead = extra_young_gc_time * lookahead; - log_debug(gc, director)("Rule Major: Allocation Rate, ExtraYoungGCTime: %.3fs, OldGCTime: %.3fs, Lookahead: %d, ExtraYoungGCTimeForLookahead: %.3fs", + log_debug(gc, director)("Rule Major: Allocation Rate, ExtraYoungGCTime: %.3fs, OldGCTime: %.3fs, Lookahead: %u, ExtraYoungGCTimeForLookahead: %.3fs", extra_young_gc_time, old_gc_time, lookahead, extra_young_gc_time_for_lookahead); // If we continue doing as many minor collections as we already did since the @@ -565,7 +565,7 @@ static bool rule_major_proactive(const ZDirectorStats& stats) { // passed since the previous GC. This helps avoid superfluous GCs when running // applications with very low allocation rate. const size_t used_after_last_gc = stats._old_stats._stat_heap._used_at_relocate_end; - const size_t used_increase_threshold = stats._heap._soft_max_heap_size * 0.10; // 10% + const size_t used_increase_threshold = (size_t)(stats._heap._soft_max_heap_size * 0.10); // 10% const size_t used_threshold = used_after_last_gc + used_increase_threshold; const size_t used = stats._heap._used; const double time_since_last_gc = stats._old_stats._cycle._time_since_last; diff --git a/src/hotspot/share/gc/z/zForwardingAllocator.inline.hpp b/src/hotspot/share/gc/z/zForwardingAllocator.inline.hpp index 41d91068434..42d006a5b37 100644 --- a/src/hotspot/share/gc/z/zForwardingAllocator.inline.hpp +++ b/src/hotspot/share/gc/z/zForwardingAllocator.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,7 +30,7 @@ #include "utilities/debug.hpp" inline size_t ZForwardingAllocator::size() const { - return _end - _start; + return (size_t)(_end - _start); } inline bool ZForwardingAllocator::is_full() const { diff --git a/src/hotspot/share/gc/z/zGeneration.cpp b/src/hotspot/share/gc/z/zGeneration.cpp index be86550d321..8a7d38e2991 100644 --- a/src/hotspot/share/gc/z/zGeneration.cpp +++ b/src/hotspot/share/gc/z/zGeneration.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -299,7 +299,7 @@ void ZGeneration::reset_statistics() { _page_allocator->reset_statistics(_id); } -ssize_t ZGeneration::freed() const { +size_t ZGeneration::freed() const { return _freed; } @@ -448,7 +448,7 @@ public: _success = do_operation(); // Update statistics - ZStatSample(ZSamplerJavaThreads, Threads::number_of_threads()); + ZStatSample(ZSamplerJavaThreads, (uint64_t)Threads::number_of_threads()); } virtual void doit_epilogue() { diff --git a/src/hotspot/share/gc/z/zGeneration.hpp b/src/hotspot/share/gc/z/zGeneration.hpp index 32762a50b62..aec48a0a072 100644 --- a/src/hotspot/share/gc/z/zGeneration.hpp +++ b/src/hotspot/share/gc/z/zGeneration.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -122,7 +122,7 @@ public: // Statistics void reset_statistics(); virtual bool should_record_stats() = 0; - ssize_t freed() const; + size_t freed() const; void increase_freed(size_t size); size_t promoted() const; void increase_promoted(size_t size); diff --git a/src/hotspot/share/gc/z/zHeapIterator.cpp b/src/hotspot/share/gc/z/zHeapIterator.cpp index cbcac39a11b..50fc921131f 100644 --- a/src/hotspot/share/gc/z/zHeapIterator.cpp +++ b/src/hotspot/share/gc/z/zHeapIterator.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -204,7 +204,7 @@ private: assert(ZCollectedHeap::heap()->is_in(p), "Should be in heap"); if (VisitReferents) { - return HeapAccess::oop_load_at(_base, _base->field_offset(p)); + return HeapAccess::oop_load_at(_base, (ptrdiff_t)_base->field_offset(p)); } return HeapAccess::oop_load(p); @@ -447,7 +447,7 @@ void ZHeapIterator::follow_array_chunk(const ZHeapIteratorContext& context, cons const objArrayOop obj = objArrayOop(array.obj()); const int length = obj->length(); const int start = array.index(); - const int stride = MIN2(length - start, ObjArrayMarkingStride); + const int stride = MIN2(length - start, (int)ObjArrayMarkingStride); const int end = start + stride; // Push remaining array chunk first diff --git a/src/hotspot/share/gc/z/zHeuristics.cpp b/src/hotspot/share/gc/z/zHeuristics.cpp index c764227061e..ebf979af795 100644 --- a/src/hotspot/share/gc/z/zHeuristics.cpp +++ b/src/hotspot/share/gc/z/zHeuristics.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -39,14 +39,14 @@ void ZHeuristics::set_medium_page_size() { // becomes larger than ZPageSizeSmall. const size_t min = ZGranuleSize; const size_t max = ZGranuleSize * 16; - const size_t unclamped = MaxHeapSize * 0.03125; + const size_t unclamped = (size_t)(MaxHeapSize * 0.03125); const size_t clamped = clamp(unclamped, min, max); const size_t size = round_down_power_of_2(clamped); if (size > ZPageSizeSmall) { // Enable medium pages ZPageSizeMedium = size; - ZPageSizeMediumShift = log2i_exact(ZPageSizeMedium); + ZPageSizeMediumShift = (size_t)log2i_exact(ZPageSizeMedium); ZObjectSizeLimitMedium = ZPageSizeMedium / 8; ZObjectAlignmentMediumShift = (int)ZPageSizeMediumShift - 13; ZObjectAlignmentMedium = 1 << ZObjectAlignmentMediumShift; @@ -68,11 +68,11 @@ bool ZHeuristics::use_per_cpu_shared_small_pages() { } static uint nworkers_based_on_ncpus(double cpu_share_in_percent) { - return ceil(os::initial_active_processor_count() * cpu_share_in_percent / 100.0); + return (uint)ceil(os::initial_active_processor_count() * cpu_share_in_percent / 100.0); } static uint nworkers_based_on_heap_size(double heap_share_in_percent) { - return (MaxHeapSize * (heap_share_in_percent / 100.0)) / ZPageSizeSmall; + return (uint)(MaxHeapSize * (heap_share_in_percent / 100.0) / ZPageSizeSmall); } static uint nworkers(double cpu_share_in_percent) { @@ -101,9 +101,9 @@ uint ZHeuristics::nconcurrent_workers() { } size_t ZHeuristics::significant_heap_overhead() { - return MaxHeapSize * (ZFragmentationLimit / 100); + return (size_t)(MaxHeapSize * (ZFragmentationLimit / 100)); } size_t ZHeuristics::significant_young_overhead() { - return MaxHeapSize * (ZYoungCompactionLimit / 100); + return (size_t)(MaxHeapSize * (ZYoungCompactionLimit / 100)); } diff --git a/src/hotspot/share/gc/z/zIndexDistributor.inline.hpp 
b/src/hotspot/share/gc/z/zIndexDistributor.inline.hpp index 172ba250575..26afdef9d05 100644 --- a/src/hotspot/share/gc/z/zIndexDistributor.inline.hpp +++ b/src/hotspot/share/gc/z/zIndexDistributor.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -47,7 +47,7 @@ class ZIndexDistributorStriped : public CHeapObj { } volatile int* claim_addr(int index) { - return (volatile int*)(align_up(_mem, ZCacheLineSize) + index * ZCacheLineSize); + return (volatile int*)(align_up(_mem, ZCacheLineSize) + (size_t)index * ZCacheLineSize); } public: @@ -136,7 +136,7 @@ private: // Total size used to hold all claim variables static size_t claim_variables_size() { - return sizeof(int) * claim_level_end_index(ClaimLevels); + return sizeof(int) * (size_t)claim_level_end_index(ClaimLevels); } // Returns the index of the start of the current segment of the current level diff --git a/src/hotspot/share/gc/z/zLiveMap.cpp b/src/hotspot/share/gc/z/zLiveMap.cpp index 2e4a8edd356..4123620f8b7 100644 --- a/src/hotspot/share/gc/z/zLiveMap.cpp +++ b/src/hotspot/share/gc/z/zLiveMap.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -47,7 +47,7 @@ ZLiveMap::ZLiveMap(uint32_t size) _segment_live_bits(0), _segment_claim_bits(0), _bitmap(bitmap_size(size, nsegments)), - _segment_shift(exact_log2(segment_size())) {} + _segment_shift(log2i_exact(segment_size())) {} void ZLiveMap::reset(ZGenerationId id) { ZGeneration* const generation = ZGeneration::generation(id); @@ -130,6 +130,6 @@ void ZLiveMap::resize(uint32_t size) { const size_t new_bitmap_size = bitmap_size(size, nsegments); if (_bitmap.size() != new_bitmap_size) { _bitmap.reinitialize(new_bitmap_size, false /* clear */); - _segment_shift = exact_log2(segment_size()); + _segment_shift = log2i_exact(segment_size()); } } diff --git a/src/hotspot/share/gc/z/zLiveMap.hpp b/src/hotspot/share/gc/z/zLiveMap.hpp index e3bcd2e267d..f8b16d06dc5 100644 --- a/src/hotspot/share/gc/z/zLiveMap.hpp +++ b/src/hotspot/share/gc/z/zLiveMap.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -43,7 +43,7 @@ private: BitMap::bm_word_t _segment_live_bits; BitMap::bm_word_t _segment_claim_bits; ZBitMap _bitmap; - size_t _segment_shift; + int _segment_shift; const BitMapView segment_live_bits() const; const BitMapView segment_claim_bits() const; diff --git a/src/hotspot/share/gc/z/zMarkCache.cpp b/src/hotspot/share/gc/z/zMarkCache.cpp index 7fbb5a8be4e..0eecfbfaa67 100644 --- a/src/hotspot/share/gc/z/zMarkCache.cpp +++ b/src/hotspot/share/gc/z/zMarkCache.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -27,7 +27,7 @@ #include "utilities/powerOfTwo.hpp" static size_t shift_for_stripes(size_t nstripes) { - return ZMarkStripeShift + exact_log2(nstripes); + return ZMarkStripeShift + (size_t)log2i_exact(nstripes); } ZMarkCacheEntry::ZMarkCacheEntry() diff --git a/src/hotspot/share/gc/z/zMarkStack.cpp b/src/hotspot/share/gc/z/zMarkStack.cpp index 1e10c41eb41..c4938af0a5f 100644 --- a/src/hotspot/share/gc/z/zMarkStack.cpp +++ b/src/hotspot/share/gc/z/zMarkStack.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -86,7 +86,7 @@ ZMarkStripe* ZMarkStripeSet::stripe_for_worker(uint nworkers, uint worker_id) { const size_t spillover_nworkers = nworkers - spillover_limit; const size_t spillover_worker_id = worker_id - spillover_limit; const double spillover_chunk = (double)nstripes / (double)spillover_nworkers; - index = spillover_worker_id * spillover_chunk; + index = (size_t)(spillover_worker_id * spillover_chunk); } assert(index < nstripes, "Invalid index"); diff --git a/src/hotspot/share/gc/z/zMetronome.cpp b/src/hotspot/share/gc/z/zMetronome.cpp index 0cc209cd7c8..876b1f69227 100644 --- a/src/hotspot/share/gc/z/zMetronome.cpp +++ b/src/hotspot/share/gc/z/zMetronome.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -38,7 +38,7 @@ bool ZMetronome::wait_for_tick() { if (_nticks++ == 0) { // First tick, set start time const Ticks now = Ticks::now(); - _start_ms = TimeHelper::counter_to_millis(now.value()); + _start_ms = (uint64_t)TimeHelper::counter_to_millis(now.value()); } MonitorLocker ml(&_monitor, Monitor::_no_safepoint_check_flag); @@ -47,9 +47,9 @@ bool ZMetronome::wait_for_tick() { // We might wake up spuriously from wait, so always recalculate // the timeout after a wakeup to see if we need to wait again. const Ticks now = Ticks::now(); - const uint64_t now_ms = TimeHelper::counter_to_millis(now.value()); + const uint64_t now_ms = (uint64_t)TimeHelper::counter_to_millis(now.value()); const uint64_t next_ms = _start_ms + (_interval_ms * _nticks); - const int64_t timeout_ms = next_ms - now_ms; + const int64_t timeout_ms = (int64_t)(next_ms - now_ms); if (timeout_ms > 0) { // Wait @@ -57,7 +57,7 @@ bool ZMetronome::wait_for_tick() { } else { // Tick if (timeout_ms < 0) { - const uint64_t overslept = -timeout_ms; + const uint64_t overslept = (uint64_t)-timeout_ms; if (overslept > _interval_ms) { // Missed one or more ticks. Bump _nticks accordingly to // avoid firing a string of immediate ticks to make up diff --git a/src/hotspot/share/gc/z/zNMethod.cpp b/src/hotspot/share/gc/z/zNMethod.cpp index 7ae7b204d08..7c5b1e06edb 100644 --- a/src/hotspot/share/gc/z/zNMethod.cpp +++ b/src/hotspot/share/gc/z/zNMethod.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -302,8 +302,8 @@ void ZNMethod::nmethods_do(bool secondary, NMethodClosure* cl) { uintptr_t ZNMethod::color(nmethod* nm) { BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); - // color is stored at low order bits of int; implicit conversion to uintptr_t is fine - return bs_nm->guard_value(nm); + // color is stored at low order bits of int; conversion to uintptr_t is fine + return (uintptr_t)bs_nm->guard_value(nm); } oop ZNMethod::load_oop(oop* p, DecoratorSet decorators) { diff --git a/src/hotspot/share/gc/z/zNMethodTable.cpp b/src/hotspot/share/gc/z/zNMethodTable.cpp index a4b56292c52..9714bee4bd8 100644 --- a/src/hotspot/share/gc/z/zNMethodTable.cpp +++ b/src/hotspot/share/gc/z/zNMethodTable.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -144,9 +144,9 @@ void ZNMethodTable::rebuild_if_needed() { // grows/shrinks by doubling/halving its size. Pruning of unregistered // entries is done by rebuilding the table with or without resizing it. 
const size_t min_size = 1024; - const size_t shrink_threshold = _size * 0.30; - const size_t prune_threshold = _size * 0.65; - const size_t grow_threshold = _size * 0.70; + const size_t shrink_threshold = (size_t)(_size * 0.30); + const size_t prune_threshold = (size_t)(_size * 0.65); + const size_t grow_threshold = (size_t)(_size * 0.70); if (_size == 0) { // Initialize table diff --git a/src/hotspot/share/gc/z/zObjArrayAllocator.cpp b/src/hotspot/share/gc/z/zObjArrayAllocator.cpp index ad19f273dcf..ada8351a9f6 100644 --- a/src/hotspot/share/gc/z/zObjArrayAllocator.cpp +++ b/src/hotspot/share/gc/z/zObjArrayAllocator.cpp @@ -76,8 +76,8 @@ oop ZObjArrayAllocator::initialize(HeapWord* mem) const { ZThreadLocalData::set_invisible_root(_thread, (zaddress_unsafe*)&mem); const BasicType element_type = ArrayKlass::cast(_klass)->element_type(); - const size_t base_offset_in_bytes = arrayOopDesc::base_offset_in_bytes(element_type); - const size_t process_start_offset_in_bytes = align_up(base_offset_in_bytes, BytesPerWord); + const size_t base_offset_in_bytes = (size_t)arrayOopDesc::base_offset_in_bytes(element_type); + const size_t process_start_offset_in_bytes = align_up(base_offset_in_bytes, (size_t)BytesPerWord); if (process_start_offset_in_bytes != base_offset_in_bytes) { // initialize_memory can only fill word aligned memory, diff --git a/src/hotspot/share/gc/z/zPage.inline.hpp b/src/hotspot/share/gc/z/zPage.inline.hpp index 0b4ee28ba7d..fcf69c685f7 100644 --- a/src/hotspot/share/gc/z/zPage.inline.hpp +++ b/src/hotspot/share/gc/z/zPage.inline.hpp @@ -83,13 +83,13 @@ inline uint32_t ZPage::object_max_count() const { inline size_t ZPage::object_alignment_shift() const { switch (type()) { case ZPageType::small: - return ZObjectAlignmentSmallShift; + return (size_t)ZObjectAlignmentSmallShift; case ZPageType::medium: - return ZObjectAlignmentMediumShift; + return (size_t)ZObjectAlignmentMediumShift; case ZPageType::large: - return ZObjectAlignmentLargeShift; + return 
(size_t)ZObjectAlignmentLargeShift; default: fatal("Unexpected page type"); @@ -100,13 +100,13 @@ inline size_t ZPage::object_alignment_shift() const { inline size_t ZPage::object_alignment() const { switch (type()) { case ZPageType::small: - return ZObjectAlignmentSmall; + return (size_t)ZObjectAlignmentSmall; case ZPageType::medium: - return ZObjectAlignmentMedium; + return (size_t)ZObjectAlignmentMedium; case ZPageType::large: - return ZObjectAlignmentLarge; + return (size_t)ZObjectAlignmentLarge; default: fatal("Unexpected page type"); diff --git a/src/hotspot/share/gc/z/zPageCache.cpp b/src/hotspot/share/gc/z/zPageCache.cpp index 96fd2eafe0f..163bb395560 100644 --- a/src/hotspot/share/gc/z/zPageCache.cpp +++ b/src/hotspot/share/gc/z/zPageCache.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -308,7 +308,7 @@ public: }; size_t ZPageCache::flush_for_uncommit(size_t requested, ZList* to, uint64_t* timeout) { - const uint64_t now = os::elapsedTime(); + const uint64_t now = (uint64_t)os::elapsedTime(); const uint64_t expires = _last_commit + ZUncommitDelay; if (expires > now) { // Delay uncommit, set next timeout @@ -329,5 +329,5 @@ size_t ZPageCache::flush_for_uncommit(size_t requested, ZList* to, uint64 } void ZPageCache::set_last_commit() { - _last_commit = ceil(os::elapsedTime()); + _last_commit = (uint64_t)ceil(os::elapsedTime()); } diff --git a/src/hotspot/share/gc/z/zPageTable.inline.hpp b/src/hotspot/share/gc/z/zPageTable.inline.hpp index 3b4cbe9220c..79a2d297df8 100644 --- a/src/hotspot/share/gc/z/zPageTable.inline.hpp +++ b/src/hotspot/share/gc/z/zPageTable.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -69,7 +69,7 @@ inline ZPageTableParallelIterator::ZPageTableParallelIterator(const ZPageTable* template inline void ZPageTableParallelIterator::do_pages(Function function) { _index_distributor.do_indices([&](int index) { - ZPage* const page = _table->at(index); + ZPage* const page = _table->at(size_t(index)); if (page != nullptr) { const size_t start_index = untype(page->start()) >> ZGranuleSizeShift; if (size_t(index) == start_index) { diff --git a/src/hotspot/share/gc/z/zRelocationSet.cpp b/src/hotspot/share/gc/z/zRelocationSet.cpp index 92f245777b4..5c82f55bbbf 100644 --- a/src/hotspot/share/gc/z/zRelocationSet.cpp +++ b/src/hotspot/share/gc/z/zRelocationSet.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -84,7 +84,7 @@ public: : ZTask("ZRelocationSetInstallTask"), _allocator(allocator), _forwardings(nullptr), - _nforwardings(selector->selected_small()->length() + selector->selected_medium()->length()), + _nforwardings((size_t)selector->selected_small()->length() + (size_t)selector->selected_medium()->length()), _small(selector->selected_small()), _medium(selector->selected_medium()), _small_iter(selector->selected_small()), @@ -113,7 +113,7 @@ public: for (size_t page_index; _small_iter.next_index(&page_index);) { ZPage* page = _small->at(int(page_index)); ZForwarding* const forwarding = ZForwarding::alloc(_allocator, page, to_age(page)); - install_small(forwarding, _medium->length() + page_index); + install_small(forwarding, (size_t)_medium->length() + page_index); SuspendibleThreadSet::yield(); } diff --git a/src/hotspot/share/gc/z/zRelocationSetSelector.cpp b/src/hotspot/share/gc/z/zRelocationSetSelector.cpp index ab1df68fe51..ec904b914fb 100644 --- a/src/hotspot/share/gc/z/zRelocationSetSelector.cpp +++ b/src/hotspot/share/gc/z/zRelocationSetSelector.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -51,7 +51,7 @@ ZRelocationSetSelectorGroup::ZRelocationSetSelectorGroup(const char* name, _page_size(page_size), _object_size_limit(object_size_limit), _fragmentation_limit(fragmentation_limit), - _page_fragmentation_limit(page_size * (fragmentation_limit / 100)), + _page_fragmentation_limit((size_t)(page_size * (fragmentation_limit / 100))), _live_pages(), _not_selected_pages(), _forwarding_entries(0), @@ -72,7 +72,7 @@ void ZRelocationSetSelectorGroup::semi_sort() { const size_t npartitions_shift = 11; const size_t npartitions = (size_t)1 << npartitions_shift; const size_t partition_size = _page_size >> npartitions_shift; - const size_t partition_size_shift = exact_log2(partition_size); + const int partition_size_shift = log2i_exact(partition_size); // Partition slots/fingers int partitions[npartitions] = { /* zero initialize */ }; @@ -135,7 +135,7 @@ void ZRelocationSetSelectorGroup::select_inner() { // By subtracting the object size limit from the pages size we get the maximum // number of pages that the relocation set is guaranteed to fit in, regardless // of in which order the objects are relocated. - const int to = ceil((double)(from_live_bytes) / (double)(_page_size - _object_size_limit)); + const int to = (int)ceil(from_live_bytes / (double)(_page_size - _object_size_limit)); // Calculate the relative difference in reclaimable space compared to our // currently selected final relocation set. If this number is larger than the diff --git a/src/hotspot/share/gc/z/zStat.cpp b/src/hotspot/share/gc/z/zStat.cpp index d838cb0b813..613f7b2740b 100644 --- a/src/hotspot/share/gc/z/zStat.cpp +++ b/src/hotspot/share/gc/z/zStat.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -250,14 +250,14 @@ void ZStatUnitTime(LogTargetHandle log, const ZStatSampler& sampler, const ZStat "%9.3f / %-9.3f ms", sampler.group(), sampler.name(), - TimeHelper::counter_to_millis(history.avg_10_seconds()), - TimeHelper::counter_to_millis(history.max_10_seconds()), - TimeHelper::counter_to_millis(history.avg_10_minutes()), - TimeHelper::counter_to_millis(history.max_10_minutes()), - TimeHelper::counter_to_millis(history.avg_10_hours()), - TimeHelper::counter_to_millis(history.max_10_hours()), - TimeHelper::counter_to_millis(history.avg_total()), - TimeHelper::counter_to_millis(history.max_total())); + TimeHelper::counter_to_millis((jlong)history.avg_10_seconds()), + TimeHelper::counter_to_millis((jlong)history.max_10_seconds()), + TimeHelper::counter_to_millis((jlong)history.avg_10_minutes()), + TimeHelper::counter_to_millis((jlong)history.max_10_minutes()), + TimeHelper::counter_to_millis((jlong)history.avg_10_hours()), + TimeHelper::counter_to_millis((jlong)history.max_10_hours()), + TimeHelper::counter_to_millis((jlong)history.avg_total()), + TimeHelper::counter_to_millis((jlong)history.max_total())); } void ZStatUnitBytes(LogTargetHandle log, const ZStatSampler& sampler, const ZStatSamplerHistory& history) { @@ -677,7 +677,7 @@ void ZStatPhaseCollection::register_end(ConcurrentGCTimer* timer, const Ticks& s ZCollectedHeap::heap()->trace_heap_after_gc(jfr_tracer()); const Tickspan duration = end - start; - ZStatSample(_sampler, duration.value()); + ZStatDurationSample(_sampler, duration); const size_t used_at_end = ZHeap::heap()->used(); @@ -718,7 +718,7 @@ void ZStatPhaseGeneration::register_end(ConcurrentGCTimer* timer, const Ticks& s ZCollectedHeap::heap()->print_heap_after_gc(); const Tickspan duration = end - start; - ZStatSample(_sampler, duration.value()); + ZStatDurationSample(_sampler, duration); ZGeneration* const generation = ZGeneration::generation(_id); @@ -766,7 
+766,7 @@ void ZStatPhasePause::register_end(ConcurrentGCTimer* timer, const Ticks& start, timer->register_gc_pause_end(end); const Tickspan duration = end - start; - ZStatSample(_sampler, duration.value()); + ZStatDurationSample(_sampler, duration); // Track max pause time if (_max < duration) { @@ -798,7 +798,7 @@ void ZStatPhaseConcurrent::register_end(ConcurrentGCTimer* timer, const Ticks& s timer->register_gc_concurrent_end(end); const Tickspan duration = end - start; - ZStatSample(_sampler, duration.value()); + ZStatDurationSample(_sampler, duration); LogTarget(Info, gc, phases) log; log_end(log, duration); @@ -835,7 +835,7 @@ void ZStatSubPhase::register_end(ConcurrentGCTimer* timer, const Ticks& start, c ZTracer::report_thread_phase(name(), start, end); const Tickspan duration = end - start; - ZStatSample(_sampler, duration.value()); + ZStatDurationSample(_sampler, duration); if (Thread::current()->is_Worker_thread()) { LogTarget(Trace, gc, phases) log; @@ -862,7 +862,7 @@ void ZStatCriticalPhase::register_end(ConcurrentGCTimer* timer, const Ticks& sta ZTracer::report_thread_phase(name(), start, end); const Tickspan duration = end - start; - ZStatSample(_sampler, duration.value()); + ZStatDurationSample(_sampler, duration); ZStatInc(_counter); if (_verbose) { @@ -914,6 +914,10 @@ void ZStatSample(const ZStatSampler& sampler, uint64_t value) { ZTracer::report_stat_sampler(sampler, value); } +void ZStatDurationSample(const ZStatSampler& sampler, const Tickspan& duration) { + ZStatSample(sampler, (uint64_t)duration.value()); +} + void ZStatInc(const ZStatCounter& counter, uint64_t increment) { ZStatCounterData* const cpu_data = counter.get(); const uint64_t value = Atomic::add(&cpu_data->_counter, increment); @@ -1036,7 +1040,7 @@ void ZStat::sample_and_collect(ZStatSamplerHistory* history) const { bool ZStat::should_print(LogTargetHandle log) const { static uint64_t print_at = ZStatisticsInterval; - const uint64_t now = os::elapsedTime(); + const uint64_t now 
= (uint64_t)os::elapsedTime(); if (now < print_at) { return false; @@ -1846,7 +1850,7 @@ void ZStatHeap::at_relocate_end(const ZPageAllocatorStats& stats, bool record_st } size_t ZStatHeap::reclaimed_avg() { - return _reclaimed_bytes.davg(); + return (size_t)_reclaimed_bytes.davg(); } size_t ZStatHeap::max_capacity() { diff --git a/src/hotspot/share/gc/z/zStat.hpp b/src/hotspot/share/gc/z/zStat.hpp index 346773c3b7e..d7482dbe6aa 100644 --- a/src/hotspot/share/gc/z/zStat.hpp +++ b/src/hotspot/share/gc/z/zStat.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -345,6 +345,7 @@ public: // Stat sample/increment // void ZStatSample(const ZStatSampler& sampler, uint64_t value); +void ZStatDurationSample(const ZStatSampler& sampler, const Tickspan& duration); void ZStatInc(const ZStatCounter& counter, uint64_t increment = 1); void ZStatInc(const ZStatUnsampledCounter& counter, uint64_t increment = 1); diff --git a/src/hotspot/share/gc/z/zStoreBarrierBuffer.cpp b/src/hotspot/share/gc/z/zStoreBarrierBuffer.cpp index 537609e723b..c94551dc62d 100644 --- a/src/hotspot/share/gc/z/zStoreBarrierBuffer.cpp +++ b/src/hotspot/share/gc/z/zStoreBarrierBuffer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -79,7 +79,7 @@ void ZStoreBarrierBuffer::install_base_pointers_inner() { (ZPointer::remap_bits(_last_processed_color) & ZPointerRemappedOldMask) == 0, "Should not have double bit errors"); - for (int i = current(); i < (int)_buffer_length; ++i) { + for (size_t i = current(); i < _buffer_length; ++i) { const ZStoreBarrierEntry& entry = _buffer[i]; volatile zpointer* const p = entry._p; const zaddress_unsafe p_unsafe = to_zaddress_unsafe((uintptr_t)p); @@ -141,7 +141,7 @@ static volatile zpointer* make_load_good(volatile zpointer* p, zaddress_unsafe p return (volatile zpointer*)p_remapped; } -void ZStoreBarrierBuffer::on_new_phase_relocate(int i) { +void ZStoreBarrierBuffer::on_new_phase_relocate(size_t i) { const uintptr_t last_remap_bits = ZPointer::remap_bits(_last_processed_color); if (last_remap_bits == ZPointerRemapped) { // All pointers are already remapped @@ -160,7 +160,7 @@ void ZStoreBarrierBuffer::on_new_phase_relocate(int i) { entry._p = make_load_good(entry._p, p_base, _last_processed_color); } -void ZStoreBarrierBuffer::on_new_phase_remember(int i) { +void ZStoreBarrierBuffer::on_new_phase_remember(size_t i) { volatile zpointer* const p = _buffer[i]._p; if (ZHeap::heap()->is_young(p)) { @@ -197,7 +197,7 @@ bool ZStoreBarrierBuffer::stored_during_old_mark() const { return last_mark_old_bits == ZPointerMarkedOld; } -void ZStoreBarrierBuffer::on_new_phase_mark(int i) { +void ZStoreBarrierBuffer::on_new_phase_mark(size_t i) { const ZStoreBarrierEntry& entry = _buffer[i]; const zpointer prev = entry._prev; @@ -229,7 +229,7 @@ void ZStoreBarrierBuffer::on_new_phase() { // Install all base pointers for relocation install_base_pointers(); - for (int i = current(); i < (int)_buffer_length; ++i) { + for (size_t i = current(); i < _buffer_length; ++i) { on_new_phase_relocate(i); on_new_phase_remember(i); on_new_phase_mark(i); @@ -259,8 +259,8 @@ void 
ZStoreBarrierBuffer::on_error(outputStream* st) { st->print_cr(" _last_processed_color: " PTR_FORMAT, _last_processed_color); st->print_cr(" _last_installed_color: " PTR_FORMAT, _last_installed_color); - for (int i = current(); i < (int)_buffer_length; ++i) { - st->print_cr(" [%2d]: base: " PTR_FORMAT " p: " PTR_FORMAT " prev: " PTR_FORMAT, + for (size_t i = current(); i < _buffer_length; ++i) { + st->print_cr(" [%2zu]: base: " PTR_FORMAT " p: " PTR_FORMAT " prev: " PTR_FORMAT, i, untype(_base_pointers[i]), p2i(_buffer[i]._p), @@ -276,7 +276,7 @@ void ZStoreBarrierBuffer::flush() { OnError on_error(this); VMErrorCallbackMark mark(&on_error); - for (int i = current(); i < (int)_buffer_length; ++i) { + for (size_t i = current(); i < _buffer_length; ++i) { const ZStoreBarrierEntry& entry = _buffer[i]; const zaddress addr = ZBarrier::make_load_good(entry._prev); ZBarrier::mark_and_remember(entry._p, addr); @@ -296,7 +296,7 @@ bool ZStoreBarrierBuffer::is_in(volatile zpointer* p) { const uintptr_t last_remap_bits = ZPointer::remap_bits(buffer->_last_processed_color) & ZPointerRemappedMask; const bool needs_remap = last_remap_bits != ZPointerRemapped; - for (int i = buffer->current(); i < (int)_buffer_length; ++i) { + for (size_t i = buffer->current(); i < _buffer_length; ++i) { const ZStoreBarrierEntry& entry = buffer->_buffer[i]; volatile zpointer* entry_p = entry._p; diff --git a/src/hotspot/share/gc/z/zStoreBarrierBuffer.hpp b/src/hotspot/share/gc/z/zStoreBarrierBuffer.hpp index f917a6c3e7b..5903edb6ad4 100644 --- a/src/hotspot/share/gc/z/zStoreBarrierBuffer.hpp +++ b/src/hotspot/share/gc/z/zStoreBarrierBuffer.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -59,16 +59,16 @@ private: // sizeof(ZStoreBarrierEntry) scaled index growing downwards size_t _current; - void on_new_phase_relocate(int i); - void on_new_phase_remember(int i); - void on_new_phase_mark(int i); + void on_new_phase_relocate(size_t i); + void on_new_phase_remember(size_t i); + void on_new_phase_mark(size_t i); void clear(); bool is_old_mark() const; bool stored_during_old_mark() const; bool is_empty() const; - intptr_t current() const; + size_t current() const; void install_base_pointers_inner(); diff --git a/src/hotspot/share/gc/z/zStoreBarrierBuffer.inline.hpp b/src/hotspot/share/gc/z/zStoreBarrierBuffer.inline.hpp index 762aac3ccd5..72327fe8346 100644 --- a/src/hotspot/share/gc/z/zStoreBarrierBuffer.inline.hpp +++ b/src/hotspot/share/gc/z/zStoreBarrierBuffer.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,7 +30,7 @@ #include "gc/z/zThreadLocalData.hpp" #include "runtime/thread.hpp" -inline intptr_t ZStoreBarrierBuffer::current() const { +inline size_t ZStoreBarrierBuffer::current() const { return _current / sizeof(ZStoreBarrierEntry); } diff --git a/src/hotspot/share/gc/z/zUnmapper.cpp b/src/hotspot/share/gc/z/zUnmapper.cpp index 3b2bac7eb00..b6ef40b6b05 100644 --- a/src/hotspot/share/gc/z/zUnmapper.cpp +++ b/src/hotspot/share/gc/z/zUnmapper.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -85,7 +85,7 @@ bool ZUnmapper::try_enqueue(ZPage* page) { } size_t ZUnmapper::queue_capacity() const { - return align_up(_page_allocator->max_capacity() * ZAsyncUnmappingLimit / 100.0, ZGranuleSize); + return align_up((size_t)(_page_allocator->max_capacity() * ZAsyncUnmappingLimit / 100.0), ZGranuleSize); } bool ZUnmapper::is_saturated() const { diff --git a/src/hotspot/share/gc/z/zVerify.cpp b/src/hotspot/share/gc/z/zVerify.cpp index fba8adfb3c1..d47886ec7c2 100644 --- a/src/hotspot/share/gc/z/zVerify.cpp +++ b/src/hotspot/share/gc/z/zVerify.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -583,7 +583,7 @@ void ZVerify::on_color_flip() { for (JavaThreadIteratorWithHandle jtiwh; JavaThread* const jt = jtiwh.next(); ) { const ZStoreBarrierBuffer* const buffer = ZThreadLocalData::store_barrier_buffer(jt); - for (int i = buffer->current(); i < (int)ZStoreBarrierBuffer::_buffer_length; ++i) { + for (size_t i = buffer->current(); i < ZStoreBarrierBuffer::_buffer_length; ++i) { volatile zpointer* const p = buffer->_buffer[i]._p; bool created = false; z_verify_store_barrier_buffer_table->put_if_absent(p, true, &created);