diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp index 94c544a7ea3..823e7245f31 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp @@ -1,6 +1,7 @@ /* * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved. * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -96,8 +97,8 @@ void ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(Shenand size_t free_target = (capacity / 100 * ShenandoahMinFreeThreshold) + max_cset; size_t min_garbage = (free_target > actual_free ? (free_target - actual_free) : 0); - log_info(gc, ergo)("Adaptive CSet Selection. Target Free: " SIZE_FORMAT "%s, Actual Free: " - SIZE_FORMAT "%s, Max Evacuation: " SIZE_FORMAT "%s, Min Garbage: " SIZE_FORMAT "%s", + log_info(gc, ergo)("Adaptive CSet Selection. Target Free: %zu%s, Actual Free: " + "%zu%s, Max Evacuation: %zu%s, Min Garbage: %zu%s", byte_size_in_proper_unit(free_target), proper_unit_for_byte_size(free_target), byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free), byte_size_in_proper_unit(max_cset), proper_unit_for_byte_size(max_cset), @@ -142,7 +143,7 @@ void ShenandoahAdaptiveHeuristics::record_success_concurrent() { if (available_sd > 0) { double available_avg = _available.avg(); z_score = (double(available) - available_avg) / available_sd; - log_debug(gc, ergo)("Available: " SIZE_FORMAT " %sB, z-score=%.3f. Average available: %.1f %sB +/- %.1f %sB.", + log_debug(gc, ergo)("Available: %zu %sB, z-score=%.3f. 
Average available: %.1f %sB +/- %.1f %sB.", byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), z_score, byte_size_in_proper_unit(available_avg), proper_unit_for_byte_size(available_avg), @@ -237,8 +238,8 @@ bool ShenandoahAdaptiveHeuristics::should_start_gc() { size_t available = _space_info->soft_available(); size_t allocated = _space_info->bytes_allocated_since_gc_start(); - log_debug(gc)("should_start_gc? available: " SIZE_FORMAT ", soft_max_capacity: " SIZE_FORMAT - ", allocated: " SIZE_FORMAT, available, capacity, allocated); + log_debug(gc)("should_start_gc? available: %zu, soft_max_capacity: %zu" + ", allocated: %zu", available, capacity, allocated); // Track allocation rate even if we decide to start a cycle for other reasons. double rate = _allocation_rate.sample(allocated); @@ -246,7 +247,7 @@ bool ShenandoahAdaptiveHeuristics::should_start_gc() { size_t min_threshold = min_free_threshold(); if (available < min_threshold) { - log_trigger("Free (" SIZE_FORMAT "%s) is below minimum threshold (" SIZE_FORMAT "%s)", + log_trigger("Free (%zu%s) is below minimum threshold (%zu%s)", byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), byte_size_in_proper_unit(min_threshold), proper_unit_for_byte_size(min_threshold)); return true; @@ -257,7 +258,7 @@ bool ShenandoahAdaptiveHeuristics::should_start_gc() { if (_gc_times_learned < max_learn) { size_t init_threshold = capacity / 100 * ShenandoahInitFreeThreshold; if (available < init_threshold) { - log_trigger("Learning " SIZE_FORMAT " of " SIZE_FORMAT ". Free (" SIZE_FORMAT "%s) is below initial threshold (" SIZE_FORMAT "%s)", + log_trigger("Learning %zu of %zu. 
Free (%zu%s) is below initial threshold (%zu%s)", _gc_times_learned + 1, max_learn, byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), byte_size_in_proper_unit(init_threshold), proper_unit_for_byte_size(init_threshold)); @@ -282,12 +283,12 @@ bool ShenandoahAdaptiveHeuristics::should_start_gc() { avg_cycle_time * 1000, byte_size_in_proper_unit(avg_alloc_rate), proper_unit_for_byte_size(avg_alloc_rate)); if (avg_cycle_time * avg_alloc_rate > allocation_headroom) { log_trigger("Average GC time (%.2f ms) is above the time for average allocation rate (%.0f %sB/s)" - " to deplete free headroom (" SIZE_FORMAT "%s) (margin of error = %.2f)", + " to deplete free headroom (%zu%s) (margin of error = %.2f)", avg_cycle_time * 1000, byte_size_in_proper_unit(avg_alloc_rate), proper_unit_for_byte_size(avg_alloc_rate), byte_size_in_proper_unit(allocation_headroom), proper_unit_for_byte_size(allocation_headroom), _margin_of_error_sd); - log_info(gc, ergo)("Free headroom: " SIZE_FORMAT "%s (free) - " SIZE_FORMAT "%s (spike) - " SIZE_FORMAT "%s (penalties) = " SIZE_FORMAT "%s", + log_info(gc, ergo)("Free headroom: %zu%s (free) - %zu%s (spike) - %zu%s (penalties) = %zu%s", byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), byte_size_in_proper_unit(spike_headroom), proper_unit_for_byte_size(spike_headroom), byte_size_in_proper_unit(penalties), proper_unit_for_byte_size(penalties), @@ -298,7 +299,7 @@ bool ShenandoahAdaptiveHeuristics::should_start_gc() { bool is_spiking = _allocation_rate.is_spiking(rate, _spike_threshold_sd); if (is_spiking && avg_cycle_time > allocation_headroom / rate) { - log_trigger("Average GC time (%.2f ms) is above the time for instantaneous allocation rate (%.0f %sB/s) to deplete free headroom (" SIZE_FORMAT "%s) (spike threshold = %.2f)", + log_trigger("Average GC time (%.2f ms) is above the time for instantaneous allocation rate (%.0f %sB/s) to deplete free headroom (%zu%s) (spike threshold = %.2f)", 
avg_cycle_time * 1000, byte_size_in_proper_unit(rate), proper_unit_for_byte_size(rate), byte_size_in_proper_unit(allocation_headroom), proper_unit_for_byte_size(allocation_headroom), diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp index 2c7594e10dc..efccfbb9c83 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved. + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -58,7 +59,7 @@ bool ShenandoahCompactHeuristics::should_start_gc() { size_t min_threshold = capacity / 100 * ShenandoahMinFreeThreshold; if (available < min_threshold) { - log_trigger("Free (" SIZE_FORMAT "%s) is below minimum threshold (" SIZE_FORMAT "%s)", + log_trigger("Free (%zu%s) is below minimum threshold (%zu%s)", byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), byte_size_in_proper_unit(min_threshold), proper_unit_for_byte_size(min_threshold)); return true; @@ -66,7 +67,7 @@ bool ShenandoahCompactHeuristics::should_start_gc() { size_t bytes_allocated = _space_info->bytes_allocated_since_gc_start(); if (bytes_allocated > threshold_bytes_allocated) { - log_trigger("Allocated since last cycle (" SIZE_FORMAT "%s) is larger than allocation threshold (" SIZE_FORMAT "%s)", + log_trigger("Allocated since last cycle (%zu%s) is larger than allocation threshold (%zu%s)", byte_size_in_proper_unit(bytes_allocated), proper_unit_for_byte_size(bytes_allocated), byte_size_in_proper_unit(threshold_bytes_allocated), proper_unit_for_byte_size(threshold_bytes_allocated)); return true; @@ -81,7 +82,7 @@ void 
ShenandoahCompactHeuristics::choose_collection_set_from_regiondata(Shenando // Do not select too large CSet that would overflow the available free space size_t max_cset = actual_free * 3 / 4; - log_info(gc, ergo)("CSet Selection. Actual Free: " SIZE_FORMAT "%s, Max CSet: " SIZE_FORMAT "%s", + log_info(gc, ergo)("CSet Selection. Actual Free: %zu%s, Max CSet: %zu%s", byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free), byte_size_in_proper_unit(max_cset), proper_unit_for_byte_size(max_cset)); diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp index 5b6d82d97a4..3249df7f29a 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp @@ -1,5 +1,6 @@ /* * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -134,7 +135,7 @@ void ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectio bool reg_live = region->has_live(); bool bm_live = heap->complete_marking_context()->is_marked(cast_to_oop(region->bottom())); assert(reg_live == bm_live, - "Humongous liveness and marks should agree. Region live: %s; Bitmap live: %s; Region Live Words: " SIZE_FORMAT, + "Humongous liveness and marks should agree. 
Region live: %s; Bitmap live: %s; Region Live Words: %zu", BOOL_TO_STR(reg_live), BOOL_TO_STR(bm_live), region->get_live_data_words()); #endif if (!region->has_live()) { @@ -158,8 +159,8 @@ void ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectio } heap->old_generation()->set_expected_humongous_region_promotions(humongous_regions_promoted); heap->old_generation()->set_expected_regular_region_promotions(regular_regions_promoted_in_place); - log_info(gc, ergo)("Planning to promote in place " SIZE_FORMAT " humongous regions and " SIZE_FORMAT - " regular regions, spanning a total of " SIZE_FORMAT " used bytes", + log_info(gc, ergo)("Planning to promote in place %zu humongous regions and %zu" + " regular regions, spanning a total of %zu used bytes", humongous_regions_promoted, regular_regions_promoted_in_place, humongous_regions_promoted * ShenandoahHeapRegion::region_size_bytes() + regular_regions_promoted_usage); @@ -168,7 +169,7 @@ void ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectio // given the amount of immediately reclaimable garbage. If we do, figure out the collection set. assert (immediate_garbage <= total_garbage, - "Cannot have more immediate garbage than total garbage: " SIZE_FORMAT "%s vs " SIZE_FORMAT "%s", + "Cannot have more immediate garbage than total garbage: %zu%s vs %zu%s", byte_size_in_proper_unit(immediate_garbage), proper_unit_for_byte_size(immediate_garbage), byte_size_in_proper_unit(total_garbage), proper_unit_for_byte_size(total_garbage)); @@ -193,9 +194,9 @@ void ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectio size_t collectable_garbage = collection_set->garbage() + immediate_garbage; size_t collectable_garbage_percent = (total_garbage == 0) ? 
0 : (collectable_garbage * 100 / total_garbage); - log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "%s (" SIZE_FORMAT "%%), " - "Immediate: " SIZE_FORMAT "%s (" SIZE_FORMAT "%%), " SIZE_FORMAT " regions, " - "CSet: " SIZE_FORMAT "%s (" SIZE_FORMAT "%%), " SIZE_FORMAT " regions", + log_info(gc, ergo)("Collectable Garbage: %zu%s (%zu%%), " + "Immediate: %zu%s (%zu%%), %zu regions, " + "CSet: %zu%s (%zu%%), %zu regions", byte_size_in_proper_unit(collectable_garbage), proper_unit_for_byte_size(collectable_garbage), @@ -216,10 +217,10 @@ void ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectio size_t promote_evac_bytes = collection_set->get_young_bytes_to_be_promoted(); size_t old_evac_bytes = collection_set->get_old_bytes_reserved_for_evacuation(); size_t total_evac_bytes = young_evac_bytes + promote_evac_bytes + old_evac_bytes; - log_info(gc, ergo)("Evacuation Targets: YOUNG: " SIZE_FORMAT "%s, " - "PROMOTE: " SIZE_FORMAT "%s, " - "OLD: " SIZE_FORMAT "%s, " - "TOTAL: " SIZE_FORMAT "%s", + log_info(gc, ergo)("Evacuation Targets: YOUNG: %zu%s, " + "PROMOTE: %zu%s, " + "OLD: %zu%s, " + "TOTAL: %zu%s", byte_size_in_proper_unit(young_evac_bytes), proper_unit_for_byte_size(young_evac_bytes), byte_size_in_proper_unit(promote_evac_bytes), proper_unit_for_byte_size(promote_evac_bytes), byte_size_in_proper_unit(old_evac_bytes), proper_unit_for_byte_size(old_evac_bytes), @@ -282,8 +283,8 @@ void ShenandoahGenerationalHeuristics::log_cset_composition(ShenandoahCollection size_t collected_young = cset->get_young_bytes_reserved_for_evacuation(); log_info(gc, ergo)( - "Chosen CSet evacuates young: " SIZE_FORMAT "%s (of which at least: " SIZE_FORMAT "%s are to be promoted), " - "old: " SIZE_FORMAT "%s", + "Chosen CSet evacuates young: %zu%s (of which at least: %zu%s are to be promoted), " + "old: %zu%s", byte_size_in_proper_unit(collected_young), proper_unit_for_byte_size(collected_young), byte_size_in_proper_unit(collected_promoted), 
proper_unit_for_byte_size(collected_promoted), byte_size_in_proper_unit(collected_old), proper_unit_for_byte_size(collected_old)); diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp index 4c1e6b7bdff..aeaf74fffff 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp @@ -1,5 +1,6 @@ /* * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -92,8 +93,8 @@ void ShenandoahGlobalHeuristics::choose_global_collection_set(ShenandoahCollecti size_t free_target = (capacity * ShenandoahMinFreeThreshold) / 100 + max_young_cset; size_t min_garbage = (free_target > actual_free) ? (free_target - actual_free) : 0; - log_info(gc, ergo)("Adaptive CSet Selection for GLOBAL. Max Young Evacuation: " SIZE_FORMAT - "%s, Max Old Evacuation: " SIZE_FORMAT "%s, Actual Free: " SIZE_FORMAT "%s.", + log_info(gc, ergo)("Adaptive CSet Selection for GLOBAL. 
Max Young Evacuation: %zu" + "%s, Max Old Evacuation: %zu%s, Actual Free: %zu%s.", byte_size_in_proper_unit(max_young_cset), proper_unit_for_byte_size(max_young_cset), byte_size_in_proper_unit(max_old_cset), proper_unit_for_byte_size(max_old_cset), byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free)); diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.cpp index 4aac8263bc5..783086cd398 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.cpp @@ -122,7 +122,7 @@ void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collec bool reg_live = region->has_live(); bool bm_live = ctx->is_marked(cast_to_oop(region->bottom())); assert(reg_live == bm_live, - "Humongous liveness and marks should agree. Region live: %s; Bitmap live: %s; Region Live Words: " SIZE_FORMAT, + "Humongous liveness and marks should agree. Region live: %s; Bitmap live: %s; Region Live Words: %zu", BOOL_TO_STR(reg_live), BOOL_TO_STR(bm_live), region->get_live_data_words()); #endif if (!region->has_live()) { @@ -143,7 +143,7 @@ void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collec // given the amount of immediately reclaimable garbage. If we do, figure out the collection set. 
assert (immediate_garbage <= total_garbage, - "Cannot have more immediate garbage than total garbage: " SIZE_FORMAT "%s vs " SIZE_FORMAT "%s", + "Cannot have more immediate garbage than total garbage: %zu%s vs %zu%s", byte_size_in_proper_unit(immediate_garbage), proper_unit_for_byte_size(immediate_garbage), byte_size_in_proper_unit(total_garbage), proper_unit_for_byte_size(total_garbage)); @@ -157,9 +157,9 @@ void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collec size_t collectable_garbage = collection_set->garbage() + immediate_garbage; size_t collectable_garbage_percent = (total_garbage == 0) ? 0 : (collectable_garbage * 100 / total_garbage); - log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "%s (" SIZE_FORMAT "%%), " - "Immediate: " SIZE_FORMAT "%s (" SIZE_FORMAT "%%), " SIZE_FORMAT " regions, " - "CSet: " SIZE_FORMAT "%s (" SIZE_FORMAT "%%), " SIZE_FORMAT " regions", + log_info(gc, ergo)("Collectable Garbage: %zu%s (%zu%%), " + "Immediate: %zu%s (%zu%%), %zu regions, " + "CSet: %zu%s (%zu%%), %zu regions", byte_size_in_proper_unit(collectable_garbage), proper_unit_for_byte_size(collectable_garbage), diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp index abb2b7b266a..052181a8c61 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp @@ -1,5 +1,6 @@ /* * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -122,7 +123,7 @@ bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* coll } size_t remaining_old_evacuation_budget = old_evacuation_budget; - log_debug(gc)("Choose old regions for mixed collection: old evacuation budget: " SIZE_FORMAT "%s, candidates: %u", + log_debug(gc)("Choose old regions for mixed collection: old evacuation budget: %zu%s, candidates: %u", byte_size_in_proper_unit(old_evacuation_budget), proper_unit_for_byte_size(old_evacuation_budget), unprocessed_old_collection_candidates()); @@ -362,7 +363,7 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() { immediate_regions++; immediate_garbage += garbage; size_t region_count = heap->trash_humongous_region_at(region); - log_debug(gc)("Trashed " SIZE_FORMAT " regions for humongous object.", region_count); + log_debug(gc)("Trashed %zu regions for humongous object.", region_count); } } else if (region->is_trash()) { // Count humongous objects made into trash here. 
@@ -443,7 +444,7 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() { while ((defrag_count < bound_on_additional_regions) && (total_uncollected_old_regions < 7 * span_of_uncollected_regions / 8)) { ShenandoahHeapRegion* r = candidates[_last_old_collection_candidate].get_region(); - assert(r->is_regular() || r->is_regular_pinned(), "Region " SIZE_FORMAT " has wrong state for collection: %s", + assert(r->is_regular() || r->is_regular_pinned(), "Region %zu has wrong state for collection: %s", r->index(), ShenandoahHeapRegion::region_state_to_string(r->state())); const size_t region_garbage = r->garbage(); const size_t region_free = r->free(); @@ -466,12 +467,12 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() { const size_t mixed_evac_live = old_candidates * region_size_bytes - (candidates_garbage + unfragmented); set_unprocessed_old_collection_candidates_live_memory(mixed_evac_live); - log_info(gc, ergo)("Old-Gen Collectable Garbage: " PROPERFMT " consolidated with free: " PROPERFMT ", over " SIZE_FORMAT " regions", + log_info(gc, ergo)("Old-Gen Collectable Garbage: " PROPERFMT " consolidated with free: " PROPERFMT ", over %zu regions", PROPERFMTARGS(collectable_garbage), PROPERFMTARGS(unfragmented), old_candidates); - log_info(gc, ergo)("Old-Gen Immediate Garbage: " PROPERFMT " over " SIZE_FORMAT " regions", + log_info(gc, ergo)("Old-Gen Immediate Garbage: " PROPERFMT " over %zu regions", PROPERFMTARGS(immediate_garbage), immediate_regions); - log_info(gc, ergo)("Old regions selected for defragmentation: " SIZE_FORMAT, defrag_count); - log_info(gc, ergo)("Old regions not selected: " SIZE_FORMAT, total_uncollected_old_regions); + log_info(gc, ergo)("Old regions selected for defragmentation: %zu", defrag_count); + log_info(gc, ergo)("Old regions not selected: %zu", total_uncollected_old_regions); if (unprocessed_old_collection_candidates() > 0) { _old_generation->transition_to(ShenandoahOldGeneration::EVACUATING); @@ -608,7 +609,7 @@ void 
ShenandoahOldHeuristics::set_trigger_if_old_is_overgrown() { size_t trigger_threshold = _old_gen->usage_trigger_threshold(); // Detects unsigned arithmetic underflow assert(old_used <= _heap->capacity(), - "Old used (" SIZE_FORMAT ", " SIZE_FORMAT") must not be more than heap capacity (" SIZE_FORMAT ")", + "Old used (%zu, %zu) must not be more than heap capacity (%zu)", _old_gen->used(), _old_gen->get_humongous_waste(), _heap->capacity()); if (old_used > trigger_threshold) { _growth_trigger = true; @@ -635,7 +636,7 @@ bool ShenandoahOldHeuristics::should_start_gc() { const size_t old_gen_capacity = _old_generation->max_capacity(); const size_t heap_capacity = heap->capacity(); const double percent = percent_of(old_gen_capacity, heap_capacity); - log_trigger("Expansion failure, current size: " SIZE_FORMAT "%s which is %.1f%% of total heap size", + log_trigger("Expansion failure, current size: %zu%s which is %.1f%% of total heap size", byte_size_in_proper_unit(old_gen_capacity), proper_unit_for_byte_size(old_gen_capacity), percent); return true; } @@ -655,8 +656,8 @@ bool ShenandoahOldHeuristics::should_start_gc() { const size_t fragmented_free = used_regions_size - used; log_trigger("Old has become fragmented: " - SIZE_FORMAT "%s available bytes spread between range spanned from " - SIZE_FORMAT " to " SIZE_FORMAT " (" SIZE_FORMAT "), density: %.1f%%", + "%zu%s available bytes spread between range spanned from " + "%zu to %zu (%zu), density: %.1f%%", byte_size_in_proper_unit(fragmented_free), proper_unit_for_byte_size(fragmented_free), first_old_region, last_old_region, span_of_old_regions, density * 100); return true; @@ -673,8 +674,8 @@ bool ShenandoahOldHeuristics::should_start_gc() { if ((current_usage < ignore_threshold) && ((consecutive_young_cycles = heap->shenandoah_policy()->consecutive_young_gc_count()) < ShenandoahDoNotIgnoreGrowthAfterYoungCycles)) { - log_debug(gc)("Ignoring Trigger: Old has overgrown: usage (" SIZE_FORMAT "%s) is below threshold (" - 
SIZE_FORMAT "%s) after " SIZE_FORMAT " consecutive completed young GCs", + log_debug(gc)("Ignoring Trigger: Old has overgrown: usage (%zu%s) is below threshold (" + "%zu%s) after %zu consecutive completed young GCs", byte_size_in_proper_unit(current_usage), proper_unit_for_byte_size(current_usage), byte_size_in_proper_unit(ignore_threshold), proper_unit_for_byte_size(ignore_threshold), consecutive_young_cycles); @@ -683,7 +684,7 @@ bool ShenandoahOldHeuristics::should_start_gc() { const size_t live_at_previous_old = _old_generation->get_live_bytes_after_last_mark(); const double percent_growth = percent_of(current_usage - live_at_previous_old, live_at_previous_old); log_trigger("Old has overgrown, live at end of previous OLD marking: " - SIZE_FORMAT "%s, current usage: " SIZE_FORMAT "%s, percent growth: %.1f%%", + "%zu%s, current usage: %zu%s, percent growth: %.1f%%", byte_size_in_proper_unit(live_at_previous_old), proper_unit_for_byte_size(live_at_previous_old), byte_size_in_proper_unit(current_usage), proper_unit_for_byte_size(current_usage), percent_growth); return true; diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.cpp index 7c65482d8c4..11de43a4081 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved. + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -60,7 +61,7 @@ void ShenandoahPassiveHeuristics::choose_collection_set_from_regiondata(Shenando size_t available = MAX2(max_capacity / 100 * ShenandoahEvacReserve, actual_free); size_t max_cset = (size_t)(available / ShenandoahEvacWaste); - log_info(gc, ergo)("CSet Selection. Actual Free: " SIZE_FORMAT "%s, Max CSet: " SIZE_FORMAT "%s", + log_info(gc, ergo)("CSet Selection. Actual Free: %zu%s, Max CSet: %zu%s", byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free), byte_size_in_proper_unit(max_cset), proper_unit_for_byte_size(max_cset)); diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp index db179d0a80a..70e732d79d8 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved. + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -52,7 +53,7 @@ bool ShenandoahStaticHeuristics::should_start_gc() { size_t threshold_available = capacity / 100 * ShenandoahMinFreeThreshold; if (available < threshold_available) { - log_trigger("Free (" SIZE_FORMAT "%s) is below minimum threshold (" SIZE_FORMAT "%s)", + log_trigger("Free (%zu%s) is below minimum threshold (%zu%s)", byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), byte_size_in_proper_unit(threshold_available), proper_unit_for_byte_size(threshold_available)); return true; diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.cpp index ced406611b1..f2cebd28f95 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.cpp @@ -1,5 +1,6 @@ /* * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -82,7 +83,7 @@ void ShenandoahYoungHeuristics::choose_young_collection_set(ShenandoahCollection log_info(gc, ergo)( - "Adaptive CSet Selection for YOUNG. Max Evacuation: " SIZE_FORMAT "%s, Actual Free: " SIZE_FORMAT "%s.", + "Adaptive CSet Selection for YOUNG. Max Evacuation: %zu%s, Actual Free: %zu%s.", byte_size_in_proper_unit(max_cset), proper_unit_for_byte_size(max_cset), byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free)); @@ -150,7 +151,7 @@ bool ShenandoahYoungHeuristics::should_start_gc() { // If concurrent weak root processing is in progress, it means the old cycle has chosen mixed collection // candidates, but has not completed. 
There is no point in trying to start the young cycle before the old // cycle completes. - log_trigger("Expedite mixed evacuation of " SIZE_FORMAT " regions", mixed_candidates); + log_trigger("Expedite mixed evacuation of %zu regions", mixed_candidates); return true; } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahAgeCensus.cpp b/src/hotspot/share/gc/shenandoah/shenandoahAgeCensus.cpp index a4c78c637d9..dc399bd2af9 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahAgeCensus.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahAgeCensus.cpp @@ -33,8 +33,8 @@ ShenandoahAgeCensus::ShenandoahAgeCensus() { assert(ShenandoahHeap::heap()->mode()->is_generational(), "Only in generational mode"); if (ShenandoahGenerationalMinTenuringAge > ShenandoahGenerationalMaxTenuringAge) { vm_exit_during_initialization( - err_msg("ShenandoahGenerationalMinTenuringAge=" SIZE_FORMAT - " should be no more than ShenandoahGenerationalMaxTenuringAge=" SIZE_FORMAT, + err_msg("ShenandoahGenerationalMinTenuringAge=%zu" + " should be no more than ShenandoahGenerationalMaxTenuringAge=%zu", ShenandoahGenerationalMinTenuringAge, ShenandoahGenerationalMaxTenuringAge)); } @@ -327,7 +327,7 @@ double ShenandoahAgeCensus::mortality_rate(size_t prev_pop, size_t cur_pop) { // or increased. 
if (cur_pop > prev_pop) { log_trace(gc, age) - (" (dark matter) Cohort population " SIZE_FORMAT_W(10) " to " SIZE_FORMAT_W(10), + (" (dark matter) Cohort population %10zu to %10zu", prev_pop*oopSize, cur_pop*oopSize); } return 0.0; @@ -356,7 +356,7 @@ void ShenandoahAgeCensus::print() { // Suppress printing when everything is zero if (prev_pop + cur_pop > 0) { log_info(gc, age) - (" - age %3u: prev " SIZE_FORMAT_W(10) " bytes, curr " SIZE_FORMAT_W(10) " bytes, mortality %.2f ", + (" - age %3u: prev %10zu bytes, curr %10zu bytes, mortality %.2f ", i, prev_pop*oopSize, cur_pop*oopSize, mr); } total += cur_pop; @@ -375,8 +375,8 @@ void ShenandoahNoiseStats::print(size_t total) { float f_aged = (float)aged/(float)total; float f_clamped = (float)clamped/(float)total; float f_young = (float)young/(float)total; - log_info(gc, age)("Skipped: " SIZE_FORMAT_W(10) " (%.2f), R-Aged: " SIZE_FORMAT_W(10) " (%.2f), " - "Clamped: " SIZE_FORMAT_W(10) " (%.2f), R-Young: " SIZE_FORMAT_W(10) " (%.2f)", + log_info(gc, age)("Skipped: %10zu (%.2f), R-Aged: %10zu (%.2f), " + "Clamped: %10zu (%.2f), R-Young: %10zu (%.2f)", skipped*oopSize, f_skipped, aged*oopSize, f_aged, clamped*oopSize, f_clamped, young*oopSize, f_young); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp b/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp index fa3f9019af4..8723e2632fe 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp @@ -1,6 +1,7 @@ /* * Copyright (c) 2018, 2022, Red Hat, Inc. All rights reserved. * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -60,7 +61,7 @@ void ShenandoahArguments::initialize() { if (UseLargePages) { size_t large_page_size = os::large_page_size(); if ((align_up(MaxHeapSize, large_page_size) / large_page_size) < ShenandoahHeapRegion::MIN_NUM_REGIONS) { - warning("Large pages size (" SIZE_FORMAT "K) is too large to afford page-sized regions, disabling uncommit", + warning("Large pages size (%zuK) is too large to afford page-sized regions, disabling uncommit", os::large_page_size() / K); FLAG_SET_DEFAULT(ShenandoahUncommit, false); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCardStats.cpp b/src/hotspot/share/gc/shenandoah/shenandoahCardStats.cpp index ef2d6e134b2..df63f4440de 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahCardStats.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahCardStats.cpp @@ -1,5 +1,6 @@ /* * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -31,9 +32,9 @@ #ifndef PRODUCT void ShenandoahCardStats::log() const { if (ShenandoahEnableCardStats) { - log_info(gc,remset)("Card stats: dirty " SIZE_FORMAT " (max run: " SIZE_FORMAT ")," - " clean " SIZE_FORMAT " (max run: " SIZE_FORMAT ")," - " dirty scans/objs " SIZE_FORMAT, + log_info(gc,remset)("Card stats: dirty %zu (max run: %zu)," + " clean %zu (max run: %zu)," + " dirty scans/objs %zu", _dirty_card_cnt, _max_dirty_run, _clean_card_cnt, _max_clean_run, _dirty_scan_obj_cnt); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp index 4d521d2e7f0..29818e39b91 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp @@ -1,6 +1,6 @@ /* * Copyright (c) 2016, 2023, Red Hat, Inc. All rights reserved. - * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -186,7 +186,7 @@ ShenandoahHeapRegion* ShenandoahCollectionSet::next() { void ShenandoahCollectionSet::print_on(outputStream* out) const { out->print_cr("Collection Set: Regions: " - SIZE_FORMAT ", Garbage: " SIZE_FORMAT "%s, Live: " SIZE_FORMAT "%s, Used: " SIZE_FORMAT "%s", count(), + "%zu, Garbage: %zu%s, Live: %zu%s, Used: %zu%s", count(), byte_size_in_proper_unit(garbage()), proper_unit_for_byte_size(garbage()), byte_size_in_proper_unit(live()), proper_unit_for_byte_size(live()), byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used())); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp index 42500ae8778..76f2d5c2529 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp @@ -1,6 +1,7 @@ /* * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved. * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -179,7 +180,7 @@ void ShenandoahCollectorPolicy::print_gc_stats(outputStream* out) const { out->cr(); size_t completed_gcs = _success_full_gcs + _success_degenerated_gcs + _success_concurrent_gcs + _success_old_gcs; - out->print_cr(SIZE_FORMAT_W(5) " Completed GCs", completed_gcs); + out->print_cr("%5zu Completed GCs", completed_gcs); size_t explicit_requests = 0; size_t implicit_requests = 0; @@ -193,48 +194,48 @@ void ShenandoahCollectorPolicy::print_gc_stats(outputStream* out) const { implicit_requests += cause_count; } const char* desc = GCCause::to_string(cause); - out->print_cr(" " SIZE_FORMAT_W(5) " caused by %s (%.2f%%)", cause_count, desc, percent_of(cause_count, completed_gcs)); + out->print_cr(" %5zu caused by %s (%.2f%%)", cause_count, desc, percent_of(cause_count, completed_gcs)); } } out->cr(); - out->print_cr(SIZE_FORMAT_W(5) " Successful Concurrent GCs (%.2f%%)", _success_concurrent_gcs, percent_of(_success_concurrent_gcs, completed_gcs)); + out->print_cr("%5zu Successful Concurrent GCs (%.2f%%)", _success_concurrent_gcs, percent_of(_success_concurrent_gcs, completed_gcs)); if (ExplicitGCInvokesConcurrent) { - out->print_cr(" " SIZE_FORMAT_W(5) " invoked explicitly (%.2f%%)", explicit_requests, percent_of(explicit_requests, _success_concurrent_gcs)); + out->print_cr(" %5zu invoked explicitly (%.2f%%)", explicit_requests, percent_of(explicit_requests, _success_concurrent_gcs)); } if (ShenandoahImplicitGCInvokesConcurrent) { - out->print_cr(" " SIZE_FORMAT_W(5) " invoked implicitly (%.2f%%)", implicit_requests, percent_of(implicit_requests, _success_concurrent_gcs)); + out->print_cr(" %5zu invoked implicitly (%.2f%%)", implicit_requests, percent_of(implicit_requests, _success_concurrent_gcs)); } - out->print_cr(" " SIZE_FORMAT_W(5) " abbreviated (%.2f%%)", _abbreviated_concurrent_gcs, percent_of(_abbreviated_concurrent_gcs, _success_concurrent_gcs)); + out->print_cr(" %5zu 
abbreviated (%.2f%%)", _abbreviated_concurrent_gcs, percent_of(_abbreviated_concurrent_gcs, _success_concurrent_gcs)); out->cr(); if (ShenandoahHeap::heap()->mode()->is_generational()) { - out->print_cr(SIZE_FORMAT_W(5) " Completed Old GCs (%.2f%%)", _success_old_gcs, percent_of(_success_old_gcs, completed_gcs)); - out->print_cr(" " SIZE_FORMAT_W(5) " mixed", _mixed_gcs); - out->print_cr(" " SIZE_FORMAT_W(5) " interruptions", _interrupted_old_gcs); + out->print_cr("%5zu Completed Old GCs (%.2f%%)", _success_old_gcs, percent_of(_success_old_gcs, completed_gcs)); + out->print_cr(" %5zu mixed", _mixed_gcs); + out->print_cr(" %5zu interruptions", _interrupted_old_gcs); out->cr(); } size_t degenerated_gcs = _alloc_failure_degenerated_upgrade_to_full + _success_degenerated_gcs; - out->print_cr(SIZE_FORMAT_W(5) " Degenerated GCs (%.2f%%)", degenerated_gcs, percent_of(degenerated_gcs, completed_gcs)); - out->print_cr(" " SIZE_FORMAT_W(5) " upgraded to Full GC (%.2f%%)", _alloc_failure_degenerated_upgrade_to_full, percent_of(_alloc_failure_degenerated_upgrade_to_full, degenerated_gcs)); - out->print_cr(" " SIZE_FORMAT_W(5) " caused by allocation failure (%.2f%%)", _alloc_failure_degenerated, percent_of(_alloc_failure_degenerated, degenerated_gcs)); - out->print_cr(" " SIZE_FORMAT_W(5) " abbreviated (%.2f%%)", _abbreviated_degenerated_gcs, percent_of(_abbreviated_degenerated_gcs, degenerated_gcs)); + out->print_cr("%5zu Degenerated GCs (%.2f%%)", degenerated_gcs, percent_of(degenerated_gcs, completed_gcs)); + out->print_cr(" %5zu upgraded to Full GC (%.2f%%)", _alloc_failure_degenerated_upgrade_to_full, percent_of(_alloc_failure_degenerated_upgrade_to_full, degenerated_gcs)); + out->print_cr(" %5zu caused by allocation failure (%.2f%%)", _alloc_failure_degenerated, percent_of(_alloc_failure_degenerated, degenerated_gcs)); + out->print_cr(" %5zu abbreviated (%.2f%%)", _abbreviated_degenerated_gcs, percent_of(_abbreviated_degenerated_gcs, degenerated_gcs)); for (int c = 0; c < 
ShenandoahGC::_DEGENERATED_LIMIT; c++) { if (_degen_point_counts[c] > 0) { const char* desc = ShenandoahGC::degen_point_to_string((ShenandoahGC::ShenandoahDegenPoint)c); - out->print_cr(" " SIZE_FORMAT_W(5) " happened at %s", _degen_point_counts[c], desc); + out->print_cr(" %5zu happened at %s", _degen_point_counts[c], desc); } } out->cr(); - out->print_cr(SIZE_FORMAT_W(5) " Full GCs (%.2f%%)", _success_full_gcs, percent_of(_success_full_gcs, completed_gcs)); + out->print_cr("%5zu Full GCs (%.2f%%)", _success_full_gcs, percent_of(_success_full_gcs, completed_gcs)); if (!ExplicitGCInvokesConcurrent) { - out->print_cr(" " SIZE_FORMAT_W(5) " invoked explicitly (%.2f%%)", explicit_requests, percent_of(explicit_requests, _success_concurrent_gcs)); + out->print_cr(" %5zu invoked explicitly (%.2f%%)", explicit_requests, percent_of(explicit_requests, _success_concurrent_gcs)); } if (!ShenandoahImplicitGCInvokesConcurrent) { - out->print_cr(" " SIZE_FORMAT_W(5) " invoked implicitly (%.2f%%)", implicit_requests, percent_of(implicit_requests, _success_concurrent_gcs)); + out->print_cr(" %5zu invoked implicitly (%.2f%%)", implicit_requests, percent_of(implicit_requests, _success_concurrent_gcs)); } - out->print_cr(" " SIZE_FORMAT_W(5) " caused by allocation failure (%.2f%%)", _alloc_failure_full, percent_of(_alloc_failure_full, _success_full_gcs)); - out->print_cr(" " SIZE_FORMAT_W(5) " upgraded from Degenerated GC (%.2f%%)", _alloc_failure_degenerated_upgrade_to_full, percent_of(_alloc_failure_degenerated_upgrade_to_full, _success_full_gcs)); + out->print_cr(" %5zu caused by allocation failure (%.2f%%)", _alloc_failure_full, percent_of(_alloc_failure_full, _success_full_gcs)); + out->print_cr(" %5zu upgraded from Degenerated GC (%.2f%%)", _alloc_failure_degenerated_upgrade_to_full, percent_of(_alloc_failure_degenerated_upgrade_to_full, _success_full_gcs)); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp 
b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp index bd703cdb96f..a4c13d24f04 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved. * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -599,7 +599,7 @@ public: ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {} void heap_region_do(ShenandoahHeapRegion* r) { - assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index()); + assert(!r->has_live(), "Region %zu should have no live data", r->index()); if (r->is_active()) { // Check if region needs updating its TAMS. We have updated it already during concurrent // reset, so it is very likely we don't need to do another write here. Since most regions @@ -609,7 +609,7 @@ public: } } else { assert(_ctx->top_at_mark_start(r) == r->top(), - "Region " SIZE_FORMAT " should already have correct TAMS", r->index()); + "Region %zu should already have correct TAMS", r->index()); } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahController.cpp b/src/hotspot/share/gc/shenandoah/shenandoahController.cpp index effa4a8f1fc..6fe79f76e05 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahController.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahController.cpp @@ -1,5 +1,6 @@ /* * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -61,7 +62,7 @@ void ShenandoahController::handle_alloc_failure(ShenandoahAllocRequest& req, boo if (try_set_alloc_failure_gc(is_humongous)) { // Only report the first allocation failure - log_info(gc)("Failed to allocate %s, " SIZE_FORMAT "%s", + log_info(gc)("Failed to allocate %s, %zu%s", req.type_string(), byte_size_in_proper_unit(req.size() * HeapWordSize), proper_unit_for_byte_size(req.size() * HeapWordSize)); @@ -84,7 +85,7 @@ void ShenandoahController::handle_alloc_failure_evac(size_t words) { if (try_set_alloc_failure_gc(is_humongous)) { // Only report the first allocation failure - log_info(gc)("Failed to allocate " SIZE_FORMAT "%s for evacuation", + log_info(gc)("Failed to allocate %zu%s for evacuation", byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize)); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahEvacTracker.cpp b/src/hotspot/share/gc/shenandoah/shenandoahEvacTracker.cpp index ededb99b24e..4aad55dd368 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahEvacTracker.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahEvacTracker.cpp @@ -1,5 +1,6 @@ /* * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -84,8 +85,8 @@ void ShenandoahEvacuationStats::print_on(outputStream* st) { #ifndef PRODUCT size_t abandoned_size = _bytes_attempted - _bytes_completed; size_t abandoned_count = _evacuations_attempted - _evacuations_completed; - st->print_cr("Evacuated " SIZE_FORMAT "%s across " SIZE_FORMAT " objects, " - "abandoned " SIZE_FORMAT "%s across " SIZE_FORMAT " objects.", + st->print_cr("Evacuated %zu%s across %zu objects, " + "abandoned %zu%s across %zu objects.", byte_size_in_proper_unit(_bytes_completed), proper_unit_for_byte_size(_bytes_completed), _evacuations_completed, byte_size_in_proper_unit(abandoned_size), proper_unit_for_byte_size(abandoned_size), diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index b080002a929..032e478d4c2 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -273,7 +273,7 @@ void ShenandoahRegionPartitions::increase_used(ShenandoahFreeSetPartitionId whic assert (which_partition < NumPartitions, "Partition must be valid"); _used[int(which_partition)] += bytes; assert (_used[int(which_partition)] <= _capacity[int(which_partition)], - "Must not use (" SIZE_FORMAT ") more than capacity (" SIZE_FORMAT ") after increase by " SIZE_FORMAT, + "Must not use (%zu) more than capacity (%zu) after increase by %zu", _used[int(which_partition)], _capacity[int(which_partition)], bytes); } @@ -338,7 +338,7 @@ void ShenandoahRegionPartitions::retire_range_from_partition( ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx) { // Note: we may remove from free partition even if region is not entirely full, such as when available < PLAB::min_size() - assert ((low_idx < _max) && (high_idx < _max), "Both indices are sane: " SIZE_FORMAT " and " SIZE_FORMAT " < " SIZE_FORMAT, + assert ((low_idx < _max) && (high_idx < _max), 
"Both indices are sane: %zu and %zu < %zu", low_idx, high_idx, _max); assert (partition < NumPartitions, "Cannot remove from free partitions if not already free"); @@ -353,7 +353,7 @@ void ShenandoahRegionPartitions::retire_range_from_partition( void ShenandoahRegionPartitions::retire_from_partition(ShenandoahFreeSetPartitionId partition, idx_t idx, size_t used_bytes) { // Note: we may remove from free partition even if region is not entirely full, such as when available < PLAB::min_size() - assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max); + assert (idx < _max, "index is sane: %zu < %zu", idx, _max); assert (partition < NumPartitions, "Cannot remove from free partitions if not already free"); assert (in_free_set(partition, idx), "Must be in partition to remove from partition"); @@ -367,7 +367,7 @@ void ShenandoahRegionPartitions::retire_from_partition(ShenandoahFreeSetPartitio } void ShenandoahRegionPartitions::make_free(idx_t idx, ShenandoahFreeSetPartitionId which_partition, size_t available) { - assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max); + assert (idx < _max, "index is sane: %zu < %zu", idx, _max); assert (membership(idx) == ShenandoahFreeSetPartitionId::NotFree, "Cannot make free if already free"); assert (which_partition < NumPartitions, "selected free partition must be valid"); assert (available <= _region_size_bytes, "Available cannot exceed region size"); @@ -399,14 +399,14 @@ bool ShenandoahRegionPartitions::available_implies_empty(size_t available_in_reg void ShenandoahRegionPartitions::move_from_partition_to_partition(idx_t idx, ShenandoahFreeSetPartitionId orig_partition, ShenandoahFreeSetPartitionId new_partition, size_t available) { ShenandoahHeapRegion* r = ShenandoahHeap::heap()->get_region(idx); - assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max); + assert (idx < _max, "index is sane: %zu < %zu", idx, _max); assert (orig_partition < NumPartitions, 
"Original partition must be valid"); assert (new_partition < NumPartitions, "New partition must be valid"); assert (available <= _region_size_bytes, "Available cannot exceed region size"); assert (_membership[int(orig_partition)].is_set(idx), "Cannot move from partition unless in partition"); assert ((r != nullptr) && ((r->is_trash() && (available == _region_size_bytes)) || (r->used() + available == _region_size_bytes)), - "Used: " SIZE_FORMAT " + available: " SIZE_FORMAT " should equal region size: " SIZE_FORMAT, + "Used: %zu + available: %zu should equal region size: %zu", ShenandoahHeap::heap()->get_region(idx)->used(), available, _region_size_bytes); // Expected transitions: @@ -423,13 +423,13 @@ void ShenandoahRegionPartitions::move_from_partition_to_partition(idx_t idx, She (is_young_collector_partition(orig_partition) && is_mutator_partition(new_partition)) || (is_old_collector_partition(orig_partition) && available_implies_empty(available) && is_mutator_partition(new_partition)), - "Unexpected movement between partitions, available: " SIZE_FORMAT ", _region_size_bytes: " SIZE_FORMAT + "Unexpected movement between partitions, available: %zu, _region_size_bytes: %zu" ", orig_partition: %s, new_partition: %s", available, _region_size_bytes, partition_name(orig_partition), partition_name(new_partition)); size_t used = _region_size_bytes - available; assert (_used[int(orig_partition)] >= used, - "Orig partition used: " SIZE_FORMAT " must exceed moved used: " SIZE_FORMAT " within region %zd", + "Orig partition used: %zu must exceed moved used: %zu within region %zd", _used[int(orig_partition)], used, idx); _membership[int(orig_partition)].clear_bit(idx); @@ -452,7 +452,7 @@ const char* ShenandoahRegionPartitions::partition_membership_name(idx_t idx) con } inline ShenandoahFreeSetPartitionId ShenandoahRegionPartitions::membership(idx_t idx) const { - assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max); + assert (idx < _max, "index is 
sane: %zu < %zu", idx, _max); ShenandoahFreeSetPartitionId result = ShenandoahFreeSetPartitionId::NotFree; for (uint partition_id = 0; partition_id < UIntNumPartitions; partition_id++) { if (_membership[partition_id].is_set(idx)) { @@ -465,7 +465,7 @@ inline ShenandoahFreeSetPartitionId ShenandoahRegionPartitions::membership(idx_t #ifdef ASSERT inline bool ShenandoahRegionPartitions::partition_id_matches(idx_t idx, ShenandoahFreeSetPartitionId test_partition) const { - assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max); + assert (idx < _max, "index is sane: %zu < %zu", idx, _max); assert (test_partition < ShenandoahFreeSetPartitionId::NotFree, "must be a valid partition"); return membership(idx) == test_partition; @@ -938,7 +938,7 @@ HeapWord* ShenandoahFreeSet::try_allocate_from_mutator(ShenandoahAllocRequest& r } // Region r is entirely empty. If try_allocate_in fails on region r, something else is really wrong. // Don't bother to retry with other regions. - log_debug(gc, free)("Flipped region " SIZE_FORMAT " to gc for request: " PTR_FORMAT, idx, p2i(&req)); + log_debug(gc, free)("Flipped region %zu to gc for request: " PTR_FORMAT, idx, p2i(&req)); return try_allocate_in(r, req, in_new_region); } } @@ -997,7 +997,7 @@ HeapWord* ShenandoahFreeSet::allocate_aligned_plab(size_t size, ShenandoahAllocR } HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) { - assert (has_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index()); + assert (has_alloc_capacity(r), "Performance: should avoid full regions on this path: %zu", r->index()); if (_heap->is_concurrent_weak_root_in_progress() && r->is_trash()) { // We cannot use this region for allocation when weak roots are in progress because the collector may need // to reference unmarked oops during concurrent classunloading. 
@@ -1008,9 +1008,10 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah in_new_region = r->is_empty(); if (in_new_region) { - log_debug(gc, free)("Using new region (" SIZE_FORMAT ") for %s (" PTR_FORMAT ").", + log_debug(gc, free)("Using new region (%zu) for %s (" PTR_FORMAT ").", r->index(), ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(&req)); - assert(!r->is_affiliated(), "New region " SIZE_FORMAT " should be unaffiliated", r->index()); + assert(!r->is_affiliated(), "New region %zu should be unaffiliated", r->index()); + r->set_affiliation(req.affiliation()); if (r->is_old()) { // Any OLD region allocated during concurrent coalesce-and-fill does not need to be coalesced and filled because @@ -1029,10 +1030,10 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah assert(ctx->top_at_mark_start(r) == r->bottom(), "Newly established allocation region starts with TAMS equal to bottom"); assert(ctx->is_bitmap_range_within_region_clear(ctx->top_bitmap(r), r->end()), "Bitmap above top_bitmap() must be clear"); #endif - log_debug(gc, free)("Using new region (" SIZE_FORMAT ") for %s (" PTR_FORMAT ").", + log_debug(gc, free)("Using new region (%zu) for %s (" PTR_FORMAT ").", r->index(), ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(&req)); } else { - assert(r->is_affiliated(), "Region " SIZE_FORMAT " that is not new should be affiliated", r->index()); + assert(r->is_affiliated(), "Region %zu that is not new should be affiliated", r->index()); if (r->affiliation() != req.affiliation()) { assert(_heap->mode()->is_generational(), "Request for %s from %s region should only happen in generational mode.", req.affiliation_name(), r->affiliation_name()); @@ -1063,8 +1064,8 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah req.set_actual_size(adjusted_size); } else { // Otherwise, leave result == nullptr because the adjusted size is smaller than min size. 
- log_trace(gc, free)("Failed to shrink PLAB request (" SIZE_FORMAT ") in region " SIZE_FORMAT " to " SIZE_FORMAT - " because min_size() is " SIZE_FORMAT, req.size(), r->index(), adjusted_size, req.min_size()); + log_trace(gc, free)("Failed to shrink PLAB request (%zu) in region %zu to %zu" + " because min_size() is %zu", req.size(), r->index(), adjusted_size, req.min_size()); } } else { // This is a GCLAB or a TLAB allocation @@ -1075,11 +1076,11 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah } if (adjusted_size >= req.min_size()) { result = r->allocate(adjusted_size, req); - assert (result != nullptr, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, adjusted_size); + assert (result != nullptr, "Allocation must succeed: free %zu, actual %zu", free, adjusted_size); req.set_actual_size(adjusted_size); } else { - log_trace(gc, free)("Failed to shrink TLAB or GCLAB request (" SIZE_FORMAT ") in region " SIZE_FORMAT " to " SIZE_FORMAT - " because min_size() is " SIZE_FORMAT, req.size(), r->index(), adjusted_size, req.min_size()); + log_trace(gc, free)("Failed to shrink TLAB or GCLAB request (%zu) in region %zu to %zu" + " because min_size() is %zu", req.size(), r->index(), adjusted_size, req.min_size()); } } } else { @@ -1295,7 +1296,7 @@ void ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) { _heap->old_generation()->augment_evacuation_reserve(region_capacity); bool transferred = gen_heap->generation_sizer()->transfer_to_old(1); if (!transferred) { - log_warning(gc, free)("Forcing transfer of " SIZE_FORMAT " to old reserve.", idx); + log_warning(gc, free)("Forcing transfer of %zu to old reserve.", idx); gen_heap->generation_sizer()->force_transfer_to_old(1); } // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next, @@ -1427,24 +1428,25 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regi } } } - log_debug(gc, 
free)(" At end of prep_to_rebuild, mutator_leftmost: " SIZE_FORMAT - ", mutator_rightmost: " SIZE_FORMAT - ", mutator_leftmost_empty: " SIZE_FORMAT - ", mutator_rightmost_empty: " SIZE_FORMAT - ", mutator_regions: " SIZE_FORMAT - ", mutator_used: " SIZE_FORMAT, + log_debug(gc, free)(" At end of prep_to_rebuild, mutator_leftmost: %zu" + ", mutator_rightmost: %zu" + ", mutator_leftmost_empty: %zu" + ", mutator_rightmost_empty: %zu" + ", mutator_regions: %zu" + ", mutator_used: %zu", mutator_leftmost, mutator_rightmost, mutator_leftmost_empty, mutator_rightmost_empty, mutator_regions, mutator_used); - log_debug(gc, free)(" old_collector_leftmost: " SIZE_FORMAT - ", old_collector_rightmost: " SIZE_FORMAT - ", old_collector_leftmost_empty: " SIZE_FORMAT - ", old_collector_rightmost_empty: " SIZE_FORMAT - ", old_collector_regions: " SIZE_FORMAT - ", old_collector_used: " SIZE_FORMAT, + log_debug(gc, free)(" old_collector_leftmost: %zu" + ", old_collector_rightmost: %zu" + ", old_collector_leftmost_empty: %zu" + ", old_collector_rightmost_empty: %zu" + ", old_collector_regions: %zu" + ", old_collector_used: %zu", old_collector_leftmost, old_collector_rightmost, old_collector_leftmost_empty, old_collector_rightmost_empty, old_collector_regions, old_collector_used); + idx_t rightmost_idx = (mutator_leftmost == max_regions)? -1: (idx_t) mutator_rightmost; idx_t rightmost_empty_idx = (mutator_leftmost_empty == max_regions)? 
-1: (idx_t) mutator_rightmost_empty; _partitions.establish_mutator_intervals(mutator_leftmost, rightmost_idx, mutator_leftmost_empty, rightmost_empty_idx, @@ -1536,8 +1538,8 @@ void ShenandoahFreeSet::move_regions_from_collector_to_mutator(size_t max_xfer_r } size_t total_xfer = collector_xfer + old_collector_xfer; - log_info(gc, ergo)("At start of update refs, moving " SIZE_FORMAT "%s to Mutator free set from Collector Reserve (" - SIZE_FORMAT "%s) and from Old Collector Reserve (" SIZE_FORMAT "%s)", + log_info(gc, ergo)("At start of update refs, moving %zu%s to Mutator free set from Collector Reserve (" + "%zu%s) and from Old Collector Reserve (%zu%s)", byte_size_in_proper_unit(total_xfer), proper_unit_for_byte_size(total_xfer), byte_size_in_proper_unit(collector_xfer), proper_unit_for_byte_size(collector_xfer), byte_size_in_proper_unit(old_collector_xfer), proper_unit_for_byte_size(old_collector_xfer)); @@ -1573,11 +1575,11 @@ void ShenandoahFreeSet::establish_generation_sizes(size_t young_region_count, si if (new_old_capacity > original_old_capacity) { size_t region_count = (new_old_capacity - original_old_capacity) / region_size_bytes; - log_info(gc, ergo)("Transfer " SIZE_FORMAT " region(s) from %s to %s, yielding increased size: " PROPERFMT, + log_info(gc, ergo)("Transfer %zu region(s) from %s to %s, yielding increased size: " PROPERFMT, region_count, young_gen->name(), old_gen->name(), PROPERFMTARGS(new_old_capacity)); } else if (new_old_capacity < original_old_capacity) { size_t region_count = (original_old_capacity - new_old_capacity) / region_size_bytes; - log_info(gc, ergo)("Transfer " SIZE_FORMAT " region(s) from %s to %s, yielding increased size: " PROPERFMT, + log_info(gc, ergo)("Transfer %zu region(s) from %s to %s, yielding increased size: " PROPERFMT, region_count, old_gen->name(), young_gen->name(), PROPERFMTARGS(new_young_capacity)); } // This balances generations, so clear any pending request to balance. 
@@ -1655,7 +1657,7 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions young_reserve_result = young_generation->get_evacuation_reserve(); old_reserve_result = promoted_reserve + old_evac_reserve; assert(old_reserve_result <= old_available, - "Cannot reserve (" SIZE_FORMAT " + " SIZE_FORMAT") more OLD than is available: " SIZE_FORMAT, + "Cannot reserve (%zu + %zu) more OLD than is available: %zu", promoted_reserve, old_evac_reserve, old_available); } else { // We are rebuilding at end of GC, so we set aside budgets specified on command line (or defaults) @@ -1714,13 +1716,14 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old // OLD regions that have available memory are already in the old_collector free set. _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator, ShenandoahFreeSetPartitionId::OldCollector, ac); - log_trace(gc, free)(" Shifting region " SIZE_FORMAT " from mutator_free to old_collector_free", idx); + log_trace(gc, free)(" Shifting region %zu from mutator_free to old_collector_free", idx); log_trace(gc, free)(" Shifted Mutator range [%zd, %zd]," " Old Collector range [%zd, %zd]", _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator), _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator), _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector), _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector)); + old_region_count++; continue; } @@ -1737,13 +1740,14 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old // collection set, and they are easily evacuated because they have low density of live objects. 
_partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator, ShenandoahFreeSetPartitionId::Collector, ac); - log_trace(gc, free)(" Shifting region " SIZE_FORMAT " from mutator_free to collector_free", idx); + log_trace(gc, free)(" Shifting region %zu from mutator_free to collector_free", idx); log_trace(gc, free)(" Shifted Mutator range [%zd, %zd]," " Collector range [%zd, %zd]", _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator), _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator), _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector), _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector)); + } } @@ -1817,6 +1821,7 @@ void ShenandoahFreeSet::log_status() { buffer[i] = '\0'; } + ls.cr(); ls.print_cr("Mutator free range [%zd..%zd] allocating from %s", _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator), @@ -1917,7 +1922,7 @@ void ShenandoahFreeSet::log_status() { // retired, the sum of used and capacities within regions that are still in the Mutator free partition may not match // my internally tracked values of used() and free(). 
assert(free == total_free, "Free memory should match"); - ls.print("Free: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s regular, " SIZE_FORMAT "%s humongous, ", + ls.print("Free: %zu%s, Max: %zu%s regular, %zu%s humongous, ", byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free), byte_size_in_proper_unit(max), proper_unit_for_byte_size(max), byte_size_in_proper_unit(max_humongous), proper_unit_for_byte_size(max_humongous) @@ -1930,7 +1935,7 @@ void ShenandoahFreeSet::log_status() { } else { frag_ext = 0; } - ls.print(SIZE_FORMAT "%% external, ", frag_ext); + ls.print("%zu%% external, ", frag_ext); size_t frag_int; if (_partitions.count(ShenandoahFreeSetPartitionId::Mutator) > 0) { @@ -1939,8 +1944,8 @@ void ShenandoahFreeSet::log_status() { } else { frag_int = 0; } - ls.print(SIZE_FORMAT "%% internal; ", frag_int); - ls.print("Used: " SIZE_FORMAT "%s, Mutator Free: " SIZE_FORMAT, + ls.print("%zu%% internal; ", frag_int); + ls.print("Used: %zu%s, Mutator Free: %zu", byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used), _partitions.count(ShenandoahFreeSetPartitionId::Mutator)); } @@ -1960,7 +1965,7 @@ void ShenandoahFreeSet::log_status() { total_used += r->used(); } } - ls.print(" Collector Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s; Used: " SIZE_FORMAT "%s", + ls.print(" Collector Reserve: %zu%s, Max: %zu%s; Used: %zu%s", byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free), byte_size_in_proper_unit(max), proper_unit_for_byte_size(max), byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used)); @@ -1981,7 +1986,7 @@ void ShenandoahFreeSet::log_status() { total_used += r->used(); } } - ls.print_cr(" Old Collector Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s; Used: " SIZE_FORMAT "%s", + ls.print_cr(" Old Collector Reserve: %zu%s, Max: %zu%s; Used: %zu%s", byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free), byte_size_in_proper_unit(max), 
proper_unit_for_byte_size(max), byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used)); @@ -2001,7 +2006,7 @@ HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_ case ShenandoahAllocRequest::_alloc_gclab: case ShenandoahAllocRequest::_alloc_tlab: in_new_region = false; - assert(false, "Trying to allocate TLAB in humongous region: " SIZE_FORMAT, req.size()); + assert(false, "Trying to allocate TLAB in humongous region: %zu", req.size()); return nullptr; default: ShouldNotReachHere(); @@ -2013,20 +2018,20 @@ } void ShenandoahFreeSet::print_on(outputStream* out) const { - out->print_cr("Mutator Free Set: " SIZE_FORMAT "", _partitions.count(ShenandoahFreeSetPartitionId::Mutator)); + out->print_cr("Mutator Free Set: %zu", _partitions.count(ShenandoahFreeSetPartitionId::Mutator)); ShenandoahLeftRightIterator mutator(const_cast<ShenandoahRegionPartitions*>(&_partitions), ShenandoahFreeSetPartitionId::Mutator); for (idx_t index = mutator.current(); mutator.has_next(); index = mutator.next()) { _heap->get_region(index)->print_on(out); } - out->print_cr("Collector Free Set: " SIZE_FORMAT "", _partitions.count(ShenandoahFreeSetPartitionId::Collector)); + out->print_cr("Collector Free Set: %zu", _partitions.count(ShenandoahFreeSetPartitionId::Collector)); ShenandoahLeftRightIterator collector(const_cast<ShenandoahRegionPartitions*>(&_partitions), ShenandoahFreeSetPartitionId::Collector); for (idx_t index = collector.current(); collector.has_next(); index = collector.next()) { _heap->get_region(index)->print_on(out); } if (_heap->mode()->is_generational()) { - out->print_cr("Old Collector Free Set: " SIZE_FORMAT "", _partitions.count(ShenandoahFreeSetPartitionId::OldCollector)); + out->print_cr("Old Collector Free Set: %zu", _partitions.count(ShenandoahFreeSetPartitionId::OldCollector)); for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector); index <= 
_partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector); index++) { if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, index)) { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp index 2847d7c78ba..12249171a00 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp @@ -1,6 +1,7 @@ /* * Copyright (c) 2014, 2021, Red Hat, Inc. All rights reserved. * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -501,7 +502,7 @@ void ShenandoahFullGC::calculate_target_humongous_objects() { size_t to_begin = heap->num_regions(); size_t to_end = heap->num_regions(); - log_debug(gc)("Full GC calculating target humongous objects from end " SIZE_FORMAT, to_end); + log_debug(gc)("Full GC calculating target humongous objects from end %zu", to_end); for (size_t c = heap->num_regions(); c > 0; c--) { ShenandoahHeapRegion *r = heap->get_region(c - 1); if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) { @@ -550,7 +551,7 @@ public: if (r->is_empty_uncommitted()) { r->make_committed_bypass(); } - assert (r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->index()); + assert (r->is_committed(), "only committed regions in heap now, see region %zu", r->index()); // Record current region occupancy: this communicates empty regions are free // to the rest of Full GC code. 
@@ -572,14 +573,14 @@ public: if (r->is_humongous_start()) { oop humongous_obj = cast_to_oop(r->bottom()); if (!_ctx->is_marked(humongous_obj)) { - assert(!r->has_live(), "Region " SIZE_FORMAT " is not marked, should not have live", r->index()); + assert(!r->has_live(), "Region %zu is not marked, should not have live", r->index()); _heap->trash_humongous_region_at(r); } else { - assert(r->has_live(), "Region " SIZE_FORMAT " should have live", r->index()); + assert(r->has_live(), "Region %zu should have live", r->index()); } } else if (r->is_humongous_continuation()) { // If we hit continuation, the non-live humongous starts should have been trashed already - assert(r->humongous_start_region()->has_live(), "Region " SIZE_FORMAT " should have live", r->index()); + assert(r->humongous_start_region()->has_live(), "Region %zu should have live", r->index()); } else if (r->is_regular()) { if (!r->has_live()) { r->make_trash_immediate(); @@ -715,8 +716,8 @@ void ShenandoahFullGC::distribute_slices(ShenandoahHeapRegionSet** worker_slices ShenandoahHeapRegion* r = it.next(); while (r != nullptr) { size_t idx = r->index(); - assert(ShenandoahPrepareForCompactionTask::is_candidate_region(r), "Sanity: " SIZE_FORMAT, idx); - assert(!map.at(idx), "No region distributed twice: " SIZE_FORMAT, idx); + assert(ShenandoahPrepareForCompactionTask::is_candidate_region(r), "Sanity: %zu", idx); + assert(!map.at(idx), "No region distributed twice: %zu", idx); map.at_put(idx, true); r = it.next(); } @@ -725,7 +726,7 @@ void ShenandoahFullGC::distribute_slices(ShenandoahHeapRegionSet** worker_slices for (size_t rid = 0; rid < n_regions; rid++) { bool is_candidate = ShenandoahPrepareForCompactionTask::is_candidate_region(heap->get_region(rid)); bool is_distributed = map.at(rid); - assert(is_distributed || !is_candidate, "All candidates are distributed: " SIZE_FORMAT, rid); + assert(is_distributed || !is_candidate, "All candidates are distributed: %zu", rid); } #endif } @@ -1045,9 +1046,9 @@ 
void ShenandoahFullGC::compact_humongous_objects() { size_t new_start = heap->heap_region_index_containing(FullGCForwarding::forwardee(old_obj)); size_t new_end = new_start + num_regions - 1; assert(old_start != new_start, "must be real move"); - assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index()); + assert(r->is_stw_move_allowed(), "Region %zu should be movable", r->index()); - log_debug(gc)("Full GC compaction moves humongous object from region " SIZE_FORMAT " to region " SIZE_FORMAT, old_start, new_start); + log_debug(gc)("Full GC compaction moves humongous object from region %zu to region %zu", old_start, new_start); Copy::aligned_conjoint_words(r->bottom(), heap->get_region(new_start)->bottom(), words_size); ContinuationGCSupport::relativize_stack_chunk(cast_to_oop(r->bottom())); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp index c72940c3c3a..eb8027a0f7c 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp @@ -1,5 +1,6 @@ /* * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -419,7 +420,7 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap, if (old_evacuated_committed > old_evacuation_reserve) { // This should only happen due to round-off errors when enforcing ShenandoahOldEvacWaste assert(old_evacuated_committed <= (33 * old_evacuation_reserve) / 32, - "Round-off errors should be less than 3.125%%, committed: " SIZE_FORMAT ", reserved: " SIZE_FORMAT, + "Round-off errors should be less than 3.125%%, committed: %zu, reserved: %zu", old_evacuated_committed, old_evacuation_reserve); old_evacuated_committed = old_evacuation_reserve; // Leave old_evac_reserve as previously configured @@ -449,13 +450,13 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap, // This can happen due to round-off errors when adding the results of truncated integer arithmetic. // We've already truncated old_evacuated_committed. Truncate young_advance_promoted_reserve_used here. 
assert(young_advance_promoted_reserve_used <= (33 * (old_available - old_evacuated_committed)) / 32, - "Round-off errors should be less than 3.125%%, committed: " SIZE_FORMAT ", reserved: " SIZE_FORMAT, + "Round-off errors should be less than 3.125%%, committed: %zu, reserved: %zu", young_advance_promoted_reserve_used, old_available - old_evacuated_committed); young_advance_promoted_reserve_used = old_available - old_evacuated_committed; old_consumed = old_evacuated_committed + young_advance_promoted_reserve_used; } - assert(old_available >= old_consumed, "Cannot consume (" SIZE_FORMAT ") more than is available (" SIZE_FORMAT ")", + assert(old_available >= old_consumed, "Cannot consume (%zu) more than is available (%zu)", old_consumed, old_available); size_t excess_old = old_available - old_consumed; size_t unaffiliated_old_regions = old_generation->free_unaffiliated_regions(); @@ -494,10 +495,10 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap, if (regions_to_xfer > 0) { bool result = ShenandoahGenerationalHeap::cast(heap)->generation_sizer()->transfer_to_young(regions_to_xfer); assert(excess_old >= regions_to_xfer * region_size_bytes, - "Cannot transfer (" SIZE_FORMAT ", " SIZE_FORMAT ") more than excess old (" SIZE_FORMAT ")", + "Cannot transfer (%zu, %zu) more than excess old (%zu)", regions_to_xfer, region_size_bytes, excess_old); excess_old -= regions_to_xfer * region_size_bytes; - log_debug(gc, ergo)("%s transferred " SIZE_FORMAT " excess regions to young before start of evacuation", + log_debug(gc, ergo)("%s transferred %zu excess regions to young before start of evacuation", result? 
"Successfully": "Unsuccessfully", regions_to_xfer); } @@ -527,7 +528,7 @@ inline void assert_no_in_place_promotions() { public: void heap_region_do(ShenandoahHeapRegion *r) override { assert(r->get_top_before_promote() == nullptr, - "Region " SIZE_FORMAT " should not be ready for in-place promotion", r->index()); + "Region %zu should not be ready for in-place promotion", r->index()); } } cl; ShenandoahHeap::heap()->heap_region_iterate(&cl); @@ -671,8 +672,8 @@ size_t ShenandoahGeneration::select_aged_regions(size_t old_available) { // We keep going even if one region is excluded from selection because we need to accumulate all eligible // regions that are not preselected into promo_potential } - log_debug(gc)("Preselected " SIZE_FORMAT " regions containing " SIZE_FORMAT " live bytes," - " consuming: " SIZE_FORMAT " of budgeted: " SIZE_FORMAT, + log_debug(gc)("Preselected %zu regions containing %zu live bytes," + " consuming: %zu of budgeted: %zu", selected_regions, selected_live, old_consumed, old_available); } @@ -724,7 +725,7 @@ void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) { // We use integer division so anything up to just less than 2 is considered // reasonable, and the "+1" is to avoid divide-by-zero. 
assert((total_pop+1)/(total_census+1) == 1, "Extreme divergence: " - SIZE_FORMAT "/" SIZE_FORMAT, total_pop, total_census); + "%zu/%zu", total_pop, total_census); #endif } @@ -941,7 +942,7 @@ void ShenandoahGeneration::increase_humongous_waste(size_t bytes) { void ShenandoahGeneration::decrease_humongous_waste(size_t bytes) { if (bytes > 0) { assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || (_humongous_waste >= bytes), - "Waste (" SIZE_FORMAT ") cannot be negative (after subtracting " SIZE_FORMAT ")", _humongous_waste, bytes); + "Waste (%zu) cannot be negative (after subtracting %zu)", _humongous_waste, bytes); Atomic::sub(&_humongous_waste, bytes); } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.cpp index dfbc6b673ff..7ace37c36c1 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.cpp @@ -1,5 +1,6 @@ /* * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -51,8 +52,8 @@ ShenandoahGenerationSizer::ShenandoahGenerationSizer() if (NewSize > MaxNewSize) { if (FLAG_IS_CMDLINE(MaxNewSize)) { - log_warning(gc, ergo)("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). " - "A new max generation size of " SIZE_FORMAT "k will be used.", + log_warning(gc, ergo)("NewSize (%zuk) is greater than the MaxNewSize (%zuk). 
" + "A new max generation size of %zuk will be used.", NewSize/K, MaxNewSize/K, NewSize/K); } FLAG_SET_ERGO(MaxNewSize, NewSize); @@ -138,7 +139,7 @@ bool ShenandoahGenerationSizer::transfer_regions(ShenandoahGeneration* src, Shen src->decrease_capacity(bytes_to_transfer); dst->increase_capacity(bytes_to_transfer); const size_t new_size = dst->max_capacity(); - log_info(gc, ergo)("Transfer " SIZE_FORMAT " region(s) from %s to %s, yielding increased size: " PROPERFMT, + log_info(gc, ergo)("Transfer %zu region(s) from %s to %s, yielding increased size: " PROPERFMT, regions, src->name(), dst->name(), PROPERFMTARGS(new_size)); return true; } @@ -190,7 +191,7 @@ void ShenandoahGenerationSizer::force_transfer_to_old(size_t regions) const { young_gen->decrease_capacity(bytes_to_transfer); old_gen->increase_capacity(bytes_to_transfer); const size_t new_size = old_gen->max_capacity(); - log_info(gc, ergo)("Forcing transfer of " SIZE_FORMAT " region(s) from %s to %s, yielding increased size: " PROPERFMT, + log_info(gc, ergo)("Forcing transfer of %zu region(s) from %s to %s, yielding increased size: " PROPERFMT, regions, young_gen->name(), old_gen->name(), PROPERFMTARGS(new_size)); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp index 9dcdf002b7e..4048eeee568 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp @@ -1,5 +1,6 @@ /* * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -89,7 +90,7 @@ void ShenandoahGenerationalEvacuationTask::do_work() { } void log_region(const ShenandoahHeapRegion* r, LogStream* ls) { - ls->print_cr("GenerationalEvacuationTask, looking at %s region " SIZE_FORMAT ", (age: %d) [%s, %s, %s]", + ls->print_cr("GenerationalEvacuationTask, looking at %s region %zu, (age: %d) [%s, %s, %s]", r->is_old()? "old": r->is_young()? "young": "free", r->index(), r->age(), r->is_active()? "active": "inactive", r->is_humongous()? (r->is_humongous_start()? "humongous_start": "humongous_continuation"): "regular", @@ -126,7 +127,7 @@ void ShenandoahGenerationalEvacuationTask::evacuate_and_promote_regions() { } if (r->is_cset()) { - assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index()); + assert(r->has_live(), "Region %zu should have been reclaimed early", r->index()); _heap->marked_object_iterate(r, &cl); if (ShenandoahPacing) { _heap->pacer()->report_evac(r->used() >> LogHeapWordSize); @@ -177,11 +178,11 @@ void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion const size_t old_garbage_threshold = (ShenandoahHeapRegion::region_size_bytes() * ShenandoahOldGarbageThreshold) / 100; shenandoah_assert_generations_reconciled(); assert(!_heap->is_concurrent_old_mark_in_progress(), "Cannot promote in place during old marking"); - assert(region->garbage_before_padded_for_promote() < old_garbage_threshold, "Region " SIZE_FORMAT " has too much garbage for promotion", region->index()); + assert(region->garbage_before_padded_for_promote() < old_garbage_threshold, "Region %zu has too much garbage for promotion", region->index()); assert(region->is_young(), "Only young regions can be promoted"); assert(region->is_regular(), "Use different service to promote humongous regions"); assert(region->age() >= _tenuring_threshold, "Only promote regions that are sufficiently aged"); - 
assert(region->get_top_before_promote() == tams, "Region " SIZE_FORMAT " has been used for allocations before promotion", region->index()); + assert(region->get_top_before_promote() == tams, "Region %zu has been used for allocations before promotion", region->index()); } ShenandoahOldGeneration* const old_gen = _heap->old_generation(); @@ -280,7 +281,7 @@ void ShenandoahGenerationalEvacuationTask::promote_humongous(ShenandoahHeapRegio // We promote humongous objects unconditionally, without checking for availability. We adjust // usage totals, including humongous waste, after evacuation is done. - log_debug(gc)("promoting humongous region " SIZE_FORMAT ", spanning " SIZE_FORMAT, region->index(), spanned_regions); + log_debug(gc)("promoting humongous region %zu, spanning %zu", region->index(), spanned_regions); young_gen->decrease_used(used_bytes); young_gen->decrease_humongous_waste(humongous_waste); @@ -294,7 +295,7 @@ void ShenandoahGenerationalEvacuationTask::promote_humongous(ShenandoahHeapRegio // in the last humongous region that is not spanned by obj is currently not used. for (size_t i = region->index(); i < index_limit; i++) { ShenandoahHeapRegion* r = _heap->get_region(i); - log_debug(gc)("promoting humongous region " SIZE_FORMAT ", from " PTR_FORMAT " to " PTR_FORMAT, + log_debug(gc)("promoting humongous region %zu, from " PTR_FORMAT " to " PTR_FORMAT, r->index(), p2i(r->bottom()), p2i(r->top())); // We mark the entire humongous object's range as dirty after loop terminates, so no need to dirty the range here r->set_affiliation(OLD_GENERATION); @@ -314,11 +315,11 @@ void ShenandoahGenerationalEvacuationTask::promote_humongous(ShenandoahHeapRegio if (obj->is_typeArray()) { // Primitive arrays don't need to be scanned. 
- log_debug(gc)("Clean cards for promoted humongous object (Region " SIZE_FORMAT ") from " PTR_FORMAT " to " PTR_FORMAT, + log_debug(gc)("Clean cards for promoted humongous object (Region %zu) from " PTR_FORMAT " to " PTR_FORMAT, region->index(), p2i(humongous_bottom), p2i(humongous_bottom + obj->size())); scanner->mark_range_as_clean(humongous_bottom, obj->size()); } else { - log_debug(gc)("Dirty cards for promoted humongous object (Region " SIZE_FORMAT ") from " PTR_FORMAT " to " PTR_FORMAT, + log_debug(gc)("Dirty cards for promoted humongous object (Region %zu) from " PTR_FORMAT " to " PTR_FORMAT, region->index(), p2i(humongous_bottom), p2i(humongous_bottom + obj->size())); scanner->mark_range_as_dirty(humongous_bottom, obj->size()); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp index fe38c996bd8..107103997e8 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp @@ -1,5 +1,6 @@ /* * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -219,7 +220,7 @@ ShenandoahPrepareForGenerationalCompactionObjectClosure::ShenandoahPrepareForGen } void ShenandoahPrepareForGenerationalCompactionObjectClosure::set_from_region(ShenandoahHeapRegion* from_region) { - log_debug(gc)("Worker %u compacting %s Region " SIZE_FORMAT " which had used " SIZE_FORMAT " and %s live", + log_debug(gc)("Worker %u compacting %s Region %zu which had used %zu and %s live", _worker_id, from_region->affiliation_name(), from_region->index(), from_region->used(), from_region->has_live()? 
"has": "does not have"); @@ -248,7 +249,7 @@ void ShenandoahPrepareForGenerationalCompactionObjectClosure::finish() { void ShenandoahPrepareForGenerationalCompactionObjectClosure::finish_old_region() { if (_old_to_region != nullptr) { - log_debug(gc)("Planned compaction into Old Region " SIZE_FORMAT ", used: " SIZE_FORMAT " tabulated by worker %u", + log_debug(gc)("Planned compaction into Old Region %zu, used: %zu tabulated by worker %u", _old_to_region->index(), _old_compact_point - _old_to_region->bottom(), _worker_id); _old_to_region->set_new_top(_old_compact_point); _old_to_region = nullptr; @@ -257,7 +258,7 @@ void ShenandoahPrepareForGenerationalCompactionObjectClosure::finish_old_region( void ShenandoahPrepareForGenerationalCompactionObjectClosure::finish_young_region() { if (_young_to_region != nullptr) { - log_debug(gc)("Worker %u planned compaction into Young Region " SIZE_FORMAT ", used: " SIZE_FORMAT, + log_debug(gc)("Worker %u planned compaction into Young Region %zu, used: %zu", _worker_id, _young_to_region->index(), _young_compact_point - _young_to_region->bottom()); _young_to_region->set_new_top(_young_compact_point); _young_to_region = nullptr; @@ -307,7 +308,7 @@ void ShenandoahPrepareForGenerationalCompactionObjectClosure::do_object(oop p) { if (_old_compact_point + obj_size > _old_to_region->end()) { ShenandoahHeapRegion* new_to_region; - log_debug(gc)("Worker %u finishing old region " SIZE_FORMAT ", compact_point: " PTR_FORMAT ", obj_size: " SIZE_FORMAT + log_debug(gc)("Worker %u finishing old region %zu, compact_point: " PTR_FORMAT ", obj_size: %zu" ", &compact_point[obj_size]: " PTR_FORMAT ", region end: " PTR_FORMAT, _worker_id, _old_to_region->index(), p2i(_old_compact_point), obj_size, p2i(_old_compact_point + obj_size), p2i(_old_to_region->end())); @@ -354,7 +355,7 @@ void ShenandoahPrepareForGenerationalCompactionObjectClosure::do_object(oop p) { if (_young_compact_point + obj_size > _young_to_region->end()) { ShenandoahHeapRegion* 
new_to_region; - log_debug(gc)("Worker %u finishing young region " SIZE_FORMAT ", compact_point: " PTR_FORMAT ", obj_size: " SIZE_FORMAT + log_debug(gc)("Worker %u finishing young region %zu, compact_point: " PTR_FORMAT ", obj_size: %zu" ", &compact_point[obj_size]: " PTR_FORMAT ", region end: " PTR_FORMAT, _worker_id, _young_to_region->index(), p2i(_young_compact_point), obj_size, p2i(_young_compact_point + obj_size), p2i(_young_to_region->end())); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp index 2ad35fcb288..8e803fcc242 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp @@ -1,5 +1,6 @@ /* * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -460,20 +461,20 @@ HeapWord* ShenandoahGenerationalHeap::allocate_from_plab_slow(Thread* thread, si size_t future_size = MIN2(cur_size * 2, plab_max_size()); // Doubling, starting at a card-multiple, should give us a card-multiple. (Ceiling and floor // are card multiples.) - assert(is_aligned(future_size, CardTable::card_size_in_words()), "Card multiple by construction, future_size: " SIZE_FORMAT - ", card_size: " SIZE_FORMAT ", cur_size: " SIZE_FORMAT ", max: " SIZE_FORMAT, + assert(is_aligned(future_size, CardTable::card_size_in_words()), "Card multiple by construction, future_size: %zu" + ", card_size: %zu, cur_size: %zu, max: %zu", future_size, (size_t) CardTable::card_size_in_words(), cur_size, plab_max_size()); // Record new heuristic value even if we take any shortcut. This captures // the case when moderately-sized objects always take a shortcut. At some point, // heuristics should catch up with them. 
Note that the requested cur_size may // not be honored, but we remember that this is the preferred size. - log_debug(gc, free)("Set new PLAB size: " SIZE_FORMAT, future_size); + log_debug(gc, free)("Set new PLAB size: %zu", future_size); ShenandoahThreadLocalData::set_plab_size(thread, future_size); if (cur_size < size) { // The PLAB to be allocated is still not large enough to hold the object. Fall back to shared allocation. // This avoids retiring perfectly good PLABs in order to represent a single large object allocation. - log_debug(gc, free)("Current PLAB size (" SIZE_FORMAT ") is too small for " SIZE_FORMAT, cur_size, size); + log_debug(gc, free)("Current PLAB size (%zu) is too small for %zu", cur_size, size); return nullptr; } @@ -570,7 +571,7 @@ void ShenandoahGenerationalHeap::retire_plab(PLAB* plab, Thread* thread) { if (top != nullptr && plab->waste() > original_waste && is_in_old(top)) { // If retiring the plab created a filler object, then we need to register it with our card scanner so it can // safely walk the region backing the plab. - log_debug(gc)("retire_plab() is registering remnant of size " SIZE_FORMAT " at " PTR_FORMAT, + log_debug(gc)("retire_plab() is registering remnant of size %zu at " PTR_FORMAT, plab->waste() - original_waste, p2i(top)); // No lock is necessary because the PLAB memory is aligned on card boundaries. old_generation()->card_scan()->register_object_without_lock(top); @@ -714,7 +715,7 @@ void ShenandoahGenerationalHeap::TransferResult::print_on(const char* when, outp ShenandoahOldGeneration* const old_gen = heap->old_generation(); const size_t young_available = young_gen->available(); const size_t old_available = old_gen->available(); - ss->print_cr("After %s, %s " SIZE_FORMAT " regions to %s to prepare for next gc, old available: " + ss->print_cr("After %s, %s %zu regions to %s to prepare for next gc, old available: " PROPERFMT ", young_available: " PROPERFMT, when, success? 
"successfully transferred": "failed to transfer", region_count, region_destination, @@ -819,7 +820,7 @@ private: HeapWord* update_watermark = r->get_update_watermark(); assert(update_watermark >= r->bottom(), "sanity"); - log_debug(gc)("Update refs worker " UINT32_FORMAT ", looking at region " SIZE_FORMAT, worker_id, r->index()); + log_debug(gc)("Update refs worker " UINT32_FORMAT ", looking at region %zu", worker_id, r->index()); bool region_progress = false; if (r->is_active() && !r->is_cset()) { if (r->is_young()) { @@ -845,7 +846,7 @@ private: // updated. assert(r->get_update_watermark() == r->bottom(), - "%s Region " SIZE_FORMAT " is_active but not recognized as YOUNG or OLD so must be newly transitioned from FREE", + "%s Region %zu is_active but not recognized as YOUNG or OLD so must be newly transitioned from FREE", r->affiliation_name(), r->index()); } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index 018540b33b7..84b77c94bcb 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved. * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
@@ -146,7 +146,7 @@ public: while (r != nullptr) { size_t start = r->index() * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor(); size_t end = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor(); - assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " < " SIZE_FORMAT, end, _bitmap_size); + assert (end <= _bitmap_size, "end is sane: %zu < %zu", end, _bitmap_size); if (r->is_committed()) { os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size); @@ -187,7 +187,7 @@ jint ShenandoahHeap::initialize() { _num_regions = ShenandoahHeapRegion::region_count(); assert(_num_regions == (max_byte_size / reg_size_bytes), - "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT, + "Regions should cover entire heap exactly: %zu != %zu/%zu", _num_regions, max_byte_size, reg_size_bytes); size_t num_committed_regions = init_byte_size / reg_size_bytes; @@ -277,7 +277,7 @@ jint ShenandoahHeap::initialize() { guarantee(bitmap_bytes_per_region != 0, "Bitmap bytes per region should not be zero"); guarantee(is_power_of_2(bitmap_bytes_per_region), - "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region); + "Bitmap bytes per region should be power of two: %zu", bitmap_bytes_per_region); if (bitmap_page_size > bitmap_bytes_per_region) { _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region; @@ -288,11 +288,11 @@ jint ShenandoahHeap::initialize() { } guarantee(_bitmap_regions_per_slice >= 1, - "Should have at least one region per slice: " SIZE_FORMAT, + "Should have at least one region per slice: %zu", _bitmap_regions_per_slice); guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0, - "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT, + "Bitmap slices should be page-granular: bps = %zu, page size = %zu", _bitmap_bytes_per_slice, bitmap_page_size); ReservedSpace bitmap = 
reserve(_bitmap_size, bitmap_page_size); @@ -584,12 +584,12 @@ ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) : void ShenandoahHeap::print_on(outputStream* st) const { st->print_cr("Shenandoah Heap"); - st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used", + st->print_cr(" %zu%s max, %zu%s soft max, %zu%s committed, %zu%s used", byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()), byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()), byte_size_in_proper_unit(committed()), proper_unit_for_byte_size(committed()), byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used())); - st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions", + st->print_cr(" %zu x %zu%s regions", num_regions(), byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()), proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes())); @@ -789,14 +789,14 @@ size_t ShenandoahHeap::max_capacity() const { size_t ShenandoahHeap::soft_max_capacity() const { size_t v = Atomic::load(&_soft_max_size); assert(min_capacity() <= v && v <= max_capacity(), - "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT, + "Should be in bounds: %zu <= %zu <= %zu", min_capacity(), v, max_capacity()); return v; } void ShenandoahHeap::set_soft_max_capacity(size_t v) { assert(min_capacity() <= v && v <= max_capacity(), - "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT, + "Should be in bounds: %zu <= %zu <= %zu", min_capacity(), v, max_capacity()); Atomic::store(&_soft_max_size, v); } @@ -843,7 +843,7 @@ bool ShenandoahHeap::check_soft_max_changed() { new_soft_max = MAX2(min_capacity(), new_soft_max); new_soft_max = MIN2(max_capacity(), new_soft_max); if (new_soft_max != old_soft_max) { - log_info(gc)("Soft Max Heap Size: " SIZE_FORMAT "%s -> " SIZE_FORMAT "%s", + log_info(gc)("Soft Max Heap
Size: %zu%s -> %zu%s", byte_size_in_proper_unit(old_soft_max), proper_unit_for_byte_size(old_soft_max), byte_size_in_proper_unit(new_soft_max), proper_unit_for_byte_size(new_soft_max) ); @@ -882,13 +882,13 @@ HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) // Record new heuristic value even if we take any shortcut. This captures // the case when moderately-sized objects always take a shortcut. At some point, // heuristics should catch up with them. - log_debug(gc, free)("Set new GCLAB size: " SIZE_FORMAT, new_size); + log_debug(gc, free)("Set new GCLAB size: %zu", new_size); ShenandoahThreadLocalData::set_gclab_size(thread, new_size); if (new_size < size) { // New size still does not fit the object. Fall back to shared allocation. // This avoids retiring perfectly good GCLABs, when we encounter a large object. - log_debug(gc, free)("New gclab size (" SIZE_FORMAT ") is too small for " SIZE_FORMAT, new_size, size); + log_debug(gc, free)("New gclab size (%zu) is too small for %zu", new_size, size); return nullptr; } @@ -997,8 +997,8 @@ HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) { } if (log_develop_is_enabled(Debug, gc, alloc)) { ResourceMark rm; - log_debug(gc, alloc)("Thread: %s, Result: " PTR_FORMAT ", Request: %s, Size: " SIZE_FORMAT - ", Original: " SIZE_FORMAT ", Latest: " SIZE_FORMAT, + log_debug(gc, alloc)("Thread: %s, Result: " PTR_FORMAT ", Request: %s, Size: %zu" + ", Original: %zu, Latest: %zu", Thread::current()->name(), p2i(result), req.type_string(), req.size(), original_count, get_gc_no_progress_count()); } @@ -1027,7 +1027,7 @@ HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) { size_t actual = req.actual_size(); assert (req.is_lab_alloc() || (requested == actual), - "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT, + "Only LAB allocations are elastic: %s, requested = %zu, actual = %zu", 
ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual); if (req.is_mutator_alloc()) { @@ -1179,7 +1179,7 @@ private: ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh); ShenandoahHeapRegion* r; while ((r =_cs->claim_next()) != nullptr) { - assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index()); + assert(r->has_live(), "Region %zu should have been reclaimed early", r->index()); _sh->marked_object_iterate(r, &cl); if (ShenandoahPacing) { @@ -2280,7 +2280,7 @@ void ShenandoahHeap::pin_object(JavaThread* thr, oop o) { void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) { ShenandoahHeapRegion* r = heap_region_containing(o); assert(r != nullptr, "Sanity"); - assert(r->pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", r->index()); + assert(r->pin_count() > 0, "Region %zu should have non-zero pins", r->index()); r->record_unpin(); } @@ -2312,7 +2312,7 @@ void ShenandoahHeap::assert_pinned_region_status() { shenandoah_assert_generations_reconciled(); if (gc_generation()->contains(r)) { assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0), - "Region " SIZE_FORMAT " pinning status is inconsistent", i); + "Region %zu pinning status is inconsistent", i); } } } @@ -2470,7 +2470,7 @@ void ShenandoahHeap::rebuild_free_set(bool concurrent) { assert((first_old_region > last_old_region) || ((last_old_region + 1 - first_old_region >= old_region_count) && get_region(first_old_region)->is_old() && get_region(last_old_region)->is_old()), - "sanity: old_region_count: " SIZE_FORMAT ", first_old_region: " SIZE_FORMAT ", last_old_region: " SIZE_FORMAT, + "sanity: old_region_count: %zu, first_old_region: %zu, last_old_region: %zu", old_region_count, first_old_region, last_old_region); if (mode()->is_generational()) { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp index 
f879188431b..138e4cadd3c 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp @@ -1,6 +1,7 @@ /* * Copyright (c) 2015, 2020, Red Hat, Inc. All rights reserved. * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -378,7 +379,7 @@ inline bool ShenandoahHeap::is_in_active_generation(oop obj) const { // Old regions are in old and global collections, not in young collections return !gen->is_young(); default: - assert(false, "Bad affiliation (%d) for region " SIZE_FORMAT, region_affiliation(index), index); + assert(false, "Bad affiliation (%d) for region %zu", region_affiliation(index), index); return false; } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp index d46b76c9376..6f73c491fd6 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved. * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
@@ -242,7 +242,7 @@ void ShenandoahHeapRegion::make_humongous_cont_bypass(ShenandoahAffiliation affi void ShenandoahHeapRegion::make_pinned() { shenandoah_assert_heaplocked(); - assert(pin_count() > 0, "Should have pins: " SIZE_FORMAT, pin_count()); + assert(pin_count() > 0, "Should have pins: %zu", pin_count()); switch (state()) { case _regular: @@ -264,7 +264,7 @@ void ShenandoahHeapRegion::make_pinned() { void ShenandoahHeapRegion::make_unpinned() { shenandoah_assert_heaplocked(); - assert(pin_count() == 0, "Should not have pins: " SIZE_FORMAT, pin_count()); + assert(pin_count() == 0, "Should not have pins: %zu", pin_count()); switch (state()) { case _pinned: @@ -398,7 +398,7 @@ void ShenandoahHeapRegion::set_live_data(size_t s) { void ShenandoahHeapRegion::print_on(outputStream* st) const { st->print("|"); - st->print(SIZE_FORMAT_W(5), this->_index); + st->print("%5zu", this->_index); switch (state()) { case _empty_uncommitted: @@ -445,15 +445,15 @@ void ShenandoahHeapRegion::print_on(outputStream* st) const { p2i(ShenandoahHeap::heap()->marking_context()->top_at_mark_start(const_cast(this)))); st->print("|UWM " SHR_PTR_FORMAT, p2i(_update_watermark)); - st->print("|U " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used())); - st->print("|T " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_tlab_allocs()), proper_unit_for_byte_size(get_tlab_allocs())); - st->print("|G " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_gclab_allocs()), proper_unit_for_byte_size(get_gclab_allocs())); + st->print("|U %5zu%1s", byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used())); + st->print("|T %5zu%1s", byte_size_in_proper_unit(get_tlab_allocs()), proper_unit_for_byte_size(get_tlab_allocs())); + st->print("|G %5zu%1s", byte_size_in_proper_unit(get_gclab_allocs()), proper_unit_for_byte_size(get_gclab_allocs())); if (ShenandoahHeap::heap()->mode()->is_generational()) { - st->print("|P " SIZE_FORMAT_W(5) "%1s", 
byte_size_in_proper_unit(get_plab_allocs()), proper_unit_for_byte_size(get_plab_allocs())); + st->print("|P %5zu%1s", byte_size_in_proper_unit(get_plab_allocs()), proper_unit_for_byte_size(get_plab_allocs())); } - st->print("|S " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_shared_allocs()), proper_unit_for_byte_size(get_shared_allocs())); - st->print("|L " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_live_data_bytes()), proper_unit_for_byte_size(get_live_data_bytes())); - st->print("|CP " SIZE_FORMAT_W(3), pin_count()); + st->print("|S %5zu%1s", byte_size_in_proper_unit(get_shared_allocs()), proper_unit_for_byte_size(get_shared_allocs())); + st->print("|L %5zu%1s", byte_size_in_proper_unit(get_live_data_bytes()), proper_unit_for_byte_size(get_live_data_bytes())); + st->print("|CP %3zu", pin_count()); st->cr(); #undef SHR_PTR_FORMAT @@ -675,33 +675,33 @@ size_t ShenandoahHeapRegion::setup_sizes(size_t max_heap_size) { size_t region_size; if (FLAG_IS_DEFAULT(ShenandoahRegionSize)) { if (ShenandoahMinRegionSize > max_heap_size / MIN_NUM_REGIONS) { - err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number " - "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "%s).", + err_msg message("Max heap size (%zu%s) is too low to afford the minimum number " + "of regions (%zu) of minimum region size (%zu%s).", byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size), MIN_NUM_REGIONS, byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize)); vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message); } if (ShenandoahMinRegionSize < MIN_REGION_SIZE) { - err_msg message("" SIZE_FORMAT "%s should not be lower than minimum region size (" SIZE_FORMAT "%s).", + err_msg message("%zu%s should not be lower than minimum region size (%zu%s).", byte_size_in_proper_unit(ShenandoahMinRegionSize), 
proper_unit_for_byte_size(ShenandoahMinRegionSize), byte_size_in_proper_unit(MIN_REGION_SIZE), proper_unit_for_byte_size(MIN_REGION_SIZE)); vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message); } if (ShenandoahMinRegionSize < MinTLABSize) { - err_msg message("" SIZE_FORMAT "%s should not be lower than TLAB size size (" SIZE_FORMAT "%s).", + err_msg message("%zu%s should not be lower than TLAB size size (%zu%s).", byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize), byte_size_in_proper_unit(MinTLABSize), proper_unit_for_byte_size(MinTLABSize)); vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message); } if (ShenandoahMaxRegionSize < MIN_REGION_SIZE) { - err_msg message("" SIZE_FORMAT "%s should not be lower than min region size (" SIZE_FORMAT "%s).", + err_msg message("%zu%s should not be lower than min region size (%zu%s).", byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize), byte_size_in_proper_unit(MIN_REGION_SIZE), proper_unit_for_byte_size(MIN_REGION_SIZE)); vm_exit_during_initialization("Invalid -XX:ShenandoahMaxRegionSize option", message); } if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) { - err_msg message("Minimum (" SIZE_FORMAT "%s) should be larger than maximum (" SIZE_FORMAT "%s).", + err_msg message("Minimum (%zu%s) should be larger than maximum (%zu%s).", byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize), byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize)); vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize", message); @@ -717,21 +717,21 @@ size_t ShenandoahHeapRegion::setup_sizes(size_t max_heap_size) { } else { if (ShenandoahRegionSize > max_heap_size / MIN_NUM_REGIONS) { - err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low 
to afford the minimum number " - "of regions (" SIZE_FORMAT ") of requested size (" SIZE_FORMAT "%s).", + err_msg message("Max heap size (%zu%s) is too low to afford the minimum number " + "of regions (%zu) of requested size (%zu%s).", byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size), MIN_NUM_REGIONS, byte_size_in_proper_unit(ShenandoahRegionSize), proper_unit_for_byte_size(ShenandoahRegionSize)); vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message); } if (ShenandoahRegionSize < ShenandoahMinRegionSize) { - err_msg message("Heap region size (" SIZE_FORMAT "%s) should be larger than min region size (" SIZE_FORMAT "%s).", + err_msg message("Heap region size (%zu%s) should be larger than min region size (%zu%s).", byte_size_in_proper_unit(ShenandoahRegionSize), proper_unit_for_byte_size(ShenandoahRegionSize), byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize)); vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message); } if (ShenandoahRegionSize > ShenandoahMaxRegionSize) { - err_msg message("Heap region size (" SIZE_FORMAT "%s) should be lower than max region size (" SIZE_FORMAT "%s).", + err_msg message("Heap region size (%zu%s) should be lower than max region size (%zu%s).", byte_size_in_proper_unit(ShenandoahRegionSize), proper_unit_for_byte_size(ShenandoahRegionSize), byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize)); vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message); @@ -843,7 +843,7 @@ void ShenandoahHeapRegion::record_pin() { } void ShenandoahHeapRegion::record_unpin() { - assert(pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", index()); + assert(pin_count() > 0, "Region %zu should have non-zero pins", index()); Atomic::sub(&_critical_pins, (size_t)1); } @@ -857,7 +857,7 @@ void 
ShenandoahHeapRegion::set_affiliation(ShenandoahAffiliation new_affiliation ShenandoahAffiliation region_affiliation = heap->region_affiliation(this); { ShenandoahMarkingContext* const ctx = heap->complete_marking_context(); - log_debug(gc)("Setting affiliation of Region " SIZE_FORMAT " from %s to %s, top: " PTR_FORMAT ", TAMS: " PTR_FORMAT + log_debug(gc)("Setting affiliation of Region %zu from %s to %s, top: " PTR_FORMAT ", TAMS: " PTR_FORMAT ", watermark: " PTR_FORMAT ", top_bitmap: " PTR_FORMAT, index(), shenandoah_affiliation_name(region_affiliation), shenandoah_affiliation_name(new_affiliation), p2i(top()), p2i(ctx->top_at_mark_start(this)), p2i(_update_watermark), p2i(ctx->top_bitmap(this))); @@ -871,7 +871,7 @@ void ShenandoahHeapRegion::set_affiliation(ShenandoahAffiliation new_affiliation HeapWord* top_bitmap = ctx->top_bitmap(this); assert(ctx->is_bitmap_range_within_region_clear(top_bitmap, _end), - "Region " SIZE_FORMAT ", bitmap should be clear between top_bitmap: " PTR_FORMAT " and end: " PTR_FORMAT, idx, + "Region %zu, bitmap should be clear between top_bitmap: " PTR_FORMAT " and end: " PTR_FORMAT, idx, p2i(top_bitmap), p2i(_end)); } #endif diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp index 382d9ba942c..7f29a8628aa 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp @@ -1,6 +1,7 @@ /* * Copyright (c) 2015, 2019, Red Hat, Inc. All rights reserved. * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -36,7 +37,7 @@ HeapWord* ShenandoahHeapRegion::allocate_aligned(size_t size, ShenandoahAllocRequest &req, size_t alignment_in_bytes) { shenandoah_assert_heaplocked_or_safepoint(); assert(req.is_lab_alloc(), "allocate_aligned() only applies to LAB allocations"); - assert(is_object_aligned(size), "alloc size breaks alignment: " SIZE_FORMAT, size); + assert(is_object_aligned(size), "alloc size breaks alignment: %zu", size); assert(is_old(), "aligned allocations are only taken from OLD regions to support PLABs"); assert(is_aligned(alignment_in_bytes, HeapWordSize), "Expect heap word alignment"); @@ -88,7 +89,7 @@ HeapWord* ShenandoahHeapRegion::allocate_aligned(size_t size, ShenandoahAllocReq HeapWord* ShenandoahHeapRegion::allocate(size_t size, const ShenandoahAllocRequest& req) { shenandoah_assert_heaplocked_or_safepoint(); - assert(is_object_aligned(size), "alloc size breaks alignment: " SIZE_FORMAT, size); + assert(is_object_aligned(size), "alloc size breaks alignment: %zu", size); HeapWord* obj = top(); if (pointer_delta(end(), obj) >= size) { @@ -160,7 +161,7 @@ inline bool ShenandoahHeapRegion::has_live() const { inline size_t ShenandoahHeapRegion::garbage() const { assert(used() >= get_live_data_bytes(), - "Live Data must be a subset of used() live: " SIZE_FORMAT " used: " SIZE_FORMAT, + "Live Data must be a subset of used() live: %zu used: %zu", get_live_data_bytes(), used()); size_t result = used() - get_live_data_bytes(); @@ -171,7 +172,7 @@ inline size_t ShenandoahHeapRegion::garbage_before_padded_for_promote() const { assert(get_top_before_promote() != nullptr, "top before promote should not equal null"); size_t used_before_promote = byte_size(bottom(), get_top_before_promote()); assert(used_before_promote >= get_live_data_bytes(), - "Live Data must be a subset of used before promotion live: " SIZE_FORMAT " used: " SIZE_FORMAT, + "Live Data must be a subset of used before promotion 
live: %zu used: %zu", get_live_data_bytes(), used_before_promote); size_t result = used_before_promote - get_live_data_bytes(); return result; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionClosures.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionClosures.cpp index 3d3483a5b69..f44902d2724 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionClosures.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionClosures.cpp @@ -1,5 +1,6 @@ /* * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -82,8 +83,8 @@ void ShenandoahFinalMarkUpdateRegionStateClosure::heap_region_do(ShenandoahHeapR // from-space-refs written from here on. r->set_update_watermark_at_safepoint(r->top()); } else { - assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index()); + assert(!r->has_live(), "Region %zu should have no live data", r->index()); assert(_ctx == nullptr || _ctx->top_at_mark_start(r) == r->top(), - "Region " SIZE_FORMAT " should have correct TAMS", r->index()); + "Region %zu should have correct TAMS", r->index()); } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.cpp index f04aa7ce8ed..9eb7e7855e3 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.cpp @@ -1,6 +1,7 @@ /* * Copyright (c) 2016, 2020, Red Hat, Inc. All rights reserved. * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -91,7 +92,7 @@ void ShenandoahHeapRegionCounters::write_snapshot(PerfLongVariable** regions, ResourceMark rm; LogStream ls(lt); - ls.print_cr(JLONG_FORMAT " " JLONG_FORMAT " " SIZE_FORMAT " " SIZE_FORMAT " " SIZE_FORMAT, + ls.print_cr(JLONG_FORMAT " " JLONG_FORMAT " %zu %zu %zu", ts->get_value(), status->get_value(), num_regions, region_size, protocol_version); if (num_regions > 0) { ls.print(JLONG_FORMAT, regions[0]->get_value()); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.cpp index d18bfb0d625..91a8082eb21 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved. + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -81,7 +82,7 @@ ShenandoahHeapRegion* ShenandoahHeapRegionSetIterator::next() { } void ShenandoahHeapRegionSet::print_on(outputStream* out) const { - out->print_cr("Region Set : " SIZE_FORMAT "", count()); + out->print_cr("Region Set : %zu", count()); for (size_t index = 0; index < _heap->num_regions(); index++) { if (is_in(index)) { _heap->get_region(index)->print_on(out); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahInitLogger.cpp b/src/hotspot/share/gc/shenandoah/shenandoahInitLogger.cpp index baf95a5bdf7..73857520be2 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahInitLogger.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahInitLogger.cpp @@ -1,6 +1,7 @@ /* * Copyright (c) 2020, Red Hat, Inc. All rights reserved. * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * Copyright (c) 2025, Oracle and/or its affiliates. 
All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -40,7 +41,7 @@ void ShenandoahInitLogger::print() { void ShenandoahInitLogger::print_heap() { GCInitLogger::print_heap(); - log_info(gc, init)("Heap Region Count: " SIZE_FORMAT, ShenandoahHeapRegion::region_count()); + log_info(gc, init)("Heap Region Count: %zu", ShenandoahHeapRegion::region_count()); log_info(gc, init)("Heap Region Size: " EXACTFMT, EXACTFMTARGS(ShenandoahHeapRegion::region_size_bytes())); log_info(gc, init)("TLAB Size Max: " EXACTFMT, EXACTFMTARGS(ShenandoahHeapRegion::max_tlab_size_bytes())); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMark.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahMark.inline.hpp index 0239f961c65..2dc0813e513 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahMark.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMark.inline.hpp @@ -1,6 +1,7 @@ /* * Copyright (c) 2015, 2022, Red Hat, Inc. All rights reserved. * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -128,7 +129,7 @@ inline void ShenandoahMark::count_liveness(ShenandoahLiveData* live_data, oop ob if (!region->is_humongous_start()) { assert(!region->is_humongous(), "Cannot have continuations here"); - assert(region->is_affiliated(), "Do not count live data within Free Regular Region " SIZE_FORMAT, region_idx); + assert(region->is_affiliated(), "Do not count live data within Free Regular Region %zu", region_idx); ShenandoahLiveData cur = live_data[region_idx]; size_t new_val = size + cur; if (new_val >= SHENANDOAH_LIVEDATA_MAX) { @@ -143,11 +144,11 @@ inline void ShenandoahMark::count_liveness(ShenandoahLiveData* live_data, oop ob shenandoah_assert_in_correct_region(nullptr, obj); size_t num_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize); - assert(region->is_affiliated(), "Do not count live data within FREE Humongous Start Region " SIZE_FORMAT, region_idx); + assert(region->is_affiliated(), "Do not count live data within FREE Humongous Start Region %zu", region_idx); for (size_t i = region_idx; i < region_idx + num_regions; i++) { ShenandoahHeapRegion* chain_reg = heap->get_region(i); assert(chain_reg->is_humongous(), "Expecting a humongous region"); - assert(chain_reg->is_affiliated(), "Do not count live data within FREE Humongous Continuation Region " SIZE_FORMAT, i); + assert(chain_reg->is_affiliated(), "Do not count live data within FREE Humongous Continuation Region %zu", i); chain_reg->increase_live_data_gc_words(chain_reg->used() >> LogHeapWordSize); } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.cpp index cd0f31ae14f..1ef06f23445 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, Red Hat, Inc. and/or its affiliates. * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -154,19 +154,19 @@ void ShenandoahMarkBitMap::check_mark(HeapWord* addr) const { void ShenandoahMarkBitMap::verify_index(idx_t bit) const { assert(bit < _size, - "BitMap index out of bounds: " SIZE_FORMAT " >= " SIZE_FORMAT, + "BitMap index out of bounds: %zu >= %zu", bit, _size); } void ShenandoahMarkBitMap::verify_limit(idx_t bit) const { assert(bit <= _size, - "BitMap limit out of bounds: " SIZE_FORMAT " > " SIZE_FORMAT, + "BitMap limit out of bounds: %zu > %zu", bit, _size); } void ShenandoahMarkBitMap::verify_range(idx_t beg, idx_t end) const { assert(beg <= end, - "BitMap range error: " SIZE_FORMAT " > " SIZE_FORMAT, beg, end); + "BitMap range error: %zu > %zu", beg, end); verify_limit(end); } #endif diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.cpp b/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.cpp index ded9fbd97f5..250ab508a30 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.cpp @@ -1,6 +1,7 @@ /* * Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved. * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -56,7 +57,7 @@ bool ShenandoahMarkingContext::is_bitmap_range_within_region_clear(const HeapWor size_t start_idx = heap->heap_region_index_containing(start); #ifdef ASSERT size_t end_idx = heap->heap_region_index_containing(end - 1); - assert(start_idx == end_idx, "Expected range to be within same region (" SIZE_FORMAT ", " SIZE_FORMAT ")", start_idx, end_idx); + assert(start_idx == end_idx, "Expected range to be within same region (%zu, %zu)", start_idx, end_idx); #endif ShenandoahHeapRegion* r = heap->get_region(start_idx); if (!heap->is_bitmap_slice_committed(r)) { @@ -73,7 +74,7 @@ void ShenandoahMarkingContext::initialize_top_at_mark_start(ShenandoahHeapRegion _top_at_mark_starts_base[idx] = bottom; _top_bitmaps[idx] = bottom; - log_debug(gc)("SMC:initialize_top_at_mark_start for Region " SIZE_FORMAT ", TAMS: " PTR_FORMAT ", TopOfBitMap: " PTR_FORMAT, + log_debug(gc)("SMC:initialize_top_at_mark_start for Region %zu, TAMS: " PTR_FORMAT ", TopOfBitMap: " PTR_FORMAT, r->index(), p2i(bottom), p2i(r->end())); } @@ -85,7 +86,7 @@ void ShenandoahMarkingContext::clear_bitmap(ShenandoahHeapRegion* r) { HeapWord* bottom = r->bottom(); HeapWord* top_bitmap = _top_bitmaps[r->index()]; - log_debug(gc)("SMC:clear_bitmap for %s Region " SIZE_FORMAT ", top_bitmap: " PTR_FORMAT, + log_debug(gc)("SMC:clear_bitmap for %s Region %zu, top_bitmap: " PTR_FORMAT, r->affiliation_name(), r->index(), p2i(top_bitmap)); if (top_bitmap > bottom) { @@ -94,7 +95,7 @@ void ShenandoahMarkingContext::clear_bitmap(ShenandoahHeapRegion* r) { } assert(is_bitmap_range_within_region_clear(bottom, r->end()), - "Region " SIZE_FORMAT " should have no marks in bitmap", r->index()); + "Region %zu should have no marks in bitmap", r->index()); } bool ShenandoahMarkingContext::is_complete() { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.inline.hpp 
b/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.inline.hpp index 75a16e15549..d9bddd5fbb6 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.inline.hpp @@ -1,6 +1,7 @@ /* * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved. * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -93,16 +94,16 @@ inline void ShenandoahMarkingContext::capture_top_at_mark_start(ShenandoahHeapRe HeapWord* new_tams = r->top(); assert(new_tams >= old_tams, - "Region " SIZE_FORMAT", TAMS updates should be monotonic: " PTR_FORMAT " -> " PTR_FORMAT, + "Region %zu, TAMS updates should be monotonic: " PTR_FORMAT " -> " PTR_FORMAT, idx, p2i(old_tams), p2i(new_tams)); assert((new_tams == r->bottom()) || (old_tams == r->bottom()) || (new_tams >= _top_bitmaps[idx]), - "Region " SIZE_FORMAT", top_bitmaps updates should be monotonic: " PTR_FORMAT " -> " PTR_FORMAT, + "Region %zu, top_bitmaps updates should be monotonic: " PTR_FORMAT " -> " PTR_FORMAT, idx, p2i(_top_bitmaps[idx]), p2i(new_tams)); assert(old_tams == r->bottom() || is_bitmap_range_within_region_clear(old_tams, new_tams), - "Region " SIZE_FORMAT ", bitmap should be clear while adjusting TAMS: " PTR_FORMAT " -> " PTR_FORMAT, + "Region %zu, bitmap should be clear while adjusting TAMS: " PTR_FORMAT " -> " PTR_FORMAT, idx, p2i(old_tams), p2i(new_tams)); - log_debug(gc)("Capturing TAMS for %s Region " SIZE_FORMAT ", was: " PTR_FORMAT ", now: " PTR_FORMAT, + log_debug(gc)("Capturing TAMS for %s Region %zu, was: " PTR_FORMAT ", now: " PTR_FORMAT, r->affiliation_name(), idx, p2i(old_tams), p2i(new_tams)); _top_at_mark_starts_base[idx] = new_tams; @@ -119,7 +120,7 @@ inline HeapWord* 
ShenandoahMarkingContext::top_at_mark_start(const ShenandoahHea inline void ShenandoahMarkingContext::reset_top_bitmap(ShenandoahHeapRegion* r) { assert(is_bitmap_range_within_region_clear(r->bottom(), r->end()), - "Region " SIZE_FORMAT " should have no marks in bitmap", r->index()); + "Region %zu should have no marks in bitmap", r->index()); _top_bitmaps[r->index()] = r->bottom(); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.cpp b/src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.cpp index 9350f44585f..03c1d920f0d 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.cpp @@ -1,6 +1,7 @@ /* * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved. * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -54,15 +55,15 @@ MemoryUsage ShenandoahMemoryPool::get_memory_usage() { size_t committed = _heap->committed(); // These asserts can never fail: max is stable, and all updates to other values never overflow max. - assert(initial <= max, "initial: " SIZE_FORMAT ", max: " SIZE_FORMAT, initial, max); - assert(used <= max, "used: " SIZE_FORMAT ", max: " SIZE_FORMAT, used, max); - assert(committed <= max, "committed: " SIZE_FORMAT ", max: " SIZE_FORMAT, committed, max); + assert(initial <= max, "initial: %zu, max: %zu", initial, max); + assert(used <= max, "used: %zu, max: %zu", used, max); + assert(committed <= max, "committed: %zu, max: %zu", committed, max); // Committed and used are updated concurrently and independently. They can momentarily break // the assert below, which would also fail in downstream code. To avoid that, adjust values // to make sense under the race. See JDK-8207200. 
committed = MAX2(used, committed); - assert(used <= committed, "used: " SIZE_FORMAT ", committed: " SIZE_FORMAT, used, committed); + assert(used <= committed, "used: %zu, committed: %zu", used, committed); return MemoryUsage(initial, used, committed, max); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMetrics.cpp b/src/hotspot/share/gc/shenandoah/shenandoahMetrics.cpp index 8b2412ff9ab..0f480a92971 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahMetrics.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMetrics.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved. + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -48,7 +49,7 @@ bool ShenandoahMetricsSnapshot::is_good_progress() { size_t free_actual = _heap->free_set()->available(); size_t free_expected = _heap->max_capacity() / 100 * ShenandoahCriticalFreeThreshold; bool prog_free = free_actual >= free_expected; - log_info(gc, ergo)("%s progress for free space: " SIZE_FORMAT "%s, need " SIZE_FORMAT "%s", + log_info(gc, ergo)("%s progress for free space: %zu%s, need %zu%s", prog_free ? "Good" : "Bad", byte_size_in_proper_unit(free_actual), proper_unit_for_byte_size(free_actual), byte_size_in_proper_unit(free_expected), proper_unit_for_byte_size(free_expected)); @@ -60,7 +61,7 @@ bool ShenandoahMetricsSnapshot::is_good_progress() { size_t progress_actual = (_used_before > _used_after) ? _used_before - _used_after : 0; size_t progress_expected = ShenandoahHeapRegion::region_size_bytes(); bool prog_used = progress_actual >= progress_expected; - log_info(gc, ergo)("%s progress for used space: " SIZE_FORMAT "%s, need " SIZE_FORMAT "%s", + log_info(gc, ergo)("%s progress for used space: %zu%s, need %zu%s", prog_used ? 
"Good" : "Bad", byte_size_in_proper_unit(progress_actual), proper_unit_for_byte_size(progress_actual), byte_size_in_proper_unit(progress_expected), proper_unit_for_byte_size(progress_expected)); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.cpp b/src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.cpp index 3c7ba8e4243..872b9fd2665 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.cpp @@ -1,6 +1,7 @@ /* * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved. * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -208,12 +209,12 @@ void BinaryMagnitudeSeq::add(size_t val) { // Defensively saturate for product bits: if (mag < 0) { - assert (false, "bucket index (%d) underflow for value (" SIZE_FORMAT ")", mag, val); + assert (false, "bucket index (%d) underflow for value (%zu)", mag, val); mag = 0; } if (mag >= BitsPerSize_t) { - assert (false, "bucket index (%d) overflow for value (" SIZE_FORMAT ")", mag, val); + assert (false, "bucket index (%d) overflow for value (%zu)", mag, val); mag = BitsPerSize_t - 1; } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp index 36007023d46..4939a0e355f 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp @@ -1,5 +1,6 @@ /* * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -108,7 +109,7 @@ public: ~ShenandoahPurgeSATBTask() { if (_trashed_oops > 0) { - log_debug(gc)("Purged " SIZE_FORMAT " oops from old generation SATB buffers", _trashed_oops); + log_debug(gc)("Purged %zu oops from old generation SATB buffers", _trashed_oops); } } @@ -653,9 +654,9 @@ void ShenandoahOldGeneration::handle_failed_promotion(Thread* thread, size_t siz const size_t words_remaining = (plab == nullptr)? 0: plab->words_remaining(); const char* promote_enabled = ShenandoahThreadLocalData::allow_plab_promotions(thread)? "enabled": "disabled"; - log_info(gc, ergo)("Promotion failed, size " SIZE_FORMAT ", has plab? %s, PLAB remaining: " SIZE_FORMAT - ", plab promotions %s, promotion reserve: " SIZE_FORMAT ", promotion expended: " SIZE_FORMAT - ", old capacity: " SIZE_FORMAT ", old_used: " SIZE_FORMAT ", old unaffiliated regions: " SIZE_FORMAT, + log_info(gc, ergo)("Promotion failed, size %zu, has plab? %s, PLAB remaining: %zu" + ", plab promotions %s, promotion reserve: %zu, promotion expended: %zu" + ", old capacity: %zu, old_used: %zu, old unaffiliated regions: %zu", size * HeapWordSize, plab == nullptr? 
"no": "yes", words_remaining * HeapWordSize, promote_enabled, promotion_reserve, promotion_expended, max_capacity(), used(), free_unaffiliated_regions()); @@ -700,7 +701,7 @@ void ShenandoahOldGeneration::abandon_collection_candidates() { void ShenandoahOldGeneration::prepare_for_mixed_collections_after_global_gc() { assert(is_mark_complete(), "Expected old generation mark to be complete after global cycle."); _old_heuristics->prepare_for_old_collections(); - log_info(gc, ergo)("After choosing global collection set, mixed candidates: " UINT32_FORMAT ", coalescing candidates: " SIZE_FORMAT, + log_info(gc, ergo)("After choosing global collection set, mixed candidates: " UINT32_FORMAT ", coalescing candidates: %zu", _old_heuristics->unprocessed_old_collection_candidates(), _old_heuristics->coalesce_and_fill_candidates_count()); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp b/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp index e7d5a2e00c5..caee4e13443 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp @@ -1,5 +1,6 @@ /* * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved. + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -73,8 +74,8 @@ void ShenandoahPacer::setup_for_mark() { restart_with(non_taxable, tax); - log_info(gc, ergo)("Pacer for Mark. Expected Live: " SIZE_FORMAT "%s, Free: " SIZE_FORMAT "%s, " - "Non-Taxable: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx", + log_info(gc, ergo)("Pacer for Mark. 
Expected Live: %zu%s, Free: %zu%s, " + "Non-Taxable: %zu%s, Alloc Tax Rate: %.1fx", byte_size_in_proper_unit(live), proper_unit_for_byte_size(live), byte_size_in_proper_unit(free), proper_unit_for_byte_size(free), byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable), @@ -98,8 +99,8 @@ void ShenandoahPacer::setup_for_evac() { restart_with(non_taxable, tax); - log_info(gc, ergo)("Pacer for Evacuation. Used CSet: " SIZE_FORMAT "%s, Free: " SIZE_FORMAT "%s, " - "Non-Taxable: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx", + log_info(gc, ergo)("Pacer for Evacuation. Used CSet: %zu%s, Free: %zu%s, " + "Non-Taxable: %zu%s, Alloc Tax Rate: %.1fx", byte_size_in_proper_unit(used), proper_unit_for_byte_size(used), byte_size_in_proper_unit(free), proper_unit_for_byte_size(free), byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable), @@ -123,8 +124,8 @@ void ShenandoahPacer::setup_for_updaterefs() { restart_with(non_taxable, tax); - log_info(gc, ergo)("Pacer for Update Refs. Used: " SIZE_FORMAT "%s, Free: " SIZE_FORMAT "%s, " - "Non-Taxable: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx", + log_info(gc, ergo)("Pacer for Update Refs. Used: %zu%s, Free: %zu%s, " + "Non-Taxable: %zu%s, Alloc Tax Rate: %.1fx", byte_size_in_proper_unit(used), proper_unit_for_byte_size(used), byte_size_in_proper_unit(free), proper_unit_for_byte_size(free), byte_size_in_proper_unit(non_taxable), proper_unit_for_byte_size(non_taxable), @@ -148,7 +149,7 @@ void ShenandoahPacer::setup_for_idle() { restart_with(initial, tax); - log_info(gc, ergo)("Pacer for Idle. Initial: " SIZE_FORMAT "%s, Alloc Tax Rate: %.1fx", + log_info(gc, ergo)("Pacer for Idle. Initial: %zu%s, Alloc Tax Rate: %.1fx", byte_size_in_proper_unit(initial), proper_unit_for_byte_size(initial), tax); } @@ -164,7 +165,7 @@ void ShenandoahPacer::setup_for_reset() { size_t initial = _heap->max_capacity(); restart_with(initial, 1.0); - log_info(gc, ergo)("Pacer for Reset. 
Non-Taxable: " SIZE_FORMAT "%s", + log_info(gc, ergo)("Pacer for Reset. Non-Taxable: %zu%s", byte_size_in_proper_unit(initial), proper_unit_for_byte_size(initial)); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp b/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp index 07d2fd3c4e3..48e695d1f58 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, 2021, Red Hat, Inc. and/or its affiliates. * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -673,10 +673,10 @@ void ShenandoahReferenceProcessor::collect_statistics() { discovered[REF_FINAL], discovered[REF_PHANTOM]); - log_info(gc,ref)("Encountered references: Soft: " SIZE_FORMAT ", Weak: " SIZE_FORMAT ", Final: " SIZE_FORMAT ", Phantom: " SIZE_FORMAT, + log_info(gc,ref)("Encountered references: Soft: %zu, Weak: %zu, Final: %zu, Phantom: %zu", encountered[REF_SOFT], encountered[REF_WEAK], encountered[REF_FINAL], encountered[REF_PHANTOM]); - log_info(gc,ref)("Discovered references: Soft: " SIZE_FORMAT ", Weak: " SIZE_FORMAT ", Final: " SIZE_FORMAT ", Phantom: " SIZE_FORMAT, + log_info(gc,ref)("Discovered references: Soft: %zu, Weak: %zu, Final: %zu, Phantom: %zu", discovered[REF_SOFT], discovered[REF_WEAK], discovered[REF_FINAL], discovered[REF_PHANTOM]); - log_info(gc,ref)("Enqueued references: Soft: " SIZE_FORMAT ", Weak: " SIZE_FORMAT ", Final: " SIZE_FORMAT ", Phantom: " SIZE_FORMAT, + log_info(gc,ref)("Enqueued references: Soft: %zu, Weak: %zu, Final: %zu, Phantom: %zu", enqueued[REF_SOFT], enqueued[REF_WEAK], enqueued[REF_FINAL], enqueued[REF_PHANTOM]); } diff --git 
a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp index 5f09801b929..f3cc30518c2 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp @@ -1,5 +1,6 @@ /* * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -686,7 +687,7 @@ void ShenandoahScanRememberedTask::do_work(uint worker_id) { while (_work_list->next(&assignment)) { ShenandoahHeapRegion* region = assignment._r; log_debug(gc)("ShenandoahScanRememberedTask::do_work(%u), processing slice of region " - SIZE_FORMAT " at offset " SIZE_FORMAT ", size: " SIZE_FORMAT, + "%zu at offset %zu, size: %zu", worker_id, region->index(), assignment._chunk_offset, assignment._chunk_size); if (region->is_old()) { size_t cluster_size = @@ -856,7 +857,7 @@ ShenandoahRegionChunkIterator::ShenandoahRegionChunkIterator(ShenandoahHeap* hea { #ifdef ASSERT size_t expected_chunk_size_words = _clusters_in_smallest_chunk * CardTable::card_size_in_words() * ShenandoahCardCluster::CardsPerCluster; - assert(smallest_chunk_size_words() == expected_chunk_size_words, "_smallest_chunk_size (" SIZE_FORMAT") is not valid because it does not equal (" SIZE_FORMAT ")", + assert(smallest_chunk_size_words() == expected_chunk_size_words, "_smallest_chunk_size (%zu) is not valid because it does not equal (%zu)", smallest_chunk_size_words(), expected_chunk_size_words); #endif assert(_num_groups <= _maximum_groups, @@ -897,13 +898,13 @@ ShenandoahRegionChunkIterator::ShenandoahRegionChunkIterator(ShenandoahHeap* hea } if (_group_entries[_num_groups-1] < _total_chunks) { assert((_total_chunks - _group_entries[_num_groups-1]) * _group_chunk_size[_num_groups-1] + previous_group_span == - heap->num_regions() * words_in_region, "Total region chunks (" SIZE_FORMAT - ") do not span total heap regions (" SIZE_FORMAT ")", _total_chunks, _heap->num_regions()); + heap->num_regions() * words_in_region, "Total region chunks (%zu" + ") do not span total heap regions (%zu)", _total_chunks, _heap->num_regions()); previous_group_span += (_total_chunks - _group_entries[_num_groups-1]) * _group_chunk_size[_num_groups-1]; _group_entries[_num_groups-1] = _total_chunks; } - assert(previous_group_span == heap->num_regions() * words_in_region, "Total region chunks (" SIZE_FORMAT - ") do not span total heap regions (" SIZE_FORMAT "): " SIZE_FORMAT " does not equal " SIZE_FORMAT, + assert(previous_group_span == heap->num_regions() * words_in_region, "Total region chunks (%zu" + ") do not span total heap regions (%zu): %zu does not equal %zu", _total_chunks, _heap->num_regions(), previous_group_span, heap->num_regions() * words_in_region); // Not necessary, but keeps things tidy diff --git a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.inline.hpp index ec00adc4040..b0fc55631e0 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.inline.hpp @@ -1,5 +1,6 @@ /* * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. + * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -69,8 +70,8 @@ void ShenandoahScanRemembered::process_clusters(size_t first_cluster, size_t cou const size_t whole_cards = (end_addr - start_addr + CardTable::card_size_in_words() - 1)/CardTable::card_size_in_words(); const size_t end_card_index = start_card_index + whole_cards - 1; - log_debug(gc, remset)("Worker %u: cluster = " SIZE_FORMAT " count = " SIZE_FORMAT " eor = " INTPTR_FORMAT - " start_addr = " INTPTR_FORMAT " end_addr = " INTPTR_FORMAT " cards = " SIZE_FORMAT, + log_debug(gc, remset)("Worker %u: cluster = %zu count = %zu eor = " INTPTR_FORMAT + " start_addr = " INTPTR_FORMAT " end_addr = " INTPTR_FORMAT " cards = %zu", worker_id, first_cluster, count, p2i(end_of_range), p2i(start_addr), p2i(end_addr), whole_cards); // use_write_table states whether we are using the card table that is being @@ -341,7 +342,7 @@ ShenandoahScanRemembered::process_region_slice(ShenandoahHeapRegion *region, siz } } - log_debug(gc)("Remembered set scan processing Region " SIZE_FORMAT ", from " PTR_FORMAT " to " PTR_FORMAT ", using %s table", + log_debug(gc)("Remembered set scan processing Region %zu, from " PTR_FORMAT " to " PTR_FORMAT ", using %s table", region->index(), p2i(start_of_range), p2i(end_of_range), use_write_table? "read/write (updating)": "read (marking)"); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp index d8d6d81578f..50edea87eba 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp @@ -384,7 +384,7 @@ public: _trashed_regions++; } _regions++; - log_debug(gc)("ShenandoahCalculateRegionStatsClosure: adding " SIZE_FORMAT " for %s Region " SIZE_FORMAT ", yielding: " SIZE_FORMAT, + log_debug(gc)("ShenandoahCalculateRegionStatsClosure: adding %zu for %s Region %zu, yielding: %zu", r->used(), (r->is_humongous() ? "humongous" : "regular"), r->index(), _used); } @@ -423,7 +423,7 @@ class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure { } static void log_usage(ShenandoahGeneration* generation, ShenandoahCalculateRegionStatsClosure& stats) { - log_debug(gc)("Safepoint verification: %s verified usage: " SIZE_FORMAT "%s, recorded usage: " SIZE_FORMAT "%s", + log_debug(gc)("Safepoint verification: %s verified usage: %zu%s, recorded usage: %zu%s", generation->name(), byte_size_in_proper_unit(generation->used()), proper_unit_for_byte_size(generation->used()), byte_size_in_proper_unit(stats.used()), proper_unit_for_byte_size(stats.used())); @@ -444,12 +444,12 @@ class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure { label, generation->name(), PROPERFMTARGS(generation_used), PROPERFMTARGS(stats.used())); guarantee(stats.regions() == generation_used_regions, - "%s: generation (%s) used regions (" SIZE_FORMAT ") must equal regions that are in use (" SIZE_FORMAT ")", + "%s: generation (%s) used regions (%zu) must equal regions that are in use (%zu)", label, generation->name(), generation->used_regions(), stats.regions()); size_t generation_capacity = generation->max_capacity(); guarantee(stats.non_trashed_span() <= generation_capacity, - "%s: generation (%s) size spanned by regions (" SIZE_FORMAT ") * region size (" PROPERFMT + "%s: generation (%s) size spanned by regions (%zu) * region size (" PROPERFMT ") must not exceed current capacity (" PROPERFMT ")", label, generation->name(), stats.regions(), PROPERFMTARGS(ShenandoahHeapRegion::region_size_bytes()), PROPERFMTARGS(generation_capacity)); @@ -872,14 +872,14 @@ void ShenandoahVerifier::verify_at_safepoint(const char* label, } if (sizeness != _verify_size_disable) { guarantee(cl.used() == heap_used, - "%s: heap used size must be consistent: heap-used = " SIZE_FORMAT "%s, regions-used = " SIZE_FORMAT "%s", + "%s: heap used size must be consistent: heap-used = %zu%s, regions-used = %zu%s", label, byte_size_in_proper_unit(heap_used), proper_unit_for_byte_size(heap_used), byte_size_in_proper_unit(cl.used()), proper_unit_for_byte_size(cl.used())); } size_t heap_committed = _heap->committed(); guarantee(cl.committed() == heap_committed, - "%s: heap committed size must be consistent: heap-committed = " SIZE_FORMAT "%s, regions-committed = " SIZE_FORMAT "%s", + "%s: heap committed size must be consistent: heap-committed = %zu%s, regions-committed = %zu%s", label, byte_size_in_proper_unit(heap_committed), proper_unit_for_byte_size(heap_committed), byte_size_in_proper_unit(cl.committed()), proper_unit_for_byte_size(cl.committed())); @@ -1026,7 +1026,7 @@ void ShenandoahVerifier::verify_at_safepoint(const char* label, if (reg_live != verf_live) { stringStream ss; r->print_on(&ss); - fatal("%s: Live data should match: region-live = " SIZE_FORMAT ", verifier-live = " UINT32_FORMAT "\n%s", + fatal("%s: Live data should match: region-live = %zu, verifier-live = " UINT32_FORMAT "\n%s", label, reg_live, verf_live, ss.freeze()); } } @@ -1035,7 +1035,7 @@ void ShenandoahVerifier::verify_at_safepoint(const char* label, log_debug(gc)("Safepoint verification finished accumulation of liveness data"); - log_info(gc)("Verify %s, Level %zd (" SIZE_FORMAT " reachable, " SIZE_FORMAT " marked)", + log_info(gc)("Verify %s, Level %zd (%zu reachable, %zu marked)", label, ShenandoahVerifyLevel, count_reachable, count_marked); FREE_C_HEAP_ARRAY(ShenandoahLivenessData, ld);