Merge branch 'master' into _8367993

Leo Korinth 2026-01-26 09:54:45 +01:00
commit b41d4a0e8c
238 changed files with 7784 additions and 6877 deletions

View File

@ -72,6 +72,7 @@ id="toc-notes-for-specific-tests">Notes for Specific Tests</a>
<li><a href="#non-us-locale" id="toc-non-us-locale">Non-US
locale</a></li>
<li><a href="#pkcs11-tests" id="toc-pkcs11-tests">PKCS11 Tests</a></li>
<li><a href="#sctp-tests" id="toc-sctp-tests">SCTP Tests</a></li>
<li><a href="#testing-ahead-of-time-optimizations"
id="toc-testing-ahead-of-time-optimizations">Testing Ahead-of-time
Optimizations</a></li>
@ -621,6 +622,21 @@ element of the appropriate <code>@Artifact</code> class. (See
JTREG=&quot;JAVA_OPTIONS=-Djdk.test.lib.artifacts.nsslib-linux_aarch64=/path/to/NSS-libs&quot;</code></pre>
<p>For more notes about the PKCS11 tests, please refer to
test/jdk/sun/security/pkcs11/README.</p>
<h3 id="sctp-tests">SCTP Tests</h3>
<p>The SCTP tests require the SCTP runtime library, which is often not
installed by default in popular Linux distributions. Without this
library, the SCTP tests will be skipped. If you want to enable the SCTP
tests, you should install the SCTP library before running the tests.</p>
<p>For distributions using the .deb packaging format and the apt tool
(such as Debian, Ubuntu, etc.), try this:</p>
<pre><code>sudo apt install libsctp1
sudo modprobe sctp
lsmod | grep sctp</code></pre>
<p>For distributions using the .rpm packaging format and the dnf tool
(such as Fedora, Red Hat, etc.), try this:</p>
<pre><code>sudo dnf install -y lksctp-tools
sudo modprobe sctp
lsmod | grep sctp</code></pre>
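<p>Once the library is installed and the kernel module is loaded, the SCTP
tests can be run like any other jtreg tests. As a hedged sketch (assuming the
SCTP tests reside under <code>test/jdk/com/sun/nio/sctp</code>):</p>
<pre><code>make test TEST=&quot;jtreg:com/sun/nio/sctp&quot;</code></pre>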
<h3 id="testing-ahead-of-time-optimizations">Testing Ahead-of-time
Optimizations</h3>
<p>One way to improve test coverage of ahead-of-time (AOT) optimizations

View File

@ -640,6 +640,32 @@ $ make test TEST="jtreg:sun/security/pkcs11/Secmod/AddTrustedCert.java" \
For more notes about the PKCS11 tests, please refer to
test/jdk/sun/security/pkcs11/README.
### SCTP Tests
The SCTP tests require the SCTP runtime library, which is often not installed
by default in popular Linux distributions. Without this library, the SCTP tests
will be skipped. If you want to enable the SCTP tests, you should install the
SCTP library before running the tests.
For distributions using the .deb packaging format and the apt tool
(such as Debian, Ubuntu, etc.), try this:
```
sudo apt install libsctp1
sudo modprobe sctp
lsmod | grep sctp
```
For distributions using the .rpm packaging format and the dnf tool
(such as Fedora, Red Hat, etc.), try this:
```
sudo dnf install -y lksctp-tools
sudo modprobe sctp
lsmod | grep sctp
```
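Once the library is installed and the kernel module is loaded, the SCTP tests
can be run like any other jtreg tests. As a hedged sketch (assuming the SCTP
tests reside under `test/jdk/com/sun/nio/sctp`):
```
make test TEST="jtreg:com/sun/nio/sctp"
```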
### Testing Ahead-of-time Optimizations
One way to improve test coverage of ahead-of-time (AOT) optimizations in

View File

@ -69,22 +69,18 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
# Debug prefix mapping if supported by compiler
DEBUG_PREFIX_CFLAGS=
UTIL_ARG_WITH(NAME: native-debug-symbols-level, TYPE: string,
DEFAULT: "",
RESULT: DEBUG_SYMBOLS_LEVEL,
UTIL_ARG_WITH(NAME: native-debug-symbols-level, TYPE: literal,
DEFAULT: [auto], VALID_VALUES: [auto 1 2 3],
CHECK_AVAILABLE: [
if test x$TOOLCHAIN_TYPE = xmicrosoft; then
AVAILABLE=false
fi
],
DESC: [set the native debug symbol level (GCC and Clang only)],
DEFAULT_DESC: [toolchain default])
AC_SUBST(DEBUG_SYMBOLS_LEVEL)
if test "x${TOOLCHAIN_TYPE}" = xgcc || \
test "x${TOOLCHAIN_TYPE}" = xclang; then
DEBUG_SYMBOLS_LEVEL_FLAGS="-g"
if test "x${DEBUG_SYMBOLS_LEVEL}" != "x"; then
DEBUG_SYMBOLS_LEVEL_FLAGS="-g${DEBUG_SYMBOLS_LEVEL}"
FLAGS_COMPILER_CHECK_ARGUMENTS(ARGUMENT: [${DEBUG_SYMBOLS_LEVEL_FLAGS}],
IF_FALSE: AC_MSG_ERROR("Debug info level ${DEBUG_SYMBOLS_LEVEL} is not supported"))
fi
fi
DEFAULT_DESC: [toolchain default],
IF_AUTO: [
RESULT=""
])
# Debug symbols
if test "x$TOOLCHAIN_TYPE" = xgcc; then
@ -111,8 +107,8 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
fi
# Debug info level should follow the debug format to be effective.
CFLAGS_DEBUG_SYMBOLS="-gdwarf-4 ${DEBUG_SYMBOLS_LEVEL_FLAGS}"
ASFLAGS_DEBUG_SYMBOLS="${DEBUG_SYMBOLS_LEVEL_FLAGS}"
CFLAGS_DEBUG_SYMBOLS="-gdwarf-4 -g${NATIVE_DEBUG_SYMBOLS_LEVEL}"
ASFLAGS_DEBUG_SYMBOLS="-g${NATIVE_DEBUG_SYMBOLS_LEVEL}"
elif test "x$TOOLCHAIN_TYPE" = xclang; then
if test "x$ALLOW_ABSOLUTE_PATHS_IN_OUTPUT" = "xfalse"; then
# Check if compiler supports -fdebug-prefix-map. If so, use that to make
@ -132,8 +128,8 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
IF_FALSE: [GDWARF_FLAGS=""])
# Debug info level should follow the debug format to be effective.
CFLAGS_DEBUG_SYMBOLS="${GDWARF_FLAGS} ${DEBUG_SYMBOLS_LEVEL_FLAGS}"
ASFLAGS_DEBUG_SYMBOLS="${DEBUG_SYMBOLS_LEVEL_FLAGS}"
CFLAGS_DEBUG_SYMBOLS="${GDWARF_FLAGS} -g${NATIVE_DEBUG_SYMBOLS_LEVEL}"
ASFLAGS_DEBUG_SYMBOLS="-g${NATIVE_DEBUG_SYMBOLS_LEVEL}"
elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
CFLAGS_DEBUG_SYMBOLS="-Z7"
fi
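A hedged usage sketch of the new option: the flag name and valid values are taken from the UTIL_ARG_WITH definition above, while the exact invocation is an assumption based on the standard OpenJDK configure flow (GCC and Clang only; `auto` keeps the plain `-g` toolchain default).
```
# Assumed invocation; option name and values come from the m4 definition above.
bash configure --with-native-debug-symbols-level=3     # request -g3 (includes macro debug info on GCC)
bash configure --with-native-debug-symbols-level=auto  # keep the toolchain default (plain -g)
```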

View File

@ -5782,6 +5782,9 @@ address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
// return false;
bind(A_IS_NOT_NULL);
ldrw(cnt1, Address(a1, length_offset));
ldrw(tmp5, Address(a2, length_offset));
cmp(cnt1, tmp5);
br(NE, DONE); // If lengths differ, return false
// Increase loop counter by diff between base- and actual start-offset.
addw(cnt1, cnt1, extra_length);
lea(a1, Address(a1, start_offset));
@ -5848,6 +5851,9 @@ address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
cbz(a1, DONE);
ldrw(cnt1, Address(a1, length_offset));
cbz(a2, DONE);
ldrw(tmp5, Address(a2, length_offset));
cmp(cnt1, tmp5);
br(NE, DONE); // If lengths differ, return false
// Increase loop counter by diff between base- and actual start-offset.
addw(cnt1, cnt1, extra_length);

View File

@ -571,7 +571,12 @@ ArchiveBuilder::FollowMode ArchiveBuilder::get_follow_mode(MetaspaceClosure::Ref
}
if (is_excluded(klass)) {
ResourceMark rm;
log_debug(cds, dynamic)("Skipping class (excluded): %s", klass->external_name());
aot_log_trace(aot)("pointer set to null: class (excluded): %s", klass->external_name());
return set_to_null;
}
if (klass->is_array_klass() && CDSConfig::is_dumping_dynamic_archive()) {
ResourceMark rm;
aot_log_trace(aot)("pointer set to null: array class not supported in dynamic region: %s", klass->external_name());
return set_to_null;
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,7 +30,6 @@
#include "gc/g1/g1HeapRegionPrinter.hpp"
#include "gc/g1/g1RemSetTrackingPolicy.hpp"
#include "logging/log.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/mutexLocker.hpp"
struct G1UpdateRegionLivenessAndSelectForRebuildTask::G1OnRegionClosure : public G1HeapRegionClosure {
@ -154,7 +153,7 @@ void G1UpdateRegionLivenessAndSelectForRebuildTask::work(uint worker_id) {
G1OnRegionClosure on_region_cl(_g1h, _cm, &local_cleanup_list);
_g1h->heap_region_par_iterate_from_worker_offset(&on_region_cl, &_hrclaimer, worker_id);
AtomicAccess::add(&_total_selected_for_rebuild, on_region_cl._num_selected_for_rebuild);
_total_selected_for_rebuild.add_then_fetch(on_region_cl._num_selected_for_rebuild);
// Update the old/humongous region sets
_g1h->remove_from_old_gen_sets(on_region_cl._num_old_regions_removed,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,6 +29,7 @@
#include "gc/g1/g1HeapRegionManager.hpp"
#include "gc/g1/g1HeapRegionSet.hpp"
#include "gc/shared/workerThread.hpp"
#include "runtime/atomic.hpp"
class G1CollectedHeap;
class G1ConcurrentMark;
@ -41,7 +42,7 @@ class G1UpdateRegionLivenessAndSelectForRebuildTask : public WorkerTask {
G1ConcurrentMark* _cm;
G1HeapRegionClaimer _hrclaimer;
uint volatile _total_selected_for_rebuild;
Atomic<uint> _total_selected_for_rebuild;
// Reclaimed empty regions
G1FreeRegionList _cleanup_list;
@ -57,7 +58,9 @@ public:
void work(uint worker_id) override;
uint total_selected_for_rebuild() const { return _total_selected_for_rebuild; }
uint total_selected_for_rebuild() const {
return _total_selected_for_rebuild.load_relaxed();
}
static uint desired_num_workers(uint num_regions);
};

View File

@ -68,9 +68,9 @@ ShenandoahAdaptiveHeuristics::ShenandoahAdaptiveHeuristics(ShenandoahSpaceInfo*
ShenandoahAdaptiveHeuristics::~ShenandoahAdaptiveHeuristics() {}
void ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) {
size_t ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) {
size_t garbage_threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;
// The logic for cset selection in adaptive is as follows:
@ -124,6 +124,7 @@ void ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(Shenand
cur_garbage = new_garbage;
}
}
return 0;
}
void ShenandoahAdaptiveHeuristics::record_cycle_start() {

View File

@ -108,9 +108,9 @@ public:
virtual ~ShenandoahAdaptiveHeuristics();
virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) override;
virtual size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) override;
virtual void record_cycle_start() override;
virtual void record_success_concurrent() override;

View File

@ -39,15 +39,16 @@ ShenandoahAggressiveHeuristics::ShenandoahAggressiveHeuristics(ShenandoahSpaceIn
SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahEvacReserveOverflow);
}
void ShenandoahAggressiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t free) {
size_t ShenandoahAggressiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t free) {
for (size_t idx = 0; idx < size; idx++) {
ShenandoahHeapRegion* r = data[idx].get_region();
if (r->garbage() > 0) {
cset->add_region(r);
}
}
return 0;
}
bool ShenandoahAggressiveHeuristics::should_start_gc() {

View File

@ -35,9 +35,9 @@ class ShenandoahAggressiveHeuristics : public ShenandoahHeuristics {
public:
ShenandoahAggressiveHeuristics(ShenandoahSpaceInfo* space_info);
virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t free);
virtual size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t free);
virtual bool should_start_gc();

View File

@ -76,9 +76,9 @@ bool ShenandoahCompactHeuristics::should_start_gc() {
return ShenandoahHeuristics::should_start_gc();
}
void ShenandoahCompactHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) {
size_t ShenandoahCompactHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) {
// Do not select too large CSet that would overflow the available free space
size_t max_cset = actual_free * 3 / 4;
@ -97,4 +97,5 @@ void ShenandoahCompactHeuristics::choose_collection_set_from_regiondata(Shenando
cset->add_region(r);
}
}
return 0;
}

View File

@ -37,9 +37,9 @@ public:
virtual bool should_start_gc();
virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free);
virtual size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free);
virtual const char* name() { return "Compact"; }
virtual bool is_diagnostic() { return false; }

View File

@ -37,7 +37,7 @@ ShenandoahGenerationalHeuristics::ShenandoahGenerationalHeuristics(ShenandoahGen
: ShenandoahAdaptiveHeuristics(generation), _generation(generation) {
}
void ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) {
size_t ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) {
assert(collection_set->is_empty(), "Must be empty");
auto heap = ShenandoahGenerationalHeap::heap();
@ -168,16 +168,12 @@ void ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectio
byte_size_in_proper_unit(total_garbage), proper_unit_for_byte_size(total_garbage));
size_t immediate_percent = (total_garbage == 0) ? 0 : (immediate_garbage * 100 / total_garbage);
bool doing_promote_in_place = (humongous_regions_promoted + regular_regions_promoted_in_place > 0);
if (doing_promote_in_place || (preselected_candidates > 0) || (immediate_percent <= ShenandoahImmediateThreshold)) {
// Only young collections need to prime the collection set.
if (_generation->is_young()) {
heap->old_generation()->heuristics()->prime_collection_set(collection_set);
}
size_t add_regions_to_old = 0;
if (doing_promote_in_place || (preselected_candidates > 0) || (immediate_percent <= ShenandoahImmediateThreshold)) {
// Call the subclasses to add young-gen regions into the collection set.
choose_collection_set_from_regiondata(collection_set, candidates, cand_idx, immediate_garbage + free);
add_regions_to_old = choose_collection_set_from_regiondata(collection_set, candidates, cand_idx, immediate_garbage + free);
}
if (collection_set->has_old_regions()) {
@ -194,6 +190,7 @@ void ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectio
regular_regions_promoted_free,
immediate_regions,
immediate_garbage);
return add_regions_to_old;
}
@ -210,13 +207,6 @@ size_t ShenandoahGenerationalHeuristics::add_preselected_regions_to_collection_s
assert(ShenandoahGenerationalHeap::heap()->is_tenurable(r), "Preselected regions must have tenure age");
// Entire region will be promoted. This region does not impact young-gen or old-gen evacuation reserve.
// This region has been pre-selected and its impact on promotion reserve is already accounted for.
// r->used() is r->garbage() + r->get_live_data_bytes()
// Since all live data in this region is being evacuated from young-gen, it is as if this memory
// is garbage insofar as young-gen is concerned. Counting this as garbage reduces the need to
// reclaim highly utilized young-gen regions just for the sake of finding min_garbage to reclaim
// within young-gen memory.
cur_young_garbage += r->garbage();
cset->add_region(r);
}

View File

@ -44,7 +44,7 @@ class ShenandoahGenerationalHeuristics : public ShenandoahAdaptiveHeuristics {
public:
explicit ShenandoahGenerationalHeuristics(ShenandoahGeneration* generation);
void choose_collection_set(ShenandoahCollectionSet* collection_set) override;
size_t choose_collection_set(ShenandoahCollectionSet* collection_set) override;
protected:
ShenandoahGeneration* _generation;

View File

@ -24,6 +24,7 @@
*/
#include "gc/shenandoah/heuristics/shenandoahGlobalHeuristics.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.inline.hpp"
#include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
@ -35,13 +36,14 @@ ShenandoahGlobalHeuristics::ShenandoahGlobalHeuristics(ShenandoahGlobalGeneratio
}
void ShenandoahGlobalHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) {
size_t ShenandoahGlobalHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) {
// Better select garbage-first regions
QuickSort::sort<RegionData>(data, (int) size, compare_by_garbage);
choose_global_collection_set(cset, data, size, actual_free, 0 /* cur_young_garbage */);
return 0;
}
@ -49,94 +51,212 @@ void ShenandoahGlobalHeuristics::choose_global_collection_set(ShenandoahCollecti
const ShenandoahHeuristics::RegionData* data,
size_t size, size_t actual_free,
size_t cur_young_garbage) const {
shenandoah_assert_heaplocked_or_safepoint();
auto heap = ShenandoahGenerationalHeap::heap();
auto free_set = heap->free_set();
size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
size_t capacity = heap->soft_max_capacity();
size_t garbage_threshold = region_size_bytes * ShenandoahGarbageThreshold / 100;
size_t ignore_threshold = region_size_bytes * ShenandoahIgnoreGarbageThreshold / 100;
size_t young_evac_reserve = heap->young_generation()->get_evacuation_reserve();
size_t original_young_evac_reserve = young_evac_reserve;
size_t old_evac_reserve = heap->old_generation()->get_evacuation_reserve();
size_t max_young_cset = (size_t) (young_evac_reserve / ShenandoahEvacWaste);
size_t young_cur_cset = 0;
size_t max_old_cset = (size_t) (old_evac_reserve / ShenandoahOldEvacWaste);
size_t old_cur_cset = 0;
size_t old_promo_reserve = heap->old_generation()->get_promoted_reserve();
// Figure out how many unaffiliated young regions are dedicated to mutator and to evacuator. Allow the young
// collector's unaffiliated regions to be transferred to old-gen if old-gen has more easily reclaimed garbage
// than young-gen. At the end of this cycle, any excess regions remaining in old-gen will be transferred back
// to young. Do not transfer the mutator's unaffiliated regions to old-gen. Those must remain available
// to the mutator as it needs to be able to consume this memory during concurrent GC.
size_t unaffiliated_young_regions = heap->young_generation()->free_unaffiliated_regions();
size_t unaffiliated_young_regions = free_set->collector_unaffiliated_regions();
size_t unaffiliated_young_memory = unaffiliated_young_regions * region_size_bytes;
size_t unaffiliated_old_regions = free_set->old_collector_unaffiliated_regions();
size_t unaffiliated_old_memory = unaffiliated_old_regions * region_size_bytes;
if (unaffiliated_young_memory > max_young_cset) {
size_t unaffiliated_mutator_memory = unaffiliated_young_memory - max_young_cset;
unaffiliated_young_memory -= unaffiliated_mutator_memory;
unaffiliated_young_regions = unaffiliated_young_memory / region_size_bytes; // round down
unaffiliated_young_memory = unaffiliated_young_regions * region_size_bytes;
// Figure out how many unaffiliated regions are dedicated to Collector and OldCollector reserves. Let these
// be shuffled between young and old generations in order to expedite evacuation of whichever regions have the
// most garbage, regardless of whether these garbage-first regions reside in young or old generation.
// Excess reserves will be transferred back to the mutator after the collection set has been chosen. At the end
// of evacuation, any reserves not consumed by evacuation will also be transferred to the mutator free set.
// Truncate reserves to only target unaffiliated memory
size_t shared_reserve_regions = 0;
if (young_evac_reserve > unaffiliated_young_memory) {
shared_reserve_regions += unaffiliated_young_regions;
} else {
size_t delta_regions = young_evac_reserve / region_size_bytes;
shared_reserve_regions += delta_regions;
}
young_evac_reserve = 0;
size_t total_old_reserve = old_evac_reserve + old_promo_reserve;
if (total_old_reserve > unaffiliated_old_memory) {
// Give all the unaffiliated memory to the shared reserves. Leave the rest for promo reserve.
shared_reserve_regions += unaffiliated_old_regions;
old_promo_reserve = total_old_reserve - unaffiliated_old_memory;
} else {
size_t delta_regions = old_evac_reserve / region_size_bytes;
shared_reserve_regions += delta_regions;
}
old_evac_reserve = 0;
assert(shared_reserve_regions <=
(heap->young_generation()->free_unaffiliated_regions() + heap->old_generation()->free_unaffiliated_regions()),
"simple math");
// We'll affiliate these unaffiliated regions with either old or young, depending on need.
max_young_cset -= unaffiliated_young_memory;
size_t shared_reserves = shared_reserve_regions * region_size_bytes;
size_t committed_from_shared_reserves = 0;
// Keep track of how many regions we plan to transfer from young to old.
size_t regions_transferred_to_old = 0;
size_t promo_bytes = 0;
size_t old_evac_bytes = 0;
size_t young_evac_bytes = 0;
size_t free_target = (capacity * ShenandoahMinFreeThreshold) / 100 + max_young_cset;
size_t consumed_by_promo = 0; // promo_bytes * ShenandoahPromoEvacWaste
size_t consumed_by_old_evac = 0; // old_evac_bytes * ShenandoahOldEvacWaste
size_t consumed_by_young_evac = 0; // young_evac_bytes * ShenandoahEvacWaste
// Of the memory reclaimed by GC, some of this will need to be reserved for the next GC collection. Use the current
// young reserve as an approximation of the future Collector reserve requirement. Try to end with at least
// (capacity * ShenandoahMinFreeThreshold) / 100 bytes available to the mutator.
size_t free_target = (capacity * ShenandoahMinFreeThreshold) / 100 + original_young_evac_reserve;
size_t min_garbage = (free_target > actual_free) ? (free_target - actual_free) : 0;
log_info(gc, ergo)("Adaptive CSet Selection for GLOBAL. Max Young Evacuation: %zu"
"%s, Max Old Evacuation: %zu%s, Max Either Evacuation: %zu%s, Actual Free: %zu%s.",
byte_size_in_proper_unit(max_young_cset), proper_unit_for_byte_size(max_young_cset),
byte_size_in_proper_unit(max_old_cset), proper_unit_for_byte_size(max_old_cset),
byte_size_in_proper_unit(unaffiliated_young_memory), proper_unit_for_byte_size(unaffiliated_young_memory),
byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free));
size_t aged_regions_promoted = 0;
size_t young_regions_evacuated = 0;
size_t old_regions_evacuated = 0;
log_info(gc, ergo)("Adaptive CSet Selection for GLOBAL. Discretionary evacuation budget (for either old or young): %zu%s"
", Actual Free: %zu%s.",
byte_size_in_proper_unit(shared_reserves), proper_unit_for_byte_size(shared_reserves),
byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free));
size_t cur_garbage = cur_young_garbage;
for (size_t idx = 0; idx < size; idx++) {
ShenandoahHeapRegion* r = data[idx].get_region();
assert(!cset->is_preselected(r->index()), "There should be no preselected regions during GLOBAL GC");
bool add_region = false;
if (r->is_old() || heap->is_tenurable(r)) {
size_t new_cset = old_cur_cset + r->get_live_data_bytes();
if ((r->garbage() > garbage_threshold)) {
while ((new_cset > max_old_cset) && (unaffiliated_young_regions > 0)) {
unaffiliated_young_regions--;
regions_transferred_to_old++;
max_old_cset += region_size_bytes / ShenandoahOldEvacWaste;
size_t region_garbage = r->garbage();
size_t new_garbage = cur_garbage + region_garbage;
bool add_regardless = (region_garbage > ignore_threshold) && (new_garbage < min_garbage);
size_t live_bytes = r->get_live_data_bytes();
if (add_regardless || (region_garbage >= garbage_threshold)) {
if (r->is_old()) {
size_t anticipated_consumption = (size_t) (live_bytes * ShenandoahOldEvacWaste);
size_t new_old_consumption = consumed_by_old_evac + anticipated_consumption;
size_t new_old_evac_reserve = old_evac_reserve;
size_t proposed_old_region_expansion = 0;
while ((new_old_consumption > new_old_evac_reserve) && (committed_from_shared_reserves < shared_reserves)) {
committed_from_shared_reserves += region_size_bytes;
proposed_old_region_expansion++;
new_old_evac_reserve += region_size_bytes;
}
}
if ((new_cset <= max_old_cset) && (r->garbage() > garbage_threshold)) {
add_region = true;
old_cur_cset = new_cset;
}
} else {
assert(r->is_young() && !heap->is_tenurable(r), "DeMorgan's law (assuming r->is_affiliated)");
size_t new_cset = young_cur_cset + r->get_live_data_bytes();
size_t region_garbage = r->garbage();
size_t new_garbage = cur_young_garbage + region_garbage;
bool add_regardless = (region_garbage > ignore_threshold) && (new_garbage < min_garbage);
if (add_regardless || (r->garbage() > garbage_threshold)) {
while ((new_cset > max_young_cset) && (unaffiliated_young_regions > 0)) {
unaffiliated_young_regions--;
max_young_cset += region_size_bytes / ShenandoahEvacWaste;
// If this region has free memory and we choose to place it in the collection set, its free memory is no longer
// available to hold promotion results. So we behave as if its free memory is consumed within the promotion reserve.
size_t anticipated_loss_from_promo_reserve = r->free();
size_t new_promo_consumption = consumed_by_promo + anticipated_loss_from_promo_reserve;
size_t new_promo_reserve = old_promo_reserve;
while ((new_promo_consumption > new_promo_reserve) && (committed_from_shared_reserves < shared_reserves)) {
committed_from_shared_reserves += region_size_bytes;
proposed_old_region_expansion++;
new_promo_reserve += region_size_bytes;
}
if ((new_old_consumption <= new_old_evac_reserve) && (new_promo_consumption <= new_promo_reserve)) {
add_region = true;
old_evac_reserve = new_old_evac_reserve;
old_promo_reserve = new_promo_reserve;
old_evac_bytes += live_bytes;
consumed_by_old_evac = new_old_consumption;
consumed_by_promo = new_promo_consumption;
cur_garbage = new_garbage;
old_regions_evacuated++;
} else {
// We failed to sufficiently expand old so unwind proposed expansion
committed_from_shared_reserves -= proposed_old_region_expansion * region_size_bytes;
}
} else if (heap->is_tenurable(r)) {
size_t anticipated_consumption = (size_t) (live_bytes * ShenandoahPromoEvacWaste);
size_t new_promo_consumption = consumed_by_promo + anticipated_consumption;
size_t new_promo_reserve = old_promo_reserve;
size_t proposed_old_region_expansion = 0;
while ((new_promo_consumption > new_promo_reserve) && (committed_from_shared_reserves < shared_reserves)) {
committed_from_shared_reserves += region_size_bytes;
proposed_old_region_expansion++;
new_promo_reserve += region_size_bytes;
}
if (new_promo_consumption <= new_promo_reserve) {
add_region = true;
old_promo_reserve = new_promo_reserve;
promo_bytes += live_bytes;
consumed_by_promo = new_promo_consumption;
cur_garbage = new_garbage;
aged_regions_promoted++;
} else {
// We failed to sufficiently expand old so unwind proposed expansion
committed_from_shared_reserves -= proposed_old_region_expansion * region_size_bytes;
}
} else {
assert(r->is_young() && !heap->is_tenurable(r), "DeMorgan's law (assuming r->is_affiliated)");
size_t anticipated_consumption = (size_t) (live_bytes * ShenandoahEvacWaste);
size_t new_young_evac_consumption = consumed_by_young_evac + anticipated_consumption;
size_t new_young_evac_reserve = young_evac_reserve;
size_t proposed_young_region_expansion = 0;
while ((new_young_evac_consumption > new_young_evac_reserve) && (committed_from_shared_reserves < shared_reserves)) {
committed_from_shared_reserves += region_size_bytes;
proposed_young_region_expansion++;
new_young_evac_reserve += region_size_bytes;
}
if (new_young_evac_consumption <= new_young_evac_reserve) {
add_region = true;
young_evac_reserve = new_young_evac_reserve;
young_evac_bytes += live_bytes;
consumed_by_young_evac = new_young_evac_consumption;
cur_garbage = new_garbage;
young_regions_evacuated++;
} else {
// We failed to sufficiently expand young so unwind proposed expansion
committed_from_shared_reserves -= proposed_young_region_expansion * region_size_bytes;
}
}
if ((new_cset <= max_young_cset) && (add_regardless || (region_garbage > garbage_threshold))) {
add_region = true;
young_cur_cset = new_cset;
cur_young_garbage = new_garbage;
}
}
if (add_region) {
cset->add_region(r);
}
}
if (regions_transferred_to_old > 0) {
assert(young_evac_reserve > regions_transferred_to_old * region_size_bytes, "young reserve cannot be negative");
heap->young_generation()->set_evacuation_reserve(young_evac_reserve - regions_transferred_to_old * region_size_bytes);
heap->old_generation()->set_evacuation_reserve(old_evac_reserve + regions_transferred_to_old * region_size_bytes);
if (committed_from_shared_reserves < shared_reserves) {
// Give all the rest to promotion
old_promo_reserve += (shared_reserves - committed_from_shared_reserves);
// dead code: committed_from_shared_reserves = shared_reserves;
}
// Consider the effects of round-off:
// 1. We know that the sum over each evacuation multiplied by Evacuation Waste is <= total evacuation reserve
// 2. However, the reserve for each individual evacuation may be rounded down. In the worst case, we will be over budget
// by the number of regions evacuated, since each region's reserve might be under-estimated by at most 1
// 3. Likewise, if we take the sum of bytes evacuated and multiply this by the Evacuation Waste and then round down
// to nearest integer, the calculated reserve will underestimate the true reserve needs by at most 1.
// 4. This explains the adjustments to subtotals in the assert statements below.
assert(young_evac_bytes * ShenandoahEvacWaste <= young_evac_reserve + young_regions_evacuated,
"budget: %zu <= %zu", (size_t) (young_evac_bytes * ShenandoahEvacWaste), young_evac_reserve);
assert(old_evac_bytes * ShenandoahOldEvacWaste <= old_evac_reserve + old_regions_evacuated,
"budget: %zu <= %zu", (size_t) (old_evac_bytes * ShenandoahOldEvacWaste), old_evac_reserve);
assert(promo_bytes * ShenandoahPromoEvacWaste <= old_promo_reserve + aged_regions_promoted,
"budget: %zu <= %zu", (size_t) (promo_bytes * ShenandoahPromoEvacWaste), old_promo_reserve);
assert(young_evac_reserve + old_evac_reserve + old_promo_reserve <=
heap->young_generation()->get_evacuation_reserve() + heap->old_generation()->get_evacuation_reserve() +
heap->old_generation()->get_promoted_reserve(), "Exceeded budget");
if (heap->young_generation()->get_evacuation_reserve() < young_evac_reserve) {
size_t delta_bytes = young_evac_reserve - heap->young_generation()->get_evacuation_reserve();
size_t delta_regions = delta_bytes / region_size_bytes;
size_t regions_to_transfer = MIN2(unaffiliated_old_regions, delta_regions);
log_info(gc)("Global GC moves %zu unaffiliated regions from old collector to young collector reserves", regions_to_transfer);
ssize_t negated_regions = -regions_to_transfer;
heap->free_set()->move_unaffiliated_regions_from_collector_to_old_collector(negated_regions);
} else if (heap->young_generation()->get_evacuation_reserve() > young_evac_reserve) {
size_t delta_bytes = heap->young_generation()->get_evacuation_reserve() - young_evac_reserve;
size_t delta_regions = delta_bytes / region_size_bytes;
size_t regions_to_transfer = MIN2(unaffiliated_young_regions, delta_regions);
log_info(gc)("Global GC moves %zu unaffiliated regions from young collector to old collector reserves", regions_to_transfer);
heap->free_set()->move_unaffiliated_regions_from_collector_to_old_collector(regions_to_transfer);
}
heap->young_generation()->set_evacuation_reserve(young_evac_reserve);
heap->old_generation()->set_evacuation_reserve(old_evac_reserve);
heap->old_generation()->set_promoted_reserve(old_promo_reserve);
}

View File

@ -39,9 +39,9 @@ class ShenandoahGlobalHeuristics : public ShenandoahGenerationalHeuristics {
public:
ShenandoahGlobalHeuristics(ShenandoahGlobalGeneration* generation);
void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) override;
size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) override;
private:
void choose_global_collection_set(ShenandoahCollectionSet* cset,

View File

@ -72,7 +72,7 @@ ShenandoahHeuristics::~ShenandoahHeuristics() {
FREE_C_HEAP_ARRAY(RegionGarbage, _region_data);
}
void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) {
size_t ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) {
ShenandoahHeap* heap = ShenandoahHeap::heap();
assert(collection_set->is_empty(), "Must be empty");
@ -153,8 +153,8 @@ void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collec
if (immediate_percent <= ShenandoahImmediateThreshold) {
choose_collection_set_from_regiondata(collection_set, candidates, cand_idx, immediate_garbage + free);
}
collection_set->summarize(total_garbage, immediate_garbage, immediate_regions);
return 0;
}
void ShenandoahHeuristics::record_cycle_start() {

View File

@ -183,9 +183,12 @@ protected:
static int compare_by_garbage(RegionData a, RegionData b);
virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
RegionData* data, size_t data_size,
size_t free) = 0;
// This is a helper function to choose_collection_set(), returning the number of regions that need to be transferred to
// the old reserve from the young reserve in order to effectively evacuate the chosen collection set. In non-generational
// mode, the return value is 0.
virtual size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
RegionData* data, size_t data_size,
size_t free) = 0;
void adjust_penalty(intx step);
@ -233,7 +236,9 @@ public:
virtual void record_requested_gc();
virtual void choose_collection_set(ShenandoahCollectionSet* collection_set);
// Choose the collection set, returning the number of regions that need to be transferred to the old reserve from the young
// reserve in order to effectively evacuate the chosen collection set. In non-generational mode, the return value is 0.
virtual size_t choose_collection_set(ShenandoahCollectionSet* collection_set);
virtual bool can_unload_classes();

View File

@ -26,9 +26,11 @@
#include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "logging/log.hpp"
#include "utilities/quickSort.hpp"
@ -77,15 +79,17 @@ ShenandoahOldHeuristics::ShenandoahOldHeuristics(ShenandoahOldGeneration* genera
}
bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* collection_set) {
if (unprocessed_old_collection_candidates() == 0) {
return false;
}
_mixed_evac_cset = collection_set;
_included_old_regions = 0;
_evacuated_old_bytes = 0;
_collected_old_bytes = 0;
if (_old_generation->is_preparing_for_mark()) {
// We have unprocessed old collection candidates, but the heuristic has given up on evacuating them.
// This is most likely because they were _all_ pinned at the time of the last mixed evacuation (and
// this in turn is most likely because there are just one or two candidate regions remaining).
log_info(gc, ergo)("Remaining " UINT32_FORMAT " old regions are being coalesced and filled", unprocessed_old_collection_candidates());
log_info(gc, ergo)("Remaining " UINT32_FORMAT
" old regions are being coalesced and filled", unprocessed_old_collection_candidates());
return false;
}
@ -111,150 +115,44 @@ bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* coll
// of memory that can still be evacuated. We address this by reducing the evacuation budget by the amount
// of live memory in that region and by the amount of unallocated memory in that region if the evacuation
// budget is constrained by availability of free memory.
const size_t old_evacuation_reserve = _old_generation->get_evacuation_reserve();
const size_t old_evacuation_budget = (size_t) ((double) old_evacuation_reserve / ShenandoahOldEvacWaste);
size_t unfragmented_available = _old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes();
size_t fragmented_available;
size_t excess_fragmented_available;
_old_evacuation_reserve = _old_generation->get_evacuation_reserve();
_old_evacuation_budget = (size_t) ((double) _old_evacuation_reserve / ShenandoahOldEvacWaste);
if (unfragmented_available > old_evacuation_budget) {
unfragmented_available = old_evacuation_budget;
fragmented_available = 0;
excess_fragmented_available = 0;
// fragmented_available is the amount of memory within partially consumed old regions that may be required to
// hold the results of old evacuations. If all of the memory required by the old evacuation reserve is available
// in unfragmented regions (unaffiliated old regions), then fragmented_available is zero because we do not need
// to evacuate into the existing partially consumed old regions.
// if fragmented_available is non-zero, excess_fragmented_old_budget represents the amount of fragmented memory
// that is available within old, but is not required to hold the results of old evacuation. As old-gen regions
// are added into the collection set, their free memory is subtracted from excess_fragmented_old_budget until the
// excess is exhausted. For old-gen regions subsequently added to the collection set, their free memory is
// subtracted from fragmented_available and from the old_evacuation_budget (since the budget decreases when this
// fragmented_available memory decreases). After fragmented_available has been exhausted, any further old regions
// selected for the cset do not further decrease the old_evacuation_budget because all further evacuation is targeted
// to unfragmented regions.
size_t unaffiliated_available = _old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes();
if (unaffiliated_available > _old_evacuation_reserve) {
_unspent_unfragmented_old_budget = _old_evacuation_budget;
_unspent_fragmented_old_budget = 0;
_excess_fragmented_old_budget = 0;
} else {
assert(_old_generation->available() >= old_evacuation_budget, "Cannot budget more than is available");
fragmented_available = _old_generation->available() - unfragmented_available;
assert(fragmented_available + unfragmented_available >= old_evacuation_budget, "Budgets do not add up");
if (fragmented_available + unfragmented_available > old_evacuation_budget) {
excess_fragmented_available = (fragmented_available + unfragmented_available) - old_evacuation_budget;
fragmented_available -= excess_fragmented_available;
assert(_old_generation->available() >= _old_evacuation_reserve, "Cannot reserve more than is available");
size_t affiliated_available = _old_generation->available() - unaffiliated_available;
assert(affiliated_available + unaffiliated_available >= _old_evacuation_reserve, "Budgets do not add up");
if (affiliated_available + unaffiliated_available > _old_evacuation_reserve) {
_excess_fragmented_old_budget = (affiliated_available + unaffiliated_available) - _old_evacuation_reserve;
affiliated_available -= _excess_fragmented_old_budget;
}
_unspent_fragmented_old_budget = (size_t) ((double) affiliated_available / ShenandoahOldEvacWaste);
_unspent_unfragmented_old_budget = (size_t) ((double) unaffiliated_available / ShenandoahOldEvacWaste);
}
size_t remaining_old_evacuation_budget = old_evacuation_budget;
log_debug(gc)("Choose old regions for mixed collection: old evacuation budget: %zu%s, candidates: %u",
byte_size_in_proper_unit(old_evacuation_budget), proper_unit_for_byte_size(old_evacuation_budget),
log_debug(gc)("Choose old regions for mixed collection: old evacuation budget: " PROPERFMT ", candidates: %u",
PROPERFMTARGS(_old_evacuation_budget),
unprocessed_old_collection_candidates());
size_t lost_evacuation_capacity = 0;
// The number of old-gen regions that were selected as candidates for collection at the end of the most recent old-gen
// concurrent marking phase and have not yet been collected is represented by unprocessed_old_collection_candidates().
// Candidate regions are ordered according to increasing amount of live data. If there is not sufficient room to
// evacuate region N, then there is no need to even consider evacuating region N+1.
while (unprocessed_old_collection_candidates() > 0) {
// Old collection candidates are sorted in order of decreasing garbage contained therein.
ShenandoahHeapRegion* r = next_old_collection_candidate();
if (r == nullptr) {
break;
}
assert(r->is_regular(), "There should be no humongous regions in the set of mixed-evac candidates");
// If region r is evacuated to fragmented memory (to free memory within a partially used region), then we need
// to decrease the capacity of the fragmented memory by the scaled loss.
const size_t live_data_for_evacuation = r->get_live_data_bytes();
size_t lost_available = r->free();
if ((lost_available > 0) && (excess_fragmented_available > 0)) {
if (lost_available < excess_fragmented_available) {
excess_fragmented_available -= lost_available;
lost_evacuation_capacity -= lost_available;
lost_available = 0;
} else {
lost_available -= excess_fragmented_available;
lost_evacuation_capacity -= excess_fragmented_available;
excess_fragmented_available = 0;
}
}
size_t scaled_loss = (size_t) ((double) lost_available / ShenandoahOldEvacWaste);
if ((lost_available > 0) && (fragmented_available > 0)) {
if (scaled_loss + live_data_for_evacuation < fragmented_available) {
fragmented_available -= scaled_loss;
scaled_loss = 0;
} else {
// We will have to allocate this region's evacuation memory from unfragmented memory, so don't bother
// to decrement scaled_loss
}
}
if (scaled_loss > 0) {
// We were not able to account for the lost free memory within fragmented memory, so we need to take this
// allocation out of unfragmented memory. Unfragmented memory does not need to account for loss of free.
if (live_data_for_evacuation > unfragmented_available) {
// There is no room to evacuate this region or any that come after it in within the candidates array.
log_debug(gc, cset)("Not enough unfragmented memory (%zu) to hold evacuees (%zu) from region: (%zu)",
unfragmented_available, live_data_for_evacuation, r->index());
break;
} else {
unfragmented_available -= live_data_for_evacuation;
}
} else {
// Since scaled_loss == 0, we have accounted for the loss of free memory, so we can allocate from either
// fragmented or unfragmented available memory. Use up the fragmented memory budget first.
size_t evacuation_need = live_data_for_evacuation;
if (evacuation_need > fragmented_available) {
evacuation_need -= fragmented_available;
fragmented_available = 0;
} else {
fragmented_available -= evacuation_need;
evacuation_need = 0;
}
if (evacuation_need > unfragmented_available) {
// There is no room to evacuate this region or any that come after it in within the candidates array.
log_debug(gc, cset)("Not enough unfragmented memory (%zu) to hold evacuees (%zu) from region: (%zu)",
unfragmented_available, live_data_for_evacuation, r->index());
break;
} else {
unfragmented_available -= evacuation_need;
// dead code: evacuation_need == 0;
}
}
collection_set->add_region(r);
included_old_regions++;
evacuated_old_bytes += live_data_for_evacuation;
collected_old_bytes += r->garbage();
consume_old_collection_candidate();
}
if (_first_pinned_candidate != NOT_FOUND) {
// Need to deal with pinned regions
slide_pinned_regions_to_front();
}
decrease_unprocessed_old_collection_candidates_live_memory(evacuated_old_bytes);
if (included_old_regions > 0) {
log_info(gc, ergo)("Old-gen piggyback evac (" UINT32_FORMAT " regions, evacuating " PROPERFMT ", reclaiming: " PROPERFMT ")",
included_old_regions, PROPERFMTARGS(evacuated_old_bytes), PROPERFMTARGS(collected_old_bytes));
}
if (unprocessed_old_collection_candidates() == 0) {
// We have added the last of our collection candidates to a mixed collection.
// Any triggers that occurred during mixed evacuations may no longer be valid. They can retrigger if appropriate.
clear_triggers();
_old_generation->complete_mixed_evacuations();
} else if (included_old_regions == 0) {
// We have candidates, but none were included for evacuation - are they all pinned?
// or did we just not have enough room for any of them in this collection set?
// We don't want a region with a stuck pin to prevent subsequent old collections, so
// if they are all pinned we transition to a state that will allow us to make these uncollected
// (pinned) regions parsable.
if (all_candidates_are_pinned()) {
log_info(gc, ergo)("All candidate regions " UINT32_FORMAT " are pinned", unprocessed_old_collection_candidates());
_old_generation->abandon_mixed_evacuations();
} else {
log_info(gc, ergo)("No regions selected for mixed collection. "
"Old evacuation budget: " PROPERFMT ", Remaining evacuation budget: " PROPERFMT
", Lost capacity: " PROPERFMT
", Next candidate: " UINT32_FORMAT ", Last candidate: " UINT32_FORMAT,
PROPERFMTARGS(old_evacuation_reserve),
PROPERFMTARGS(remaining_old_evacuation_budget),
PROPERFMTARGS(lost_evacuation_capacity),
_next_old_collection_candidate, _last_old_collection_candidate);
}
}
return (included_old_regions > 0);
return add_old_regions_to_cset();
}
bool ShenandoahOldHeuristics::all_candidates_are_pinned() {
@ -328,6 +226,187 @@ void ShenandoahOldHeuristics::slide_pinned_regions_to_front() {
_next_old_collection_candidate = write_index + 1;
}
bool ShenandoahOldHeuristics::add_old_regions_to_cset() {
if (unprocessed_old_collection_candidates() == 0) {
return false;
}
_first_pinned_candidate = NOT_FOUND;
// The number of old-gen regions that were selected as candidates for collection at the end of the most recent old-gen
// concurrent marking phase and have not yet been collected is represented by unprocessed_old_collection_candidates().
// Candidate regions are ordered according to increasing amount of live data. If there is not sufficient room to
// evacuate region N, then there is no need to even consider evacuating region N+1.
while (unprocessed_old_collection_candidates() > 0) {
// Old collection candidates are sorted in order of decreasing garbage contained therein.
ShenandoahHeapRegion* r = next_old_collection_candidate();
if (r == nullptr) {
break;
}
assert(r->is_regular(), "There should be no humongous regions in the set of mixed-evac candidates");
// If region r is evacuated to fragmented memory (to free memory within a partially used region), then we need
// to decrease the capacity of the fragmented memory by the scaled loss.
const size_t live_data_for_evacuation = r->get_live_data_bytes();
size_t lost_available = r->free();
ssize_t fragmented_delta = 0;
ssize_t unfragmented_delta = 0;
ssize_t excess_delta = 0;
// We must decrease our mixed-evacuation budgets proportional to the lost available memory. This memory that is no
// longer available was likely "promised" to promotions, so we must decrease our mixed evacuations now.
// (e.g. if we lose 14 bytes of available old memory, we must decrease the evacuation budget by 10 bytes.)
size_t scaled_loss = (size_t) (((double) lost_available) / ShenandoahOldEvacWaste);
if (lost_available > 0) {
// We need to subtract lost_available from our working evacuation budgets
if (scaled_loss < _excess_fragmented_old_budget) {
excess_delta -= scaled_loss;
_excess_fragmented_old_budget -= scaled_loss;
} else {
excess_delta -= _excess_fragmented_old_budget;
_excess_fragmented_old_budget = 0;
}
if (scaled_loss < _unspent_fragmented_old_budget) {
_unspent_fragmented_old_budget -= scaled_loss;
fragmented_delta = -scaled_loss;
scaled_loss = 0;
} else {
scaled_loss -= _unspent_fragmented_old_budget;
fragmented_delta = -_unspent_fragmented_old_budget;
_unspent_fragmented_old_budget = 0;
}
if (scaled_loss < _unspent_unfragmented_old_budget) {
_unspent_unfragmented_old_budget -= scaled_loss;
unfragmented_delta = -scaled_loss;
scaled_loss = 0;
} else {
scaled_loss -= _unspent_unfragmented_old_budget;
unfragmented_delta = -_unspent_unfragmented_old_budget;
_unspent_unfragmented_old_budget = 0;
}
}
// Allocate replica from unfragmented memory if that exists
size_t evacuation_need = live_data_for_evacuation;
if (evacuation_need < _unspent_unfragmented_old_budget) {
_unspent_unfragmented_old_budget -= evacuation_need;
} else {
if (_unspent_unfragmented_old_budget > 0) {
evacuation_need -= _unspent_unfragmented_old_budget;
unfragmented_delta -= _unspent_unfragmented_old_budget;
_unspent_unfragmented_old_budget = 0;
}
// Take the remaining allocation out of fragmented available
if (_unspent_fragmented_old_budget > evacuation_need) {
_unspent_fragmented_old_budget -= evacuation_need;
} else {
// We cannot add this region into the collection set. We're done. Undo the adjustments to available.
_unspent_fragmented_old_budget -= fragmented_delta;
_unspent_unfragmented_old_budget -= unfragmented_delta;
_excess_fragmented_old_budget -= excess_delta;
break;
}
}
_mixed_evac_cset->add_region(r);
_included_old_regions++;
_evacuated_old_bytes += live_data_for_evacuation;
_collected_old_bytes += r->garbage();
consume_old_collection_candidate();
}
return true;
}
bool ShenandoahOldHeuristics::finalize_mixed_evacs() {
if (_first_pinned_candidate != NOT_FOUND) {
// Need to deal with pinned regions
slide_pinned_regions_to_front();
}
decrease_unprocessed_old_collection_candidates_live_memory(_evacuated_old_bytes);
if (_included_old_regions > 0) {
log_info(gc)("Old-gen mixed evac (%zu regions, evacuating %zu%s, reclaiming: %zu%s)",
_included_old_regions,
byte_size_in_proper_unit(_evacuated_old_bytes), proper_unit_for_byte_size(_evacuated_old_bytes),
byte_size_in_proper_unit(_collected_old_bytes), proper_unit_for_byte_size(_collected_old_bytes));
}
if (unprocessed_old_collection_candidates() == 0) {
// We have added the last of our collection candidates to a mixed collection.
// Any triggers that occurred during mixed evacuations may no longer be valid. They can retrigger if appropriate.
clear_triggers();
_old_generation->complete_mixed_evacuations();
} else if (_included_old_regions == 0) {
// We have candidates, but none were included for evacuation - are they all pinned?
// or did we just not have enough room for any of them in this collection set?
// We don't want a region with a stuck pin to prevent subsequent old collections, so
// if they are all pinned we transition to a state that will allow us to make these uncollected
// (pinned) regions parsable.
if (all_candidates_are_pinned()) {
log_info(gc)("All candidate regions " UINT32_FORMAT " are pinned", unprocessed_old_collection_candidates());
_old_generation->abandon_mixed_evacuations();
} else {
log_info(gc)("No regions selected for mixed collection. "
"Old evacuation budget: " PROPERFMT ", Next candidate: " UINT32_FORMAT ", Last candidate: " UINT32_FORMAT,
PROPERFMTARGS(_old_evacuation_reserve),
_next_old_collection_candidate, _last_old_collection_candidate);
}
}
return (_included_old_regions > 0);
}
bool ShenandoahOldHeuristics::top_off_collection_set(size_t &add_regions_to_old) {
if (unprocessed_old_collection_candidates() == 0) {
add_regions_to_old = 0;
return false;
} else {
ShenandoahYoungGeneration* young_generation = _heap->young_generation();
size_t young_unaffiliated_regions = young_generation->free_unaffiliated_regions();
size_t max_young_cset = young_generation->get_evacuation_reserve();
// We have budgeted to ensure the live_bytes_in_tenurable_regions() get evacuated into old generation. The young reserve
// is only for untenurable region evacuations.
size_t planned_young_evac = _mixed_evac_cset->get_live_bytes_in_untenurable_regions();
size_t consumed_from_young_cset = (size_t) (planned_young_evac * ShenandoahEvacWaste);
size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
size_t regions_required_for_collector_reserve = (consumed_from_young_cset + region_size_bytes - 1) / region_size_bytes;
assert(consumed_from_young_cset <= max_young_cset, "sanity");
assert(max_young_cset <= young_unaffiliated_regions * region_size_bytes, "sanity");
size_t regions_for_old_expansion;
if (consumed_from_young_cset < max_young_cset) {
size_t excess_young_reserves = max_young_cset - consumed_from_young_cset;
// We can only transfer empty regions from young to old. Furthermore, we must be careful to assure that the young
// Collector reserve that remains after transfer is comprised entirely of empty (unaffiliated) regions.
size_t consumed_unaffiliated_regions = (consumed_from_young_cset + region_size_bytes - 1) / region_size_bytes;
size_t available_unaffiliated_regions = ((young_unaffiliated_regions > consumed_unaffiliated_regions)?
young_unaffiliated_regions - consumed_unaffiliated_regions: 0);
regions_for_old_expansion = MIN2(available_unaffiliated_regions, excess_young_reserves / region_size_bytes);
} else {
regions_for_old_expansion = 0;
}
if (regions_for_old_expansion > 0) {
log_info(gc)("Augmenting old-gen evacuation budget from unexpended young-generation reserve by %zu regions",
regions_for_old_expansion);
add_regions_to_old = regions_for_old_expansion;
size_t budget_supplement = region_size_bytes * regions_for_old_expansion;
size_t supplement_without_waste = (size_t) (((double) budget_supplement) / ShenandoahOldEvacWaste);
_old_evacuation_budget += supplement_without_waste;
_unspent_unfragmented_old_budget += supplement_without_waste;
_old_generation->augment_evacuation_reserve(budget_supplement);
young_generation->set_evacuation_reserve(max_young_cset - budget_supplement);
return add_old_regions_to_cset();
} else {
add_regions_to_old = 0;
return false;
}
}
}
void ShenandoahOldHeuristics::prepare_for_old_collections() {
ShenandoahHeap* heap = ShenandoahHeap::heap();
@ -336,7 +415,6 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() {
size_t immediate_garbage = 0;
size_t immediate_regions = 0;
size_t live_data = 0;
RegionData* candidates = _region_data;
for (size_t i = 0; i < num_regions; i++) {
ShenandoahHeapRegion* region = heap->get_region(i);
@ -355,10 +433,10 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() {
// else, regions that were promoted in place had 0 old live data at mark start
if (region->is_regular() || region->is_regular_pinned()) {
// Only place regular or pinned regions with live data into the candidate set.
// Pinned regions cannot be evacuated, but we are not actually choosing candidates
// for the collection set here. That happens later during the next young GC cycle,
// by which time, the pinned region may no longer be pinned.
// Only place regular or pinned regions with live data into the candidate set.
// Pinned regions cannot be evacuated, but we are not actually choosing candidates
// for the collection set here. That happens later during the next young GC cycle,
// by which time, the pinned region may no longer be pinned.
if (!region->has_live()) {
assert(!region->is_pinned(), "Pinned region should have live (pinned) objects.");
region->make_trash_immediate();
@ -561,6 +639,7 @@ unsigned int ShenandoahOldHeuristics::get_coalesce_and_fill_candidates(Shenandoa
void ShenandoahOldHeuristics::abandon_collection_candidates() {
_last_old_collection_candidate = 0;
_next_old_collection_candidate = 0;
_live_bytes_in_unprocessed_candidates = 0;
_last_old_region = 0;
}
@ -805,8 +884,9 @@ bool ShenandoahOldHeuristics::is_experimental() {
return true;
}
void ShenandoahOldHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
ShenandoahHeuristics::RegionData* data,
size_t data_size, size_t free) {
size_t ShenandoahOldHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
ShenandoahHeuristics::RegionData* data,
size_t data_size, size_t free) {
ShouldNotReachHere();
return 0;
}

View File

@ -102,6 +102,30 @@ private:
size_t _fragmentation_first_old_region;
size_t _fragmentation_last_old_region;
// State variables involved in construction of a mixed-evacuation collection set. These variables are initialized
// when client code invokes prime_collection_set(). They are consulted, and sometimes modified, when client code
// calls top_off_collection_set() to possibly expand the number of old-gen regions in a mixed evacuation cset, and by
// finalize_mixed_evacs(), which prepares the way for mixed evacuations to begin.
ShenandoahCollectionSet* _mixed_evac_cset;
size_t _evacuated_old_bytes;
size_t _collected_old_bytes;
size_t _included_old_regions;
size_t _old_evacuation_reserve;
size_t _old_evacuation_budget;
// This represents the amount of memory that can be evacuated from old into initially empty regions during a mixed evacuation.
// This is the total amount of unfragmented free memory in old divided by ShenandoahOldEvacWaste.
size_t _unspent_unfragmented_old_budget;
// This represents the amount of memory that can be evacuated from old into initially non-empty regions during a mixed
// evacuation. This is the total amount of initially fragmented free memory in old divided by ShenandoahOldEvacWaste.
size_t _unspent_fragmented_old_budget;
// If there is more available memory in old than is required by the intended mixed evacuation, the amount of excess
// memory is represented by _excess_fragmented_old. To convert this value into a promotion budget, multiply by
// ShenandoahOldEvacWaste and divide by ShenandoahPromoWaste.
size_t _excess_fragmented_old_budget;
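// Illustrative arithmetic for the conversion described above (a sketch using assumed example values, not values
// taken from this change): if ShenandoahOldEvacWaste and ShenandoahPromoWaste were both 1.2, an excess fragmented
// old budget of 24 MB would correspond to 24 MB * 1.2 = 28.8 MB of raw old free memory, which in turn funds a
// promotion budget of roughly 28.8 MB / 1.2 = 24 MB.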
// The value of command-line argument ShenandoahOldGarbageThreshold represents the percent of garbage that must
// be present within an old-generation region before that region is considered a good candidate for inclusion in
// the collection set under normal circumstances. For our purposes, normal circumstances are when the memory consumed
@ -131,7 +155,15 @@ private:
void set_trigger_if_old_is_overgrown();
protected:
void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set, RegionData* data, size_t data_size, size_t free) override;
size_t
choose_collection_set_from_regiondata(ShenandoahCollectionSet* set, RegionData* data, size_t data_size, size_t free) override;
// This internal helper routine adds as many mixed evacuation candidate regions as fit within the old-gen evacuation budget
// to the collection set. This may be called twice to prepare for any given mixed evacuation cycle, the first time with
// a conservative old evacuation budget, and the second time with a larger, more aggressive old evacuation budget. Returns
// true iff we need to finalize mixed evacs. (If no regions are added to the collection set, there is no need to finalize
// mixed evacuations.)
bool add_old_regions_to_cset();
public:
explicit ShenandoahOldHeuristics(ShenandoahOldGeneration* generation, ShenandoahGenerationalHeap* gen_heap);
@ -139,8 +171,22 @@ public:
// Prepare for evacuation of old-gen regions by capturing the mark results of a recently completed concurrent mark pass.
void prepare_for_old_collections();
// Return true iff the collection set is primed with at least one old-gen region.
bool prime_collection_set(ShenandoahCollectionSet* set);
// Initialize instance variables to support the preparation of a mixed-evacuation collection set. Adds as many
// old candidate regions into the collection set as can fit within the initial conservative old evacuation budget.
// Returns true iff we need to finalize mixed evacs.
bool prime_collection_set(ShenandoahCollectionSet* collection_set);
// If young evacuation did not consume all of its available evacuation reserve, add as many additional mixed-
// evacuation candidate regions into the collection set as will fit within this excess repurposed reserve.
// Returns true iff we need to finalize mixed evacs. Upon return, the out parameter add_regions_to_old holds the
// number of regions to transfer from young to old.
bool top_off_collection_set(size_t &add_regions_to_old);
// Having added all eligible mixed-evacuation candidates to the collection set, this function updates the total count
// of how much old-gen memory remains to be evacuated and adjusts the representation of old-gen regions that remain to
// be evacuated, giving special attention to regions that are currently pinned. It outputs relevant log messages and
// returns true iff the collection set holds at least one unpinned mixed evacuation candidate.
bool finalize_mixed_evacs();
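// A minimal sketch of the intended calling sequence (local names here are illustrative; the real call sites are in
// the young heuristics later in this change):
//
//   bool need_finalize = old_heuristics->prime_collection_set(cset);          // conservative old budget
//   ... select the young collection set ...
//   size_t regions_to_old;
//   need_finalize |= old_heuristics->top_off_collection_set(regions_to_old);  // spend leftover young reserve
//   if (need_finalize) {
//     old_heuristics->finalize_mixed_evacs();
//   }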
// How many old-collection candidates have not yet been processed?
uint unprocessed_old_collection_candidates() const;

View File

@ -50,9 +50,9 @@ bool ShenandoahPassiveHeuristics::should_degenerate_cycle() {
return ShenandoahDegeneratedGC;
}
void ShenandoahPassiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) {
size_t ShenandoahPassiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) {
assert(ShenandoahDegeneratedGC, "This path is only taken for Degenerated GC");
// Do not select too large CSet that would overflow the available free space.
@ -76,4 +76,5 @@ void ShenandoahPassiveHeuristics::choose_collection_set_from_regiondata(Shenando
cset->add_region(r);
}
}
return 0;
}

View File

@ -46,9 +46,9 @@ public:
virtual bool should_degenerate_cycle();
virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
RegionData* data, size_t data_size,
size_t free);
virtual size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
RegionData* data, size_t data_size,
size_t free);
virtual const char* name() { return "Passive"; }
virtual bool is_diagnostic() { return true; }

View File

@ -59,9 +59,9 @@ bool ShenandoahStaticHeuristics::should_start_gc() {
return ShenandoahHeuristics::should_start_gc();
}
void ShenandoahStaticHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t free) {
size_t ShenandoahStaticHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t free) {
size_t threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;
for (size_t idx = 0; idx < size; idx++) {
@ -70,4 +70,5 @@ void ShenandoahStaticHeuristics::choose_collection_set_from_regiondata(Shenandoa
cset->add_region(r);
}
}
return 0;
}

View File

@ -40,9 +40,9 @@ public:
virtual bool should_start_gc();
virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t free);
virtual size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t free);
virtual const char* name() { return "Static"; }
virtual bool is_diagnostic() { return false; }

View File

@ -33,11 +33,11 @@
#include "utilities/quickSort.hpp"
ShenandoahYoungHeuristics::ShenandoahYoungHeuristics(ShenandoahYoungGeneration* generation)
: ShenandoahGenerationalHeuristics(generation) {
: ShenandoahGenerationalHeuristics(generation) {
}
void ShenandoahYoungHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
size_t ShenandoahYoungHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) {
// See comments in ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata():
@ -48,6 +48,8 @@ void ShenandoahYoungHeuristics::choose_collection_set_from_regiondata(Shenandoah
// array before younger regions that typically contain more garbage. This is one reason why,
// for example, we continue examining regions even after rejecting a region that has
// more live data than we can evacuate.
ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
bool need_to_finalize_mixed = heap->old_generation()->heuristics()->prime_collection_set(cset);
// Better select garbage-first regions
QuickSort::sort<RegionData>(data, (int) size, compare_by_garbage);
@ -55,6 +57,17 @@ void ShenandoahYoungHeuristics::choose_collection_set_from_regiondata(Shenandoah
size_t cur_young_garbage = add_preselected_regions_to_collection_set(cset, data, size);
choose_young_collection_set(cset, data, size, actual_free, cur_young_garbage);
// Especially when young-gen trigger is expedited in order to finish mixed evacuations, there may not be
// enough consolidated garbage to make effective use of young-gen evacuation reserve. If there is still
// young-gen reserve available following selection of the young-gen collection set, see if we can use
// this memory to expand the old-gen evacuation collection set.
size_t add_regions_to_old = 0;
need_to_finalize_mixed |= heap->old_generation()->heuristics()->top_off_collection_set(add_regions_to_old);
if (need_to_finalize_mixed) {
heap->old_generation()->heuristics()->finalize_mixed_evacs();
}
return add_regions_to_old;
}
void ShenandoahYoungHeuristics::choose_young_collection_set(ShenandoahCollectionSet* cset,

View File

@ -38,9 +38,9 @@ public:
explicit ShenandoahYoungHeuristics(ShenandoahYoungGeneration* generation);
void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) override;
size_t choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
RegionData* data, size_t size,
size_t actual_free) override;
bool should_start_gc() override;

View File

@ -50,6 +50,8 @@ ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, ReservedS
_region_count(0),
_old_garbage(0),
_preselected_regions(nullptr),
_young_available_bytes_collected(0),
_old_available_bytes_collected(0),
_current_index(0) {
// The collection set map is reserved to cover the entire heap *and* zero addresses.
@ -104,6 +106,7 @@ void ShenandoahCollectionSet::add_region(ShenandoahHeapRegion* r) {
}
} else if (r->is_old()) {
_old_bytes_to_evacuate += live;
_old_available_bytes_collected += free;
_old_garbage += garbage;
}
@ -140,6 +143,7 @@ void ShenandoahCollectionSet::clear() {
_old_bytes_to_evacuate = 0;
_young_available_bytes_collected = 0;
_old_available_bytes_collected = 0;
_has_old_regions = false;
}

View File

@ -75,6 +75,10 @@ private:
// should be subtracted from what's available.
size_t _young_available_bytes_collected;
// When a region having memory available to be allocated is added to the collection set, the region's available memory
// should be subtracted from what's available.
size_t _old_available_bytes_collected;
shenandoah_padding(0);
volatile size_t _current_index;
shenandoah_padding(1);
@ -121,6 +125,9 @@ public:
// Returns the amount of free bytes in young regions in the collection set.
size_t get_young_available_bytes_collected() const { return _young_available_bytes_collected; }
// Returns the amount of free bytes in old regions in the collection set.
size_t get_old_available_bytes_collected() const { return _old_available_bytes_collected; }
// Returns the amount of garbage in old regions in the collection set.
inline size_t get_old_garbage() const;

View File

@ -204,9 +204,8 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
return false;
}
entry_concurrent_update_refs_prepare(heap);
// Perform update-refs phase.
entry_concurrent_update_refs_prepare(heap);
if (ShenandoahVerify) {
vmop_entry_init_update_refs();
}
@ -227,6 +226,7 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
// Update references freed up collection set, kick the cleanup to reclaim the space.
entry_cleanup_complete();
} else {
_abbreviated = true;
if (!entry_final_roots()) {
assert(_degen_point != _degenerated_unset, "Need to know where to start degenerated cycle");
return false;
@ -235,7 +235,6 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
if (VerifyAfterGC) {
vmop_entry_verify_final_roots();
}
_abbreviated = true;
}
// We defer generation resizing actions until after cset regions have been recycled. We do this even following an
@ -282,7 +281,6 @@ bool ShenandoahConcurrentGC::complete_abbreviated_cycle() {
return true;
}
void ShenandoahConcurrentGC::vmop_entry_init_mark() {
ShenandoahHeap* const heap = ShenandoahHeap::heap();
TraceCollectorStats tcs(heap->monitoring_support()->stw_collection_counters());
@ -536,6 +534,12 @@ void ShenandoahConcurrentGC::entry_cleanup_early() {
// This phase does not use workers, no need for setup
heap->try_inject_alloc_failure();
op_cleanup_early();
if (!heap->is_evacuation_in_progress()) {
// This is an abbreviated cycle. Rebuild the freeset in order to establish reserves for the next GC cycle. Doing
// the rebuild ASAP also expedites availability of immediate trash, reducing the likelihood that we will degenerate
// during promote-in-place processing.
heap->rebuild_free_set(true /*concurrent*/);
}
}
void ShenandoahConcurrentGC::entry_evacuate() {

View File

@ -326,7 +326,7 @@ void ShenandoahRegionPartitions::initialize_old_collector() {
}
void ShenandoahRegionPartitions::make_all_regions_unavailable() {
shenandoah_assert_heaplocked();
shenandoah_assert_heaplocked_or_safepoint();
for (size_t partition_id = 0; partition_id < IntNumPartitions; partition_id++) {
_membership[partition_id].clear_all();
_leftmosts[partition_id] = _max;
@ -439,6 +439,13 @@ void ShenandoahRegionPartitions::set_capacity_of(ShenandoahFreeSetPartitionId wh
_available[int(which_partition)] = value - _used[int(which_partition)];
}
void ShenandoahRegionPartitions::set_used_by(ShenandoahFreeSetPartitionId which_partition, size_t value) {
shenandoah_assert_heaplocked();
assert (which_partition < NumPartitions, "selected free set must be valid");
_used[int(which_partition)] = value;
_available[int(which_partition)] = _capacity[int(which_partition)] - value;
}
void ShenandoahRegionPartitions::increase_capacity(ShenandoahFreeSetPartitionId which_partition, size_t bytes) {
shenandoah_assert_heaplocked();
@ -900,7 +907,7 @@ idx_t ShenandoahRegionPartitions::rightmost_empty(ShenandoahFreeSetPartitionId w
#ifdef ASSERT
void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) {
void ShenandoahRegionPartitions::assert_bounds() {
size_t capacities[UIntNumPartitions];
size_t used[UIntNumPartitions];
@ -936,7 +943,7 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) {
switch (partition) {
case ShenandoahFreeSetPartitionId::NotFree:
{
assert(!validate_totals || (capacity != _region_size_bytes), "Should not be retired if empty");
assert(capacity != _region_size_bytes, "Should not be retired if empty");
ShenandoahHeapRegion* r = ShenandoahHeap::heap()->get_region(i);
if (r->is_humongous()) {
if (r->is_old()) {
@ -976,12 +983,12 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) {
case ShenandoahFreeSetPartitionId::Collector:
case ShenandoahFreeSetPartitionId::OldCollector:
{
ShenandoahHeapRegion* r = ShenandoahHeap::heap()->get_region(i);
assert(capacity > 0, "free regions must have allocation capacity");
bool is_empty = (capacity == _region_size_bytes);
regions[int(partition)]++;
used[int(partition)] += _region_size_bytes - capacity;
capacities[int(partition)] += _region_size_bytes;
if (i < leftmosts[int(partition)]) {
leftmosts[int(partition)] = i;
}
@ -1020,20 +1027,20 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) {
idx_t beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
idx_t end_off = rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::Mutator),
"Mutator free regions before the leftmost: %zd, bound %zd",
"Mutator free region before the leftmost: %zd, bound %zd",
beg_off, leftmost(ShenandoahFreeSetPartitionId::Mutator));
assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::Mutator),
"Mutator free regions past the rightmost: %zd, bound %zd",
"Mutator free region past the rightmost: %zd, bound %zd",
end_off, rightmost(ShenandoahFreeSetPartitionId::Mutator));
beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
assert (beg_off >= leftmost_empty(ShenandoahFreeSetPartitionId::Mutator),
"Mutator free empty regions before the leftmost: %zd, bound %zd",
beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::Mutator));
assert (end_off <= rightmost_empty(ShenandoahFreeSetPartitionId::Mutator),
"Mutator free empty regions past the rightmost: %zd, bound %zd",
end_off, rightmost_empty(ShenandoahFreeSetPartitionId::Mutator));
assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)],
"free empty region (%zd) before the leftmost bound %zd",
beg_off, _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)]);
assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)],
"free empty region (%zd) past the rightmost bound %zd",
end_off, _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)]);
// Performance invariants. Failing these would not break the free partition, but performance would suffer.
assert (leftmost(ShenandoahFreeSetPartitionId::Collector) <= _max, "leftmost in bounds: %zd < %zd",
@ -1053,20 +1060,20 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) {
beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::Collector)];
end_off = rightmosts[int(ShenandoahFreeSetPartitionId::Collector)];
assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::Collector),
"Collector free regions before the leftmost: %zd, bound %zd",
"Collector free region before the leftmost: %zd, bound %zd",
beg_off, leftmost(ShenandoahFreeSetPartitionId::Collector));
assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::Collector),
"Collector free regions past the rightmost: %zd, bound %zd",
"Collector free region past the rightmost: %zd, bound %zd",
end_off, rightmost(ShenandoahFreeSetPartitionId::Collector));
beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::Collector)];
end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::Collector)];
assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
"Collector free empty regions before the leftmost: %zd, bound %zd",
beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::Collector));
"Collector free empty region before the leftmost: %zd, bound %zd",
beg_off, _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)]);
assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
"Collector free empty regions past the rightmost: %zd, bound %zd",
end_off, rightmost_empty(ShenandoahFreeSetPartitionId::Collector));
"Collector free empty region past the rightmost: %zd, bound %zd",
end_off, _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)]);
// Performance invariants. Failing these would not break the free partition, but performance would suffer.
assert (leftmost(ShenandoahFreeSetPartitionId::OldCollector) <= _max, "OldCollector leftmost in bounds: %zd < %zd",
@ -1083,106 +1090,109 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) {
ShenandoahFreeSetPartitionId::OldCollector),
"OldCollector rightmost region should be free: %zd", rightmost(ShenandoahFreeSetPartitionId::OldCollector));
// Concurrent recycling of trash recycles a region (changing its state from is_trash to is_empty without the heap lock),
// If OldCollector partition is empty, leftmosts will both equal max, rightmosts will both equal zero.
// Likewise for empty region partitions.
beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
end_off = rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::OldCollector),
"OldCollector free regions before the leftmost: %zd, bound %zd",
assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::OldCollector), "free regions before the leftmost: %zd, bound %zd",
beg_off, leftmost(ShenandoahFreeSetPartitionId::OldCollector));
assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::OldCollector),
"OldCollector free regions past the rightmost: %zd, bound %zd",
assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::OldCollector), "free regions past the rightmost: %zd, bound %zd",
end_off, rightmost(ShenandoahFreeSetPartitionId::OldCollector));
beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
"OldCollector free empty regions before the leftmost: %zd, bound %zd",
beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::OldCollector));
"free empty region (%zd) before the leftmost bound %zd, region %s trash",
beg_off, _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
((beg_off >= _max)? "out of bounds is not":
(ShenandoahHeap::heap()->get_region(_leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)])->is_trash()?
"is": "is not")));
assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
"OldCollector free empty regions past the rightmost: %zd, bound %zd",
end_off, rightmost_empty(ShenandoahFreeSetPartitionId::OldCollector));
"free empty region (%zd) past the rightmost bound %zd, region %s trash",
end_off, _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
((end_off < 0)? "out of bounds is not" :
(ShenandoahHeap::heap()->get_region(_rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)])->is_trash()?
"is": "is not")));
if (validate_totals) {
// young_retired_regions need to be added to either Mutator or Collector partitions, 100% used.
// Give enough of young_retired_regions, young_retired_capacity, young_retired_used
// to the Mutator partition to top it off so that it matches the running totals.
//
// Give any remnants to the Collector partition. After topping off the Collector partition, its values
// should also match running totals.
// young_retired_regions need to be added to either Mutator or Collector partitions, 100% used.
// Give enough of young_retired_regions, young_retired_capacity, young_retired_used
// to the Mutator partition to top it off so that it matches the running totals.
//
// Give any remnants to the Collector partition. After topping off the Collector partition, its values
// should also match running totals.
assert(young_retired_regions * _region_size_bytes == young_retired_capacity, "sanity");
assert(young_retired_capacity == young_retired_used, "sanity");
assert(young_retired_regions * _region_size_bytes == young_retired_capacity, "sanity");
assert(young_retired_capacity == young_retired_used, "sanity");
assert(capacities[int(ShenandoahFreeSetPartitionId::OldCollector)]
== _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old collector capacities must match (%zu != %zu)",
capacities[int(ShenandoahFreeSetPartitionId::OldCollector)],
_capacity[int(ShenandoahFreeSetPartitionId::OldCollector)]);
assert(used[int(ShenandoahFreeSetPartitionId::OldCollector)]
== _used[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old collector used must match");
assert(regions[int(ShenandoahFreeSetPartitionId::OldCollector)]
== _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] / _region_size_bytes, "Old collector regions must match");
assert(_capacity[int(ShenandoahFreeSetPartitionId::OldCollector)]
>= _used[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old Collector capacity must be >= used");
assert(_available[int(ShenandoahFreeSetPartitionId::OldCollector)] ==
(_capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] - _used[int(ShenandoahFreeSetPartitionId::OldCollector)]),
"Old Collector available must equal capacity minus used");
assert(_humongous_waste[int(ShenandoahFreeSetPartitionId::OldCollector)] ==
humongous_waste[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old Collector humongous waste must match");
assert(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] >= capacities[int(ShenandoahFreeSetPartitionId::Mutator)],
"Capacity total must be >= counted tally");
size_t mutator_capacity_shortfall =
_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] - capacities[int(ShenandoahFreeSetPartitionId::Mutator)];
assert(mutator_capacity_shortfall <= young_retired_capacity, "sanity");
capacities[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_capacity_shortfall;
young_retired_capacity -= mutator_capacity_shortfall;
capacities[int(ShenandoahFreeSetPartitionId::Collector)] += young_retired_capacity;
assert(capacities[int(ShenandoahFreeSetPartitionId::OldCollector)]
== _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old collector capacities must match");
assert(used[int(ShenandoahFreeSetPartitionId::OldCollector)]
== _used[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old collector used must match");
assert(regions[int(ShenandoahFreeSetPartitionId::OldCollector)]
== _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] / _region_size_bytes, "Old collector regions must match");
assert(_capacity[int(ShenandoahFreeSetPartitionId::OldCollector)]
>= _used[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old Collector capacity must be >= used");
assert(_available[int(ShenandoahFreeSetPartitionId::OldCollector)] ==
(_capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] - _used[int(ShenandoahFreeSetPartitionId::OldCollector)]),
"Old Collector available must equal capacity minus used");
assert(_humongous_waste[int(ShenandoahFreeSetPartitionId::OldCollector)] ==
humongous_waste[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old Collector humongous waste must match");
assert(_used[int(ShenandoahFreeSetPartitionId::Mutator)] >= used[int(ShenandoahFreeSetPartitionId::Mutator)],
"Used total must be >= counted tally");
size_t mutator_used_shortfall =
_used[int(ShenandoahFreeSetPartitionId::Mutator)] - used[int(ShenandoahFreeSetPartitionId::Mutator)];
assert(mutator_used_shortfall <= young_retired_used, "sanity");
used[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_used_shortfall;
young_retired_used -= mutator_used_shortfall;
used[int(ShenandoahFreeSetPartitionId::Collector)] += young_retired_used;
assert(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] >= capacities[int(ShenandoahFreeSetPartitionId::Mutator)],
"Capacity total must be >= counted tally");
size_t mutator_capacity_shortfall =
_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] - capacities[int(ShenandoahFreeSetPartitionId::Mutator)];
assert(mutator_capacity_shortfall <= young_retired_capacity, "sanity");
capacities[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_capacity_shortfall;
young_retired_capacity -= mutator_capacity_shortfall;
capacities[int(ShenandoahFreeSetPartitionId::Collector)] += young_retired_capacity;
assert(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] / _region_size_bytes
>= regions[int(ShenandoahFreeSetPartitionId::Mutator)], "Region total must be >= counted tally");
size_t mutator_regions_shortfall = (_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] / _region_size_bytes
- regions[int(ShenandoahFreeSetPartitionId::Mutator)]);
assert(mutator_regions_shortfall <= young_retired_regions, "sanity");
regions[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_regions_shortfall;
young_retired_regions -= mutator_regions_shortfall;
regions[int(ShenandoahFreeSetPartitionId::Collector)] += young_retired_regions;
assert(capacities[int(ShenandoahFreeSetPartitionId::Collector)] == _capacity[int(ShenandoahFreeSetPartitionId::Collector)],
"Collector capacities must match");
assert(used[int(ShenandoahFreeSetPartitionId::Collector)] == _used[int(ShenandoahFreeSetPartitionId::Collector)],
"Collector used must match");
assert(regions[int(ShenandoahFreeSetPartitionId::Collector)]
== _capacity[int(ShenandoahFreeSetPartitionId::Collector)] / _region_size_bytes, "Collector regions must match");
assert(_capacity[int(ShenandoahFreeSetPartitionId::Collector)] >= _used[int(ShenandoahFreeSetPartitionId::Collector)],
"Collector Capacity must be >= used");
assert(_available[int(ShenandoahFreeSetPartitionId::Collector)] ==
(_capacity[int(ShenandoahFreeSetPartitionId::Collector)] - _used[int(ShenandoahFreeSetPartitionId::Collector)]),
"Collector Available must equal capacity minus used");
assert(_used[int(ShenandoahFreeSetPartitionId::Mutator)] >= used[int(ShenandoahFreeSetPartitionId::Mutator)],
"Used total must be >= counted tally");
size_t mutator_used_shortfall =
_used[int(ShenandoahFreeSetPartitionId::Mutator)] - used[int(ShenandoahFreeSetPartitionId::Mutator)];
assert(mutator_used_shortfall <= young_retired_used, "sanity");
used[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_used_shortfall;
young_retired_used -= mutator_used_shortfall;
used[int(ShenandoahFreeSetPartitionId::Collector)] += young_retired_used;
assert(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] / _region_size_bytes
>= regions[int(ShenandoahFreeSetPartitionId::Mutator)], "Region total must be >= counted tally");
size_t mutator_regions_shortfall = (_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] / _region_size_bytes
- regions[int(ShenandoahFreeSetPartitionId::Mutator)]);
assert(mutator_regions_shortfall <= young_retired_regions, "sanity");
regions[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_regions_shortfall;
young_retired_regions -= mutator_regions_shortfall;
regions[int(ShenandoahFreeSetPartitionId::Collector)] += young_retired_regions;
assert(capacities[int(ShenandoahFreeSetPartitionId::Collector)] == _capacity[int(ShenandoahFreeSetPartitionId::Collector)],
"Collector capacities must match");
assert(used[int(ShenandoahFreeSetPartitionId::Collector)] == _used[int(ShenandoahFreeSetPartitionId::Collector)],
"Collector used must match");
assert(regions[int(ShenandoahFreeSetPartitionId::Collector)]
== _capacity[int(ShenandoahFreeSetPartitionId::Collector)] / _region_size_bytes, "Collector regions must match");
assert(_capacity[int(ShenandoahFreeSetPartitionId::Collector)] >= _used[int(ShenandoahFreeSetPartitionId::Collector)],
"Collector Capacity must be >= used");
assert(_available[int(ShenandoahFreeSetPartitionId::Collector)] ==
(_capacity[int(ShenandoahFreeSetPartitionId::Collector)] - _used[int(ShenandoahFreeSetPartitionId::Collector)]),
"Collector Available must equal capacity minus used");
assert(capacities[int(ShenandoahFreeSetPartitionId::Mutator)] == _capacity[int(ShenandoahFreeSetPartitionId::Mutator)],
"Mutator capacities must match");
assert(used[int(ShenandoahFreeSetPartitionId::Mutator)] == _used[int(ShenandoahFreeSetPartitionId::Mutator)],
"Mutator used must match");
assert(regions[int(ShenandoahFreeSetPartitionId::Mutator)]
== _capacity[int(ShenandoahFreeSetPartitionId::Mutator)] / _region_size_bytes, "Mutator regions must match");
assert(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] >= _used[int(ShenandoahFreeSetPartitionId::Mutator)],
"Mutator capacity must be >= used");
assert(_available[int(ShenandoahFreeSetPartitionId::Mutator)] ==
(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] - _used[int(ShenandoahFreeSetPartitionId::Mutator)]),
"Mutator available must equal capacity minus used");
assert(_humongous_waste[int(ShenandoahFreeSetPartitionId::Mutator)] == young_humongous_waste,
"Mutator humongous waste must match");
}
assert(capacities[int(ShenandoahFreeSetPartitionId::Mutator)] == _capacity[int(ShenandoahFreeSetPartitionId::Mutator)],
"Mutator capacities must match");
assert(used[int(ShenandoahFreeSetPartitionId::Mutator)] == _used[int(ShenandoahFreeSetPartitionId::Mutator)],
"Mutator used must match");
assert(regions[int(ShenandoahFreeSetPartitionId::Mutator)]
== _capacity[int(ShenandoahFreeSetPartitionId::Mutator)] / _region_size_bytes, "Mutator regions must match");
assert(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] >= _used[int(ShenandoahFreeSetPartitionId::Mutator)],
"Mutator capacity must be >= used");
assert(_available[int(ShenandoahFreeSetPartitionId::Mutator)] ==
(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] - _used[int(ShenandoahFreeSetPartitionId::Mutator)]),
"Mutator available must equal capacity minus used");
assert(_humongous_waste[int(ShenandoahFreeSetPartitionId::Mutator)] == young_humongous_waste,
"Mutator humongous waste must match");
}
#endif
@ -1206,6 +1216,36 @@ ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
clear_internal();
}
void ShenandoahFreeSet::move_unaffiliated_regions_from_collector_to_old_collector(ssize_t count) {
shenandoah_assert_heaplocked();
size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
size_t old_capacity = _partitions.get_capacity(ShenandoahFreeSetPartitionId::OldCollector);
size_t collector_capacity = _partitions.get_capacity(ShenandoahFreeSetPartitionId::Collector);
if (count > 0) {
size_t ucount = count;
size_t bytes_moved = ucount * region_size_bytes;
assert(collector_capacity >= bytes_moved, "Cannot transfer");
assert(_partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::Collector) >= ucount,
"Cannot transfer %zu of %zu", ucount, _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::Collector));
_partitions.decrease_empty_region_counts(ShenandoahFreeSetPartitionId::Collector, ucount);
_partitions.set_capacity_of(ShenandoahFreeSetPartitionId::Collector, collector_capacity - bytes_moved);
_partitions.set_capacity_of(ShenandoahFreeSetPartitionId::OldCollector, old_capacity + bytes_moved);
_partitions.increase_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector, ucount);
} else if (count < 0) {
size_t ucount = -count;
size_t bytes_moved = ucount * region_size_bytes;
assert(old_capacity >= bytes_moved, "Cannot transfer");
assert(_partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector) >= ucount,
"Cannot transfer %zu of %zu", ucount, _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector));
_partitions.decrease_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector, ucount);
_partitions.set_capacity_of(ShenandoahFreeSetPartitionId::OldCollector, old_capacity - bytes_moved);
_partitions.set_capacity_of(ShenandoahFreeSetPartitionId::Collector, collector_capacity + bytes_moved);
_partitions.increase_empty_region_counts(ShenandoahFreeSetPartitionId::Collector, ucount);
}
// else, do nothing
}
// was pip_pad_bytes
void ShenandoahFreeSet::add_promoted_in_place_region_to_old_collector(ShenandoahHeapRegion* region) {
shenandoah_assert_heaplocked();
@ -1261,7 +1301,7 @@ void ShenandoahFreeSet::add_promoted_in_place_region_to_old_collector(Shenandoah
/* CollectorSizeChanged */ true, /* OldCollectorSizeChanged */ true,
/* AffiliatedChangesAreYoungNeutral */ false, /* AffiliatedChangesAreGlobalNeutral */ true,
/* UnaffiliatedChangesAreYoungNeutral */ true>();
_partitions.assert_bounds(true);
_partitions.assert_bounds();
}
template<typename Iter>
@ -1496,9 +1536,12 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah
return nullptr;
}
HeapWord* result = nullptr;
// We must call try_recycle_under_lock() even if !r->is_trash(). The reason is that if r is being recycled at this
// moment by a GC worker thread, it may appear to be not trash even though it has not yet been fully recycled. If
// we proceed without waiting for the worker to finish recycling the region, the worker thread may overwrite the
// region's affiliation with FREE after we set the region's affiliation to req.affiliation() below.
r->try_recycle_under_lock();
in_new_region = r->is_empty();
if (in_new_region) {
log_debug(gc, free)("Using new region (%zu) for %s (" PTR_FORMAT ").",
r->index(), req.type_string(), p2i(&req));
@ -1668,7 +1711,7 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah
default:
assert(false, "won't happen");
}
_partitions.assert_bounds(true);
_partitions.assert_bounds();
return result;
}
@ -1799,6 +1842,7 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req, bo
increase_bytes_allocated(waste_bytes);
}
}
_partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, total_used);
increase_bytes_allocated(total_used);
req.set_actual_size(words_size);
@ -1819,14 +1863,16 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req, bo
/* CollectorSizeChanged */ false, /* OldCollectorSizeChanged */ false,
/* AffiliatedChangesAreYoungNeutral */ false, /* AffiliatedChangesAreGlobalNeutral */ false,
/* UnaffiliatedChangesAreYoungNeutral */ false>();
_partitions.assert_bounds(true);
_partitions.assert_bounds();
return _heap->get_region(beg)->bottom();
}
class ShenandoahRecycleTrashedRegionClosure final : public ShenandoahHeapRegionClosure {
public:
void heap_region_do(ShenandoahHeapRegion* r) {
r->try_recycle();
if (r->is_trash()) {
r->try_recycle();
}
}
bool is_thread_safe() {
@ -1861,7 +1907,7 @@ bool ShenandoahFreeSet::transfer_one_region_from_mutator_to_old_collector(size_t
/* CollectorSizeChanged */ false, /* OldCollectorSizeChanged */ true,
/* AffiliatedChangesAreYoungNeutral */ true, /* AffiliatedChangesAreGlobalNeutral */ true,
/* UnaffiliatedChangesAreYoungNeutral */ false>();
_partitions.assert_bounds(true);
_partitions.assert_bounds();
return true;
} else {
return false;
@ -1914,7 +1960,7 @@ bool ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) {
/* CollectorSizeChanged */ false, /* OldCollectorSizeChanged */ true,
/* AffiliatedChangesAreYoungNeutral */ true, /* AffiliatedChangesAreGlobalNeutral */ true,
/* UnaffiliatedChangesAreYoungNeutral */ false>();
_partitions.assert_bounds(true);
_partitions.assert_bounds();
// 4. Do not adjust capacities for generations, we just swapped the regions that have already
// been accounted for. However, we should adjust the evacuation reserves as those may have changed.
shenandoah_assert_heaplocked();
@ -1945,7 +1991,7 @@ void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
/* CollectorSizeChanged */ true, /* OldCollectorSizeChanged */ false,
/* AffiliatedChangesAreYoungNeutral */ true, /* AffiliatedChangesAreGlobalNeutral */ true,
/* UnaffiliatedChangesAreYoungNeutral */ true>();
_partitions.assert_bounds(true);
_partitions.assert_bounds();
// We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next,
// to recycle trash before attempting to allocate anything in the region.
}
@ -2025,16 +2071,23 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r
for (size_t idx = 0; idx < num_regions; idx++) {
ShenandoahHeapRegion* region = _heap->get_region(idx);
if (region->is_trash()) {
// Trashed regions represent immediate garbage identified by final mark and regions that had been in the collection
// partition but have not yet been "cleaned up" following update refs.
// Trashed regions represent regions that had been in the collection set (or may have been identified as immediate garbage)
// but have not yet been "cleaned up". The cset regions are not "trashed" until we have finished update refs.
if (region->is_old()) {
// We're going to place this region into the Mutator set. We increment old_trashed_regions because this count represents
// regions that the old generation is entitled to without any transfer from young. We do not place this region into
// the OldCollector partition at this time. Instead, we let reserve_regions() decide whether to place this region
// into the OldCollector partition. Deferring the decision allows reserve_regions() to more effectively pack the
// OldCollector regions into high-address memory. We do not adjust capacities of old and young generations at this
// time. At the end of finish_rebuild(), the capacities are adjusted based on the results of reserve_regions().
old_trashed_regions++;
} else {
assert(region->is_young(), "Trashed region should be old or young");
young_trashed_regions++;
}
} else if (region->is_old()) {
// count both humongous and regular regions, but don't count trash (cset) regions.
// We count humongous and regular regions as "old regions". We do not count trashed regions that are old. Those
// are counted (above) as old_trashed_regions.
old_region_count++;
if (first_old_region > idx) {
first_old_region = idx;
@ -2048,7 +2101,7 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r
size_t ac = alloc_capacity(region);
if (ac >= PLAB::min_size() * HeapWordSize) {
if (region->is_trash() || !region->is_old()) {
// Both young and old collected regions (trashed) are placed into the Mutator set
// Both young and old (possibly immediately) collected regions (trashed) are placed into the Mutator set
_partitions.raw_assign_membership(idx, ShenandoahFreeSetPartitionId::Mutator);
if (idx < mutator_leftmost) {
mutator_leftmost = idx;
@ -2111,10 +2164,19 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r
assert(_partitions.membership(idx) == ShenandoahFreeSetPartitionId::NotFree, "Region should have been retired");
size_t humongous_waste_bytes = 0;
if (region->is_humongous_start()) {
oop obj = cast_to_oop(region->bottom());
size_t byte_size = obj->size() * HeapWordSize;
size_t region_span = ShenandoahHeapRegion::required_regions(byte_size);
humongous_waste_bytes = region_span * ShenandoahHeapRegion::region_size_bytes() - byte_size;
// Since rebuild does not necessarily happen at a safepoint, a newly allocated humongous object may not have been
// fully initialized. Therefore, we cannot safely consult its header.
ShenandoahHeapRegion* last_of_humongous_continuation = region;
size_t next_idx;
for (next_idx = idx + 1; next_idx < num_regions; next_idx++) {
ShenandoahHeapRegion* humongous_cont_candidate = _heap->get_region(next_idx);
if (!humongous_cont_candidate->is_humongous_continuation()) {
break;
}
last_of_humongous_continuation = humongous_cont_candidate;
}
// For humongous regions, used() is established while holding the global heap lock so it is reliable here
humongous_waste_bytes = ShenandoahHeapRegion::region_size_bytes() - last_of_humongous_continuation->used();
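// Worked example (illustrative values only): with 2 MB regions, a 5.5 MB humongous object spans three regions;
// the last continuation region reports used() of 1.5 MB, so the waste is 2 MB - 1.5 MB = 0.5 MB, the same value
// the old header-based computation would have produced for a fully initialized object.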
}
if (region->is_old()) {
old_collector_used += region_size_bytes;
@ -2183,7 +2245,7 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r
/* CollectorSizeChanged */ true, /* OldCollectorSizeChanged */ true,
/* AffiliatedChangesAreYoungNeutral */ false, /* AffiliatedChangesAreGlobalNeutral */ false,
/* UnaffiliatedChangesAreYoungNeutral */ false>();
_partitions.assert_bounds(true);
_partitions.assert_bounds();
#ifdef ASSERT
if (_heap->mode()->is_generational()) {
assert(young_affiliated_regions() == _heap->young_generation()->get_affiliated_region_count(), "sanity");
@ -2221,7 +2283,7 @@ void ShenandoahFreeSet::transfer_humongous_regions_from_mutator_to_old_collector
/* CollectorSizeChanged */ false, /* OldCollectorSizeChanged */ true,
/* AffiliatedChangesAreYoungNeutral */ false, /* AffiliatedChangesAreGlobalNeutral */ true,
/* UnaffiliatedChangesAreYoungNeutral */ true>();
_partitions.assert_bounds(true);
_partitions.assert_bounds();
// global_used is unaffected by this transfer
// No need to adjust ranges because humongous regions are not allocatable
@ -2303,7 +2365,7 @@ void ShenandoahFreeSet::transfer_empty_regions_from_to(ShenandoahFreeSetPartitio
/* UnaffiliatedChangesAreYoungNeutral */ true>();
}
}
_partitions.assert_bounds(true);
_partitions.assert_bounds();
}
// Returns number of regions transferred, adds transferred bytes to var argument bytes_transferred
@ -2370,7 +2432,7 @@ size_t ShenandoahFreeSet::transfer_empty_regions_from_collector_set_to_mutator_s
/* AffiliatedChangesAreYoungNeutral */ true, /* AffiliatedChangesAreGlobalNeutral */ true,
/* UnaffiliatedChangesAreYoungNeutral */ true>();
}
_partitions.assert_bounds(true);
_partitions.assert_bounds();
return transferred_regions;
}
@ -2445,7 +2507,7 @@ transfer_non_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPa
/* AffiliatedChangesAreYoungNeutral */ true, /* AffiliatedChangesAreGlobalNeutral */ true,
/* UnaffiliatedChangesAreYoungNeutral */ true>();
}
_partitions.assert_bounds(true);
_partitions.assert_bounds();
return transferred_regions;
}
@ -2507,14 +2569,13 @@ void ShenandoahFreeSet::prepare_to_rebuild(size_t &young_trashed_regions, size_t
first_old_region, last_old_region, old_region_count);
}
void ShenandoahFreeSet::finish_rebuild(size_t young_trashed_regions, size_t old_trashed_regions, size_t old_region_count,
bool have_evacuation_reserves) {
void ShenandoahFreeSet::finish_rebuild(size_t young_cset_regions, size_t old_cset_regions, size_t old_region_count) {
shenandoah_assert_heaplocked();
size_t young_reserve(0), old_reserve(0);
if (_heap->mode()->is_generational()) {
compute_young_and_old_reserves(young_trashed_regions, old_trashed_regions, have_evacuation_reserves,
young_reserve, old_reserve);
compute_young_and_old_reserves(young_cset_regions, old_cset_regions, young_reserve, old_reserve);
} else {
young_reserve = (_heap->max_capacity() / 100) * ShenandoahEvacReserve;
old_reserve = 0;
@ -2531,8 +2592,41 @@ void ShenandoahFreeSet::finish_rebuild(size_t young_trashed_regions, size_t old_
// Release the rebuild lock now. What remains in this function is read-only
rebuild_lock()->unlock();
_partitions.assert_bounds(true);
_partitions.assert_bounds();
log_status();
if (_heap->mode()->is_generational()) {
// Clear the region balance until it is adjusted in preparation for a subsequent GC cycle.
_heap->old_generation()->set_region_balance(0);
}
}
// Reduce old reserve (when there are insufficient resources to satisfy the original request).
void ShenandoahFreeSet::reduce_old_reserve(size_t adjusted_old_reserve, size_t requested_old_reserve) {
ShenandoahOldGeneration* const old_generation = _heap->old_generation();
size_t requested_promoted_reserve = old_generation->get_promoted_reserve();
size_t requested_old_evac_reserve = old_generation->get_evacuation_reserve();
assert(adjusted_old_reserve < requested_old_reserve, "Only allow reduction");
assert(requested_promoted_reserve + requested_old_evac_reserve >= adjusted_old_reserve, "Sanity");
size_t delta = requested_old_reserve - adjusted_old_reserve;
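// Example with assumed values (not taken from this change): requested_old_reserve = 100 MB and
// adjusted_old_reserve = 80 MB give delta = 20 MB. With a promoted reserve of 15 MB, the promoted reserve is
// exhausted first (set to zero) and the remaining 5 MB comes out of the old evacuation reserve in the else-branch.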
if (requested_promoted_reserve >= delta) {
requested_promoted_reserve -= delta;
old_generation->set_promoted_reserve(requested_promoted_reserve);
} else {
delta -= requested_promoted_reserve;
requested_promoted_reserve = 0;
requested_old_evac_reserve -= delta;
old_generation->set_promoted_reserve(requested_promoted_reserve);
old_generation->set_evacuation_reserve(requested_old_evac_reserve);
}
}
// Reduce young reserve (when there are insufficient resources to satisfy the original request).
void ShenandoahFreeSet::reduce_young_reserve(size_t adjusted_young_reserve, size_t requested_young_reserve) {
ShenandoahYoungGeneration* const young_generation = _heap->young_generation();
assert(adjusted_young_reserve < requested_young_reserve, "Only allow reduction");
young_generation->set_evacuation_reserve(adjusted_young_reserve);
}
/**
@ -2549,7 +2643,6 @@ void ShenandoahFreeSet::finish_rebuild(size_t young_trashed_regions, size_t old_
* this value should be computed by ShenandoahGenerationalHeap::compute_old_generation_balance().
*/
void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regions, size_t old_trashed_regions,
bool have_evacuation_reserves,
size_t& young_reserve_result, size_t& old_reserve_result) const {
shenandoah_assert_generational();
shenandoah_assert_heaplocked();
@ -2566,6 +2659,15 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regi
old_available += old_trashed_regions * region_size_bytes;
young_unaffiliated_regions += young_trashed_regions;
assert(young_capacity >= young_generation->used(),
"Young capacity (%zu) must exceed used (%zu)", young_capacity, young_generation->used());
size_t young_available = young_capacity - young_generation->used();
young_available += young_trashed_regions * region_size_bytes;
assert(young_available >= young_unaffiliated_regions * region_size_bytes, "sanity");
assert(old_available >= old_unaffiliated_regions * region_size_bytes, "sanity");
// Consult old-region balance to make adjustments to current generation capacities and availability.
// The generation region transfers take place after we rebuild. old_region_balance represents the number of regions
// to transfer from old to young.
@ -2585,6 +2687,7 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regi
ssize_t xfer_bytes = old_region_balance * checked_cast<ssize_t>(region_size_bytes);
old_available -= xfer_bytes;
old_unaffiliated_regions -= old_region_balance;
young_available += xfer_bytes;
young_capacity += xfer_bytes;
young_unaffiliated_regions += old_region_balance;
}
@ -2593,41 +2696,22 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regi
// promotions and evacuations. The partition between which old memory is reserved for evacuation and
// which is reserved for promotion is enforced using thread-local variables that prescribe intentions for
// each PLAB's available memory.
if (have_evacuation_reserves) {
// We are rebuilding at the end of final mark, having already established evacuation budgets for this GC pass.
const size_t promoted_reserve = old_generation->get_promoted_reserve();
const size_t old_evac_reserve = old_generation->get_evacuation_reserve();
young_reserve_result = young_generation->get_evacuation_reserve();
old_reserve_result = promoted_reserve + old_evac_reserve;
if (old_reserve_result > old_available) {
// Try to transfer memory from young to old.
size_t old_deficit = old_reserve_result - old_available;
size_t old_region_deficit = (old_deficit + region_size_bytes - 1) / region_size_bytes;
if (young_unaffiliated_regions < old_region_deficit) {
old_region_deficit = young_unaffiliated_regions;
}
young_unaffiliated_regions -= old_region_deficit;
old_unaffiliated_regions += old_region_deficit;
old_region_balance -= old_region_deficit;
old_generation->set_region_balance(old_region_balance);
}
} else {
// We are rebuilding at end of GC, so we set aside budgets specified on command line (or defaults)
young_reserve_result = (young_capacity * ShenandoahEvacReserve) / 100;
// The auto-sizer has already made old-gen large enough to hold all anticipated evacuations and promotions.
// Affiliated old-gen regions are already in the OldCollector free set. Add in the relevant number of
// unaffiliated regions.
old_reserve_result = old_available;
}
const size_t promoted_reserve = old_generation->get_promoted_reserve();
const size_t old_evac_reserve = old_generation->get_evacuation_reserve();
young_reserve_result = young_generation->get_evacuation_reserve();
old_reserve_result = promoted_reserve + old_evac_reserve;
assert(old_reserve_result + young_reserve_result <= old_available + young_available,
"Cannot reserve (%zu + %zu + %zu) more than is available: %zu + %zu",
promoted_reserve, old_evac_reserve, young_reserve_result, old_available, young_available);
// Old available regions that have less than PLAB::min_size() of available memory are not placed into the OldCollector
// free set. Because of this, old_available may not have enough memory to represent the intended reserve. Adjust
// the reserve downward to account for this possibility. This loss is part of the reason why the original budget
// was adjusted with ShenandoahOldEvacWaste and ShenandoahOldPromoWaste multipliers.
if (old_reserve_result >
_partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes) {
_partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes) {
old_reserve_result =
_partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes;
_partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes;
}
if (young_reserve_result > young_unaffiliated_regions * region_size_bytes) {
@ -2791,19 +2875,17 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old
ShenandoahFreeSetPartitionId p = _partitions.membership(idx);
size_t ac = alloc_capacity(r);
assert(ac != region_size_bytes, "Empty regions should be in Mutator partion at entry to reserve_regions");
if (p == ShenandoahFreeSetPartitionId::Collector) {
if (ac != region_size_bytes) {
young_used_regions++;
young_used_bytes = region_size_bytes - ac;
}
// else, unaffiliated region has no used
} else if (p == ShenandoahFreeSetPartitionId::OldCollector) {
if (ac != region_size_bytes) {
old_used_regions++;
old_used_bytes = region_size_bytes - ac;
}
// else, unaffiliated region has no used
} else if (p == ShenandoahFreeSetPartitionId::NotFree) {
assert(p != ShenandoahFreeSetPartitionId::Collector, "Collector regions must be converted from Mutator regions");
if (p == ShenandoahFreeSetPartitionId::OldCollector) {
assert(!r->is_empty(), "Empty regions should be in Mutator partition at entry to reserve_regions");
old_used_regions++;
old_used_bytes = region_size_bytes - ac;
// This region is within the range for OldCollector partition, as established by find_regions_with_alloc_capacity()
assert((_partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector) <= idx) &&
(_partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector) >= idx),
"find_regions_with_alloc_capacity() should have established this is in range");
} else {
assert(p == ShenandoahFreeSetPartitionId::NotFree, "sanity");
// This region has been retired
if (r->is_old()) {
old_used_regions++;
@ -2813,21 +2895,6 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old
young_used_regions++;
young_used_bytes += region_size_bytes - ac;
}
} else {
assert(p == ShenandoahFreeSetPartitionId::OldCollector, "Not mutator and not NotFree, so must be OldCollector");
assert(!r->is_empty(), "Empty regions should be in Mutator partition at entry to reserve_regions");
if (idx < old_collector_low_idx) {
old_collector_low_idx = idx;
}
if (idx > old_collector_high_idx) {
old_collector_high_idx = idx;
}
if (idx < old_collector_empty_low_idx) {
old_collector_empty_low_idx = idx;
}
if (idx > old_collector_empty_high_idx) {
old_collector_empty_high_idx = idx;
}
}
}
}
@ -2856,14 +2923,14 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old
_partitions.increase_used(ShenandoahFreeSetPartitionId::OldCollector, used_to_old_collector);
}
_partitions.expand_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId::Collector,
collector_low_idx, collector_high_idx,
collector_empty_low_idx, collector_empty_high_idx);
_partitions.establish_interval(ShenandoahFreeSetPartitionId::Mutator,
mutator_low_idx, mutator_high_idx, mutator_empty_low_idx, mutator_empty_high_idx);
_partitions.establish_interval(ShenandoahFreeSetPartitionId::Collector,
collector_low_idx, collector_high_idx, collector_empty_low_idx, collector_empty_high_idx);
_partitions.expand_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId::OldCollector,
old_collector_low_idx, old_collector_high_idx,
old_collector_empty_low_idx, old_collector_empty_high_idx);
_partitions.establish_interval(ShenandoahFreeSetPartitionId::Mutator,
mutator_low_idx, mutator_high_idx, mutator_empty_low_idx, mutator_empty_high_idx);
recompute_total_used</* UsedByMutatorChanged */ true,
/* UsedByCollectorChanged */ true, /* UsedByOldCollectorChanged */ true>();
@ -2872,17 +2939,22 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old
/* CollectorSizeChanged */ true, /* OldCollectorSizeChanged */ true,
/* AffiliatedChangesAreYoungNeutral */ false, /* AffiliatedChangesAreGlobalNeutral */ false,
/* UnaffiliatedChangesAreYoungNeutral */ false>();
_partitions.assert_bounds(true);
_partitions.assert_bounds();
if (LogTarget(Info, gc, free)::is_enabled()) {
size_t old_reserve = _partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector);
if (old_reserve < to_reserve_old) {
log_info(gc, free)("Wanted " PROPERFMT " for old reserve, but only reserved: " PROPERFMT,
PROPERFMTARGS(to_reserve_old), PROPERFMTARGS(old_reserve));
assert(_heap->mode()->is_generational(), "to_old_reserve > 0 implies generational mode");
reduce_old_reserve(old_reserve, to_reserve_old);
}
size_t reserve = _partitions.available_in(ShenandoahFreeSetPartitionId::Collector);
if (reserve < to_reserve) {
if (_heap->mode()->is_generational()) {
reduce_young_reserve(reserve, to_reserve);
}
log_info(gc, free)("Wanted " PROPERFMT " for young reserve, but only reserved: " PROPERFMT,
PROPERFMTARGS(to_reserve), PROPERFMTARGS(reserve));
PROPERFMTARGS(to_reserve), PROPERFMTARGS(reserve));
}
}
}

View File

@ -224,6 +224,10 @@ public:
void transfer_used_capacity_from_to(ShenandoahFreeSetPartitionId from_partition, ShenandoahFreeSetPartitionId to_partition,
size_t regions);
// For recycled region r in the OldCollector partition but possibly not within the interval for empty OldCollector regions,
// expand the empty interval to include this region.
inline void adjust_interval_for_recycled_old_region_under_lock(ShenandoahHeapRegion* r);
const char* partition_membership_name(idx_t idx) const;
// Return the index of the next available region >= start_index, or maximum_regions if not found.
@ -373,12 +377,7 @@ public:
inline void set_capacity_of(ShenandoahFreeSetPartitionId which_partition, size_t value);
inline void set_used_by(ShenandoahFreeSetPartitionId which_partition, size_t value) {
shenandoah_assert_heaplocked();
assert (which_partition < NumPartitions, "selected free set must be valid");
_used[int(which_partition)] = value;
_available[int(which_partition)] = _capacity[int(which_partition)] - value;
}
inline void set_used_by(ShenandoahFreeSetPartitionId which_partition, size_t value);
inline size_t count(ShenandoahFreeSetPartitionId which_partition) const { return _region_counts[int(which_partition)]; }
@ -402,7 +401,7 @@ public:
// idx >= leftmost &&
// idx <= rightmost
// }
void assert_bounds(bool validate_totals) NOT_DEBUG_RETURN;
void assert_bounds() NOT_DEBUG_RETURN;
};
// Publicly, ShenandoahFreeSet represents memory that is available to mutator threads. The public capacity(), used(),
@ -634,7 +633,11 @@ private:
void establish_old_collector_alloc_bias();
size_t get_usable_free_words(size_t free_bytes) const;
void reduce_young_reserve(size_t adjusted_young_reserve, size_t requested_young_reserve);
void reduce_old_reserve(size_t adjusted_old_reserve, size_t requested_old_reserve);
void log_freeset_stats(ShenandoahFreeSetPartitionId partition_id, LogStream& ls);
// log status, assuming lock has already been acquired by the caller.
void log_status();
@ -685,35 +688,46 @@ public:
return _total_global_used;
}
size_t global_unaffiliated_regions() {
// A negative argument results in moving from old_collector to collector
void move_unaffiliated_regions_from_collector_to_old_collector(ssize_t regions);
inline size_t global_unaffiliated_regions() {
return _global_unaffiliated_regions;
}
size_t young_unaffiliated_regions() {
inline size_t young_unaffiliated_regions() {
return _young_unaffiliated_regions;
}
size_t old_unaffiliated_regions() {
inline size_t collector_unaffiliated_regions() {
return _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::Collector);
}
inline size_t old_collector_unaffiliated_regions() {
return _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector);
}
size_t young_affiliated_regions() {
inline size_t old_unaffiliated_regions() {
return _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector);
}
inline size_t young_affiliated_regions() {
return _young_affiliated_regions;
}
size_t old_affiliated_regions() {
inline size_t old_affiliated_regions() {
return _old_affiliated_regions;
}
size_t global_affiliated_regions() {
inline size_t global_affiliated_regions() {
return _global_affiliated_regions;
}
size_t total_young_regions() {
inline size_t total_young_regions() {
return _total_young_regions;
}
size_t total_old_regions() {
inline size_t total_old_regions() {
return _partitions.get_capacity(ShenandoahFreeSetPartitionId::OldCollector) / ShenandoahHeapRegion::region_size_bytes();
}
@ -725,36 +739,27 @@ public:
// Examine the existing free set representation, capturing the current state into var arguments:
//
// young_cset_regions is the number of regions currently in the young cset if we are starting to evacuate, or zero
// old_cset_regions is the number of regions currently in the old cset if we are starting a mixed evacuation, or zero
// young_trashed_regions is the number of trashed regions (immediate garbage at final mark, cset regions after update refs)
// old_trashed_regions is the number of trashed regions
// (immediate garbage at final old mark, cset regions after update refs for mixed evac)
// first_old_region is the index of the first region that is part of the OldCollector set
// last_old_region is the index of the last region that is part of the OldCollector set
// old_region_count is the number of regions in the OldCollector set that have memory available to be allocated
void prepare_to_rebuild(size_t &young_cset_regions, size_t &old_cset_regions,
void prepare_to_rebuild(size_t &young_trashed_regions, size_t &old_trashed_regions,
size_t &first_old_region, size_t &last_old_region, size_t &old_region_count);
// At the end of final mark, but before we begin evacuating, heuristics calculate how much memory is required to
// hold the results of evacuating to young-gen and to old-gen, and have_evacuation_reserves should be true.
// These quantities, stored as reserves for their respective generations, are consulted prior to rebuilding
// the free set (ShenandoahFreeSet) in preparation for evacuation. When the free set is rebuilt, we make sure
// to reserve sufficient memory in the collector and old_collector sets to hold evacuations.
// hold the results of evacuating to young-gen and to old-gen. These quantities, stored in reserves for their
// respective generations, are consulted prior to rebuilding the free set (ShenandoahFreeSet) in preparation for
// evacuation. When the free set is rebuilt, we make sure to reserve sufficient memory in the collector and
// old_collector sets to hold evacuations. Likewise, at the end of update refs, we rebuild the free set in order
// to set aside reserves to be consumed during the next GC cycle.
//
// We also rebuild the free set at the end of GC, as we prepare to idle GC until the next trigger. In this case,
// have_evacuation_reserves is false because we don't yet know how much memory will need to be evacuated in the
// next GC cycle. When have_evacuation_reserves is false, the free set rebuild operation reserves for the collector
// and old_collector sets based on alternative mechanisms, such as ShenandoahEvacReserve, ShenandoahOldEvacReserve, and
// ShenandoahOldCompactionReserve. In a future planned enhancement, the reserve for old_collector set when the
// evacuation reserves are unknown, is based in part on anticipated promotion as determined by analysis of live data
// found during the previous GC pass which is one less than the current tenure age.
//
// young_cset_regions is the number of regions currently in the young cset if we are starting to evacuate, or zero
// old_cset_regions is the number of regions currently in the old cset if we are starting a mixed evacuation, or zero
// young_trashed_regions is the number of trashed regions (immediate garbage at final mark, cset regions after update refs)
// old_trashed_regions is the number of trashed regions
// (immediate garbage at final old mark, cset regions after update refs for mixed evac)
// num_old_regions is the number of old-gen regions that have available memory for further allocations (excluding old cset)
// have_evacuation_reserves is true iff the desired values of young-gen and old-gen evacuation reserves and old-gen
// promotion reserve have been precomputed (and can be obtained by invoking
// <generation>->get_evacuation_reserve() or old_gen->get_promoted_reserve()
void finish_rebuild(size_t young_cset_regions, size_t old_cset_regions, size_t num_old_regions,
bool have_evacuation_reserves = false);
void finish_rebuild(size_t young_trashed_regions, size_t old_trashed_regions, size_t num_old_regions);
// When a region is promoted in place, we add the region's available memory if it is greater than plab_min_size()
// into the old collector partition by invoking this method.
@ -806,9 +811,18 @@ public:
return _partitions.available_in_locked_for_rebuild(ShenandoahFreeSetPartitionId::Mutator);
}
// Use this version of available() if the heap lock is held.
inline size_t available_locked() const {
return _partitions.available_in(ShenandoahFreeSetPartitionId::Mutator);
}
inline size_t total_humongous_waste() const { return _total_humongous_waste; }
inline size_t humongous_waste_in_mutator() const { return _partitions.humongous_waste(ShenandoahFreeSetPartitionId::Mutator); }
inline size_t humongous_waste_in_old() const { return _partitions.humongous_waste(ShenandoahFreeSetPartitionId::OldCollector); }
inline size_t humongous_waste_in_mutator() const {
return _partitions.humongous_waste(ShenandoahFreeSetPartitionId::Mutator);
}
inline size_t humongous_waste_in_old() const {
return _partitions.humongous_waste(ShenandoahFreeSetPartitionId::OldCollector);
}
void decrease_humongous_waste_for_regular_bypass(ShenandoahHeapRegion* r, size_t waste);
@ -874,7 +888,7 @@ public:
// Reserve space for evacuations, with regions reserved for old evacuations placed to the right
// of regions reserved of young evacuations.
void compute_young_and_old_reserves(size_t young_cset_regions, size_t old_cset_regions, bool have_evacuation_reserves,
void compute_young_and_old_reserves(size_t young_cset_regions, size_t old_cset_regions,
size_t &young_reserve_result, size_t &old_reserve_result) const;
};

View File

@ -522,6 +522,7 @@ public:
void heap_region_do(ShenandoahHeapRegion* r) override {
if (r->is_trash()) {
r->try_recycle_under_lock();
// No need to adjust_interval_for_recycled_old_region. That will be taken care of during freeset rebuild.
}
if (r->is_cset()) {
// Leave affiliation unchanged
@ -966,6 +967,7 @@ public:
if (r->is_trash()) {
live = 0;
r->try_recycle_under_lock();
// No need to adjust_interval_for_recycled_old_region. That will be taken care of during freeset rebuild.
} else {
if (r->is_old()) {
ShenandoahGenerationalFullGC::account_for_region(r, _old_regions, _old_usage, _old_humongous_waste);
@ -1113,16 +1115,16 @@ void ShenandoahFullGC::phase5_epilog() {
ShenandoahPostCompactClosure post_compact;
heap->heap_region_iterate(&post_compact);
heap->collection_set()->clear();
size_t young_cset_regions, old_cset_regions, first_old, last_old, num_old;
ShenandoahFreeSet* free_set = heap->free_set();
{
free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
ShenandoahFreeSet* free_set = heap->free_set();
size_t young_trashed_regions, old_trashed_regions, first_old, last_old, num_old;
free_set->prepare_to_rebuild(young_trashed_regions, old_trashed_regions, first_old, last_old, num_old);
// We also do not expand old generation size following Full GC because we have scrambled age populations and
// no longer have objects separated by age into distinct regions.
if (heap->mode()->is_generational()) {
ShenandoahGenerationalFullGC::compute_balances();
}
free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old);
free_set->finish_rebuild(young_trashed_regions, old_trashed_regions, num_old);
}
// Set mark incomplete because the marking bitmaps have been reset except pinned regions.
_generation->set_mark_incomplete();

View File

@ -250,6 +250,7 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap
ShenandoahOldGeneration* const old_generation = heap->old_generation();
ShenandoahYoungGeneration* const young_generation = heap->young_generation();
const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
// During initialization and phase changes, it is more likely that fewer objects die young and old-gen
// memory is not yet full (or is in the process of being replaced). During these times especially, it
@ -263,15 +264,15 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap
// First priority is to reclaim the easy garbage out of young-gen.
// maximum_young_evacuation_reserve is upper bound on memory to be evacuated out of young
const size_t maximum_young_evacuation_reserve = (young_generation->max_capacity() * ShenandoahEvacReserve) / 100;
size_t young_evacuation_reserve = MIN2(maximum_young_evacuation_reserve, young_generation->available_with_reserve());
// maximum_young_evacuation_reserve is an upper bound on memory to be evacuated into the young Collector Reserve. This is
// bounded at the end of the previous GC cycle, based on available memory and balancing of evacuation to old and young.
size_t maximum_young_evacuation_reserve = young_generation->get_evacuation_reserve();
// maximum_old_evacuation_reserve is an upper bound on memory evacuated from old and evacuated to old (promoted),
// clamped by the old generation space available.
//
// Here's the algebra.
// Let SOEP = ShenandoahOldEvacRatioPercent,
// Let SOEP = ShenandoahOldEvacPercent,
// OE = old evac,
// YE = young evac, and
// TE = total evac = OE + YE
@ -283,12 +284,14 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap
// => OE = YE*SOEP/(100-SOEP)
// We have to be careful in the event that SOEP is set to 100 by the user.
assert(ShenandoahOldEvacRatioPercent <= 100, "Error");
assert(ShenandoahOldEvacPercent <= 100, "Error");
const size_t old_available = old_generation->available();
const size_t maximum_old_evacuation_reserve = (ShenandoahOldEvacRatioPercent == 100) ?
old_available : MIN2((maximum_young_evacuation_reserve * ShenandoahOldEvacRatioPercent) / (100 - ShenandoahOldEvacRatioPercent),
const size_t maximum_old_evacuation_reserve = (ShenandoahOldEvacPercent == 100) ?
old_available : MIN2((maximum_young_evacuation_reserve * ShenandoahOldEvacPercent) / (100 - ShenandoahOldEvacPercent),
old_available);
// In some cases, maximum_old_reserve < old_available (when limited by ShenandoahOldEvacPercent)
// This limit affects mixed evacuations, but does not affect promotions.
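As a standalone illustration of the algebra above, the following self-contained sketch evaluates the bound OE <= YE * SOEP / (100 - SOEP) for a few sample values; the function name and the numbers are illustrative assumptions, not code from this change.

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Illustrative sketch: the old-evacuation bound derived in the comment above,
// clamped by the memory actually available in old-gen. All quantities are bytes.
static size_t max_old_evac_bytes(size_t young_evac, size_t old_available, unsigned soep) {
  if (soep == 100) {
    return old_available;                       // limited only by what old-gen can hold
  }
  return std::min(young_evac * soep / (100 - soep), old_available);
}

int main() {
  const size_t M = size_t(1) << 20;
  // With the default 75%, old evacuation may be up to three times the young evacuation.
  std::printf("%zu MB\n", max_old_evac_bytes(100 * M, 10000 * M, 75) / M);  // 300 MB
  std::printf("%zu MB\n", max_old_evac_bytes(100 * M, 200 * M, 75) / M);    // clamped to 200 MB
  return 0;
}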
// Second priority is to reclaim garbage out of old-gen if there are old-gen collection candidates. Third priority
// is to promote as much as we have room to promote. However, if old-gen memory is in short supply, this means young
@ -305,10 +308,8 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap
// evacuation and update-refs, we give emphasis to reclaiming garbage first, wherever that garbage is found.
// Global GC will adjust generation sizes to accommodate the collection set it chooses.
// Set old_promo_reserve to enforce that no regions are preselected for promotion. Such regions typically
// have relatively high memory utilization. We still call select_aged_regions() because this will prepare for
// promotions in place, if relevant.
old_promo_reserve = 0;
// Use remnant of old_available to hold promotions.
old_promo_reserve = old_available - maximum_old_evacuation_reserve;
// Dedicate all available old memory to old_evacuation reserve. This may be small, because old-gen is only
// expanded based on an existing mixed evacuation workload at the end of the previous GC cycle. We'll expand
@ -319,43 +320,48 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap
// mixed evacuation, reserve all of this memory for compaction of old-gen and do not promote. Prioritize compaction
// over promotion in order to defragment OLD so that it will be better prepared to efficiently receive promoted memory.
old_evacuation_reserve = maximum_old_evacuation_reserve;
old_promo_reserve = 0;
old_promo_reserve = old_available - maximum_old_evacuation_reserve;
} else {
// Make all old-evacuation memory for promotion, but if we can't use it all for promotion, we'll allow some evacuation.
old_evacuation_reserve = 0;
old_evacuation_reserve = old_available - maximum_old_evacuation_reserve;
old_promo_reserve = maximum_old_evacuation_reserve;
}
assert(old_evacuation_reserve <= old_available, "Error");
// We see too many old-evacuation failures if we force ourselves to evacuate into regions that are not initially empty.
// So we limit the old-evacuation reserve to unfragmented memory. Even so, old-evacuation is free to fill in nooks and
// crannies within existing partially used regions and it generally tries to do so.
const size_t old_free_unfragmented = old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes();
const size_t old_free_unfragmented = old_generation->free_unaffiliated_regions() * region_size_bytes;
if (old_evacuation_reserve > old_free_unfragmented) {
const size_t delta = old_evacuation_reserve - old_free_unfragmented;
old_evacuation_reserve -= delta;
// Let promo consume fragments of old-gen memory if not global
if (!is_global()) {
old_promo_reserve += delta;
}
// Let promo consume fragments of old-gen memory
old_promo_reserve += delta;
}
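The clamping step above can be shown in isolation; the helper below and its byte values are illustrative assumptions, not code from this change.

#include <cassert>
#include <cstddef>

// Illustrative sketch: cap the old-evacuation reserve at the unfragmented (fully empty)
// old memory and let the promotion reserve absorb the fragmented remainder.
static void cap_old_evac_to_unfragmented(size_t& old_evac_reserve, size_t& old_promo_reserve,
                                         size_t old_free_unfragmented) {
  if (old_evac_reserve > old_free_unfragmented) {
    const size_t delta = old_evac_reserve - old_free_unfragmented;
    old_evac_reserve -= delta;     // now equal to the unfragmented memory
    old_promo_reserve += delta;    // fragmented old memory still serves promotions
  }
}

int main() {
  size_t evac = size_t(96) << 20, promo = size_t(32) << 20;
  cap_old_evac_to_unfragmented(evac, promo, size_t(64) << 20);
  assert(evac == (size_t(64) << 20) && promo == (size_t(64) << 20));
  return 0;
}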
// Preselect regions for promotion by evacuation (obtaining the live data to seed promoted_reserve),
// and identify regions that will promote in place. These use the tenuring threshold.
const size_t consumed_by_advance_promotion = select_aged_regions(old_promo_reserve);
assert(consumed_by_advance_promotion <= maximum_old_evacuation_reserve, "Cannot promote more than available old-gen memory");
// If is_global(), we let garbage-first heuristic determine cset membership. Otherwise, we give priority
// to tenurable regions by preselecting regions for promotion by evacuation (obtaining the live data to seed promoted_reserve).
// This also identifies regions that will be promoted in place. These use the tenuring threshold.
const size_t consumed_by_advance_promotion = select_aged_regions(is_global()? 0: old_promo_reserve);
assert(consumed_by_advance_promotion <= old_promo_reserve, "Do not promote more than budgeted");
// The young evacuation reserve can be no larger than young_unaffiliated. Planning to evacuate into partially consumed
// young regions is doomed to failure if any of those partially consumed regions is selected for the collection set.
size_t young_unaffiliated = young_generation->free_unaffiliated_regions() * region_size_bytes;
// If any regions have been selected for promotion in place, this has the effect of decreasing available within mutator
// and collector partitions, due to padding of remnant memory within each promoted in place region. This will affect
// young_evacuation_reserve but not old_evacuation_reserve or consumed_by_advance_promotion. So recompute.
young_evacuation_reserve = MIN2(young_evacuation_reserve, young_generation->available_with_reserve());
size_t young_evacuation_reserve = MIN2(maximum_young_evacuation_reserve, young_unaffiliated);
// Note that unused old_promo_reserve might not be entirely consumed_by_advance_promotion. Do not transfer this
// to old_evacuation_reserve because this memory is likely very fragmented, and we do not want to increase the likelihood
// of old evacuation failure.
// of old evacuation failure. Leave this memory in the promoted reserve as it may be targeted by opportunistic
// promotions (found during evacuation of young regions).
young_generation->set_evacuation_reserve(young_evacuation_reserve);
old_generation->set_evacuation_reserve(old_evacuation_reserve);
old_generation->set_promoted_reserve(consumed_by_advance_promotion);
old_generation->set_promoted_reserve(old_promo_reserve);
// There is no need to expand OLD because all memory used here was set aside at end of previous GC, except in the
// case of a GLOBAL gc. During choose_collection_set() of GLOBAL, old will be expanded on demand.
@ -363,8 +369,8 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap
// Having chosen the collection set, adjust the budgets for generational mode based on its composition. Note
// that young_generation->available() now knows about recently discovered immediate garbage.
//
void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap, ShenandoahCollectionSet* const collection_set) {
void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap,
ShenandoahCollectionSet* const collection_set, size_t add_regions_to_old) {
shenandoah_assert_generational();
// We may find that old_evacuation_reserve and/or loaned_for_young_evacuation are not fully consumed, in which case we may
// be able to increase regions_available_to_loan
@ -398,7 +404,8 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap,
// Leave old_evac_reserve as previously configured
} else if (old_evacuated_committed < old_evacuation_reserve) {
// This happens if the old-gen collection consumes less than full budget.
log_debug(gc, cset)("Shrinking old evac reserve to match old_evac_commited: " PROPERFMT, PROPERFMTARGS(old_evacuated_committed));
log_debug(gc, cset)("Shrinking old evac reserve to match old_evac_commited: " PROPERFMT,
PROPERFMTARGS(old_evacuated_committed));
old_evacuation_reserve = old_evacuated_committed;
old_generation->set_evacuation_reserve(old_evacuation_reserve);
}
@ -409,11 +416,17 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap,
size_t young_evacuated = collection_set->get_live_bytes_in_untenurable_regions();
size_t young_evacuated_reserve_used = (size_t) (ShenandoahEvacWaste * double(young_evacuated));
size_t total_young_available = young_generation->available_with_reserve();
assert(young_evacuated_reserve_used <= total_young_available, "Cannot evacuate more than is available in young");
size_t total_young_available = young_generation->available_with_reserve() - add_regions_to_old * region_size_bytes;
assert(young_evacuated_reserve_used <= total_young_available, "Cannot evacuate (%zu) more than is available in young (%zu)",
young_evacuated_reserve_used, total_young_available);
young_generation->set_evacuation_reserve(young_evacuated_reserve_used);
size_t old_available = old_generation->available();
// We have not yet rebuilt the free set. Some of the memory that is thought to be available within old may no
// longer be available if that memory had been free within regions that were selected for the collection set.
// Make the necessary adjustments to old_available.
size_t old_available =
old_generation->available() + add_regions_to_old * region_size_bytes - collection_set->get_old_available_bytes_collected();
// Now that we've established the collection set, we know how much memory is really required by old-gen for evacuation
// and promotion reserves. Try shrinking OLD now in case that gives us a bit more runway for mutator allocations during
// evac and update phases.
@ -422,21 +435,27 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap,
if (old_available < old_consumed) {
// This can happen due to round-off errors when adding the results of truncated integer arithmetic.
// We've already truncated old_evacuated_committed. Truncate young_advance_promoted_reserve_used here.
assert(young_advance_promoted_reserve_used <= (33 * (old_available - old_evacuated_committed)) / 32,
"Round-off errors should be less than 3.125%%, committed: %zu, reserved: %zu",
young_advance_promoted_reserve_used, old_available - old_evacuated_committed);
young_advance_promoted_reserve_used = old_available - old_evacuated_committed;
if (old_available > old_evacuated_committed) {
young_advance_promoted_reserve_used = old_available - old_evacuated_committed;
} else {
young_advance_promoted_reserve_used = 0;
old_evacuated_committed = old_available;
}
// TODO: reserve for full promotion reserve, not just for advance (preselected) promotion
old_consumed = old_evacuated_committed + young_advance_promoted_reserve_used;
}
assert(old_available >= old_consumed, "Cannot consume (%zu) more than is available (%zu)",
old_consumed, old_available);
size_t excess_old = old_available - old_consumed;
size_t unaffiliated_old_regions = old_generation->free_unaffiliated_regions();
size_t unaffiliated_old_regions = old_generation->free_unaffiliated_regions() + add_regions_to_old;
size_t unaffiliated_old = unaffiliated_old_regions * region_size_bytes;
assert(old_available >= unaffiliated_old,
"Unaffiliated old (%zu is %zu * %zu) is a subset of old available (%zu)",
unaffiliated_old, unaffiliated_old_regions, region_size_bytes, old_available);
assert(unaffiliated_old >= old_evacuated_committed, "Do not evacuate (%zu) more than unaffiliated old (%zu)",
old_evacuated_committed, unaffiliated_old);
// Make sure old_evac_committed is unaffiliated
if (old_evacuated_committed > 0) {
@ -454,20 +473,22 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap,
}
// If we find that OLD has excess regions, give them back to YOUNG now to reduce likelihood we run out of allocation
// runway during evacuation and update-refs.
size_t regions_to_xfer = 0;
// runway during evacuation and update-refs. We may make further adjustments to balance.
ssize_t add_regions_to_young = 0;
if (excess_old > unaffiliated_old) {
// we can give back unaffiliated_old (all of unaffiliated is excess)
if (unaffiliated_old_regions > 0) {
regions_to_xfer = unaffiliated_old_regions;
add_regions_to_young = unaffiliated_old_regions;
}
} else if (unaffiliated_old_regions > 0) {
// excess_old < unaffiliated old: we can give back MIN(excess_old/region_size_bytes, unaffiliated_old_regions)
size_t excess_regions = excess_old / region_size_bytes;
regions_to_xfer = MIN2(excess_regions, unaffiliated_old_regions);
add_regions_to_young = MIN2(excess_regions, unaffiliated_old_regions);
}
if (regions_to_xfer > 0) {
excess_old -= regions_to_xfer * region_size_bytes;
if (add_regions_to_young > 0) {
assert(excess_old >= add_regions_to_young * region_size_bytes, "Cannot xfer more than excess old");
excess_old -= add_regions_to_young * region_size_bytes;
log_debug(gc, ergo)("Before start of evacuation, total_promotion reserve is young_advance_promoted_reserve: %zu "
"plus excess: old: %zu", young_advance_promoted_reserve_used, excess_old);
}
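The region give-back decision above amounts to a min over whole regions; the sketch below restates it with illustrative numbers (the 4 MB region size is an assumption).

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Illustrative sketch: how many whole regions of old-gen excess can be returned to young.
// Only unaffiliated (empty) regions may move, and only as many as the excess covers.
static size_t regions_for_young(size_t excess_old_bytes, size_t unaffiliated_old_regions,
                                size_t region_size_bytes) {
  const size_t excess_regions = excess_old_bytes / region_size_bytes;   // round down
  return std::min(excess_regions, unaffiliated_old_regions);
}

int main() {
  const size_t region = size_t(4) << 20;
  std::printf("%zu\n", regions_for_young(10 * region, 6, region));        // 6: capped by empty regions
  std::printf("%zu\n", regions_for_young(3 * region + 123, 6, region));   // 3: capped by excess
  return 0;
}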
@ -475,6 +496,7 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap,
// Add in the excess_old memory to hold unanticipated promotions, if any. If there are more unanticipated
// promotions than fit in reserved memory, they will be deferred until a future GC pass.
size_t total_promotion_reserve = young_advance_promoted_reserve_used + excess_old;
old_generation->set_promoted_reserve(total_promotion_reserve);
old_generation->reset_promoted_expended();
}
@ -782,17 +804,13 @@ void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) {
ShenandoahCollectionSetPreselector preselector(collection_set, heap->num_regions());
// Find the amount that will be promoted, regions that will be promoted in
// place, and preselect older regions that will be promoted by evacuation.
// place, and preselected older regions that will be promoted by evacuation.
compute_evacuation_budgets(heap);
// Choose the collection set, including the regions preselected above for
// promotion into the old generation.
_heuristics->choose_collection_set(collection_set);
if (!collection_set->is_empty()) {
// only make use of evacuation budgets when we are evacuating
adjust_evacuation_budgets(heap, collection_set);
}
// Choose the collection set, including the regions preselected above for promotion into the old generation.
size_t add_regions_to_old = _heuristics->choose_collection_set(collection_set);
// Even if collection_set->is_empty(), we want to adjust budgets, making reserves available to mutator.
adjust_evacuation_budgets(heap, collection_set, add_regions_to_old);
if (is_global()) {
// We have just chosen a collection set for a global cycle. The mark bitmap covering old regions is complete, so
// the remembered set scan can use that to avoid walking into garbage. When the next old mark begins, we will
@ -816,17 +834,16 @@ void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) {
ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
ShenandoahHeapLocker locker(heap->lock());
// We are preparing for evacuation. At this time, we ignore cset region tallies.
size_t young_cset_regions, old_cset_regions, first_old, last_old, num_old;
_free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
// We are preparing for evacuation.
size_t young_trashed_regions, old_trashed_regions, first_old, last_old, num_old;
_free_set->prepare_to_rebuild(young_trashed_regions, old_trashed_regions, first_old, last_old, num_old);
if (heap->mode()->is_generational()) {
ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
gen_heap->compute_old_generation_balance(young_cset_regions, old_cset_regions);
size_t allocation_runway =
gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_trashed_regions);
gen_heap->compute_old_generation_balance(allocation_runway, old_trashed_regions, young_trashed_regions);
}
// Free set construction uses reserve quantities, because they are known to be valid here
_free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old, true);
_free_set->finish_rebuild(young_trashed_regions, old_trashed_regions, num_old);
}
}

View File

@ -63,9 +63,10 @@ private:
// Compute evacuation budgets prior to choosing collection set.
void compute_evacuation_budgets(ShenandoahHeap* heap);
// Adjust evacuation budgets after choosing collection set.
// Adjust evacuation budgets after choosing collection set. The argument regions_to_xfer represents regions to be
// transferred to old based on decisions made in top_off_collection_set()
void adjust_evacuation_budgets(ShenandoahHeap* heap,
ShenandoahCollectionSet* collection_set);
ShenandoahCollectionSet* collection_set, size_t regions_to_xfer);
// Preselect for possible inclusion into the collection set exactly the most
// garbage-dense regions, including those that satisfy criteria 1 & 2 below,
@ -144,6 +145,22 @@ private:
virtual void prepare_gc();
// Called during final mark, chooses collection set, rebuilds free set.
// Upon return from prepare_regions_and_collection_set(), certain parameters have been established to govern the
// evacuation efforts that are about to begin. In particular:
//
// old_generation->get_promoted_reserve() represents the amount of memory within old-gen's available memory that has
// been set aside to hold objects promoted from young-gen memory. This represents an estimated percentage
// of the live young-gen memory within the collection set. If there is more data ready to be promoted than
// can fit within this reserve, the promotion of some objects will be deferred until a subsequent evacuation
// pass.
//
// old_generation->get_evacuation_reserve() represents the amount of memory within old-gen's available memory that has been
// set aside to hold objects evacuated from the old-gen collection set.
//
// young_generation->get_evacuation_reserve() represents the amount of memory within young-gen's available memory that has
// been set aside to hold objects evacuated from the young-gen collection set. Conservatively, this value
// equals the entire amount of live young-gen memory within the collection set, even though some of this memory
// will likely be promoted.
virtual void prepare_regions_and_collection_set(bool concurrent);
// Cancel marking (used by Full collect and when cancelling cycle).

View File

@ -55,9 +55,6 @@ void ShenandoahGenerationalFullGC::prepare() {
// Since we may arrive here from degenerated GC failure of either young or old, establish generation as GLOBAL.
heap->set_active_generation(heap->global_generation());
// No need for old_gen->increase_used() as this was done when plabs were allocated.
heap->reset_generation_reserves();
// Full GC supersedes any marking or coalescing in old generation.
heap->old_generation()->cancel_gc();
}
@ -156,8 +153,11 @@ void ShenandoahGenerationalFullGC::compute_balances() {
// In case this Full GC resulted from degeneration, clear the tally on anticipated promotion.
heap->old_generation()->set_promotion_potential(0);
// Invoke this in case we are able to transfer memory from OLD to YOUNG.
heap->compute_old_generation_balance(0, 0);
// Invoke this in case we are able to transfer memory from OLD to YOUNG
size_t allocation_runway =
heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(0L);
heap->compute_old_generation_balance(allocation_runway, 0, 0);
}
ShenandoahPrepareForGenerationalCompactionObjectClosure::ShenandoahPrepareForGenerationalCompactionObjectClosure(PreservedMarks* preserved_marks,

View File

@ -299,9 +299,9 @@ oop ShenandoahGenerationalHeap::try_evacuate_object(oop p, Thread* thread, uint
alloc_from_lab = false;
}
// else, we leave copy equal to nullptr, signaling a promotion failure below if appropriate.
// We choose not to promote objects smaller than PLAB::min_size() by way of shared allocations, as this is too
// We choose not to promote objects smaller than size_threshold by way of shared allocations as this is too
// costly. Instead, we'll simply "evacuate" to young-gen memory (using a GCLAB) and will promote in a future
// evacuation pass. This condition is denoted by: is_promotion && has_plab && (size <= PLAB::min_size())
// evacuation pass. This condition is denoted by: is_promotion && has_plab && (size <= size_threshold).
}
#ifdef ASSERT
}
@ -576,19 +576,18 @@ void ShenandoahGenerationalHeap::retire_plab(PLAB* plab) {
// Make sure old-generation is large enough, but no larger than is necessary, to hold mixed evacuations
// and promotions, if we anticipate either. Any deficit is provided by the young generation, subject to
// xfer_limit, and any surplus is transferred to the young generation.
//
// xfer_limit is the maximum we're able to transfer from young to old based on either:
// 1. an assumption that we will be able to replenish memory "borrowed" from young at the end of collection, or
// 2. there is sufficient excess in the allocation runway during GC idle cycles
void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_limit, size_t old_cset_regions) {
// mutator_xfer_limit, and any surplus is transferred to the young generation. mutator_xfer_limit is
// the maximum we're able to transfer from young to old. This is called at the end of GC, as we prepare
// for the idle span that precedes the next GC.
void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_xfer_limit,
size_t old_trashed_regions, size_t young_trashed_regions) {
shenandoah_assert_heaplocked();
// We can limit the old reserve to the size of anticipated promotions:
// max_old_reserve is an upper bound on memory evacuated from old and promoted to old,
// clamped by the old generation space available.
//
// Here's the algebra.
// Let SOEP = ShenandoahOldEvacRatioPercent,
// Let SOEP = ShenandoahOldEvacPercent,
// OE = old evac,
// YE = young evac, and
// TE = total evac = OE + YE
@ -600,81 +599,171 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_
// => OE = YE*SOEP/(100-SOEP)
// We have to be careful in the event that SOEP is set to 100 by the user.
assert(ShenandoahOldEvacRatioPercent <= 100, "Error");
const size_t old_available = old_generation()->available();
// The free set will reserve this amount of memory to hold young evacuations
const size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100;
// In the case that ShenandoahOldEvacRatioPercent equals 100, max_old_reserve is limited only by xfer_limit.
const double bound_on_old_reserve = old_available + old_xfer_limit + young_reserve;
const double max_old_reserve = ((ShenandoahOldEvacRatioPercent == 100)? bound_on_old_reserve:
MIN2(double(young_reserve * ShenandoahOldEvacRatioPercent)
/ double(100 - ShenandoahOldEvacRatioPercent), bound_on_old_reserve));
assert(ShenandoahOldEvacPercent <= 100, "Error");
const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
ShenandoahOldGeneration* old_gen = old_generation();
size_t old_capacity = old_gen->max_capacity();
size_t old_usage = old_gen->used(); // includes humongous waste
size_t old_available = ((old_capacity >= old_usage)? old_capacity - old_usage: 0) + old_trashed_regions * region_size_bytes;
ShenandoahYoungGeneration* young_gen = young_generation();
size_t young_capacity = young_gen->max_capacity();
size_t young_usage = young_gen->used(); // includes humongous waste
size_t young_available = ((young_capacity >= young_usage)? young_capacity - young_usage: 0);
size_t freeset_available = free_set()->available_locked();
if (young_available > freeset_available) {
young_available = freeset_available;
}
young_available += young_trashed_regions * region_size_bytes;
// The free set will reserve this amount of memory to hold young evacuations (initialized to the ideal reserve)
size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100;
// If ShenandoahOldEvacPercent equals 100, max_old_reserve is limited only by mutator_xfer_limit and young_reserve
const size_t bound_on_old_reserve = ((old_available + mutator_xfer_limit + young_reserve) * ShenandoahOldEvacPercent) / 100;
size_t proposed_max_old = ((ShenandoahOldEvacPercent == 100)?
bound_on_old_reserve:
MIN2((young_reserve * ShenandoahOldEvacPercent) / (100 - ShenandoahOldEvacPercent),
bound_on_old_reserve));
if (young_reserve > young_available) {
young_reserve = young_available;
}
// Decide how much old space we should reserve for a mixed collection
double reserve_for_mixed = 0;
if (old_generation()->has_unprocessed_collection_candidates()) {
size_t reserve_for_mixed = 0;
const size_t old_fragmented_available =
old_available - (old_generation()->free_unaffiliated_regions() + old_trashed_regions) * region_size_bytes;
if (old_fragmented_available > proposed_max_old) {
// After we've promoted regions in place, there may be an abundance of old-fragmented available memory,
// even more than the desired percentage for old reserve. We cannot transfer these fragmented regions back
// to young. Instead we make the best of the situation by using this fragmented memory for both promotions
// and evacuations.
proposed_max_old = old_fragmented_available;
}
size_t reserve_for_promo = old_fragmented_available;
const size_t max_old_reserve = proposed_max_old;
const size_t mixed_candidate_live_memory = old_generation()->unprocessed_collection_candidates_live_memory();
const bool doing_mixed = (mixed_candidate_live_memory > 0);
if (doing_mixed) {
// We want this much memory to be unfragmented in order to reliably evacuate old. This is conservative because we
// may not evacuate the entirety of unprocessed candidates in a single mixed evacuation.
const double max_evac_need =
(double(old_generation()->unprocessed_collection_candidates_live_memory()) * ShenandoahOldEvacWaste);
const size_t max_evac_need = (size_t) (mixed_candidate_live_memory * ShenandoahOldEvacWaste);
assert(old_available >= old_generation()->free_unaffiliated_regions() * region_size_bytes,
"Unaffiliated available must be less than total available");
const double old_fragmented_available =
double(old_available - old_generation()->free_unaffiliated_regions() * region_size_bytes);
reserve_for_mixed = max_evac_need + old_fragmented_available;
if (reserve_for_mixed > max_old_reserve) {
reserve_for_mixed = max_old_reserve;
// We prefer to evacuate all of mixed into unfragmented memory, and will expand old in order to do so, unless
// we already have too much fragmented available memory in old.
reserve_for_mixed = max_evac_need;
if (reserve_for_mixed + reserve_for_promo > max_old_reserve) {
// In this case, we'll allow old-evac to target some of the fragmented old memory.
size_t excess_reserves = (reserve_for_mixed + reserve_for_promo) - max_old_reserve;
if (reserve_for_promo > excess_reserves) {
reserve_for_promo -= excess_reserves;
} else {
excess_reserves -= reserve_for_promo;
reserve_for_promo = 0;
reserve_for_mixed -= excess_reserves;
}
}
}
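The trimming order above (promotion reserve gives way before the mixed-evacuation reserve) can be sketched on its own; the function below is an illustration, not part of this change.

#include <cassert>
#include <cstddef>

// Illustrative sketch: when promo + mixed reserves exceed the old-reserve cap, trim the
// promotion reserve first and dip into the mixed-evacuation reserve only if that is not enough.
static void trim_old_reserves(size_t& reserve_for_promo, size_t& reserve_for_mixed, size_t max_old_reserve) {
  if (reserve_for_mixed + reserve_for_promo <= max_old_reserve) {
    return;                                              // nothing to trim
  }
  size_t excess = (reserve_for_mixed + reserve_for_promo) - max_old_reserve;
  if (reserve_for_promo > excess) {
    reserve_for_promo -= excess;
  } else {
    excess -= reserve_for_promo;
    reserve_for_promo = 0;
    reserve_for_mixed -= excess;
  }
}

int main() {
  size_t promo = 10, mixed = 50;
  trim_old_reserves(promo, mixed, 45);    // excess 15: promo absorbs 10, mixed absorbs 5
  assert(promo == 0 && mixed == 45);
  return 0;
}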
// Decide how much space we should reserve for promotions from young
size_t reserve_for_promo = 0;
// Decide how much additional space we should reserve for promotions from young. We give priority to mixed evacuations
// over promotions.
const size_t promo_load = old_generation()->get_promotion_potential();
const bool doing_promotions = promo_load > 0;
if (doing_promotions) {
// We're promoting and have a bound on the maximum amount that can be promoted
assert(max_old_reserve >= reserve_for_mixed, "Sanity");
const size_t available_for_promotions = max_old_reserve - reserve_for_mixed;
reserve_for_promo = MIN2((size_t)(promo_load * ShenandoahPromoEvacWaste), available_for_promotions);
// We've already set aside all of the fragmented available memory within old-gen to hold old objects
// promoted from the young generation. promo_load represents the memory that we anticipate will be promoted
// from regions that have reached tenure age. Ideally, we will always use fragmented old-gen memory
// to hold individually promoted objects and will use unfragmented old-gen memory for the old-gen
// evacuation workload.
// We're promoting and have an estimate of memory to be promoted from aged regions
assert(max_old_reserve >= (reserve_for_mixed + reserve_for_promo), "Sanity");
const size_t available_for_additional_promotions = max_old_reserve - (reserve_for_mixed + reserve_for_promo);
size_t promo_need = (size_t)(promo_load * ShenandoahPromoEvacWaste);
if (promo_need > reserve_for_promo) {
reserve_for_promo += MIN2(promo_need - reserve_for_promo, available_for_additional_promotions);
}
// We've already reserved all the memory required for the promo_load, and possibly more. The excess
// can be consumed by objects promoted from regions that have not yet reached tenure age.
}
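A small sketch of the promotion top-up just above: grow the promotion reserve toward the estimated need, but never past the remaining headroom under the cap (names and numbers are illustrative).

#include <algorithm>
#include <cassert>
#include <cstddef>

// Illustrative sketch of the top-up rule: reserve_for_promo moves toward promo_need,
// limited by whatever headroom is still available under max_old_reserve.
static size_t topped_up_promo(size_t reserve_for_promo, size_t promo_need, size_t headroom) {
  if (promo_need > reserve_for_promo) {
    reserve_for_promo += std::min(promo_need - reserve_for_promo, headroom);
  }
  return reserve_for_promo;
}

int main() {
  assert(topped_up_promo(8, 20, 5) == 13);   // capped by the remaining headroom
  assert(topped_up_promo(8, 10, 5) == 10);   // need fully satisfied
  assert(topped_up_promo(8,  4, 5) == 8);    // already sufficient, unchanged
  return 0;
}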
// This is the total old we want to ideally reserve
const size_t old_reserve = reserve_for_mixed + reserve_for_promo;
assert(old_reserve <= max_old_reserve, "cannot reserve more than max for old evacuations");
// This is the total old we want to reserve (initialized to the ideal reserve)
size_t old_reserve = reserve_for_mixed + reserve_for_promo;
// We now check if the old generation is running a surplus or a deficit.
const size_t max_old_available = old_generation()->available() + old_cset_regions * region_size_bytes;
if (max_old_available >= old_reserve) {
// We are running a surplus, so the old region surplus can go to young
const size_t old_surplus = (max_old_available - old_reserve) / region_size_bytes;
const size_t unaffiliated_old_regions = old_generation()->free_unaffiliated_regions() + old_cset_regions;
const size_t old_region_surplus = MIN2(old_surplus, unaffiliated_old_regions);
old_generation()->set_region_balance(checked_cast<ssize_t>(old_region_surplus));
} else {
// We are running a deficit which we'd like to fill from young.
// Ignore that this will directly impact young_generation()->max_capacity(),
// indirectly impacting young_reserve and old_reserve. These computations are conservative.
// Note that deficit is rounded up by one region.
const size_t old_need = (old_reserve - max_old_available + region_size_bytes - 1) / region_size_bytes;
const size_t max_old_region_xfer = old_xfer_limit / region_size_bytes;
size_t old_region_deficit = 0;
size_t old_region_surplus = 0;
// Round down the regions we can transfer from young to old. If we're running short
// on young-gen memory, we restrict the xfer. Old-gen collection activities will be
// curtailed if the budget is restricted.
const size_t old_region_deficit = MIN2(old_need, max_old_region_xfer);
size_t mutator_region_xfer_limit = mutator_xfer_limit / region_size_bytes;
// Align mutator_xfer_limit to a whole number of regions
mutator_xfer_limit = mutator_region_xfer_limit * region_size_bytes;
if (old_available >= old_reserve) {
// We are running a surplus, so the old region surplus can go to young
const size_t old_surplus = old_available - old_reserve;
old_region_surplus = old_surplus / region_size_bytes;
const size_t unaffiliated_old_regions = old_generation()->free_unaffiliated_regions() + old_trashed_regions;
old_region_surplus = MIN2(old_region_surplus, unaffiliated_old_regions);
old_generation()->set_region_balance(checked_cast<ssize_t>(old_region_surplus));
} else if (old_available + mutator_xfer_limit >= old_reserve) {
// Mutator's xfer limit is sufficient to satisfy our need: transfer all memory from there
size_t old_deficit = old_reserve - old_available;
old_region_deficit = (old_deficit + region_size_bytes - 1) / region_size_bytes;
old_generation()->set_region_balance(0 - checked_cast<ssize_t>(old_region_deficit));
} else {
// We'll try to xfer from both mutator excess and from young collector reserve
size_t available_reserves = old_available + young_reserve + mutator_xfer_limit;
size_t old_entitlement = (available_reserves * ShenandoahOldEvacPercent) / 100;
// Round old_entitlement down to nearest multiple of regions to be transferred to old
size_t entitled_xfer = old_entitlement - old_available;
entitled_xfer = region_size_bytes * (entitled_xfer / region_size_bytes);
size_t unaffiliated_young_regions = young_generation()->free_unaffiliated_regions();
size_t unaffiliated_young_memory = unaffiliated_young_regions * region_size_bytes;
if (entitled_xfer > unaffiliated_young_memory) {
entitled_xfer = unaffiliated_young_memory;
}
old_entitlement = old_available + entitled_xfer;
if (old_entitlement < old_reserve) {
// There's not enough memory to satisfy our desire. Scale back our old-gen intentions.
size_t budget_overrun = old_reserve - old_entitlement;
if (reserve_for_promo > budget_overrun) {
reserve_for_promo -= budget_overrun;
old_reserve -= budget_overrun;
} else {
budget_overrun -= reserve_for_promo;
reserve_for_promo = 0;
reserve_for_mixed = (reserve_for_mixed > budget_overrun)? reserve_for_mixed - budget_overrun: 0;
old_reserve = reserve_for_promo + reserve_for_mixed;
}
}
// Because of adjustments above, old_reserve may be smaller now than it was when we tested the branch
// condition above: "(old_available + mutator_xfer_limit >= old_reserve)".
// Therefore, we do NOT know that: mutator_xfer_limit < old_reserve - old_available
size_t old_deficit = old_reserve - old_available;
old_region_deficit = (old_deficit + region_size_bytes - 1) / region_size_bytes;
// Shrink young_reserve to account for loan to old reserve
const size_t reserve_xfer_regions = old_region_deficit - mutator_region_xfer_limit;
young_reserve -= reserve_xfer_regions * region_size_bytes;
old_generation()->set_region_balance(0 - checked_cast<ssize_t>(old_region_deficit));
}
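The byte-to-region conversions in the branches above round in opposite directions; this stand-alone sketch shows the round-up used for a deficit (the 4 MB region size is an assumption).

#include <cstddef>
#include <cstdio>

// Illustrative sketch: a deficit is rounded up to whole regions (old-gen must receive at
// least what it needs), whereas a surplus is rounded down (only whole empty regions move).
static size_t old_region_deficit(size_t old_reserve, size_t old_available, size_t region_size_bytes) {
  const size_t deficit_bytes = old_reserve - old_available;   // caller ensures reserve > available
  return (deficit_bytes + region_size_bytes - 1) / region_size_bytes;
}

int main() {
  const size_t region = size_t(4) << 20;
  std::printf("%zu\n", old_region_deficit(10 * region + 1, 2 * region, region));  // 9, rounded up
  std::printf("%zu\n", old_region_deficit(10 * region, 2 * region, region));      // 8, exact
  return 0;
}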
}
void ShenandoahGenerationalHeap::reset_generation_reserves() {
ShenandoahHeapLocker locker(lock());
young_generation()->set_evacuation_reserve(0);
old_generation()->set_evacuation_reserve(0);
old_generation()->set_promoted_reserve(0);
assert(old_region_deficit == 0 || old_region_surplus == 0, "Only surplus or deficit, never both");
assert(young_reserve + reserve_for_mixed + reserve_for_promo <= old_available + young_available,
"Cannot reserve more memory than is available: %zu + %zu + %zu <= %zu + %zu",
young_reserve, reserve_for_mixed, reserve_for_promo, old_available, young_available);
// deficit/surplus adjustments to generation sizes will precede rebuild
young_generation()->set_evacuation_reserve(young_reserve);
old_generation()->set_evacuation_reserve(reserve_for_mixed);
old_generation()->set_promoted_reserve(reserve_for_promo);
}
void ShenandoahGenerationalHeap::coalesce_and_fill_old_regions(bool concurrent) {
@ -1015,10 +1104,6 @@ void ShenandoahGenerationalHeap::final_update_refs_update_region_states() {
void ShenandoahGenerationalHeap::complete_degenerated_cycle() {
shenandoah_assert_heaplocked_or_safepoint();
// In case degeneration interrupted concurrent evacuation or update references, we need to clean up
// transient state. Otherwise, these actions have no effect.
reset_generation_reserves();
if (!old_generation()->is_parsable()) {
ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_coalesce_and_fill);
coalesce_and_fill_old_regions(false);
@ -1036,7 +1121,6 @@ void ShenandoahGenerationalHeap::complete_concurrent_cycle() {
// throw off the heuristics.
entry_global_coalesce_and_fill();
}
reset_generation_reserves();
}
void ShenandoahGenerationalHeap::entry_global_coalesce_and_fill() {

View File

@ -136,7 +136,7 @@ public:
void reset_generation_reserves();
// Computes the optimal size for the old generation, represented as a surplus or deficit of old regions
void compute_old_generation_balance(size_t old_xfer_limit, size_t old_cset_regions);
void compute_old_generation_balance(size_t old_xfer_limit, size_t old_trashed_regions, size_t young_trashed_regions);
// Balances generations, coalesces and fills old regions if necessary
void complete_degenerated_cycle();

View File

@ -425,20 +425,29 @@ jint ShenandoahHeap::initialize() {
_affiliations[i] = ShenandoahAffiliation::FREE;
}
if (mode()->is_generational()) {
size_t young_reserve = (soft_max_capacity() * ShenandoahEvacReserve) / 100;
young_generation()->set_evacuation_reserve(young_reserve);
old_generation()->set_evacuation_reserve((size_t) 0);
old_generation()->set_promoted_reserve((size_t) 0);
}
_free_set = new ShenandoahFreeSet(this, _num_regions);
post_initialize_heuristics();
// We are initializing free set. We ignore cset region tallies.
size_t young_cset_regions, old_cset_regions, first_old, last_old, num_old;
_free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
size_t young_trashed_regions, old_trashed_regions, first_old, last_old, num_old;
_free_set->prepare_to_rebuild(young_trashed_regions, old_trashed_regions, first_old, last_old, num_old);
if (mode()->is_generational()) {
ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
// We cannot call
// gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions)
// until after the heap is fully initialized. So we make up a safe value here.
size_t allocation_runway = InitialHeapSize / 2;
gen_heap->compute_old_generation_balance(allocation_runway, old_cset_regions);
gen_heap->compute_old_generation_balance(allocation_runway, old_trashed_regions, young_trashed_regions);
}
_free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old);
_free_set->finish_rebuild(young_trashed_regions, old_trashed_regions, num_old);
}
if (AlwaysPreTouch) {
@ -2521,13 +2530,10 @@ void ShenandoahHeap::final_update_refs_update_region_states() {
parallel_heap_region_iterate(&cl);
}
void ShenandoahHeap::rebuild_free_set(bool concurrent) {
ShenandoahGCPhase phase(concurrent ?
ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
void ShenandoahHeap::rebuild_free_set_within_phase() {
ShenandoahHeapLocker locker(lock());
size_t young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count;
_free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
size_t young_trashed_regions, old_trashed_regions, first_old_region, last_old_region, old_region_count;
_free_set->prepare_to_rebuild(young_trashed_regions, old_trashed_regions, first_old_region, last_old_region, old_region_count);
// If there are no old regions, first_old_region will be greater than last_old_region
assert((first_old_region > last_old_region) ||
((last_old_region + 1 - first_old_region >= old_region_count) &&
@ -2546,19 +2552,11 @@ void ShenandoahHeap::rebuild_free_set(bool concurrent) {
// available for transfer to old. Note that transfer of humongous regions does not impact available.
ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
size_t allocation_runway =
gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions);
gen_heap->compute_old_generation_balance(allocation_runway, old_cset_regions);
// Total old_available may have been expanded to hold anticipated promotions. We trigger if the fragmented available
// memory represents more than 16 regions worth of data. Note that fragmentation may increase when we promote regular
// regions in place when many of these regular regions have an abundant amount of available memory within them.
// Fragmentation will decrease as promote-by-copy consumes the available memory within these partially consumed regions.
//
// We consider old-gen to have excessive fragmentation if more than 12.5% of old-gen is free memory that resides
// within partially consumed regions of memory.
gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_trashed_regions);
gen_heap->compute_old_generation_balance(allocation_runway, old_trashed_regions, young_trashed_regions);
}
// Rebuild free set based on adjusted generation sizes.
_free_set->finish_rebuild(young_cset_regions, old_cset_regions, old_region_count);
_free_set->finish_rebuild(young_trashed_regions, old_trashed_regions, old_region_count);
if (mode()->is_generational()) {
ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
@ -2567,6 +2565,13 @@ void ShenandoahHeap::rebuild_free_set(bool concurrent) {
}
}
void ShenandoahHeap::rebuild_free_set(bool concurrent) {
ShenandoahGCPhase phase(concurrent ?
ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
rebuild_free_set_within_phase();
}
bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
size_t slice = r->index() / _bitmap_regions_per_slice;

View File

@ -481,7 +481,9 @@ private:
void rendezvous_threads(const char* name);
void recycle_trash();
public:
// The following two functions rebuild the free set at the end of GC, in preparation for an idle phase.
void rebuild_free_set(bool concurrent);
void rebuild_free_set_within_phase();
void notify_gc_progress();
void notify_gc_no_progress();
size_t get_gc_no_progress_count() const;

View File

@ -595,6 +595,8 @@ void ShenandoahHeapRegion::try_recycle_under_lock() {
_recycling.unset();
} else {
// Ensure recycling is unset before returning to mutator to continue memory allocation.
// Otherwise, the mutator might see the region as fully recycled and might change its affiliation only to have
// the racing GC worker thread overwrite its affiliation to FREE.
while (_recycling.is_set()) {
if (os::is_MP()) {
SpinPause();
@ -605,6 +607,8 @@ void ShenandoahHeapRegion::try_recycle_under_lock() {
}
}
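The claim-then-wait coordination described in the comments above can be illustrated with standard atomics; the sketch below mirrors only the pattern and uses none of the HotSpot types.

#include <atomic>
#include <thread>

// Illustrative sketch: one thread claims the recycling work; a thread that loses the race
// spins (here: yields) until the winner clears the flag, mirroring the wait loop above.
static std::atomic<bool> recycling{false};
static std::atomic<bool> region_is_trash{true};

static void try_recycle_sketch() {
  bool expected = false;
  if (region_is_trash.load() && recycling.compare_exchange_strong(expected, true)) {
    region_is_trash.store(false);          // "recycle" the region
    recycling.store(false);                // publish completion to any waiter
  } else {
    while (recycling.load(std::memory_order_acquire)) {
      std::this_thread::yield();           // wait until the racing recycler is done
    }
  }
}

int main() {
  std::thread a(try_recycle_sketch);
  std::thread b(try_recycle_sketch);
  a.join();
  b.join();
  return 0;
}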
// Note that return from try_recycle() does not mean the region has been recycled. It only means that
// some GC worker thread has taken responsibility to recycle the region, eventually.
void ShenandoahHeapRegion::try_recycle() {
shenandoah_assert_not_heaplocked();
if (is_trash() && _recycling.try_set()) {

View File

@ -128,8 +128,6 @@ bool ShenandoahOldGC::collect(GCCause::Cause cause) {
// the space. This would be the last action if there is nothing to evacuate.
entry_cleanup_early();
heap->free_set()->log_status_under_lock();
assert(!heap->is_concurrent_strong_root_in_progress(), "No evacuations during old gc.");
// We must execute this vm operation if we completed final mark. We cannot
@ -138,7 +136,10 @@ bool ShenandoahOldGC::collect(GCCause::Cause cause) {
// collection.
heap->concurrent_final_roots();
size_t allocation_runway = heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(0);
heap->compute_old_generation_balance(allocation_runway, 0);
// After concurrent old marking finishes, we reclaim immediate garbage. Further, we may also want to expand OLD in order
// to make room for anticipated promotions and/or for mixed evacuations. Mixed evacuations are especially likely to
// follow the end of OLD marking.
heap->rebuild_free_set_within_phase();
heap->free_set()->log_status_under_lock();
return true;
}

View File

@ -427,8 +427,7 @@ void ShenandoahOldGeneration::prepare_regions_and_collection_set(bool concurrent
ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
size_t allocation_runway =
gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_trash_regions);
gen_heap->compute_old_generation_balance(allocation_runway, old_trash_regions);
gen_heap->compute_old_generation_balance(allocation_runway, old_trash_regions, young_trash_regions);
heap->free_set()->finish_rebuild(young_trash_regions, old_trash_regions, num_old);
}
}

View File

@ -66,8 +66,8 @@ private:
// remaining in a PLAB when it is retired.
size_t _promoted_expended;
// Represents the quantity of live bytes we expect to promote during the next evacuation
// cycle. This value is used by the young heuristic to trigger mixed collections.
// Represents the quantity of live bytes we expect to promote during the next GC cycle, either by
// evacuation or by promote-in-place. This value is used by the young heuristic to trigger mixed collections.
// It is also used when computing the optimum size for the old generation.
size_t _promotion_potential;

View File

@ -243,8 +243,7 @@ HeapWord* ShenandoahCardCluster::first_object_start(const size_t card_index, con
#ifdef ASSERT
assert(ShenandoahHeap::heap()->mode()->is_generational(), "Do not use in non-generational mode");
assert(region->is_old(), "Do not use for young regions");
// For HumongousRegion:s it's more efficient to jump directly to the
// start region.
// For humongous regions it's more efficient to jump directly to the start region.
assert(!region->is_humongous(), "Use region->humongous_start_region() instead");
#endif

View File

@ -420,7 +420,14 @@ public:
// span is the total memory affiliated with these stats (some of which is in use and other is available)
size_t span() const { return _regions * ShenandoahHeapRegion::region_size_bytes(); }
size_t non_trashed_span() const { return (_regions - _trashed_regions) * ShenandoahHeapRegion::region_size_bytes(); }
size_t non_trashed_span() const {
assert(_regions >= _trashed_regions, "sanity");
return (_regions - _trashed_regions) * ShenandoahHeapRegion::region_size_bytes();
}
size_t non_trashed_committed() const {
assert(_committed >= _trashed_regions * ShenandoahHeapRegion::region_size_bytes(), "sanity");
return _committed - (_trashed_regions * ShenandoahHeapRegion::region_size_bytes());
}
};
class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure {

View File

@ -400,27 +400,20 @@
"reserve/waste is incorrect, at the risk that application " \
"runs out of memory too early.") \
\
product(uintx, ShenandoahOldEvacRatioPercent, 75, EXPERIMENTAL, \
"The maximum proportion of evacuation from old-gen memory, " \
"expressed as a percentage. The default value 75 denotes that " \
"no more than 75% of the collection set evacuation workload may " \
"be towards evacuation of old-gen heap regions. This limits both "\
"the promotion of aged regions and the compaction of existing " \
"old regions. A value of 75 denotes that the total evacuation " \
"work may increase to up to four times the young gen evacuation " \
"work. A larger value allows quicker promotion and allows " \
"a smaller number of mixed evacuations to process " \
"the entire list of old-gen collection candidates at the cost " \
"of an increased disruption of the normal cadence of young-gen " \
"collections. A value of 100 allows a mixed evacuation to " \
"focus entirely on old-gen memory, allowing no young-gen " \
"regions to be collected, likely resulting in subsequent " \
"allocation failures because the allocation pool is not " \
"replenished. A value of 0 allows a mixed evacuation to " \
"focus entirely on young-gen memory, allowing no old-gen " \
"regions to be collected, likely resulting in subsequent " \
"promotion failures and triggering of stop-the-world full GC " \
"events.") \
product(uintx, ShenandoahOldEvacPercent, 75, EXPERIMENTAL, \
"The maximum evacuation to old-gen expressed as a percent of " \
"the total live memory within the collection set. With the " \
"default setting, if collection set evacuates X, no more than " \
"75% of X may hold objects evacuated from old or promoted to " \
"old from young. A value of 100 allows the entire collection " \
"set to be comprised of old-gen regions and young regions that " \
"have reached the tenure age. Larger values allow fewer mixed " \
"evacuations to reclaim all the garbage from old. Smaller " \
"values result in less variation in GC cycle times between " \
"young vs. mixed cycles. A value of 0 prevents mixed " \
"evacations from running and blocks promotion of aged regions " \
"by evacuation. Setting the value to 0 does not prevent " \
"regions from being promoted in place.") \
range(0,100) \
\
product(bool, ShenandoahEvacTracking, false, DIAGNOSTIC, \
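The renamed ShenandoahOldEvacPercent is expressed as a share of all live bytes in the collection set rather than as an old-to-young ratio. A rough arithmetic sketch of the cap, in plain Java with made-up numbers (illustrative only, not HotSpot code):

```java
// Illustrative arithmetic only: how ShenandoahOldEvacPercent bounds old-gen
// evacuation as a share of the live bytes in one collection set.
final class OldEvacBudget {
    public static void main(String[] args) {
        long totalCsetLiveBytes = 512L << 20;  // X: live bytes the cset will evacuate
        int  oldEvacPercent     = 75;          // default value of the flag

        long maxOldEvacBytes = totalCsetLiveBytes * oldEvacPercent / 100;  // 384 MiB
        long minYoungBytes   = totalCsetLiveBytes - maxOldEvacBytes;       // 128 MiB

        // With the default 75%, old-gen work may be up to three times the
        // young-gen work in the same cycle; 0 disables mixed evacuations
        // (promote-in-place is still allowed).
        System.out.println(maxOldEvacBytes + " / " + minYoungBytes);
    }
}
```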

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -192,18 +192,20 @@ void BytecodeTracer::trace_interpreter(const methodHandle& method, address bcp,
}
#endif
void BytecodeTracer::print_method_codes(const methodHandle& method, int from, int to, outputStream* st, int flags) {
void BytecodeTracer::print_method_codes(const methodHandle& method, int from, int to, outputStream* st, int flags, bool buffered) {
BytecodePrinter method_printer(flags);
BytecodeStream s(method);
s.set_interval(from, to);
// Keep output to st coherent: collect all lines and print at once.
ResourceMark rm;
stringStream ss;
outputStream* out = buffered ? &ss : st;
while (s.next() >= 0) {
method_printer.trace(method, s.bcp(), &ss);
method_printer.trace(method, s.bcp(), out);
}
if (buffered) {
st->print("%s", ss.as_string());
}
st->print("%s", ss.as_string());
}
void BytecodePrinter::print_constant(int cp_index, outputStream* st) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -39,7 +39,7 @@ class BytecodeClosure;
class BytecodeTracer: AllStatic {
public:
NOT_PRODUCT(static void trace_interpreter(const methodHandle& method, address bcp, uintptr_t tos, uintptr_t tos2, outputStream* st = tty);)
static void print_method_codes(const methodHandle& method, int from, int to, outputStream* st, int flags);
static void print_method_codes(const methodHandle& method, int from, int to, outputStream* st, int flags, bool buffered = true);
};
#endif // SHARE_INTERPRETER_BYTECODETRACER_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1898,15 +1898,15 @@ void Method::print_name(outputStream* st) const {
#endif // !PRODUCT || INCLUDE_JVMTI
void Method::print_codes_on(outputStream* st, int flags) const {
print_codes_on(0, code_size(), st, flags);
void Method::print_codes_on(outputStream* st, int flags, bool buffered) const {
print_codes_on(0, code_size(), st, flags, buffered);
}
void Method::print_codes_on(int from, int to, outputStream* st, int flags) const {
void Method::print_codes_on(int from, int to, outputStream* st, int flags, bool buffered) const {
Thread *thread = Thread::current();
ResourceMark rm(thread);
methodHandle mh (thread, (Method*)this);
BytecodeTracer::print_method_codes(mh, from, to, st, flags);
BytecodeTracer::print_method_codes(mh, from, to, st, flags, buffered);
}
CompressedLineNumberReadStream::CompressedLineNumberReadStream(u_char* buffer) : CompressedReadStream(buffer) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -466,8 +466,8 @@ public:
// prints byte codes
void print_codes(int flags = 0) const { print_codes_on(tty, flags); }
void print_codes_on(outputStream* st, int flags = 0) const;
void print_codes_on(int from, int to, outputStream* st, int flags = 0) const;
void print_codes_on(outputStream* st, int flags = 0, bool buffered = true) const;
void print_codes_on(int from, int to, outputStream* st, int flags = 0, bool buffered = true) const;
// method parameters
bool has_method_parameters() const

View File

@ -175,6 +175,10 @@ Node* GraphKit::unbox_vector(Node* v, const TypeInstPtr* vbox_type, BasicType el
assert(check_vbox(vbox_type), "");
const TypeVect* vt = TypeVect::make(elem_bt, num_elem, is_vector_mask(vbox_type->instance_klass()));
Node* unbox = gvn().transform(new VectorUnboxNode(C, vt, v, merged_memory()));
if (gvn().type(unbox)->isa_vect() == nullptr) {
assert(gvn().type(unbox) == Type::TOP, "sanity");
return nullptr; // not a vector
}
return unbox;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1919,6 +1919,15 @@ Node* VectorMaskToLongNode::Ideal_MaskAll(PhaseGVN* phase) {
// saved with a predicate type.
if (in1->Opcode() == Op_VectorStoreMask) {
Node* mask = in1->in(1);
// Skip the optimization if the mask is dead.
if (phase->type(mask) == Type::TOP) {
return nullptr;
}
// If the ideal graph is transformed correctly, the input mask should be a
// vector type node. The following optimization could ignore a mismatched
// type, but we keep the sanity check on the mask type via "is_vect()" in
// the assertion below, so that fewer transformations are involved before
// the compiler finally runs into a problem.
assert(!Matcher::mask_op_prefers_predicate(Opcode(), mask->bottom_type()->is_vect()), "sanity");
in1 = mask;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -491,6 +491,15 @@ void vframeArrayElement::unpack_on_stack(int caller_actual_parameters,
#ifndef PRODUCT
if (PrintDeoptimizationDetails) {
const bool print_codes = WizardMode && Verbose;
ResourceMark rm(thread);
stringStream codes_ss;
if (print_codes) {
// print_codes_on() may acquire MDOExtraData_lock (rank nosafepoint-1).
// To keep the lock acquisition order correct, call it before taking tty_lock.
// Avoid double buffering: set buffered=false.
method()->print_codes_on(&codes_ss, 0, false);
}
ttyLocker ttyl;
tty->print_cr("[%d. Interpreted Frame]", ++unpack_counter);
iframe()->print_on(tty);
@ -500,7 +509,9 @@ void vframeArrayElement::unpack_on_stack(int caller_actual_parameters,
RegisterMap::WalkContinuation::skip);
vframe* f = vframe::new_vframe(iframe(), &map, thread);
f->print();
if (WizardMode && Verbose) method()->print_codes();
if (print_codes) {
tty->print("%s", codes_ss.as_string());
}
tty->cr();
}
#endif // !PRODUCT
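The two hunks above format the bytecode listing into a stringStream before tty_lock is taken, so the lower-ranked MDOExtraData_lock is never acquired while the tty lock is held and the output stays coherent. A minimal Java sketch of the same pattern, with made-up names (not JDK internals):

```java
import java.util.List;

// Do the expensive formatting (which may take other locks) into a buffer
// first, then hold the output lock only long enough to print the result.
final class BufferedTrace {
    private static final Object OUT_LOCK = new Object();

    static void dumpFrame(List<String> bytecodes) {
        StringBuilder sb = new StringBuilder();
        for (String bc : bytecodes) {       // formatting happens outside the lock
            sb.append(bc).append('\n');
        }
        synchronized (OUT_LOCK) {           // lock held only for the final print
            System.out.print(sb.toString());
        }
    }

    public static void main(String[] args) {
        dumpFrame(List.of("aload_0", "invokespecial #1", "return"));
    }
}
```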

View File

@ -620,9 +620,12 @@ final class VirtualThread extends BaseVirtualThread {
// Object.wait
if (s == WAITING || s == TIMED_WAITING) {
int newState;
boolean blocked;
boolean interruptible = interruptibleWait;
if (s == WAITING) {
setState(newState = WAIT);
// may have been notified while in transition
blocked = notified && compareAndSetState(WAIT, BLOCKED);
} else {
// For timed-wait, a timeout task is scheduled to execute. The timeout
// task will change the thread state to UNBLOCKED and submit the thread
@ -637,22 +640,22 @@ final class VirtualThread extends BaseVirtualThread {
byte seqNo = ++timedWaitSeqNo;
timeoutTask = schedule(() -> waitTimeoutExpired(seqNo), timeout, MILLISECONDS);
setState(newState = TIMED_WAIT);
// May have been notified while in transition. This must be done while
// holding the monitor to avoid changing the state of a new timed wait call.
blocked = notified && compareAndSetState(TIMED_WAIT, BLOCKED);
}
}
// may have been notified while in transition to wait state
if (notified && compareAndSetState(newState, BLOCKED)) {
// may have even been unblocked already
if (blocked) {
// may have been unblocked already
if (blockPermit && compareAndSetState(BLOCKED, UNBLOCKED)) {
submitRunContinuation();
lazySubmitRunContinuation();
}
} else {
// may have been interrupted while in transition to wait state
if (interruptible && interrupted && compareAndSetState(newState, UNBLOCKED)) {
lazySubmitRunContinuation();
}
return;
}
// may have been interrupted while in transition to wait state
if (interruptible && interrupted && compareAndSetState(newState, UNBLOCKED)) {
submitRunContinuation();
return;
}
return;
}
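The rearranged wait path publishes the WAIT or TIMED_WAIT state and then applies a racing notification with a single compareAndSetState, so the notification can neither be applied twice nor clobber a newer wait. A rough analogue of that handshake, with invented states and names (not the real VirtualThread code):

```java
import java.util.concurrent.atomic.AtomicInteger;

// Analogue of the race handled above: after publishing the wait state,
// a notify that arrived during the transition is applied with one CAS.
final class WaitTransitionSketch {
    static final int RUNNING = 0, WAIT = 1, TIMED_WAIT = 2, BLOCKED = 3;
    final AtomicInteger state = new AtomicInteger(RUNNING);
    volatile boolean notified;

    boolean parkForWait(boolean timed) {
        int newState = timed ? TIMED_WAIT : WAIT;
        state.set(newState);
        // may have been notified while in transition
        return notified && state.compareAndSet(newState, BLOCKED);
    }

    public static void main(String[] args) {
        WaitTransitionSketch s = new WaitTransitionSketch();
        s.notified = true;                        // simulate a racing notify
        System.out.println(s.parkForWait(false)); // true: moved WAIT -> BLOCKED
    }
}
```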

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -188,13 +188,17 @@ public final class SwitchBootstraps {
String invocationName,
MethodType invocationType,
Object... labels) {
requireNonNull(lookup);
requireNonNull(invocationType);
requireNonNull(labels);
Class<?> selectorType = invocationType.parameterType(0);
if (invocationType.parameterCount() != 2
|| (!invocationType.returnType().equals(int.class))
|| !invocationType.parameterType(1).equals(int.class))
throw new IllegalArgumentException("Illegal invocation type " + invocationType);
for (Object l : labels) { // implicit null-check
for (Object l : labels) {
verifyLabel(l, selectorType);
}
@ -292,6 +296,10 @@ public final class SwitchBootstraps {
String invocationName,
MethodType invocationType,
Object... labels) {
requireNonNull(lookup);
requireNonNull(invocationType);
requireNonNull(labels);
if (invocationType.parameterCount() != 2
|| (!invocationType.returnType().equals(int.class))
|| invocationType.parameterType(0).isPrimitive()
@ -299,7 +307,7 @@ public final class SwitchBootstraps {
|| !invocationType.parameterType(1).equals(int.class))
throw new IllegalArgumentException("Illegal invocation type " + invocationType);
labels = labels.clone(); // implicit null check
labels = labels.clone();
Class<?> enumClass = invocationType.parameterType(0);
boolean constantsOnly = true;
@ -307,7 +315,7 @@ public final class SwitchBootstraps {
for (int i = 0; i < len; i++) {
Object convertedLabel =
convertEnumConstants(lookup, enumClass, labels[i]);
convertEnumConstants(enumClass, labels[i]);
labels[i] = convertedLabel;
if (constantsOnly)
constantsOnly = convertedLabel instanceof EnumDesc;
@ -331,7 +339,7 @@ public final class SwitchBootstraps {
return new ConstantCallSite(target);
}
private static <E extends Enum<E>> Object convertEnumConstants(MethodHandles.Lookup lookup, Class<?> enumClassTemplate, Object label) {
private static <E extends Enum<E>> Object convertEnumConstants(Class<?> enumClassTemplate, Object label) {
if (label == null) {
throw new IllegalArgumentException("null label found");
}
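The bootstrap methods now null-check their arguments explicitly up front instead of relying on the implicit NullPointerException from labels.clone() or the labels loop. A hedged sketch of that fail-fast ordering; the method below is a made-up stand-in, not the real SwitchBootstraps entry point:

```java
import java.lang.invoke.MethodType;
import java.util.Objects;

// Explicit null checks come first, before any shape validation.
final class BootstrapArgCheck {
    static void validate(MethodType invocationType, Object... labels) {
        Objects.requireNonNull(invocationType);
        Objects.requireNonNull(labels);
        if (invocationType.parameterCount() != 2
                || !invocationType.returnType().equals(int.class)) {
            throw new IllegalArgumentException("Illegal invocation type " + invocationType);
        }
    }

    public static void main(String[] args) {
        validate(MethodType.methodType(int.class, Object.class, int.class), "A", "B");
    }
}
```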

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -185,8 +185,8 @@ public interface SecureDirectoryStream<T>
/**
* Move a file from this directory to another directory.
*
* <p> This method works in a similar manner to {@link Files#move move}
* method when the {@link StandardCopyOption#ATOMIC_MOVE ATOMIC_MOVE} option
* <p> This method works in a similar manner to {@link Files#move Files.move}
* when the {@link StandardCopyOption#ATOMIC_MOVE ATOMIC_MOVE} option
* is specified. That is, this method moves a file as an atomic file system
* operation. If the {@code srcpath} parameter is an {@link Path#isAbsolute
* absolute} path then it locates the source file. If the parameter is a
@ -194,14 +194,15 @@ public interface SecureDirectoryStream<T>
* the {@code targetpath} parameter is absolute then it locates the target
* file (the {@code targetdir} parameter is ignored). If the parameter is
* a relative path it is located relative to the open directory identified
* by the {@code targetdir} parameter. In all cases, if the target file
* exists then it is implementation specific if it is replaced or this
* method fails.
* by the {@code targetdir} parameter, unless {@code targetdir} is
* {@code null}, in which case it is located relative to the current
* working directory. In all cases, if the target file exists then it is
* implementation specific if it is replaced or this method fails.
*
* @param srcpath
* the name of the file to move
* @param targetdir
* the destination directory
* the destination directory; can be {@code null}
* @param targetpath
* the name to give the file in the destination directory
*
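A possible usage sketch of the clarified contract: when targetdir is null, a relative targetpath is resolved against the current working directory. The directory names are made up, and the instanceof check is needed because not every provider returns a SecureDirectoryStream:

```java
import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.SecureDirectoryStream;

// Move a file out of an open directory into the current working directory.
final class MoveOutOfDirectory {
    public static void main(String[] args) throws IOException {
        try (DirectoryStream<Path> ds = Files.newDirectoryStream(Path.of("/tmp/src"))) {
            if (ds instanceof SecureDirectoryStream<Path> sds) {
                // null targetdir: "report.txt" is resolved against the CWD
                sds.move(Path.of("report.txt"), null, Path.of("report.txt"));
            }
        }
    }
}
```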

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -165,7 +165,6 @@ final class JsseJce {
static {
boolean mediator = true;
try {
Signature.getInstance(SIGNATURE_ECDSA);
Signature.getInstance(SIGNATURE_RAWECDSA);
KeyAgreement.getInstance("ECDH");
KeyFactory.getInstance("EC");

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -214,13 +214,13 @@ public class KAKeyDerivation implements SSLKeyDerivation {
var decapsulator = kem.newDecapsulator(localPrivateKey);
sharedSecret = decapsulator.decapsulate(
keyshare, 0, decapsulator.secretSize(),
"TlsPremasterSecret");
"Generic");
} else {
// Using traditional DH-style Key Agreement
KeyAgreement ka = KeyAgreement.getInstance(algorithmName);
ka.init(localPrivateKey);
ka.doPhase(peerPublicKey, true);
sharedSecret = ka.generateSecret("TlsPremasterSecret");
sharedSecret = ka.generateSecret("Generic");
}
return deriveHandshakeSecret(type, sharedSecret);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,10 +30,12 @@ import java.security.spec.AlgorithmParameterSpec;
import java.security.spec.ECParameterSpec;
import java.security.spec.InvalidParameterSpecException;
import java.security.spec.NamedParameterSpec;
import java.util.Arrays;
import java.util.Collections;
import java.util.EnumSet;
import java.util.List;
import java.util.ArrayList;
import java.util.Objects;
import java.util.Set;
import javax.crypto.KeyAgreement;
import javax.crypto.spec.DHParameterSpec;
@ -463,10 +465,9 @@ enum NamedGroup {
AlgorithmConstraints constraints, NamedGroupSpec type) {
boolean hasFFDHEGroups = false;
for (String ng : sslConfig.namedGroups) {
NamedGroup namedGroup = NamedGroup.nameOf(ng);
if (namedGroup != null &&
namedGroup.isAvailable && namedGroup.spec == type) {
for (NamedGroup namedGroup :
SupportedGroups.getGroupsFromConfig(sslConfig)) {
if (namedGroup.isAvailable && namedGroup.spec == type) {
if (namedGroup.isPermitted(constraints)) {
return true;
}
@ -501,8 +502,8 @@ enum NamedGroup {
// Is the named group supported?
static boolean isEnabled(SSLConfiguration sslConfig,
NamedGroup namedGroup) {
for (String ng : sslConfig.namedGroups) {
if (namedGroup.name.equalsIgnoreCase(ng)) {
for (NamedGroup ng : SupportedGroups.getGroupsFromConfig(sslConfig)) {
if (namedGroup.equals(ng)) {
return true;
}
}
@ -516,12 +517,10 @@ enum NamedGroup {
SSLConfiguration sslConfig,
ProtocolVersion negotiatedProtocol,
AlgorithmConstraints constraints, NamedGroupSpec[] types) {
for (String name : sslConfig.namedGroups) {
NamedGroup ng = NamedGroup.nameOf(name);
if (ng != null && ng.isAvailable &&
(NamedGroupSpec.arrayContains(types, ng.spec)) &&
ng.isAvailable(negotiatedProtocol) &&
ng.isPermitted(constraints)) {
for (NamedGroup ng : SupportedGroups.getGroupsFromConfig(sslConfig)) {
if (ng.isAvailable && NamedGroupSpec.arrayContains(types, ng.spec)
&& ng.isAvailable(negotiatedProtocol)
&& ng.isPermitted(constraints)) {
return ng;
}
}
@ -857,19 +856,92 @@ enum NamedGroup {
}
}
// Inner class encapsulating supported named groups.
static final class SupportedGroups {
// the supported named groups, non-null immutable list
// Default named groups.
private static final NamedGroup[] defaultGroups = new NamedGroup[]{
// Hybrid key agreement
X25519MLKEM768,
// Primary XDH (RFC 7748) curves
X25519,
// Primary NIST Suite B curves
SECP256_R1,
SECP384_R1,
SECP521_R1,
// Secondary XDH curves
X448,
// FFDHE (RFC 7919)
FFDHE_2048,
FFDHE_3072,
FFDHE_4096,
FFDHE_6144,
FFDHE_8192
};
// Filter default group names against default constraints.
// These are the values displayed to the user with the
// "java -XshowSettings:security:tls" command.
private static final String[] defaultNames = Arrays.stream(
defaultGroups)
.filter(ng -> ng.isAvailable)
.filter(ng -> ng.isPermitted(SSLAlgorithmConstraints.DEFAULT))
.map(ng -> ng.name)
.toArray(String[]::new);
private static final NamedGroup[] customizedGroups =
getCustomizedNamedGroups();
// Note: user-passed groups are not being filtered against default
// algorithm constraints here. They will be displayed as-is.
private static final String[] customizedNames =
customizedGroups == null ?
null : Arrays.stream(customizedGroups)
.map(ng -> ng.name)
.toArray(String[]::new);
// Named group names for SSLConfiguration.
static final String[] namedGroups;
static {
// The value of the System Property defines a list of enabled named
// groups in preference order, separated with comma. For example:
//
// jdk.tls.namedGroups="secp521r1, secp256r1, ffdhe2048"
//
// If the System Property is not defined or the value is empty, the
// default groups and preferences will be used.
if (customizedNames != null) {
namedGroups = customizedNames;
} else {
if (defaultNames.length == 0) {
SSLLogger.logWarning("ssl", "No default named groups");
}
namedGroups = defaultNames;
}
}
// Avoid the group lookup for default and customized groups.
static NamedGroup[] getGroupsFromConfig(SSLConfiguration sslConfig) {
if (sslConfig.namedGroups == defaultNames) {
return defaultGroups;
} else if (sslConfig.namedGroups == customizedNames) {
return customizedGroups;
} else {
return Arrays.stream(sslConfig.namedGroups)
.map(NamedGroup::nameOf)
.filter(Objects::nonNull)
.toArray(NamedGroup[]::new);
}
}
// The value of the System Property defines a list of enabled named
// groups in preference order, separated with comma. For example:
//
// jdk.tls.namedGroups="secp521r1, secp256r1, ffdhe2048"
//
// If the System Property is not defined or the value is empty, the
// default groups and preferences will be used.
private static NamedGroup[] getCustomizedNamedGroups() {
String property = System.getProperty("jdk.tls.namedGroups");
if (property != null && !property.isEmpty()) {
// remove double quote marks from beginning/end of the property
if (property.length() > 1 && property.charAt(0) == '"' &&
@ -878,66 +950,25 @@ enum NamedGroup {
}
}
ArrayList<String> groupList;
if (property != null && !property.isEmpty()) {
String[] groups = property.split(",");
groupList = new ArrayList<>(groups.length);
for (String group : groups) {
group = group.trim();
if (!group.isEmpty()) {
NamedGroup namedGroup = nameOf(group);
if (namedGroup != null) {
if (namedGroup.isAvailable) {
groupList.add(namedGroup.name);
}
} // ignore unknown groups
}
}
NamedGroup[] ret = Arrays.stream(property.split(","))
.map(String::trim)
.map(NamedGroup::nameOf)
.filter(Objects::nonNull)
.filter(ng -> ng.isAvailable)
.toArray(NamedGroup[]::new);
if (groupList.isEmpty()) {
if (ret.length == 0) {
throw new IllegalArgumentException(
"System property jdk.tls.namedGroups(" +
property + ") contains no supported named groups");
}
} else { // default groups
NamedGroup[] groups = new NamedGroup[] {
// Hybrid key agreement
X25519MLKEM768,
// Primary XDH (RFC 7748) curves
X25519,
// Primary NIST Suite B curves
SECP256_R1,
SECP384_R1,
SECP521_R1,
// Secondary XDH curves
X448,
// FFDHE (RFC 7919)
FFDHE_2048,
FFDHE_3072,
FFDHE_4096,
FFDHE_6144,
FFDHE_8192,
};
groupList = new ArrayList<>(groups.length);
for (NamedGroup group : groups) {
if (group.isAvailable) {
groupList.add(group.name);
}
property
+ ") contains no supported named groups");
}
if (groupList.isEmpty() &&
SSLLogger.isOn() && SSLLogger.isOn("ssl")) {
SSLLogger.warning("No default named groups");
}
return ret;
}
namedGroups = groupList.toArray(new String[0]);
return null;
}
}
}
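The refactoring centralizes how the jdk.tls.namedGroups preference list is parsed. A simplified sketch of that normalization (trim each entry, drop unknown or unavailable names, keep the given order); the KNOWN list below is a stand-in for NamedGroup.nameOf() plus the availability check, not the real implementation:

```java
import java.util.Arrays;
import java.util.List;
import java.util.Locale;

// Normalize a comma-separated list such as
//   -Djdk.tls.namedGroups="secp521r1, secp256r1, ffdhe2048"
final class NamedGroupsProperty {
    private static final List<String> KNOWN =
            List.of("x25519", "secp256r1", "secp384r1", "secp521r1", "ffdhe2048");

    static String[] parse(String property) {
        return Arrays.stream(property.split(","))
                .map(String::trim)
                .map(s -> s.toLowerCase(Locale.ROOT))
                .filter(KNOWN::contains)   // stands in for nameOf() + isAvailable
                .toArray(String[]::new);
    }

    public static void main(String[] args) {
        System.out.println(Arrays.toString(parse("secp521r1, secp256r1, ffdhe2048")));
    }
}
```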

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -82,8 +82,10 @@ public class CryptoAlgorithmConstraints extends AbstractAlgorithmConstraints {
CryptoAlgorithmConstraints(String propertyName) {
super(null);
disabledServices = getAlgorithms(propertyName, true);
debug("Before " + Arrays.deepToString(disabledServices.toArray()));
for (String dk : disabledServices) {
String[] entries = disabledServices.toArray(new String[0]);
debug("Before " + Arrays.deepToString(entries));
for (String dk : entries) {
int idx = dk.indexOf(".");
if (idx < 1 || idx == dk.length() - 1) {
// wrong syntax: missing "." or empty service or algorithm
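The syntax rule being checked is that each entry has the form Service.Algorithm, so the dot may be neither the first nor the last character. A tiny sketch of that check with made-up entries:

```java
// The dot must leave a non-empty service name and a non-empty algorithm name.
final class ServiceAlgorithmSyntax {
    static boolean wellFormed(String entry) {
        int idx = entry.indexOf('.');
        return idx >= 1 && idx != entry.length() - 1;
    }

    public static void main(String[] args) {
        System.out.println(wellFormed("Cipher.AES"));  // true
        System.out.println(wellFormed(".AES"));        // false: empty service
        System.out.println(wellFormed("Cipher."));     // false: empty algorithm
    }
}
```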

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1995, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1995, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1505,6 +1505,7 @@ InitializeJVM(JavaVM **pvm, JNIEnv **penv, InvocationFunctions *ifn)
r = ifn->CreateJavaVM(pvm, (void **)penv, &args);
JLI_MemFree(options);
options = NULL;
return r == JNI_OK;
}
@ -2203,6 +2204,7 @@ FreeKnownVMs()
knownVMs[i].name = NULL;
}
JLI_MemFree(knownVMs);
knownVMs = NULL;
}
/*
@ -2276,8 +2278,9 @@ ShowSplashScreen()
(void)UnsetEnv(SPLASH_JAR_ENV_ENTRY);
JLI_MemFree(splash_jar_entry);
splash_jar_entry = NULL;
JLI_MemFree(splash_file_entry);
splash_file_entry = NULL;
}
static const char* GetFullVersion()

View File

@ -202,21 +202,21 @@ class UnixSecureDirectoryStream
{
UnixPath from = getName(fromObj);
UnixPath to = getName(toObj);
if (dir == null)
throw new NullPointerException();
if (!(dir instanceof UnixSecureDirectoryStream))
if (dir != null && !(dir instanceof UnixSecureDirectoryStream))
throw new ProviderMismatchException();
UnixSecureDirectoryStream that = (UnixSecureDirectoryStream)dir;
int todfd = that != null ? that.dfd : AT_FDCWD;
// lock ordering doesn't matter
this.ds.readLock().lock();
try {
that.ds.readLock().lock();
if (that != null)
that.ds.readLock().lock();
try {
if (!this.ds.isOpen() || !that.ds.isOpen())
if (!this.ds.isOpen() || (that != null && !that.ds.isOpen()))
throw new ClosedDirectoryStreamException();
try {
renameat(this.dfd, from.asByteArray(), that.dfd, to.asByteArray());
renameat(this.dfd, from.asByteArray(), todfd, to.asByteArray());
} catch (UnixException x) {
if (x.errno() == EXDEV) {
throw new AtomicMoveNotSupportedException(
@ -225,7 +225,8 @@ class UnixSecureDirectoryStream
x.rethrowAsIOException(from, to);
}
} finally {
that.ds.readLock().unlock();
if (that != null)
that.ds.readLock().unlock();
}
} finally {
this.ds.readLock().unlock();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, JetBrains s.r.o.. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -29,5 +29,6 @@
@interface NavigableTextAccessibility : CommonComponentAccessibility <NSAccessibilityNavigableStaticText>
@property(readonly) BOOL accessibleIsPasswordText;
@property BOOL announceEditUpdates;
@end

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, JetBrains s.r.o.. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -60,6 +60,22 @@ static jmethodID sjm_getAccessibleEditableText = NULL;
return [fJavaRole isEqualToString:@"passwordtext"];
}
- (id)init {
self = [super init];
if (self) {
_announceEditUpdates = YES;
}
return self;
}
- (void)suppressEditUpdates {
_announceEditUpdates = NO;
}
- (void)resumeEditUpdates {
_announceEditUpdates = YES;
}
// NSAccessibilityElement protocol methods
- (NSRect)accessibilityFrameForRange:(NSRange)range
@ -117,6 +133,9 @@ static jmethodID sjm_getAccessibleEditableText = NULL;
- (NSString *)accessibilityStringForRange:(NSRange)range
{
if (!_announceEditUpdates) {
return @"";
}
JNIEnv *env = [ThreadUtilities getJNIEnv];
GET_CACCESSIBLETEXT_CLASS_RETURN(nil);
DECLARE_STATIC_METHOD_RETURN(jm_getStringForRange, sjc_CAccessibleText, "getStringForRange",
@ -306,6 +325,12 @@ static jmethodID sjm_getAccessibleEditableText = NULL;
return [super accessibilityParent];
}
- (void)postSelectedTextChanged
{
[super postSelectedTextChanged];
[self resumeEditUpdates];
}
/*
* Other text methods
- (NSRange)accessibilitySharedCharacterRange;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,6 +24,7 @@
*/
#import "SpinboxAccessibility.h"
#import "ThreadUtilities.h"
#define INCREMENT 0
#define DECREMENT 1
@ -44,7 +45,15 @@
- (id _Nullable)accessibilityValue
{
return [super accessibilityValue];
id val = [super accessibilityValue];
NSArray *clist = [super accessibilityChildren];
for (NSUInteger i = 0; i < [clist count]; i++) {
id child = [clist objectAtIndex:i];
if ([child conformsToProtocol:@protocol(NSAccessibilityNavigableStaticText)]) {
val = [child accessibilityValue];
}
}
return val;
}
- (BOOL)accessibilityPerformIncrement
@ -68,4 +77,18 @@
return [super accessibilityParent];
}
- (void)postValueChanged
{
AWT_ASSERT_APPKIT_THREAD;
NSAccessibilityPostNotification(self, NSAccessibilityValueChangedNotification);
NSArray *clist = [super accessibilityChildren];
for (NSUInteger i = 0; i < [clist count]; i++) {
id child = [clist objectAtIndex:i];
if ([child conformsToProtocol:@protocol(NSAccessibilityNavigableStaticText)]) {
NSAccessibilityPostNotification(child, NSAccessibilityLayoutChangedNotification);
[child suppressEditUpdates];
}
}
}
@end

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,7 +32,6 @@ import java.util.ServiceLoader;
import javax.print.attribute.AttributeSet;
import sun.awt.AppContext;
/**
* Implementations of this class provide lookup services for print services
@ -58,35 +57,14 @@ public abstract class PrintServiceLookup {
protected PrintServiceLookup() {}
/**
* Contains a lists of services.
* The list of lookup services.
*/
static class Services {
/**
* The list of lookup services.
*/
private ArrayList<PrintServiceLookup> listOfLookupServices = null;
/**
* The list of registered services.
*/
private ArrayList<PrintService> registeredServices = null;
}
private static ArrayList<PrintServiceLookup> listOfLookupServices = null;
/**
* Returns the services from the current appcontext.
*
* @return the services
* The list of registered services.
*/
private static Services getServicesForContext() {
Services services =
(Services)AppContext.getAppContext().get(Services.class);
if (services == null) {
services = new Services();
AppContext.getAppContext().put(Services.class, services);
}
return services;
}
private static ArrayList<PrintService> registeredServices = null;
/**
* Returns the list of lookup services.
@ -94,7 +72,7 @@ public abstract class PrintServiceLookup {
* @return the list of lookup services
*/
private static ArrayList<PrintServiceLookup> getListOfLookupServices() {
return getServicesForContext().listOfLookupServices;
return listOfLookupServices;
}
/**
@ -103,8 +81,7 @@ public abstract class PrintServiceLookup {
* @return the list of lookup services
*/
private static ArrayList<PrintServiceLookup> initListOfLookupServices() {
ArrayList<PrintServiceLookup> listOfLookupServices = new ArrayList<>();
getServicesForContext().listOfLookupServices = listOfLookupServices;
listOfLookupServices = new ArrayList<>();
return listOfLookupServices;
}
@ -114,7 +91,7 @@ public abstract class PrintServiceLookup {
* @return the list of registered services
*/
private static ArrayList<PrintService> getRegisteredServices() {
return getServicesForContext().registeredServices;
return registeredServices;
}
/**
@ -123,8 +100,7 @@ public abstract class PrintServiceLookup {
* @return the list of registered services
*/
private static ArrayList<PrintService> initRegisteredServices() {
ArrayList<PrintService> registeredServices = new ArrayList<>();
getServicesForContext().registeredServices = registeredServices;
registeredServices = new ArrayList<>();
return registeredServices;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -33,8 +33,6 @@ import java.util.ServiceLoader;
import javax.print.attribute.PrintRequestAttributeSet;
import sun.awt.AppContext;
/**
* A {@code StreamPrintServiceFactory} is the factory for
* {@link StreamPrintService} instances, which can print to an output stream in
@ -63,24 +61,20 @@ public abstract class StreamPrintServiceFactory {
static class Services {
/**
* The list of factories which will be stored per appcontext.
* The list of factories.
*/
private ArrayList<StreamPrintServiceFactory> listOfFactories = null;
}
private static final Services SERVICES = new Services();
/**
* Returns the services from the current appcontext.
* Returns the singleton Services instance.
*
* @return the services
*/
private static Services getServices() {
Services services =
(Services)AppContext.getAppContext().get(Services.class);
if (services == null) {
services = new Services();
AppContext.getAppContext().put(Services.class, services);
}
return services;
return SERVICES;
}
/**

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -62,9 +62,6 @@ import javax.accessibility.*;
import javax.print.attribute.*;
import sun.awt.AppContext;
import sun.swing.PrintingStatus;
import sun.swing.SwingUtilities2;
import sun.swing.text.TextComponentPrintable;
@ -1097,22 +1094,16 @@ public abstract class JTextComponent extends JComponent implements Scrollable, A
return getKeymapTable().get(nm);
}
private static HashMap<String,Keymap> getKeymapTable() {
synchronized (KEYMAP_TABLE) {
AppContext appContext = AppContext.getAppContext();
@SuppressWarnings("unchecked")
HashMap<String,Keymap> keymapTable =
(HashMap<String,Keymap>)appContext.get(KEYMAP_TABLE);
if (keymapTable == null) {
keymapTable = new HashMap<String,Keymap>(17);
appContext.put(KEYMAP_TABLE, keymapTable);
//initialize default keymap
Keymap binding = addKeymap(DEFAULT_KEYMAP, null);
binding.setDefaultAction(new
DefaultEditorKit.DefaultKeyTypedAction());
}
return keymapTable;
private static HashMap<String,Keymap> keymapTable;
private static synchronized HashMap<String,Keymap> getKeymapTable() {
if (keymapTable == null) {
keymapTable = new HashMap<String,Keymap>(17);
//initialize default keymap
Keymap binding = addKeymap(DEFAULT_KEYMAP, null);
binding.setDefaultAction(new DefaultEditorKit.DefaultKeyTypedAction());
}
return keymapTable;
}
/**
@ -1653,7 +1644,7 @@ public abstract class JTextComponent extends JComponent implements Scrollable, A
public void removeNotify() {
super.removeNotify();
if (getFocusedComponent() == this) {
AppContext.getAppContext().remove(FOCUSED_COMPONENT);
focusedComponent = null;
}
}
@ -4084,13 +4075,14 @@ public abstract class JTextComponent extends JComponent implements Scrollable, A
}
}
private static JTextComponent focusedComponent;
/**
* Returns the JTextComponent that most recently had focus. The returned
* value may currently have focus.
*/
static final JTextComponent getFocusedComponent() {
return (JTextComponent)AppContext.getAppContext().
get(FOCUSED_COMPONENT);
return focusedComponent;
}
@SuppressWarnings("deprecation")
@ -4105,9 +4097,6 @@ public abstract class JTextComponent extends JComponent implements Scrollable, A
return modifiers;
}
private static final Object KEYMAP_TABLE =
new StringBuilder("JTextComponent_KeymapTable");
//
// member variables used for on-the-spot input method
// editing style support
@ -4438,9 +4427,6 @@ public abstract class JTextComponent extends JComponent implements Scrollable, A
}
}
private static final Object FOCUSED_COMPONENT =
new StringBuilder("JTextComponent_FocusedComponent");
/**
* The default keymap that will be shared by all
* <code>JTextComponent</code> instances unless they
@ -4493,8 +4479,7 @@ public abstract class JTextComponent extends JComponent implements Scrollable, A
// --- FocusListener methods -----------------------------------
public void focusGained(FocusEvent fe) {
AppContext.getAppContext().put(FOCUSED_COMPONENT,
fe.getSource());
focusedComponent = (JTextComponent)fe.getSource();
}
public void focusLost(FocusEvent fe) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,7 +25,6 @@
package javax.swing.text;
import java.util.Vector;
import sun.awt.AppContext;
/**
* A queue of text layout tasks.
@ -36,11 +35,11 @@ import sun.awt.AppContext;
*/
public class LayoutQueue {
private static final Object DEFAULT_QUEUE = new Object();
private Vector<Runnable> tasks;
private Thread worker;
private static LayoutQueue defaultQueue;
/**
* Construct a layout queue.
*/
@ -53,15 +52,10 @@ public class LayoutQueue {
* @return the default layout queue
*/
public static LayoutQueue getDefaultQueue() {
AppContext ac = AppContext.getAppContext();
synchronized (DEFAULT_QUEUE) {
LayoutQueue defaultQueue = (LayoutQueue) ac.get(DEFAULT_QUEUE);
if (defaultQueue == null) {
defaultQueue = new LayoutQueue();
ac.put(DEFAULT_QUEUE, defaultQueue);
}
return defaultQueue;
if (defaultQueue == null) {
defaultQueue = new LayoutQueue();
}
return defaultQueue;
}
/**
@ -70,9 +64,7 @@ public class LayoutQueue {
* @param q the new queue.
*/
public static void setDefaultQueue(LayoutQueue q) {
synchronized (DEFAULT_QUEUE) {
AppContext.getAppContext().put(DEFAULT_QUEUE, q);
}
defaultQueue = q;
}
/**

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -92,7 +92,6 @@ import javax.swing.text.ViewFactory;
import javax.swing.text.html.parser.ParserDelegator;
import sun.swing.SwingAccessor;
import sun.awt.AppContext;
import static java.nio.charset.StandardCharsets.ISO_8859_1;
@ -432,11 +431,7 @@ public class HTMLEditorKit extends StyledEditorKit implements Accessible {
* @param s a StyleSheet
*/
public void setStyleSheet(StyleSheet s) {
if (s == null) {
AppContext.getAppContext().remove(DEFAULT_STYLES_KEY);
} else {
AppContext.getAppContext().put(DEFAULT_STYLES_KEY, s);
}
defaultStyles = s;
}
/**
@ -448,12 +443,8 @@ public class HTMLEditorKit extends StyledEditorKit implements Accessible {
* @return the StyleSheet
*/
public StyleSheet getStyleSheet() {
AppContext appContext = AppContext.getAppContext();
StyleSheet defaultStyles = (StyleSheet) appContext.get(DEFAULT_STYLES_KEY);
if (defaultStyles == null) {
defaultStyles = new StyleSheet();
appContext.put(DEFAULT_STYLES_KEY, defaultStyles);
try (InputStream is = HTMLEditorKit.getResourceAsStream(DEFAULT_CSS);
InputStreamReader isr = new InputStreamReader(is, ISO_8859_1);
Reader r = new BufferedReader(isr))
@ -692,6 +683,7 @@ public class HTMLEditorKit extends StyledEditorKit implements Accessible {
private static final ViewFactory defaultFactory = new HTMLFactory();
MutableAttributeSet input;
private static StyleSheet defaultStyles = null;
private static final Object DEFAULT_STYLES_KEY = new Object();
private LinkController linkHandler = new LinkController();
private static Parser defaultParser = null;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,8 +25,6 @@
package javax.swing.text.html.parser;
import sun.awt.AppContext;
import java.io.PrintStream;
import java.io.File;
import java.io.FileInputStream;
@ -403,11 +401,6 @@ class DTD implements DTDConstants {
return name;
}
/**
* The hashtable key of DTDs in AppContext.
*/
private static final Object DTD_HASH_KEY = new Object();
/**
* Put a name and appropriate DTD to hashtable.
*
@ -415,7 +408,7 @@ class DTD implements DTDConstants {
* @param dtd the DTD
*/
public static void putDTDHash(String name, DTD dtd) {
getDtdHash().put(name, dtd);
DTD_MAP.put(name, dtd);
}
/**
@ -430,27 +423,14 @@ class DTD implements DTDConstants {
*/
public static DTD getDTD(String name) throws IOException {
name = name.toLowerCase();
DTD dtd = getDtdHash().get(name);
DTD dtd = DTD_MAP.get(name);
if (dtd == null)
dtd = new DTD(name);
return dtd;
}
private static Hashtable<String, DTD> getDtdHash() {
AppContext appContext = AppContext.getAppContext();
@SuppressWarnings("unchecked")
Hashtable<String, DTD> result = (Hashtable<String, DTD>) appContext.get(DTD_HASH_KEY);
if (result == null) {
result = new Hashtable<String, DTD>();
appContext.put(DTD_HASH_KEY, result);
}
return result;
}
private static final Hashtable<String, DTD> DTD_MAP = new Hashtable<String, DTD>();
/**
* Recreates a DTD from an archived format.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,7 +28,6 @@ package javax.swing.text.html.parser;
import java.io.Serializable;
import java.util.BitSet;
import java.util.Map;
import sun.awt.AppContext;
/**
* An element as described in a DTD using the ELEMENT construct.
@ -107,17 +106,14 @@ public final class Element implements DTDConstants, Serializable {
this.name = name;
this.index = index;
if (index > getMaxIndex()) {
AppContext.getAppContext().put(MAX_INDEX_KEY, index);
maxIndex = index;
}
}
private static final Object MAX_INDEX_KEY = new Object();
private static int maxIndex = 0;
static int getMaxIndex() {
Integer value = (Integer) AppContext.getAppContext().get(MAX_INDEX_KEY);
return (value != null)
? value.intValue()
: 0;
return maxIndex;
}
/**

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,8 +24,6 @@
*/
package javax.swing.text.html.parser;
import sun.awt.AppContext;
import javax.swing.text.html.HTMLEditorKit;
import java.io.BufferedInputStream;
import java.io.IOException;
@ -52,7 +50,8 @@ import java.io.Serializable;
*/
@SuppressWarnings("serial") // Same-version serialization only
public class ParserDelegator extends HTMLEditorKit.Parser implements Serializable {
private static final Object DTD_KEY = new Object();
private static DTD dtd = null;
/**
* Sets the default DTD.
@ -62,10 +61,6 @@ public class ParserDelegator extends HTMLEditorKit.Parser implements Serializabl
}
private static synchronized DTD getDefaultDTD() {
AppContext appContext = AppContext.getAppContext();
DTD dtd = (DTD) appContext.get(DTD_KEY);
if (dtd == null) {
DTD _dtd = null;
// (PENDING) Hate having to hard code!
@ -77,10 +72,7 @@ public class ParserDelegator extends HTMLEditorKit.Parser implements Serializabl
System.out.println("Throw an exception: could not get default dtd: " + nm);
}
dtd = createDTD(_dtd, nm);
appContext.put(DTD_KEY, dtd);
}
return dtd;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -227,7 +227,7 @@ Java_sun_awt_image_ImageRepresentation_setICMpixels(JNIEnv *env, jclass cls,
/* Release the locked arrays */
(*env)->ReleasePrimitiveArrayCritical(env, jlut, srcLUT, JNI_ABORT);
(*env)->ReleasePrimitiveArrayCritical(env, jpix, srcData, JNI_ABORT);
(*env)->ReleasePrimitiveArrayCritical(env, jdata, dstData, JNI_ABORT);
(*env)->ReleasePrimitiveArrayCritical(env, jdata, dstData, 0);
return JNI_TRUE;
}
@ -385,7 +385,7 @@ Java_sun_awt_image_ImageRepresentation_setDiffICM(JNIEnv *env, jclass cls,
}
(*env)->ReleasePrimitiveArrayCritical(env, jpix, srcData, JNI_ABORT);
(*env)->ReleasePrimitiveArrayCritical(env, jdata, dstData, JNI_ABORT);
(*env)->ReleasePrimitiveArrayCritical(env, jdata, dstData, 0);
return JNI_TRUE;
}

View File

@ -174,6 +174,30 @@ public final class AttributeClass {
}
/**
* Returns 3 int values.
* xres, yres, resolution as either dpi or dpcm
* The resolution is just a single byte of data.
*/
public int[] getIntResolutionValue() {
int[] res = {0, 0, 0};
byte[] bufArray = (byte[])myValue;
if (bufArray != null) {
int nBytes = 4; // 32-bit signed integer
for (int j=0; j<2; j++) { // 2 set of integers
byte[] intBytes = new byte[nBytes];
// REMIND: # bytes should be 8
for (int i=0; i< nBytes; i++) {
//+ 1 because the 1st byte is length
intBytes[i] = bufArray[i+(4*j)+1];
}
res[j] = convertToInt(intBytes);
}
res[2] = (int)bufArray[9];
}
return res;
}
/**
* Returns String value.
*/
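getIntResolutionValue() decodes an IPP resolution value (RFC 8010): two 32-bit big-endian integers (cross-feed and feed resolution) followed by one unit octet, 3 for dots per inch and 4 for dots per centimetre. A minimal standalone sketch of the same decoding, assuming the 9-octet payload has already been stripped of the leading length byte that the code above skips:

```java
import java.nio.ByteBuffer;

// Decode a 9-octet IPP 'resolution' payload: xres, yres, units.
final class IppResolution {
    static int[] decode(byte[] nineOctets) {
        ByteBuffer buf = ByteBuffer.wrap(nineOctets); // big-endian by default
        int x = buf.getInt();
        int y = buf.getInt();
        int units = buf.get();                        // 3 = dpi, 4 = dpcm
        return new int[] { x, y, units };
    }

    public static void main(String[] args) {
        byte[] v = { 0, 0, 1, 44, 0, 0, 1, 44, 3 };   // 300x300 dpi
        int[] r = decode(v);
        System.out.println(r[0] + "x" + r[1] + (r[2] == 3 ? " dpi" : " dpcm"));
    }
}
```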

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -141,7 +141,7 @@ public final class IPPPrintService implements PrintService, SunPrinterJobService
private MediaSizeName[] mediaSizeNames;
private CustomMediaSizeName[] customMediaSizeNames;
private int defaultMediaIndex;
private int[] rawResolutions = null;
private int[] ppdResolutions = null;
private PrinterResolution[] printerResolutions = null;
private boolean isCupsPrinter;
private boolean init;
@ -205,8 +205,7 @@ public final class IPPPrintService implements PrintService, SunPrinterJobService
OrientationRequested.PORTRAIT,
new PageRanges(1),
//PresentationDirection,
// CUPS does not supply printer-resolution attribute
//new PrinterResolution(300, 300, PrinterResolution.DPI),
new PrinterResolution(300, 300, PrinterResolution.DPI),
//PrintQuality.NORMAL,
new RequestingUserName("", Locale.getDefault()),
//SheetCollate.UNCOLLATED, //CUPS has no sheet collate?
@ -467,7 +466,9 @@ public final class IPPPrintService implements PrintService, SunPrinterJobService
: getSupportedOutputBins();
customMediaSizeNames = cps.getCustomMediaSizeNames();
defaultMediaIndex = cps.getDefaultMediaIndex();
rawResolutions = cps.getRawResolutions();
if (ppdResolutions == null) {
ppdResolutions = cps.getRawResolutions();
}
}
urlConnection.disconnect();
init = true;
@ -821,14 +822,7 @@ public final class IPPPrintService implements PrintService, SunPrinterJobService
}
}
} else if (category == PrinterResolution.class) {
PrinterResolution[] supportedRes = getPrintResolutions();
if (supportedRes == null) {
return null;
}
PrinterResolution []arr =
new PrinterResolution[supportedRes.length];
System.arraycopy(supportedRes, 0, arr, 0, supportedRes.length);
return arr;
return getPrintResolutions();
} else if (category == OutputBin.class) {
return Arrays.copyOf(outputBins, outputBins.length);
}
@ -1137,8 +1131,6 @@ public final class IPPPrintService implements PrintService, SunPrinterJobService
catList.add(Chromaticity.class);
}
// CUPS does not report printer resolution via IPP but it
// may be gleaned from the PPD.
PrinterResolution[] supportedRes = getPrintResolutions();
if (supportedRes != null && (supportedRes.length > 0)) {
catList.add(PrinterResolution.class);
@ -1264,7 +1256,6 @@ public final class IPPPrintService implements PrintService, SunPrinterJobService
}
}
@Override
public synchronized PrintServiceAttributeSet getAttributes() {
if (!init) {
@ -1684,9 +1675,7 @@ public final class IPPPrintService implements PrintService, SunPrinterJobService
} else if (category == PrinterResolution.class) {
PrinterResolution[] supportedRes = getPrintResolutions();
if ((supportedRes != null) && (supportedRes.length > 0)) {
return supportedRes[0];
} else {
return new PrinterResolution(300, 300, PrinterResolution.DPI);
return supportedRes[0];
}
} else if (category == OutputBin.class) {
if (attribClass != null) {
@ -1697,26 +1686,40 @@ public final class IPPPrintService implements PrintService, SunPrinterJobService
return null;
}
/* Called only from contexts that have called initAttributes().
* Try IPP first, and if that produces nothing, fall back to the PPD
*/
private PrinterResolution[] getPrintResolutions() {
int[] rawResolutions = null;
if (printerResolutions == null) {
if (rawResolutions == null) {
printerResolutions = new PrinterResolution[0];
} else {
int numRes = rawResolutions.length / 2;
PrinterResolution[] pres = new PrinterResolution[numRes];
for (int i=0; i < numRes; i++) {
pres[i] = new PrinterResolution(rawResolutions[i*2],
rawResolutions[i*2+1],
PrinterResolution.DPI);
}
printerResolutions = pres;
AttributeClass attribClass = (getAttMap != null) ?
getAttMap.get("printer-resolution-supported")
: null;
if (attribClass != null) {
rawResolutions = attribClass.getIntResolutionValue();
}
if (rawResolutions == null) {
rawResolutions = ppdResolutions;
}
if (rawResolutions == null) {
rawResolutions = new int[] { 300, 300, 3 } ;
}
int numRes = rawResolutions.length / 3;
PrinterResolution[] pres = new PrinterResolution[numRes];
for (int i = 0; i < numRes; i++) {
int units = (rawResolutions[i*3+2] == 4) ? PrinterResolution.DPCM : PrinterResolution.DPI;
pres[i] = new PrinterResolution(rawResolutions[i*3],
rawResolutions[i*3+1],
units);
}
printerResolutions = pres;
}
return printerResolutions;
return printerResolutions.clone();
}
private boolean isSupportedResolution(PrinterResolution res) {
PrinterResolution[] supportedRes = getPrintResolutions();
PrinterResolution[] supportedRes =
(PrinterResolution[])getSupportedAttributeValues(PrinterResolution.class, null, null);
if (supportedRes != null) {
for (int i=0; i<supportedRes.length; i++) {
if (res.equals(supportedRes[i])) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -52,6 +52,8 @@ typedef ppd_file_t* (*fn_ppdOpenFile)(const char *);
typedef void (*fn_ppdClose)(ppd_file_t *);
typedef ppd_option_t* (*fn_ppdFindOption)(ppd_file_t *, const char *);
typedef ppd_size_t* (*fn_ppdPageSize)(ppd_file_t *, char *);
typedef ppd_attr_t* (*fn_ppdFindAttr)(ppd_file_t *, const char *name, const char *spec);
typedef ppd_attr_t* (*fn_ppdFindNextAttr)(ppd_file_t *, const char *name, const char *spec);
fn_cupsServer j2d_cupsServer;
fn_ippPort j2d_ippPort;
@ -64,6 +66,8 @@ fn_cupsFreeDests j2d_cupsFreeDests;
fn_ppdOpenFile j2d_ppdOpenFile;
fn_ppdClose j2d_ppdClose;
fn_ppdFindOption j2d_ppdFindOption;
fn_ppdFindAttr j2d_ppdFindAttr;
fn_ppdFindNextAttr j2d_ppdFindNextAttr;
fn_ppdPageSize j2d_ppdPageSize;
@ -152,6 +156,18 @@ Java_sun_print_CUPSPrinter_initIDs(JNIEnv *env,
return JNI_FALSE;
}
j2d_ppdFindAttr = (fn_ppdFindAttr)dlsym(handle, "ppdFindAttr");
if (j2d_ppdFindAttr == NULL) {
dlclose(handle);
return JNI_FALSE;
}
j2d_ppdFindNextAttr = (fn_ppdFindNextAttr)dlsym(handle, "ppdFindNextAttr");
if (j2d_ppdFindNextAttr == NULL) {
dlclose(handle);
return JNI_FALSE;
}
j2d_ppdPageSize = (fn_ppdPageSize)dlsym(handle, "ppdPageSize");
if (j2d_ppdPageSize == NULL) {
dlclose(handle);
@ -636,6 +652,9 @@ Java_sun_print_CUPSPrinter_getResolutions(JNIEnv *env,
return;
}
// IPP value of 3 means DPI, 4 means dpcm
jobject dpi = (*env)->NewObject(env, intCls, intCtr, 3);
CHECK_NULL(dpi);
// NOTE: cupsGetPPD returns a pointer to a filename of a temporary file.
// unlink() must be called to remove the file after using it.
@ -672,6 +691,7 @@ Java_sun_print_CUPSPrinter_getResolutions(JNIEnv *env,
CHECK_NULL(ryObj);
(*env)->CallBooleanMethod(env, arrayList, arrListAddMID, rxObj);
(*env)->CallBooleanMethod(env, arrayList, arrListAddMID, ryObj);
(*env)->CallBooleanMethod(env, arrayList, arrListAddMID, dpi);
}
for (i = 0; i < resolution->num_choices; i++) {
@ -700,6 +720,41 @@ Java_sun_print_CUPSPrinter_getResolutions(JNIEnv *env,
CHECK_NULL(ryObj);
(*env)->CallBooleanMethod(env, arrayList, arrListAddMID, rxObj);
(*env)->CallBooleanMethod(env, arrayList, arrListAddMID, ryObj);
(*env)->CallBooleanMethod(env, arrayList, arrListAddMID, dpi);
}
}
} else {
ppd_attr_t *defresolution = j2d_ppdFindAttr(ppd, "DefaultResolution", NULL);
if (defresolution == NULL) {
defresolution = j2d_ppdFindAttr(ppd, "Resolution", NULL);
}
if (defresolution != NULL) {
int matches = sscanf(defresolution->value, "%dx%ddpi", &defx, &defy);
if (matches == 2) {
if (defx <= 0 || defy <= 0) {
defx = 0;
defy = 0;
}
} else {
matches = sscanf(defresolution->value, "%ddpi", &defx);
if (matches == 1) {
if (defx <= 0) {
defx = 0;
} else {
defy = defx;
}
}
}
if (defx > 0) {
jobject rxObj, ryObj;
rxObj = (*env)->NewObject(env, intCls, intCtr, defx);
CHECK_NULL(rxObj);
ryObj = (*env)->NewObject(env, intCls, intCtr, defy);
CHECK_NULL(ryObj);
(*env)->CallBooleanMethod(env, arrayList, arrListAddMID, rxObj);
(*env)->CallBooleanMethod(env, arrayList, arrListAddMID, ryObj);
(*env)->CallBooleanMethod(env, arrayList, arrListAddMID, dpi);
}
}
}
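When the PPD carries no Resolution UI option, the new fallback parses the DefaultResolution (or Resolution) attribute value, which is typically written as "600x600dpi" or "1200dpi". An illustrative Java parser mirroring the two sscanf patterns; it is a hypothetical helper, not JDK code:

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Parse "NxMdpi" or "Ndpi"; a single number is used for both axes.
final class PpdResolution {
    private static final Pattern XY = Pattern.compile("(\\d+)x(\\d+)dpi");
    private static final Pattern SINGLE = Pattern.compile("(\\d+)dpi");

    static int[] parse(String value) {
        Matcher m = XY.matcher(value);
        if (m.matches()) {
            return new int[] { Integer.parseInt(m.group(1)), Integer.parseInt(m.group(2)) };
        }
        m = SINGLE.matcher(value);
        if (m.matches()) {
            int r = Integer.parseInt(m.group(1));
            return new int[] { r, r };
        }
        return null; // unrecognized format, like defx staying 0 in the C code
    }

    public static void main(String[] args) {
        System.out.println(java.util.Arrays.toString(parse("600x600dpi")));
        System.out.println(java.util.Arrays.toString(parse("1200dpi")));
    }
}
```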

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -550,14 +550,33 @@ uintptr_t search_symbol(struct symtab* symtab, uintptr_t base,
return (uintptr_t) NULL;
}
static bool is_in(uintptr_t offset, struct elf_symbol* sym) {
if (sym->size == 0 && offset == sym->offset) {
// offset points to the start of the symbol.
// Some functions have size 0. For example, __restore_rt() (the signal
// trampoline in glibc) would incorrectly be treated as outside the function,
// even when offset points exactly at its start address, because the size of
// __restore_rt() is 0 (you can see this with "readelf -s libc.so.6" when
// debug symbols are available).
// Hence we treat this as a special case: when the function size is 0,
// only the exact symbol address is considered inside.
return true;
} else if (offset >= sym->offset && offset < sym->offset + sym->size) {
// offset is in address range of the symbol
return true;
}
// offset is out of address range of the symbol
return false;
}
const char* nearest_symbol(struct symtab* symtab, uintptr_t offset,
uintptr_t* poffset) {
int n = 0;
if (!symtab) return NULL;
for (; n < symtab->num_symbols; n++) {
struct elf_symbol* sym = &(symtab->symbols[n]);
if (sym->name != NULL &&
offset >= sym->offset && offset < sym->offset + sym->size) {
if (sym->name != NULL && is_in(offset, sym)) {
if (poffset) *poffset = (offset - sym->offset);
return sym->name;
}
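The containment rule added in is_in() can be restated simply: a zero-sized symbol (such as glibc's __restore_rt signal trampoline) matches only at its exact start address, while ordinary symbols match anywhere in [offset, offset + size). A small sketch of that rule:

```java
// Restatement of the symbol-containment rule, with made-up addresses.
final class SymbolContains {
    static boolean contains(long symOffset, long symSize, long addr) {
        if (symSize == 0) {
            return addr == symOffset;                      // exact match only
        }
        return addr >= symOffset && addr < symOffset + symSize;
    }

    public static void main(String[] args) {
        System.out.println(contains(0x1000, 0, 0x1000));    // true
        System.out.println(contains(0x1000, 0, 0x1001));    // false
        System.out.println(contains(0x2000, 0x40, 0x2010)); // true
    }
}
```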

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -33,6 +33,14 @@ import sun.jvm.hotspot.debugger.cdbg.*;
by the architecture-specific subpackages. */
public interface LinuxDebugger extends JVMDebugger {
// SIGHANDLER_NAMES holds the name of signal handler.
public static final List<String> SIGHANDLER_NAMES = List.of(
// For AMD64
// - sysdeps/unix/sysv/linux/x86_64/libc_sigaction.c in glibc
// - gdb/amd64-linux-tdep.c in GDB
"__restore_rt"
);
public String addressValueToString(long address) throws DebuggerException;
public boolean readJBoolean(long address) throws DebuggerException;
public byte readJByte(long address) throws DebuggerException;
@ -52,6 +60,7 @@ public interface LinuxDebugger extends JVMDebugger {
public long[] getThreadIntegerRegisterSet(int lwp_id) throws DebuggerException;
public long getAddressValue(Address addr) throws DebuggerException;
public Address findLibPtrByAddress(Address pc);
public boolean isSignalTrampoline(Address pc);
// For LinuxCDebugger
public List<ThreadProxy> getThreadList();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -130,6 +130,12 @@ public class LinuxDebuggerLocal extends DebuggerBase implements LinuxDebugger {
: new LinuxAddress(this, ptr);
}
@Override
public boolean isSignalTrampoline(Address pc) {
var sym = lookup(getAddressValue(pc));
return sym == null ? false : SIGHANDLER_NAMES.contains(sym.getName());
}
// Note on Linux threads are really processes. When target process is
// attached by a serviceability agent thread, only that thread can do
// ptrace operations on the target. This is because from kernel's point

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,9 +24,12 @@
package sun.jvm.hotspot.debugger.linux.amd64;
import java.util.function.Function;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.amd64.*;
import sun.jvm.hotspot.debugger.linux.*;
import sun.jvm.hotspot.debugger.linux.amd64.*;
import sun.jvm.hotspot.debugger.cdbg.*;
import sun.jvm.hotspot.debugger.cdbg.basic.*;
import sun.jvm.hotspot.runtime.*;
@ -34,6 +37,36 @@ import sun.jvm.hotspot.runtime.amd64.*;
public final class LinuxAMD64CFrame extends BasicCFrame {
private static LinuxAMD64CFrame getFrameFromReg(LinuxDebugger dbg, Function<Integer, Address> getreg) {
Address rip = getreg.apply(AMD64ThreadContext.RIP);
Address rsp = getreg.apply(AMD64ThreadContext.RSP);
Address libptr = dbg.findLibPtrByAddress(rip);
Address cfa = getreg.apply(AMD64ThreadContext.RBP);
DwarfParser dwarf = null;
if (libptr != null) { // Native frame
dwarf = new DwarfParser(libptr);
try {
dwarf.processDwarf(rip);
} catch (DebuggerException e) {
// DWARF processing should succeed when the frame is native,
// but it might fail if the Common Information Entry (CIE) has a
// language-specific personality routine and/or a Language Specific Data Area (LSDA).
return new LinuxAMD64CFrame(dbg, rsp, cfa, rip, dwarf, true);
}
cfa = getreg.apply(dwarf.getCFARegister())
.addOffsetTo(dwarf.getCFAOffset());
}
return (cfa == null) ? null
: new LinuxAMD64CFrame(dbg, rsp, cfa, rip, dwarf);
}
public static LinuxAMD64CFrame getTopFrame(LinuxDebugger dbg, Address rip, ThreadContext context) {
return getFrameFromReg(dbg, context::getRegisterAsAddress);
}
public static LinuxAMD64CFrame getTopFrame(LinuxDebugger dbg, Address rsp, Address rip, ThreadContext context) {
Address libptr = dbg.findLibPtrByAddress(rip);
Address cfa = context.getRegisterAsAddress(AMD64ThreadContext.RBP);
@ -80,8 +113,12 @@ public final class LinuxAMD64CFrame extends BasicCFrame {
// override base class impl to avoid ELF parsing
public ClosestSymbol closestSymbolToPC() {
Address symAddr = use1ByteBeforeToLookup ? pc().addOffsetTo(-1) : pc();
// try native lookup in debugger.
return dbg.lookup(dbg.getAddressValue(symAddr));
// Returns a special symbol if the address is a signal trampoline;
// otherwise returns the closest symbol found by the LinuxDebugger.
return dbg.isSignalTrampoline(symAddr)
? new ClosestSymbol("<signal handler called>", 0)
: dbg.lookup(dbg.getAddressValue(symAddr));
}
public Address pc() {
@ -159,7 +196,12 @@ public final class LinuxAMD64CFrame extends BasicCFrame {
return null;
}
return isValidFrame(nextCFA, isNative) ? nextCFA : null;
if (dbg.isSignalTrampoline(senderPC)) {
// Return without the frame validity check if the sender is a signal trampoline.
return nextCFA;
} else {
return isValidFrame(nextCFA, isNative) ? nextCFA : null;
}
}
@Override
@ -173,6 +215,12 @@ public final class LinuxAMD64CFrame extends BasicCFrame {
return null;
}
if (dbg.isSignalTrampoline(pc())) {
// RSP points to the signal context
// https://github.com/torvalds/linux/blob/v6.17/arch/x86/kernel/signal.c#L94
return getFrameFromReg(dbg, r -> LinuxAMD64ThreadContext.getRegFromSignalTrampoline(this.rsp, r.intValue()));
}
ThreadContext context = th.getContext();
Address nextRSP = sp != null ? sp : getNextRSP();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,6 +27,7 @@ package sun.jvm.hotspot.debugger.linux.amd64;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.debugger.amd64.*;
import sun.jvm.hotspot.debugger.linux.*;
import sun.jvm.hotspot.runtime.*;
public class LinuxAMD64ThreadContext extends AMD64ThreadContext {
private LinuxDebugger debugger;
@ -43,4 +44,23 @@ public class LinuxAMD64ThreadContext extends AMD64ThreadContext {
public Address getRegisterAsAddress(int index) {
return debugger.newAddress(getRegister(index));
}
public static Address getRegFromSignalTrampoline(Address sp, int index) {
// ucontext_t is located at the top of the stack.
// See the definition of rt_sigframe in arch/x86/include/asm/sigframe.h
// in the Linux kernel.
Address addrUCMContext = sp.addOffsetTo(40); // offsetof(ucontext_t, uc_mcontext) = 40
Address addrGRegs = addrUCMContext; // gregs is located at the top of uc_mcontext
// These register indices come from sys/ucontext.h
final int REG_RBP = 10;
final int REG_RSP = 15;
final int REG_RIP = 16;
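// Address arithmetic sketch (AMD64, 8-byte registers): general register i is read
// from sp + 40 + i * getAddressSize() bytes, per the offsets noted above.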
return switch(index) {
case AMD64ThreadContext.RBP -> addrGRegs.getAddressAt(REG_RBP * VM.getVM().getAddressSize());
case AMD64ThreadContext.RSP -> addrGRegs.getAddressAt(REG_RSP * VM.getVM().getAddressSize());
case AMD64ThreadContext.RIP -> addrGRegs.getAddressAt(REG_RIP * VM.getVM().getAddressSize());
default -> throw new IllegalArgumentException("Unsupported register index: " + index);
};
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -43,7 +43,6 @@ import com.sun.source.tree.NewClassTree;
import com.sun.source.tree.Scope;
import com.sun.source.tree.Tree;
import com.sun.source.tree.Tree.Kind;
import static com.sun.source.tree.Tree.Kind.METHOD;
import com.sun.source.tree.TypeParameterTree;
import com.sun.source.tree.VariableTree;
import com.sun.source.tree.YieldTree;
@ -81,6 +80,7 @@ import java.util.Iterator;
import java.util.List;
import java.util.Objects;
import java.util.function.Predicate;
import java.util.TreeSet;
import javax.lang.model.element.Element;
import javax.lang.model.element.ElementKind;
@ -114,7 +114,6 @@ import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.function.BiConsumer;
@ -149,14 +148,10 @@ import javax.tools.JavaFileManager.Location;
import javax.tools.StandardLocation;
import jdk.jshell.ExpressionToTypeInfo.ExpressionInfo;
import static jdk.jshell.Util.REPL_DOESNOTMATTER_CLASS_NAME;
import static jdk.jshell.SourceCodeAnalysis.Completeness.DEFINITELY_INCOMPLETE;
import static jdk.jshell.TreeDissector.printType;
import static java.util.stream.Collectors.joining;
import static javax.lang.model.element.ElementKind.CONSTRUCTOR;
import static javax.lang.model.element.ElementKind.MODULE;
import static javax.lang.model.element.ElementKind.PACKAGE;
import javax.lang.model.type.IntersectionType;
import javax.lang.model.util.Elements;
@ -815,7 +810,7 @@ class SourceCodeAnalysisImpl extends SourceCodeAnalysis {
};
String wrappedCode = codeWrap.wrapped();
return this.proc.taskFactory.analyze(codeWrap, task -> {
List<Highlight> result = new ArrayList<>();
TreeSet<Highlight> result = new TreeSet<>(Comparator.comparing(Highlight::start).thenComparing(Highlight::end));
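// The TreeSet keeps highlights ordered by (start, end) and drops entries with
// identical ranges, replacing the explicit sort that used to run at the end.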
CompilationUnitTree cut = task.cuTrees().iterator().next();
Trees trees = task.trees();
SourcePositions sp = trees.getSourcePositions();
@ -1050,8 +1045,7 @@ class SourceCodeAnalysisImpl extends SourceCodeAnalysis {
}
}.scan(cut, null);
result.removeIf(h -> h.start() == h.end());
Collections.sort(result, (h1, h2) -> h1.start() - h2.start());
return result;
return new ArrayList<>(result);
});
}

View File

@ -201,7 +201,9 @@ TEST_VM_F(ShenandoahOldHeuristicTest, prime_one_old_region) {
size_t garbage = make_garbage_above_collection_threshold(10);
_heuristics->prepare_for_old_collections();
_heuristics->prime_collection_set(_collection_set);
if (_heuristics->prime_collection_set(_collection_set)) {
_heuristics->finalize_mixed_evacs();
}
EXPECT_TRUE(collection_set_is(10UL));
EXPECT_EQ(garbage, _collection_set->get_old_garbage());
@ -214,7 +216,9 @@ TEST_VM_F(ShenandoahOldHeuristicTest, prime_many_old_regions) {
size_t g1 = make_garbage_above_collection_threshold(100);
size_t g2 = make_garbage_above_collection_threshold(101);
_heuristics->prepare_for_old_collections();
_heuristics->prime_collection_set(_collection_set);
if (_heuristics->prime_collection_set(_collection_set)) {
_heuristics->finalize_mixed_evacs();
}
EXPECT_TRUE(collection_set_is(100UL, 101UL));
EXPECT_EQ(g1 + g2, _collection_set->get_old_garbage());
@ -226,7 +230,9 @@ TEST_VM_F(ShenandoahOldHeuristicTest, require_multiple_mixed_evacuations) {
size_t garbage = create_too_much_garbage_for_one_mixed_evacuation();
_heuristics->prepare_for_old_collections();
_heuristics->prime_collection_set(_collection_set);
if (_heuristics->prime_collection_set(_collection_set)) {
_heuristics->finalize_mixed_evacs();
}
EXPECT_LT(_collection_set->get_old_garbage(), garbage);
EXPECT_GT(_heuristics->unprocessed_old_collection_candidates(), 0UL);
@ -248,7 +254,9 @@ TEST_VM_F(ShenandoahOldHeuristicTest, skip_pinned_regions) {
ASSERT_EQ(3UL, _heuristics->unprocessed_old_collection_candidates());
// Here the region is still pinned, so it cannot be added to the collection set.
_heuristics->prime_collection_set(_collection_set);
if (_heuristics->prime_collection_set(_collection_set)) {
_heuristics->finalize_mixed_evacs();
}
// The two unpinned regions should be added to the collection set and the pinned
// region should be retained at the front of the list of candidates as it would be
@ -261,7 +269,9 @@ TEST_VM_F(ShenandoahOldHeuristicTest, skip_pinned_regions) {
// the now unpinned region should be added to the collection set.
make_unpinned(1);
_collection_set->clear();
_heuristics->prime_collection_set(_collection_set);
if (_heuristics->prime_collection_set(_collection_set)) {
_heuristics->finalize_mixed_evacs();
}
EXPECT_EQ(_collection_set->get_old_garbage(), g2);
EXPECT_TRUE(collection_set_is(1UL));
@ -278,14 +288,18 @@ TEST_VM_F(ShenandoahOldHeuristicTest, pinned_region_is_first) {
make_pinned(0);
_heuristics->prepare_for_old_collections();
_heuristics->prime_collection_set(_collection_set);
if (_heuristics->prime_collection_set(_collection_set)) {
_heuristics->finalize_mixed_evacs();
}
EXPECT_TRUE(collection_set_is(1UL, 2UL));
EXPECT_EQ(_heuristics->unprocessed_old_collection_candidates(), 1UL);
make_unpinned(0);
_collection_set->clear();
_heuristics->prime_collection_set(_collection_set);
if (_heuristics->prime_collection_set(_collection_set)) {
_heuristics->finalize_mixed_evacs();
}
EXPECT_TRUE(collection_set_is(0UL));
EXPECT_EQ(_heuristics->unprocessed_old_collection_candidates(), 0UL);
@ -301,7 +315,9 @@ TEST_VM_F(ShenandoahOldHeuristicTest, pinned_region_is_last) {
make_pinned(2);
_heuristics->prepare_for_old_collections();
_heuristics->prime_collection_set(_collection_set);
if (_heuristics->prime_collection_set(_collection_set)) {
_heuristics->finalize_mixed_evacs();
}
EXPECT_TRUE(collection_set_is(0UL, 1UL));
EXPECT_EQ(_collection_set->get_old_garbage(), g1 + g2);
@ -309,7 +325,9 @@ TEST_VM_F(ShenandoahOldHeuristicTest, pinned_region_is_last) {
make_unpinned(2);
_collection_set->clear();
_heuristics->prime_collection_set(_collection_set);
if (_heuristics->prime_collection_set(_collection_set)) {
_heuristics->finalize_mixed_evacs();
}
EXPECT_TRUE(collection_set_is(2UL));
EXPECT_EQ(_collection_set->get_old_garbage(), g3);
@ -327,7 +345,9 @@ TEST_VM_F(ShenandoahOldHeuristicTest, unpinned_region_is_middle) {
make_pinned(0);
make_pinned(2);
_heuristics->prepare_for_old_collections();
_heuristics->prime_collection_set(_collection_set);
if (_heuristics->prime_collection_set(_collection_set)) {
_heuristics->finalize_mixed_evacs();
}
EXPECT_TRUE(collection_set_is(1UL));
EXPECT_EQ(_collection_set->get_old_garbage(), g2);
@ -336,7 +356,9 @@ TEST_VM_F(ShenandoahOldHeuristicTest, unpinned_region_is_middle) {
make_unpinned(0);
make_unpinned(2);
_collection_set->clear();
_heuristics->prime_collection_set(_collection_set);
if (_heuristics->prime_collection_set(_collection_set)) {
_heuristics->finalize_mixed_evacs();
}
EXPECT_TRUE(collection_set_is(0UL, 2UL));
EXPECT_EQ(_collection_set->get_old_garbage(), g1 + g3);
@ -354,7 +376,9 @@ TEST_VM_F(ShenandoahOldHeuristicTest, all_candidates_are_pinned) {
make_pinned(1);
make_pinned(2);
_heuristics->prepare_for_old_collections();
_heuristics->prime_collection_set(_collection_set);
if (_heuristics->prime_collection_set(_collection_set)) {
_heuristics->finalize_mixed_evacs();
}
// In the case when all candidates are pinned, we want to abandon
// this set of mixed collection candidates so that another old collection

View File

@ -1,3 +1,32 @@
#
# Copyright (c) 2024, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#############################################################################
#
# List of quarantined tests for testing in AOT_JDK mode.
#
#############################################################################
runtime/modules/PatchModule/PatchModuleClassList.java 0000000 generic-all
runtime/NMT/NMTWithCDS.java 0000000 generic-all
runtime/symbols/TestSharedArchiveConfigFile.java 0000000 generic-all

View File

@ -0,0 +1,39 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/**
* @test
* @bug 8374862
* @summary Regression test for -XX:+Verbose -XX:+WizardMode -XX:+PrintDeoptimizationDetails crash
* @requires vm.debug
* @run main/othervm -XX:+Verbose -XX:+WizardMode -XX:+PrintDeoptimizationDetails compiler.uncommontrap.TestDeoptDetailsLockRank
*/
package compiler.uncommontrap;
public class TestDeoptDetailsLockRank {
public static void main(String[] args) {
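// A trivial program body is enough: the crash being regression-tested was triggered
// by the diagnostic flag combination itself (debug VM only, see @requires vm.debug).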
System.out.println("passed");
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -143,6 +143,7 @@ public class TestJcmd {
sb.append(String.format("FROM %s:%s\n", DockerfileConfig.getBaseImageName(),
DockerfileConfig.getBaseImageVersion()));
sb.append("COPY /jdk /jdk\n");
sb.append("ENV LANG=C.UTF-8\n");
sb.append("ENV JAVA_HOME=/jdk\n");
if (!IS_PODMAN) { // only needed for docker

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -55,6 +55,8 @@ public class TestStressG1Uncommit {
public static void main(String[] args) throws Exception {
ArrayList<String> options = new ArrayList<>();
Collections.addAll(options,
"-XX:MinHeapFreeRatio=40",
"-XX:MaxHeapFreeRatio=70",
"-Xlog:gc,gc+heap+region=debug",
"-XX:+UseG1GC",
"-Xmx1g",

View File

@ -0,0 +1,73 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/*
* @test
* @bug 8304147
* @summary make sure dynamic archive does not archive array classes with incorrect values in
* Array::_secondary_supers
* @requires vm.cds
* @library /test/lib /test/hotspot/jtreg/runtime/cds/appcds
* @build ArraySuperTest jdk.test.whitebox.WhiteBox
* @run driver jdk.test.lib.helpers.ClassFileInstaller -jar ArraySuperApp.jar ArraySuperApp
* @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox
* @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:. ArraySuperTest
*/
import java.util.function.Predicate;
import jdk.test.lib.helpers.ClassFileInstaller;
public class ArraySuperTest extends DynamicArchiveTestBase {
public static void main(String[] args) throws Exception {
runTest(ArraySuperTest::test);
}
static void test() throws Exception {
String topArchiveName = getNewArchiveName();
String appJar = ClassFileInstaller.getJarPath("ArraySuperApp.jar");
String mainClass = ArraySuperApp.class.getName();
dump(topArchiveName, "-cp", appJar, mainClass).assertNormalExit();
run(topArchiveName, "-cp", appJar, "-Xshare:off", mainClass, "withDynamicArchive").assertNormalExit();
run(topArchiveName, "-cp", appJar, mainClass, "withDynamicArchive").assertNormalExit();
}
}
class ArraySuperApp implements Predicate {
static volatile Object array;
public boolean test(Object o) {
return true;
}
static void main(String args[]) {
array = new ArraySuperApp[1];
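// ArraySuperApp implements Predicate, so Predicate[] should appear among the
// secondary supers of ArraySuperApp[]; the checks below would misbehave if the
// archived array class carried stale _secondary_supers (see @summary).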
if (args.length > 0) {
Predicate[] p = new Predicate[0];
System.out.println(p.getClass().isInstance(array));
p = (Predicate[])array;
p[0] = new ArraySuperApp();
System.out.println("All tests passed");
}
}
}

View File

@ -0,0 +1,76 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2026, NTT DATA
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
import jdk.test.lib.JDKToolFinder;
import jdk.test.lib.JDKToolLauncher;
import jdk.test.lib.SA.SATestUtils;
import jdk.test.lib.Utils;
import jdk.test.lib.apps.LingeredApp;
import jdk.test.lib.process.OutputAnalyzer;
import jdk.test.lib.util.CoreUtils;
/**
* @test
* @bug 8374482
* @requires (os.family == "linux") & (vm.hasSA)
* @requires os.arch == "amd64"
* @library /test/lib
* @run driver TestJhsdbJstackMixedCore
*/
public class TestJhsdbJstackMixedCore {
private static void runJstackMixed(String coreFileName) throws Exception {
JDKToolLauncher launcher = JDKToolLauncher.createUsingTestJDK("jhsdb");
launcher.addVMArgs(Utils.getTestJavaOpts());
launcher.addToolArg("jstack");
launcher.addToolArg("--mixed");
launcher.addToolArg("--exe");
launcher.addToolArg(JDKToolFinder.getTestJDKTool("java"));
launcher.addToolArg("--core");
launcher.addToolArg(coreFileName);
ProcessBuilder pb = SATestUtils.createProcessBuilder(launcher);
Process jhsdb = pb.start();
OutputAnalyzer out = new OutputAnalyzer(jhsdb);
jhsdb.waitFor();
System.out.println(out.getStdout());
System.err.println(out.getStderr());
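// The mixed-mode stack is expected to show the synthetic signal-handler frame
// and the native crash function from LingeredApp.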
out.shouldContain("<signal handler called>");
out.shouldContain("Java_jdk_test_lib_apps_LingeredApp_crash");
}
public static void main(String... args) throws Throwable {
LingeredApp app = new LingeredApp();
app.setForceCrash(true);
LingeredApp.startApp(app, CoreUtils.getAlwaysPretouchArg(true));
app.waitAppTerminate();
String crashOutput = app.getOutput().getStdout();
String coreFileName = CoreUtils.getCoreFileLocation(crashOutput, app.getPid());
runJstackMixed(coreFileName);
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -131,6 +131,7 @@ public class LdapPoolTimeoutTest {
|| msg.contains("No route to host")
|| msg.contains("Timed out waiting for lock")
|| msg.contains("Connect timed out")
|| msg.contains("Connection timed out")
|| msg.contains("Timeout exceeded while waiting for a connection"))) {
// got the expected exception
System.out.println("Received expected NamingException with message: " + msg);

View File

@ -0,0 +1,134 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8373120
* @summary Stress test two consecutive timed Object.wait calls where only the first one is notified.
* @run main/othervm -XX:CompileCommand=exclude,java.lang.VirtualThread::afterYield NotifiedThenTimedOutWait 1 100 100
*/
/*
* @test
* @run main/othervm -XX:CompileCommand=exclude,java.lang.VirtualThread::afterYield NotifiedThenTimedOutWait 2 100 100
*/
import java.time.Instant;
import java.util.concurrent.Phaser;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadLocalRandom;
public class NotifiedThenTimedOutWait {
public static void main(String[] args) throws Exception {
int race = (args.length > 0) ? Integer.parseInt(args[0]) : 1;
int nruns = (args.length > 1) ? Integer.parseInt(args[1]) : 100;
int iterations = (args.length > 2) ? Integer.parseInt(args[2]) : 100;
for (int i = 1; i <= nruns; i++) {
System.out.println(Instant.now() + " => " + i + " of " + nruns);
switch (race) {
case 1 -> race1(iterations);
case 2 -> race2(iterations);
}
}
}
/**
* Barrier in synchronized block.
*/
private static void race1(int iterations) throws InterruptedException {
final int timeout = 1;
var lock = new Object();
var start = new Phaser(2);
var end = new Phaser(2);
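// Intended interleaving: the notify lands while the virtual thread sits in the
// first timed wait, so the second wait has to time out on its own (see @summary).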
var vthread = Thread.ofVirtual().start(() -> {
try {
for (int j = 0; j < iterations; j++) {
synchronized (lock) {
start.arriveAndAwaitAdvance();
lock.wait(timeout);
lock.wait(timeout);
}
end.arriveAndAwaitAdvance();
}
} catch (InterruptedException e) {
e.printStackTrace();
}
});
ThreadFactory factory = ThreadLocalRandom.current().nextBoolean()
? Thread.ofPlatform().factory() : Thread.ofVirtual().factory();
var notifier = factory.newThread(() -> {
for (int j = 0; j < iterations; j++) {
start.arriveAndAwaitAdvance();
synchronized (lock) {
lock.notify();
}
end.arriveAndAwaitAdvance();
}
});
notifier.start();
vthread.join();
notifier.join();
}
/**
* Barrier before synchronized block.
*/
private static void race2(int iterations) throws InterruptedException {
final int timeout = 1;
var lock = new Object();
var start = new Phaser(2);
var vthread = Thread.startVirtualThread(() -> {
try {
for (int i = 0; i < iterations; i++) {
start.arriveAndAwaitAdvance();
synchronized (lock) {
lock.wait(timeout);
lock.wait(timeout);
}
}
} catch (InterruptedException e) {
e.printStackTrace();
}
});
ThreadFactory factory = ThreadLocalRandom.current().nextBoolean()
? Thread.ofPlatform().factory() : Thread.ofVirtual().factory();
var notifier = factory.newThread(() -> {
for (int i = 0; i < iterations; i++) {
start.arriveAndAwaitAdvance();
synchronized (lock) {
lock.notify();
}
}
});
notifier.start();
vthread.join();
notifier.join();
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,17 +23,18 @@
/* @test
* @bug 8147078
* @run testng/othervm -ea -esa Test8147078
* @run junit/othervm -ea -esa Test8147078
*/
import org.testng.annotations.Test;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import static java.lang.invoke.MethodType.methodType;
import static org.testng.AssertJUnit.*;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
public class Test8147078 {
@ -65,16 +66,10 @@ public class Test8147078 {
@Test
public void testNoExceptionType() {
boolean caught = false;
try {
var cce = assertThrows(ClassCastException.class, () -> {
MethodHandle eek = (MethodHandle) MH_catchException.invoke(MH_target, String.class, MH_handler);
} catch (ClassCastException cce) {
assertEquals("java.lang.String", cce.getMessage());
caught = true;
} catch (Throwable t) {
fail("unexpected exception caught: " + t);
}
assertTrue(caught);
});
assertEquals("java.lang.String", cce.getMessage());
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,17 +23,16 @@
/* @test
* @bug 8177146
* @run testng/othervm TestMethodHandleBind
* @run junit/othervm TestMethodHandleBind
*/
import org.testng.annotations.Test;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodType;
import static java.lang.invoke.MethodHandles.lookup;
import static org.testng.Assert.*;
import static org.junit.jupiter.api.Assertions.*;
import org.junit.jupiter.api.Test;
public class TestMethodHandleBind extends pkg.A {
static class B extends TestMethodHandleBind {}
@ -42,7 +41,7 @@ public class TestMethodHandleBind extends pkg.A {
public void testInstanceOfCallerClass() throws Throwable {
MethodHandle bound = lookup().bind(new TestMethodHandleBind() , "m1", MethodType.methodType(String.class));
String x = (String)bound.invoke();
assertEquals(x, this.getClass().getSimpleName());
assertEquals(this.getClass().getSimpleName(), x);
}
@Test
@ -50,47 +49,37 @@ public class TestMethodHandleBind extends pkg.A {
MethodHandle bound = lookup().bind(new B() , "m1", MethodType.methodType(String.class));
// MethodHandle bound = lookup().findVirtual(B.class, "m1", MethodType.methodType(String.class)).bindTo(new B());
String x = (String)bound.invoke();
assertEquals(x, "B");
assertEquals("B", x);
}
@Test
public void testInstanceOfReceiverClass() throws Throwable {
try {
MethodHandle bound = lookup().bind(new pkg.A() , "m1", MethodType.methodType(String.class));
bound.invoke();
fail("IllegalAccessException expected");
} catch (IllegalAccessException e) {
}
assertThrows(IllegalAccessException.class, () -> lookup().bind(new pkg.A() , "m1", MethodType.methodType(String.class)));
}
@Test
public void testPublicMethod() throws Throwable {
MethodHandle bound = lookup().bind(new pkg.A() , "m2", MethodType.methodType(String.class));
String x = (String)bound.invoke();
assertEquals(x, "A");
assertEquals("A", x);
}
@Test
public void testPublicMethod2() throws Throwable {
MethodHandle bound = lookup().bind(new TestMethodHandleBind(), "m2", MethodType.methodType(String.class));
String x = (String)bound.invoke();
assertEquals(x, this.getClass().getSimpleName());
assertEquals(this.getClass().getSimpleName(), x);
}
@Test
public void testInstanceOfCallerClassVarargs() throws Throwable {
MethodHandle bound = lookup().bind(new TestMethodHandleBind() , "m3", MethodType.methodType(String.class, String[].class));
String x = (String)bound.invoke("a", "b", "c");
assertEquals(x, this.getClass().getSimpleName() + "abc");
assertEquals(this.getClass().getSimpleName() + "abc", x);
}
@Test
public void testInstanceOfReceiverClassVarargs() throws Throwable {
try {
MethodHandle bound = lookup().bind(new pkg.A(), "m3", MethodType.methodType(String.class, String[].class));
bound.invoke();
fail("IllegalAccessException expected");
} catch (IllegalAccessException e) {
}
assertThrows(IllegalAccessException.class, () -> lookup().bind(new pkg.A(), "m3", MethodType.methodType(String.class, String[].class)));
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,7 +24,7 @@
/* @test
* @summary test access checking by java.lang.invoke.MethodHandles.Lookup
* @compile AccessControlTest.java AccessControlTest_subpkg/Acquaintance_remote.java
* @run testng/othervm test.java.lang.invoke.AccessControlTest
* @run junit/othervm test.java.lang.invoke.AccessControlTest
*/
package test.java.lang.invoke;
@ -33,12 +33,13 @@ import java.lang.invoke.*;
import java.lang.reflect.*;
import java.lang.reflect.Modifier;
import java.util.*;
import org.testng.annotations.*;
import static java.lang.invoke.MethodHandles.*;
import static java.lang.invoke.MethodHandles.Lookup.*;
import static java.lang.invoke.MethodType.*;
import static org.testng.Assert.*;
import static org.junit.jupiter.api.Assertions.*;
import org.junit.jupiter.api.Test;
import test.java.lang.invoke.AccessControlTest_subpkg.Acquaintance_remote;
@ -69,7 +70,7 @@ public class AccessControlTest {
this.prevLookupClass = lookup.previousLookupClass();
this.lookupModes = lookup.lookupModes();
assert(lookupString().equals(lookup.toString()));
assertEquals(lookupString(), lookup.toString());
numberOf(lookupClass().getClassLoader()); // assign CL#
}
public LookupCase(Class<?> lookupClass, Class<?> prevLookupClass, int lookupModes) {
@ -96,7 +97,7 @@ public class AccessControlTest {
int cmp = c1.getName().compareTo(c2.getName());
if (cmp != 0) return cmp;
cmp = numberOf(c1.getClassLoader()) - numberOf(c2.getClassLoader());
assert(cmp != 0);
assertNotEquals(0, cmp);
return cmp;
} else if (p1 != p2){
if (p1 == null)
@ -106,7 +107,7 @@ public class AccessControlTest {
int cmp = p1.getName().compareTo(p2.getName());
if (cmp != 0) return cmp;
cmp = numberOf(p1.getClassLoader()) - numberOf(p2.getClassLoader());
assert(cmp != 0);
assertNotEquals(0, cmp);
return cmp;
}
return -(this.lookupModes() - that.lookupModes());
@ -211,8 +212,8 @@ public class AccessControlTest {
c1.getPackageName().equals(c2.getPackageName()));
boolean sameTopLevel = (topLevelClass(c1) == topLevelClass(c2));
boolean sameClass = (c1 == c2);
assert(samePackage || !sameTopLevel);
assert(sameTopLevel || !sameClass);
assertTrue(samePackage || !sameTopLevel);
assertTrue(sameTopLevel || !sameClass);
boolean accessible = sameClass;
if ((modes1 & PACKAGE) != 0) accessible |= samePackage;
@ -251,17 +252,17 @@ public class AccessControlTest {
changed |= (PRIVATE|PROTECTED); // [A5]
}
if (sameClass) {
assert(changed == 0); // [A11] (no deprivation if same class)
assertEquals(0, changed); // [A11] (no deprivation if same class)
}
if (accessible) assert((changed & PUBLIC) == 0);
if (accessible) assertEquals(0, changed & PUBLIC);
int modes2 = modes1 & ~changed;
Class<?> plc = (m1 == m2) ? prevLookupClass() : c1; // [A9] [A10]
if ((modes1 & UNCONDITIONAL) != 0) plc = null; // [A8]
LookupCase l2 = new LookupCase(c2, plc, modes2);
assert(l2.lookupClass() == c2); // [A1]
assert((modes1 | modes2) == modes1); // [A1-a] (no elevation of access)
assert(l2.prevLookupClass() == null || (modes2 & MODULE) == 0);
assertSame(l2.lookupClass(), c2); // [A1]
assertEquals(modes1, modes1 | modes2); // [A1-a] (no elevation of access)
assertTrue(l2.prevLookupClass() == null || (modes2 & MODULE) == 0);
return l2;
}
@ -280,8 +281,8 @@ public class AccessControlTest {
}
if (newModes == oldModes) return this; // return self if no change
LookupCase l2 = new LookupCase(lookupClass(), prevLookupClass(), newModes);
assert((oldModes | newModes) == oldModes); // [A2] (no elevation of access)
assert(l2.prevLookupClass() == null || (newModes & MODULE) == 0);
assertEquals(oldModes, oldModes | newModes); // [A2] (no elevation of access)
assertTrue(l2.prevLookupClass() == null || (newModes & MODULE) == 0);
return l2;
}
@ -331,7 +332,7 @@ public class AccessControlTest {
&& Modifier.isPublic(m.getModifiers());
}
assert(m1 == m2 && prevLookupClass == null);
assertNull(prevLookupClass);
if (!willAccessClass(c2, false))
return false;
@ -380,7 +381,7 @@ public class AccessControlTest {
&& Modifier.isPublic(c2.getModifiers());
}
assert(m1 == m2 && prevLookupClass == null);
assertNull(prevLookupClass);
LookupCase lc = this.in(c2);
int modes1 = lc.lookupModes();
@ -409,8 +410,8 @@ public class AccessControlTest {
Class<?> c = cls;
for (Class<?> ec; (ec = c.getEnclosingClass()) != null; )
c = ec;
assert(c.getEnclosingClass() == null);
assert(c == cls || cls.getEnclosingClass() != null);
assertNull(c.getEnclosingClass());
assertTrue(c == cls || cls.getEnclosingClass() != null);
return c;
}
@ -443,14 +444,14 @@ public class AccessControlTest {
if (edges == null) CASE_EDGES.put(l2, edges = new TreeSet<>());
if (edges.add(l1)) {
Class<?> c1 = l1.lookupClass();
assert(l2.lookupClass() == c2); // [A1]
assertSame(l2.lookupClass(), c2); // [A1]
int m1 = l1.lookupModes();
int m2 = l2.lookupModes();
assert((m1 | m2) == m1); // [A2] (no elevation of access)
assertEquals(m1, (m1 | m2)); // [A2] (no elevation of access)
LookupCase expect = dropAccess == 0 ? l1.in(c2) : l1.in(c2).dropLookupMode(dropAccess);
if (!expect.equals(l2))
System.out.println("*** expect "+l1+" => "+expect+" but got "+l2);
assertEquals(l2, expect);
assertEquals(expect, l2);
}
}
@ -567,7 +568,7 @@ public class AccessControlTest {
if (willAccess != didAccess) {
System.out.println(sourceCase+" => "+targetClass.getSimpleName()+(isFindOrAccessClass?"":"."+methodName+methodType));
System.out.println("fail "+(isFindOrAccessClass?kind:"on "+method)+" ex="+accessError);
assertEquals(willAccess, didAccess);
assertEquals(didAccess, willAccess);
}
testCount++;
if (!didAccess) testCountFails++;
@ -579,10 +580,10 @@ public class AccessControlTest {
System.out.println(targetClass.getSimpleName()+"."+methodName+methodType);
try {
Method method = targetClass.getDeclaredMethod(methodName, methodType.parameterArray());
assertEquals(method.getReturnType(), methodType.returnType());
assertEquals(methodType.returnType(), method.getReturnType());
int haveMods = method.getModifiers();
assert(Modifier.isStatic(haveMods));
assert(targetAccess == fixMods(haveMods));
assertTrue(Modifier.isStatic(haveMods));
assertEquals(targetAccess, fixMods(haveMods));
return method;
} catch (NoSuchMethodException ex) {
throw new AssertionError(methodName, ex);
@ -604,7 +605,7 @@ public class AccessControlTest {
case PACKAGE: return "pkg_in_";
case PRIVATE: return "pri_in_";
}
assert(false);
fail();
return "?";
}
private static final int[] ACCESS_CASES = {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,7 +23,7 @@
/* @test
* @bug 8155106
* @run testng/othervm -ea -esa test.java.lang.invoke.ArrayConstructorTest
* @run junit/othervm -ea -esa test.java.lang.invoke.ArrayConstructorTest
*/
package test.java.lang.invoke;
@ -32,30 +32,22 @@ import java.lang.invoke.MethodHandles;
import static java.lang.invoke.MethodType.methodType;
import static org.testng.AssertJUnit.*;
import org.testng.annotations.*;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource;
import static org.junit.jupiter.api.Assertions.*;
public class ArrayConstructorTest {
static final MethodHandles.Lookup LOOKUP = MethodHandles.lookup();
@Test
public static void testFindConstructorArray() {
boolean caught = false;
try {
MethodHandle h = LOOKUP.findConstructor(Object[].class, methodType(void.class));
} catch (NoSuchMethodException nsme) {
assertEquals("no constructor for array class: [Ljava.lang.Object;", nsme.getMessage());
caught = true;
} catch (Exception e) {
throw new AssertionError("unexpected exception: " + e);
}
assertTrue(caught);
public void testFindConstructorArray() {
var nsme = assertThrows(NoSuchMethodException.class, () -> LOOKUP.findConstructor(Object[].class, methodType(void.class)));
assertEquals("no constructor for array class: [Ljava.lang.Object;", nsme.getMessage());
}
@DataProvider
static Object[][] arrayConstructorNegative() {
return new Object[][]{
{String.class, IllegalArgumentException.class, "not an array class: java.lang.String"},
@ -63,34 +55,28 @@ public class ArrayConstructorTest {
};
}
@Test(dataProvider = "arrayConstructorNegative")
public static void testArrayConstructorNegative(Class<?> clazz, Class<?> exceptionClass, String message) {
boolean caught = false;
try {
MethodHandle h = MethodHandles.arrayConstructor(clazz);
} catch (Exception e) {
assertEquals(exceptionClass, e.getClass());
if (message != null) {
assertEquals(message, e.getMessage());
}
caught = true;
@ParameterizedTest
@MethodSource("arrayConstructorNegative")
public void testArrayConstructorNegative(Class<?> clazz, Class<? extends Exception> exceptionClass, String message) {
var e = assertThrowsExactly(exceptionClass, () -> MethodHandles.arrayConstructor(clazz));
if (message != null) {
assertEquals(message, e.getMessage());
}
assertTrue(caught);
}
@Test
public static void testArrayConstructor() throws Throwable {
public void testArrayConstructor() throws Throwable {
MethodHandle h = MethodHandles.arrayConstructor(String[].class);
assertEquals(methodType(String[].class, int.class), h.type());
String[] a = (String[]) h.invoke(17);
assertEquals(17, a.length);
}
@Test(expectedExceptions = {NegativeArraySizeException.class})
public static void testArrayConstructorNegativeIndex() throws Throwable {
@Test
public void testArrayConstructorNegativeIndex() throws Throwable {
MethodHandle h = MethodHandles.arrayConstructor(String[].class);
assertEquals(methodType(String[].class, int.class), h.type());
h.invoke(-1); // throws exception
assertThrows(NegativeArraySizeException.class, () -> h.invoke(-1));
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,36 +22,39 @@
*/
/* @test
* @run testng/othervm -ea -esa test.java.lang.invoke.ArrayLengthTest
* @run junit/othervm -ea -esa test.java.lang.invoke.ArrayLengthTest
*/
package test.java.lang.invoke;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import static org.testng.AssertJUnit.*;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource;
import org.testng.annotations.*;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
public class ArrayLengthTest {
@DataProvider
Object[][] arrayClasses() {
return new Object[][] {
{int[].class},
{long[].class},
{float[].class},
{double[].class},
{boolean[].class},
{byte[].class},
{short[].class},
{char[].class},
{Object[].class},
{StringBuffer[].class}
static Object[] arrayClasses() {
return new Object[] {
int[].class,
long[].class,
float[].class,
double[].class,
boolean[].class,
byte[].class,
short[].class,
char[].class,
Object[].class,
StringBuffer[].class
};
}
@Test(dataProvider = "arrayClasses")
@ParameterizedTest
@MethodSource("arrayClasses")
public void testArrayLength(Class<?> arrayClass) throws Throwable {
MethodHandle arrayLength = MethodHandles.arrayLength(arrayClass);
assertEquals(int.class, arrayLength.type().returnType());
@ -60,25 +63,28 @@ public class ArrayLengthTest {
assertEquals(10, arrayLength.invoke(array));
}
@Test(dataProvider = "arrayClasses", expectedExceptions = NullPointerException.class)
@ParameterizedTest
@MethodSource("arrayClasses")
public void testArrayLengthInvokeNPE(Class<?> arrayClass) throws Throwable {
MethodHandle arrayLength = MethodHandles.arrayLength(arrayClass);
arrayLength.invoke(null);
assertThrows(NullPointerException.class, () -> arrayLength.invoke(null));
}
@Test(expectedExceptions = IllegalArgumentException.class)
@Test
public void testArrayLengthNoArray() {
MethodHandles.arrayLength(String.class);
assertThrows(IllegalArgumentException.class, () -> MethodHandles.arrayLength(String.class));
}
@Test(expectedExceptions = NullPointerException.class)
@Test
public void testArrayLengthNPE() {
MethodHandles.arrayLength(null);
assertThrows(NullPointerException.class, () -> MethodHandles.arrayLength(null));
}
@Test(expectedExceptions = NullPointerException.class)
@Test
public void testNullReference() throws Throwable {
MethodHandle arrayLength = MethodHandles.arrayLength(String[].class);
int len = (int)arrayLength.invokeExact((String[])null);
assertThrows(NullPointerException.class, () -> {
int len = (int)arrayLength.invokeExact((String[])null);
});
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,20 +22,19 @@
*/
/* @test
* @run testng/othervm CallerSensitiveMethodHandle
* @run junit/othervm CallerSensitiveMethodHandle
* @summary Check Lookup findVirtual, findStatic and unreflect behavior with
* caller sensitive methods with focus on AccessibleObject.setAccessible
*/
import org.testng.annotations.Test;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodHandles.Lookup;
import java.lang.reflect.Field;
import static java.lang.invoke.MethodType.*;
import static org.testng.Assert.*;
import static org.junit.jupiter.api.Assertions.*;
import org.junit.jupiter.api.Test;
public class CallerSensitiveMethodHandle {
private static int field = 0;
@ -46,7 +45,7 @@ public class CallerSensitiveMethodHandle {
MethodHandle mh = l.findVirtual(Field.class, "setInt", methodType(void.class, Object.class, int.class));
int newValue = 5;
mh.invokeExact(f, (Object) null, newValue);
assertTrue(field == newValue);
assertEquals(newValue, field);
}
@Test
@ -55,6 +54,6 @@ public class CallerSensitiveMethodHandle {
MethodHandle MH_lookup2 = lookup.findStatic(MethodHandles.class, "lookup", methodType(Lookup.class));
Lookup lookup2 = (Lookup) MH_lookup2.invokeExact();
System.out.println(lookup2 + " original lookup class " + lookup.lookupClass());
assertTrue(lookup2.lookupClass() == lookup.lookupClass());
assertSame(lookup.lookupClass(), lookup2.lookupClass());
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,15 +24,13 @@
/* @test
* @summary Smoke-test class specializer, used to create BoundMethodHandle classes
* @compile/module=java.base java/lang/invoke/ClassSpecializerHelper.java
* @run testng/othervm/timeout=250 -ea -esa ClassSpecializerTest
* @run junit/othervm/timeout=250 -ea -esa ClassSpecializerTest
*/
// Useful diagnostics to try:
// -Djava.lang.invoke.MethodHandle.TRACE_RESOLVE=true
// -Djava.lang.invoke.MethodHandle.DUMP_CLASS_FILES=true
import org.testng.annotations.*;
import java.lang.invoke.*;
import java.util.ArrayList;
import java.util.Arrays;
@ -40,6 +38,11 @@ import java.util.List;
import static java.lang.invoke.ClassSpecializerHelper.*;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertSame;
public class ClassSpecializerTest {
@Test
@ -58,12 +61,12 @@ public class ClassSpecializerTest {
}
args.set(0, key * 1000 + 42);
Frob f = (Frob) mh.invokeWithArguments(args.toArray());
assert(f.kind() == k);
assertSame(k, f.kind());
System.out.println("k.f(...) = " + f.toString());
List<Object> l = f.asList();
System.out.println("f.l = " + l);
args.subList(0,1).clear(); // drop label
assert(args.equals(l));
assertEquals(args, l);
}
}
private static Object coughUpA(Class<?> pt) throws Throwable {

Some files were not shown because too many files have changed in this diff