Merge branch 'master' into 8336759-int-loop-long-limit

This commit is contained in:
Kangcheng Xu 2026-03-19 11:27:07 -04:00
commit 1795d7a97f
176 changed files with 3278 additions and 2434 deletions

View File

@ -6327,36 +6327,8 @@ instruct loadConD_Ex(regD dst, immD src) %{
// Prefetch instructions.
// Must be safe to execute with invalid address (cannot fault).
// Special prefetch versions which use the dcbz instruction.
// Allocation prefetch with register offset, "write-many with zero" variant
// (per the format string below): emits a single dcbz on the effective
// address formed from $mem's base register plus $src.
instruct prefetch_alloc_zero(indirectMemory mem, iRegLsrc src) %{
match(PrefetchAllocation (AddP mem src));
// Only selected when -XX:AllocatePrefetchStyle=3.
predicate(AllocatePrefetchStyle == 3);
ins_cost(MEMORY_REF_COST);
format %{ "PREFETCH $mem, 2, $src \t// Prefetch write-many with zero" %}
// One 4-byte instruction.
size(4);
ins_encode %{
__ dcbz($src$$Register, $mem$$base$$Register);
%}
ins_pipe(pipe_class_memory);
%}
// Same as prefetch_alloc_zero but without an offset register: dcbz is
// applied directly to $mem's base register.
instruct prefetch_alloc_zero_no_offset(indirectMemory mem) %{
match(PrefetchAllocation mem);
// Only selected when -XX:AllocatePrefetchStyle=3.
predicate(AllocatePrefetchStyle == 3);
ins_cost(MEMORY_REF_COST);
format %{ "PREFETCH $mem, 2 \t// Prefetch write-many with zero" %}
// One 4-byte instruction.
size(4);
ins_encode %{
__ dcbz($mem$$base$$Register);
%}
ins_pipe(pipe_class_memory);
%}
instruct prefetch_alloc(indirectMemory mem, iRegLsrc src) %{
match(PrefetchAllocation (AddP mem src));
predicate(AllocatePrefetchStyle != 3);
ins_cost(MEMORY_REF_COST);
format %{ "PREFETCH $mem, 2, $src \t// Prefetch write-many" %}
@ -6369,7 +6341,6 @@ instruct prefetch_alloc(indirectMemory mem, iRegLsrc src) %{
instruct prefetch_alloc_no_offset(indirectMemory mem) %{
match(PrefetchAllocation mem);
predicate(AllocatePrefetchStyle != 3);
ins_cost(MEMORY_REF_COST);
format %{ "PREFETCH $mem, 2 \t// Prefetch write-many" %}

View File

@ -168,16 +168,27 @@ define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong);
"Perform Ecore Optimization") \
\
/* Minimum array size in bytes to use AVX512 intrinsics */ \
/* for copy, inflate and fill which don't bail out early based on any */ \
/* for inflate and fill which don't bail out early based on any */ \
/* condition. When this value is set to zero compare operations like */ \
/* compare, vectorizedMismatch, compress can also use AVX512 intrinsics.*/\
product(int, AVX3Threshold, 4096, DIAGNOSTIC, \
"Minimum array size in bytes to use AVX512 intrinsics" \
"for copy, inflate and fill. When this value is set as zero" \
"for inflate and fill. When this value is set as zero" \
"compare operations can also use AVX512 intrinsics.") \
range(0, max_jint) \
constraint(AVX3ThresholdConstraintFunc,AfterErgo) \
\
/* Minimum array size in bytes to use AVX512 intrinsics */ \
/* for copy and fill which don't bail out early based on any */ \
/* condition. When this value is set to zero clear operations that */ \
/* work on memory blocks can also use AVX512 intrinsics. */ \
product(int, CopyAVX3Threshold, 4096, DIAGNOSTIC, \
"Minimum array size in bytes to use AVX512 intrinsics" \
"for copy and fill. When this value is set as zero" \
"clear operations can also use AVX512 intrinsics.") \
range(0, max_jint) \
constraint(CopyAVX3ThresholdConstraintFunc,AfterErgo) \
\
product(bool, IntelJccErratumMitigation, true, DIAGNOSTIC, \
"Turn off JVM mitigations related to Intel micro code " \
"mitigations for the Intel JCC erratum") \

View File

@ -5820,7 +5820,7 @@ void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register rtmp, X
// cnt - number of qwords (8-byte words).
// base - start address, qword aligned.
Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end;
bool use64byteVector = (MaxVectorSize == 64) && (VM_Version::avx3_threshold() == 0);
bool use64byteVector = (MaxVectorSize == 64) && (CopyAVX3Threshold == 0);
if (use64byteVector) {
vpxor(xtmp, xtmp, xtmp, AVX_512bit);
} else if (MaxVectorSize >= 32) {
@ -5884,7 +5884,7 @@ void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register rtmp, X
// Clearing constant sized memory using YMM/ZMM registers.
void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) {
assert(UseAVX > 2 && VM_Version::supports_avx512vl(), "");
bool use64byteVector = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0);
bool use64byteVector = (MaxVectorSize > 32) && (CopyAVX3Threshold == 0);
int vector64_count = (cnt & (~0x7)) >> 3;
cnt = cnt & 0x7;
@ -6109,8 +6109,8 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
// Fill 64-byte chunks
Label L_fill_64_bytes_loop_avx3, L_check_fill_64_bytes_avx2;
// If number of bytes to fill < VM_Version::avx3_threshold(), perform fill using AVX2
cmpptr(count, VM_Version::avx3_threshold());
// If number of bytes to fill < CopyAVX3Threshold, perform fill using AVX2
cmpptr(count, CopyAVX3Threshold);
jccb(Assembler::below, L_check_fill_64_bytes_avx2);
vpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit);
@ -9483,7 +9483,6 @@ void MacroAssembler::generate_fill_avx3(BasicType type, Register to, Register va
Label L_fill_zmm_sequence;
int shift = -1;
int avx3threshold = VM_Version::avx3_threshold();
switch(type) {
case T_BYTE: shift = 0;
break;
@ -9499,10 +9498,10 @@ void MacroAssembler::generate_fill_avx3(BasicType type, Register to, Register va
fatal("Unhandled type: %s\n", type2name(type));
}
if ((avx3threshold != 0) || (MaxVectorSize == 32)) {
if ((CopyAVX3Threshold != 0) || (MaxVectorSize == 32)) {
if (MaxVectorSize == 64) {
cmpq(count, avx3threshold >> shift);
cmpq(count, CopyAVX3Threshold >> shift);
jcc(Assembler::greater, L_fill_zmm_sequence);
}

View File

@ -166,12 +166,12 @@ class StubGenerator: public StubCodeGenerator {
// - If target supports AVX3 features (BW+VL+F) then implementation uses 32 byte vectors (YMMs)
// for both special cases (various small block sizes) and aligned copy loop. This is the
// default configuration.
// - If copy length is above AVX3Threshold, then implementation use 64 byte vectors (ZMMs)
// - If copy length is above CopyAVX3Threshold, then implementation use 64 byte vectors (ZMMs)
// for main copy loop (and subsequent tail) since bulk of the cycles will be consumed in it.
// - If user forces MaxVectorSize=32 then above 4096 bytes it is seen that REP MOVs shows a
// better performance for disjoint copies. For conjoint/backward copy vector based
// copy performs better.
// - If user sets AVX3Threshold=0, then special cases for small blocks sizes operate over
// - If user sets CopyAVX3Threshold=0, then special cases for small blocks sizes operate over
// 64 byte vector registers (ZMMs).
address generate_disjoint_copy_avx3_masked(StubId stub_id, address* entry);

View File

@ -144,7 +144,7 @@ address StubGenerator::generate_updateBytesAdler32() {
__ align32();
if (VM_Version::supports_avx512vl()) {
// AVX2 performs better for smaller inputs because of leaner post loop reduction sequence.
__ cmpl(s, MAX2(128, VM_Version::avx3_threshold()));
__ cmpl(s, MAX2(128, CopyAVX3Threshold));
__ jcc(Assembler::belowEqual, SLOOP1A_AVX2);
__ lea(end, Address(s, data, Address::times_1, - (2*CHUNKSIZE -1)));

View File

@ -511,12 +511,12 @@ void StubGenerator::copy_bytes_backward(Register from, Register dest,
// - If target supports AVX3 features (BW+VL+F) then implementation uses 32 byte vectors (YMMs)
// for both special cases (various small block sizes) and aligned copy loop. This is the
// default configuration.
// - If copy length is above AVX3Threshold, then implementation use 64 byte vectors (ZMMs)
// - If copy length is above CopyAVX3Threshold, then implementation use 64 byte vectors (ZMMs)
// for main copy loop (and subsequent tail) since bulk of the cycles will be consumed in it.
// - If user forces MaxVectorSize=32 then above 4096 bytes it is seen that REP MOVs shows a
// better performance for disjoint copies. For conjoint/backward copy vector based
// copy performs better.
// - If user sets AVX3Threshold=0, then special cases for small blocks sizes operate over
// - If user sets CopyAVX3Threshold=0, then special cases for small blocks sizes operate over
// 64 byte vector registers (ZMMs).
// Inputs:
@ -575,8 +575,7 @@ address StubGenerator::generate_disjoint_copy_avx3_masked(StubId stub_id, addres
StubCodeMark mark(this, stub_id);
address start = __ pc();
int avx3threshold = VM_Version::avx3_threshold();
bool use64byteVector = (MaxVectorSize > 32) && (avx3threshold == 0);
bool use64byteVector = (MaxVectorSize > 32) && (CopyAVX3Threshold == 0);
const int large_threshold = 2621440; // 2.5 MB
Label L_main_loop, L_main_loop_64bytes, L_tail, L_tail64, L_exit, L_entry;
Label L_repmovs, L_main_pre_loop, L_main_pre_loop_64bytes, L_pre_main_post_64;
@ -647,7 +646,7 @@ address StubGenerator::generate_disjoint_copy_avx3_masked(StubId stub_id, addres
__ cmpq(temp2, large_threshold);
__ jcc(Assembler::greaterEqual, L_copy_large);
}
if (avx3threshold != 0) {
if (CopyAVX3Threshold != 0) {
__ cmpq(count, threshold[shift]);
if (MaxVectorSize == 64) {
// Copy using 64 byte vectors.
@ -659,7 +658,7 @@ address StubGenerator::generate_disjoint_copy_avx3_masked(StubId stub_id, addres
}
}
if ((MaxVectorSize < 64) || (avx3threshold != 0)) {
if ((MaxVectorSize < 64) || (CopyAVX3Threshold != 0)) {
// Partial copy to make dst address 32 byte aligned.
__ movq(temp2, to);
__ andq(temp2, 31);
@ -913,8 +912,7 @@ address StubGenerator::generate_conjoint_copy_avx3_masked(StubId stub_id, addres
StubCodeMark mark(this, stub_id);
address start = __ pc();
int avx3threshold = VM_Version::avx3_threshold();
bool use64byteVector = (MaxVectorSize > 32) && (avx3threshold == 0);
bool use64byteVector = (MaxVectorSize > 32) && (CopyAVX3Threshold == 0);
Label L_main_pre_loop, L_main_pre_loop_64bytes, L_pre_main_post_64;
Label L_main_loop, L_main_loop_64bytes, L_tail, L_tail64, L_exit, L_entry;
@ -979,12 +977,12 @@ address StubGenerator::generate_conjoint_copy_avx3_masked(StubId stub_id, addres
// PRE-MAIN-POST loop for aligned copy.
__ BIND(L_entry);
if ((MaxVectorSize > 32) && (avx3threshold != 0)) {
if ((MaxVectorSize > 32) && (CopyAVX3Threshold != 0)) {
__ cmpq(temp1, threshold[shift]);
__ jcc(Assembler::greaterEqual, L_pre_main_post_64);
}
if ((MaxVectorSize < 64) || (avx3threshold != 0)) {
if ((MaxVectorSize < 64) || (CopyAVX3Threshold != 0)) {
// Partial copy to make dst address 32 byte aligned.
__ leaq(temp2, Address(to, temp1, (Address::ScaleFactor)(shift), 0));
__ andq(temp2, 31);
@ -1199,7 +1197,7 @@ void StubGenerator::arraycopy_avx3_special_cases_conjoint(XMMRegister xmm, KRegi
bool use64byteVector, Label& L_entry, Label& L_exit) {
Label L_entry_64, L_entry_96, L_entry_128;
Label L_entry_160, L_entry_192;
bool avx3 = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0);
bool avx3 = (MaxVectorSize > 32) && (CopyAVX3Threshold == 0);
int size_mat[][6] = {
/* T_BYTE */ {32 , 64, 96 , 128 , 160 , 192 },

View File

@ -1967,6 +1967,18 @@ void VM_Version::get_processor_features() {
if (FLAG_IS_DEFAULT(UseCopySignIntrinsic)) {
FLAG_SET_DEFAULT(UseCopySignIntrinsic, true);
}
// CopyAVX3Threshold is the threshold at which 64-byte instructions are used
// for implementing the array copy and clear operations.
// The Intel platforms that support the serialize instruction
// have improved implementation of 64-byte load/stores and so the default
// threshold is set to 0 for these platforms.
if (FLAG_IS_DEFAULT(CopyAVX3Threshold)) {
if (is_intel() && is_intel_server_family() && supports_serialize()) {
FLAG_SET_DEFAULT(CopyAVX3Threshold, 0);
} else {
FLAG_SET_DEFAULT(CopyAVX3Threshold, AVX3Threshold);
}
}
}
void VM_Version::print_platform_virtualization_info(outputStream* st) {
@ -2122,17 +2134,6 @@ bool VM_Version::is_intel_darkmont() {
return is_intel() && is_intel_server_family() && (_model == 0xCC || _model == 0xDD);
}
// avx3_threshold() sets the threshold at which 64-byte instructions are used
// for implementing the array copy and clear operations.
// The Intel platforms that supports the serialize instruction
// has improved implementation of 64-byte load/stores and so the default
// threshold is set to 0 for these platforms.
int VM_Version::avx3_threshold() {
return (is_intel_server_family() &&
supports_serialize() &&
FLAG_IS_DEFAULT(AVX3Threshold)) ? 0 : AVX3Threshold;
}
void VM_Version::clear_apx_test_state() {
clear_apx_test_state_stub();
}

View File

@ -958,8 +958,6 @@ public:
static bool is_intel_darkmont();
static int avx3_threshold();
static bool is_intel_tsc_synched_at_init();
static void insert_features_names(VM_Version::VM_Features features, stringStream& ss);

View File

@ -2667,3 +2667,7 @@ void os::print_memory_mappings(char* addr, size_t bytes, outputStream* st) {}
void os::jfr_report_memory_info() {}
#endif // INCLUDE_JFR
// Diagnostic hook for error/OS-info reporting. Counting open file
// descriptors is not implemented on AIX, so this is intentionally a no-op.
void os::print_open_file_descriptors(outputStream* st) {
// File descriptor counting not implemented on AIX
}

View File

@ -76,6 +76,7 @@
# include <fcntl.h>
# include <fenv.h>
# include <inttypes.h>
# include <mach/mach.h>
# include <poll.h>
# include <pthread.h>
# include <pwd.h>
@ -102,6 +103,7 @@
#endif
#ifdef __APPLE__
#include <libproc.h>
#include <mach/task_info.h>
#include <mach-o/dyld.h>
#endif
@ -2596,3 +2598,45 @@ bool os::pd_dll_unload(void* libhandle, char* ebuf, int ebuflen) {
return res;
} // end: os::pd_dll_unload()
// Prints a count of this process's open file descriptors to the stream.
// On macOS this delegates to os::Bsd::print_open_file_descriptors with a
// stack scratch buffer sized for 1024 proc_fdinfo entries; on other BSDs
// the count is reported as unknown.
void os::print_open_file_descriptors(outputStream* st) {
#ifdef __APPLE__
char buf[1024 * sizeof(struct proc_fdinfo)];
os::Bsd::print_open_file_descriptors(st, buf, sizeof(buf));
#else
st->print_cr("Open File Descriptors: unknown");
#endif
}
// Counts this process's open file descriptors via proc_pidinfo(PROC_PIDLISTFDS)
// and prints the result to the stream. buf/buflen is caller-provided scratch
// space; if more descriptors exist than fit in the buffer, only a lower bound
// ("> N") is printed. Prints "unknown" on any failure or on non-Apple BSDs.
void os::Bsd::print_open_file_descriptors(outputStream* st, char* buf, size_t buflen) {
#ifdef __APPLE__
pid_t my_pid;
// ensure the scratch buffer is big enough for at least one FD info struct
precond(buflen >= sizeof(struct proc_fdinfo));
// Resolve our own pid from the Mach task port.
kern_return_t kres = pid_for_task(mach_task_self(), &my_pid);
if (kres != KERN_SUCCESS) {
st->print_cr("Open File Descriptors: unknown");
return;
}
size_t max_fds = buflen / sizeof(struct proc_fdinfo);
struct proc_fdinfo* fds = reinterpret_cast<struct proc_fdinfo*>(buf);
// fill our buffer with FD info, up to the available buffer size
int res = proc_pidinfo(my_pid, PROC_PIDLISTFDS, 0, fds, max_fds * sizeof(struct proc_fdinfo));
// res is the number of bytes written; <= 0 indicates failure
if (res <= 0) {
st->print_cr("Open File Descriptors: unknown");
return;
}
// print lower threshold if count exceeds buffer size
int nfiles = res / sizeof(struct proc_fdinfo);
if ((size_t)nfiles >= max_fds) {
st->print_cr("Open File Descriptors: > %zu", max_fds);
return;
}
st->print_cr("Open File Descriptors: %d", nfiles);
#else
st->print_cr("Open File Descriptors: unknown");
#endif
}

View File

@ -123,6 +123,8 @@ class os::Bsd {
static int get_node_by_cpu(int cpu_id);
static void print_uptime_info(outputStream* st);
static void print_open_file_descriptors(outputStream* st, char* buf, size_t buflen);
static void print_open_file_descriptors(outputStream* st);
};
#endif // OS_BSD_OS_BSD_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2024, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -35,13 +35,18 @@
#include <dirent.h>
ExplicitHugePageSupport::ExplicitHugePageSupport() :
_initialized(false), _pagesizes(), _default_hugepage_size(SIZE_MAX), _inconsistent(false) {}
_initialized(false), _pagesizes(), _pre_allocated_pagesizes(), _default_hugepage_size(SIZE_MAX), _inconsistent(false) {}
// Returns the set of explicit huge page sizes discovered by scan_os()
// (from /sys/kernel/mm/hugepages). Must not be called before scan_os().
os::PageSizes ExplicitHugePageSupport::pagesizes() const {
assert(_initialized, "Not initialized");
return _pagesizes;
}
// Returns the subset of pagesizes() for which the kernel reports a non-zero
// nr_hugepages pool (i.e. pages were pre-allocated). Must not be called
// before scan_os().
os::PageSizes ExplicitHugePageSupport::pre_allocated_pagesizes() const {
assert(_initialized, "Not initialized");
return _pre_allocated_pagesizes;
}
size_t ExplicitHugePageSupport::default_hugepage_size() const {
assert(_initialized, "Not initialized");
return _default_hugepage_size;
@ -129,6 +134,20 @@ static os::PageSizes scan_hugepages() {
return pagesizes;
}
// Returns the subset of the given huge page sizes whose kernel pool has a
// non-zero nr_hugepages value, i.e. sizes for which pages are pre-allocated.
static os::PageSizes filter_pre_allocated_hugepages(os::PageSizes pagesizes) {
  os::PageSizes result{};
  char path[PATH_MAX];
  // Walk sizes from smallest upward; next_larger() yields 0 past the end.
  for (size_t sz = pagesizes.smallest(); sz != 0; sz = pagesizes.next_larger(sz)) {
    os::snprintf_checked(path, sizeof(path), "%s/hugepages-%zukB/nr_hugepages", sys_hugepages, sz / K);
    size_t count;
    // A size qualifies only if nr_hugepages is readable and positive.
    if (read_number_file(path, &count) && count > 0) {
      result.add(sz);
    }
  }
  return result;
}
void ExplicitHugePageSupport::print_on(outputStream* os) {
if (_initialized) {
os->print_cr("Explicit hugepage support:");
@ -148,13 +167,14 @@ void ExplicitHugePageSupport::scan_os() {
_default_hugepage_size = scan_default_hugepagesize();
if (_default_hugepage_size > 0) {
_pagesizes = scan_hugepages();
_pre_allocated_pagesizes = filter_pre_allocated_hugepages(_pagesizes);
// See https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt: /proc/meminfo should match
// /sys/kernel/mm/hugepages/hugepages-xxxx. However, we may run on a broken kernel (e.g. on WSL)
// that only exposes /proc/meminfo but not /sys/kernel/mm/hugepages. In that case, we are not
// sure about the state of hugepage support by the kernel, so we won't use explicit hugepages.
if (!_pagesizes.contains(_default_hugepage_size)) {
log_info(pagesize)("Unexpected configuration: default pagesize (%zu) "
"has no associated directory in /sys/kernel/mm/hugepages..", _default_hugepage_size);
"has no associated directory in /sys/kernel/mm/hugepages.", _default_hugepage_size);
_inconsistent = true;
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2024, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -47,6 +47,9 @@ class ExplicitHugePageSupport {
// in /sys/kernel/mm/hugepages/hugepage-xxx)
os::PageSizes _pagesizes;
// Above pages filtered for where the contents of file nr_hugepages was larger than zero
os::PageSizes _pre_allocated_pagesizes;
// Contains the default hugepage. The "default hugepage size" is the one that
// - is marked in /proc/meminfo as "Hugepagesize"
// - is the size one gets when using mmap(MAP_HUGETLB) when omitting size specifiers like MAP_HUGE_SHIFT)
@ -61,6 +64,7 @@ public:
void scan_os();
os::PageSizes pagesizes() const;
os::PageSizes pre_allocated_pagesizes() const;
size_t default_hugepage_size() const;
void print_on(outputStream* os);

View File

@ -83,6 +83,7 @@
#endif
# include <ctype.h>
# include <dirent.h>
# include <dlfcn.h>
# include <endian.h>
# include <errno.h>
@ -113,6 +114,7 @@
# include <sys/types.h>
# include <sys/utsname.h>
# include <syscall.h>
# include <time.h>
# include <unistd.h>
#ifdef __GLIBC__
# include <malloc.h>
@ -2161,6 +2163,8 @@ void os::print_os_info(outputStream* st) {
os::Posix::print_rlimit_info(st);
os::print_open_file_descriptors(st);
os::Posix::print_load_average(st);
st->cr();
@ -3815,7 +3819,7 @@ static int hugetlbfs_page_size_flag(size_t page_size) {
static bool hugetlbfs_sanity_check(size_t page_size) {
const os::PageSizes page_sizes = HugePages::explicit_hugepage_info().pagesizes();
assert(page_sizes.contains(page_size), "Invalid page sizes passed");
assert(page_sizes.contains(page_size), "Invalid page sizes passed (%zu)", page_size);
// Include the page size flag to ensure we sanity check the correct page size.
int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | hugetlbfs_page_size_flag(page_size);
@ -4062,10 +4066,10 @@ void os::Linux::large_page_init() {
_large_page_size = large_page_size;
// Populate _page_sizes with large page sizes less than or equal to
// _large_page_size.
for (size_t page_size = _large_page_size; page_size != 0;
page_size = all_large_pages.next_smaller(page_size)) {
// Populate _page_sizes with _large_page_size (default large page size) even if not pre-allocated.
// Then, populate _page_sizes with all smaller large page sizes that have been pre-allocated.
os::PageSizes pre_allocated = HugePages::explicit_hugepage_info().pre_allocated_pagesizes();
for (size_t page_size = _large_page_size; page_size != 0; page_size = pre_allocated.next_smaller(page_size)) {
_page_sizes.add(page_size);
}
}
@ -5429,3 +5433,33 @@ bool os::pd_dll_unload(void* libhandle, char* ebuf, int ebuflen) {
return res;
} // end: os::pd_dll_unload()
// Best-effort count of this process's open file descriptors, obtained by
// listing /proc/self/fd. The scan is bounded to ~50ms of wall time so a
// huge fd table cannot stall diagnostic printing; on timeout only a lower
// bound ("> N") is reported.
void os::print_open_file_descriptors(outputStream* st) {
  DIR* dirp = opendir("/proc/self/fd");
  assert(dirp != nullptr, "No proc fs?");
  if (dirp == nullptr) {
    // In release builds the assert above is compiled out. /proc may be
    // unavailable (e.g. not mounted in a minimal container); report
    // "unknown" instead of passing nullptr to readdir/closedir (UB).
    st->print_cr("Open File Descriptors: unknown");
    return;
  }
  int fds = 0;
  struct dirent* dentp;
  const jlong TIMEOUT_NS = 50000000L; // 50 ms in nanoseconds
  bool timed_out = false;
  // limit proc file read to 50ms
  jlong start = os::javaTimeNanos();
  while ((dentp = readdir(dirp)) != nullptr && !timed_out) {
    // fd entries are named by number; skips "." and "..".
    if (isdigit(dentp->d_name[0])) fds++;
    // Check the clock only once per 100 counted fds to keep the loop cheap
    // (the fds > 0 guard avoids re-checking on every non-fd entry at start).
    if (fds > 0 && fds % 100 == 0) {
      jlong now = os::javaTimeNanos();
      if ((now - start) > TIMEOUT_NS) {
        timed_out = true;
      }
    }
  }
  closedir(dirp);
  if (timed_out) {
    st->print_cr("Open File Descriptors: > %d", fds);
  } else {
    st->print_cr("Open File Descriptors: %d", fds);
  }
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -366,6 +366,8 @@ provider hotspot_jni {
probe IsInstanceOf__return(uintptr_t);
probe IsSameObject__entry(void*, void*, void*);
probe IsSameObject__return(uintptr_t);
probe IsVirtualThread__entry(void*, void*);
probe IsVirtualThread__return(uintptr_t);
probe MonitorEnter__entry(void*, void*);
probe MonitorEnter__return(uint32_t);
probe MonitorExit__entry(void*, void*);

View File

@ -6276,6 +6276,10 @@ const void* os::get_saved_assert_context(const void** sigInfo) {
return nullptr;
}
// Diagnostic hook for error/OS-info reporting. File descriptor counting is
// not supported on Windows, so this is intentionally a no-op.
void os::print_open_file_descriptors(outputStream* st) {
// File descriptor counting not supported on Windows.
}
/*
* Windows/x64 does not use stack frames the way expected by Java:
* [1] in most cases, there is no frame pointer. All locals are addressed via RSP

View File

@ -81,6 +81,7 @@ bool AOTConstantPoolResolver::is_resolution_deterministic(ConstantPool* cp, int
bool AOTConstantPoolResolver::is_class_resolution_deterministic(InstanceKlass* cp_holder, Klass* resolved_class) {
assert(!is_in_archivebuilder_buffer(cp_holder), "sanity");
assert(!is_in_archivebuilder_buffer(resolved_class), "sanity");
assert_at_safepoint(); // try_add_candidate() is called below and requires to be at safepoint.
if (resolved_class->is_instance_klass()) {
InstanceKlass* ik = InstanceKlass::cast(resolved_class);
@ -346,7 +347,15 @@ void AOTConstantPoolResolver::maybe_resolve_fmi_ref(InstanceKlass* ik, Method* m
break;
case Bytecodes::_invokehandle:
InterpreterRuntime::cds_resolve_invokehandle(raw_index, cp, CHECK);
if (CDSConfig::is_dumping_method_handles()) {
ResolvedMethodEntry* method_entry = cp->resolved_method_entry_at(raw_index);
int cp_index = method_entry->constant_pool_index();
Symbol* sig = cp->uncached_signature_ref_at(cp_index);
Klass* k;
if (check_methodtype_signature(cp(), sig, &k, true)) {
InterpreterRuntime::cds_resolve_invokehandle(raw_index, cp, CHECK);
}
}
break;
default:
@ -400,7 +409,7 @@ void AOTConstantPoolResolver::preresolve_indy_cp_entries(JavaThread* current, In
// Check the MethodType signatures used by parameters to the indy BSMs. Make sure we don't
// use types that have been excluded, or else we might end up creating MethodTypes that cannot be stored
// in the AOT cache.
bool AOTConstantPoolResolver::check_methodtype_signature(ConstantPool* cp, Symbol* sig, Klass** return_type_ret) {
bool AOTConstantPoolResolver::check_methodtype_signature(ConstantPool* cp, Symbol* sig, Klass** return_type_ret, bool is_invokehandle) {
ResourceMark rm;
for (SignatureStream ss(sig); !ss.is_done(); ss.next()) {
if (ss.is_reference()) {
@ -413,11 +422,18 @@ bool AOTConstantPoolResolver::check_methodtype_signature(ConstantPool* cp, Symbo
if (SystemDictionaryShared::should_be_excluded(k)) {
if (log_is_enabled(Warning, aot, resolve)) {
ResourceMark rm;
log_warning(aot, resolve)("Cannot aot-resolve Lambda proxy because %s is excluded", k->external_name());
log_warning(aot, resolve)("Cannot aot-resolve %s because %s is excluded",
is_invokehandle ? "invokehandle" : "Lambda proxy",
k->external_name());
}
return false;
}
// cp->pool_holder() must be able to resolve k in production run
precond(CDSConfig::is_dumping_aot_linked_classes());
precond(SystemDictionaryShared::is_builtin_loader(cp->pool_holder()->class_loader_data()));
precond(SystemDictionaryShared::is_builtin_loader(k->class_loader_data()));
if (ss.at_return_type() && return_type_ret != nullptr) {
*return_type_ret = k;
}
@ -475,11 +491,44 @@ bool AOTConstantPoolResolver::check_lambda_metafactory_methodhandle_arg(Constant
return false;
}
// klass and signature of the method (no need to check the method name)
Symbol* sig = cp->method_handle_signature_ref_at(mh_index);
Symbol* klass_name = cp->klass_name_at(cp->method_handle_klass_index_at(mh_index));
if (log_is_enabled(Debug, aot, resolve)) {
ResourceMark rm;
log_debug(aot, resolve)("Checking MethodType of MethodHandle for LambdaMetafactory BSM arg %d: %s", arg_i, sig->as_C_string());
}
{
Klass* k = find_loaded_class(Thread::current(), cp->pool_holder()->class_loader(), klass_name);
if (k == nullptr) {
// Dumping AOT cache: all classes should have been loaded by FinalImageRecipes::load_all_classes(). k must have
// been a class that was excluded when FinalImageRecipes recorded all classes at the end of the training run.
//
// Dumping static CDS archive: all classes in the classlist have already been loaded, before we resolve
// constants. k must have been a class that was excluded when the classlist was written
// at the end of the training run.
if (log_is_enabled(Warning, aot, resolve)) {
ResourceMark rm;
log_warning(aot, resolve)("Cannot aot-resolve Lambda proxy because %s is not loaded", klass_name->as_C_string());
}
return false;
}
if (SystemDictionaryShared::should_be_excluded(k)) {
if (log_is_enabled(Warning, aot, resolve)) {
ResourceMark rm;
log_warning(aot, resolve)("Cannot aot-resolve Lambda proxy because %s is excluded", k->external_name());
}
return false;
}
// cp->pool_holder() must be able to resolve k in production run
precond(CDSConfig::is_dumping_aot_linked_classes());
precond(SystemDictionaryShared::is_builtin_loader(cp->pool_holder()->class_loader_data()));
precond(SystemDictionaryShared::is_builtin_loader(k->class_loader_data()));
}
return check_methodtype_signature(cp, sig);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -74,7 +74,10 @@ class AOTConstantPoolResolver : AllStatic {
static void maybe_resolve_fmi_ref(InstanceKlass* ik, Method* m, Bytecodes::Code bc, int raw_index,
GrowableArray<bool>* resolve_fmi_list, TRAPS);
static bool check_methodtype_signature(ConstantPool* cp, Symbol* sig, Klass** return_type_ret = nullptr);
public:
static bool check_methodtype_signature(ConstantPool* cp, Symbol* sig, Klass** return_type_ret = nullptr, bool is_invokehandle = false);
private:
static bool check_lambda_metafactory_signature(ConstantPool* cp, Symbol* sig);
static bool check_lambda_metafactory_methodtype_arg(ConstantPool* cp, int bsms_attribute_index, int arg_i);
static bool check_lambda_metafactory_methodhandle_arg(ConstantPool* cp, int bsms_attribute_index, int arg_i);

View File

@ -98,8 +98,8 @@ void AOTMapLogger::dumptime_log(ArchiveBuilder* builder, FileMapInfo* mapinfo,
DumpRegion* rw_region = &builder->_rw_region;
DumpRegion* ro_region = &builder->_ro_region;
dumptime_log_metaspace_region("rw region", rw_region, &builder->_rw_src_objs);
dumptime_log_metaspace_region("ro region", ro_region, &builder->_ro_src_objs);
dumptime_log_metaspace_region("rw region", rw_region, &builder->_rw_src_objs, &builder->_ro_src_objs);
dumptime_log_metaspace_region("ro region", ro_region, &builder->_rw_src_objs, &builder->_ro_src_objs);
address bitmap_end = address(bitmap + bitmap_size_in_bytes);
log_region_range("bitmap", address(bitmap), bitmap_end, nullptr);
@ -122,17 +122,6 @@ void AOTMapLogger::dumptime_log(ArchiveBuilder* builder, FileMapInfo* mapinfo,
class AOTMapLogger::RuntimeGatherArchivedMetaspaceObjs : public UniqueMetaspaceClosure {
GrowableArrayCHeap<ArchivedObjInfo, mtClass> _objs;
static int compare_objs_by_addr(ArchivedObjInfo* a, ArchivedObjInfo* b) {
intx diff = a->_src_addr - b->_src_addr;
if (diff < 0) {
return -1;
} else if (diff == 0) {
return 0;
} else {
return 1;
}
}
public:
GrowableArrayCHeap<ArchivedObjInfo, mtClass>* objs() { return &_objs; }
@ -152,7 +141,7 @@ public:
void finish() {
UniqueMetaspaceClosure::finish();
_objs.sort(compare_objs_by_addr);
_objs.sort(compare_by_address);
}
}; // AOTMapLogger::RuntimeGatherArchivedMetaspaceObjs
@ -203,24 +192,47 @@ void AOTMapLogger::runtime_log(FileMapInfo* mapinfo, GrowableArrayCHeap<Archived
}
void AOTMapLogger::dumptime_log_metaspace_region(const char* name, DumpRegion* region,
const ArchiveBuilder::SourceObjList* src_objs) {
const ArchiveBuilder::SourceObjList* rw_objs,
const ArchiveBuilder::SourceObjList* ro_objs) {
address region_base = address(region->base());
address region_top = address(region->top());
log_region_range(name, region_base, region_top, region_base + _buffer_to_requested_delta);
if (log_is_enabled(Debug, aot, map)) {
GrowableArrayCHeap<ArchivedObjInfo, mtClass> objs;
for (int i = 0; i < src_objs->objs()->length(); i++) {
ArchiveBuilder::SourceObjInfo* src_info = src_objs->at(i);
// With -XX:+UseCompactObjectHeaders, it's possible for small objects (including some from
// ro_objs) to be allocated in the gaps in the RW region.
collect_metaspace_objs(&objs, region_base, region_top, rw_objs);
collect_metaspace_objs(&objs, region_base, region_top, ro_objs);
objs.sort(compare_by_address);
log_metaspace_objects_impl(address(region->base()), address(region->end()), &objs, 0, objs.length());
}
}
void AOTMapLogger::collect_metaspace_objs(GrowableArrayCHeap<ArchivedObjInfo, mtClass>* objs,
address region_base, address region_top ,
const ArchiveBuilder::SourceObjList* src_objs) {
for (int i = 0; i < src_objs->objs()->length(); i++) {
ArchiveBuilder::SourceObjInfo* src_info = src_objs->at(i);
address buf_addr = src_info->buffered_addr();
if (region_base <= buf_addr && buf_addr < region_top) {
ArchivedObjInfo info;
info._src_addr = src_info->source_addr();
info._buffered_addr = src_info->buffered_addr();
info._buffered_addr = buf_addr;
info._requested_addr = info._buffered_addr + _buffer_to_requested_delta;
info._bytes = src_info->size_in_bytes();
info._type = src_info->type();
objs.append(info);
objs->append(info);
}
}
}
log_metaspace_objects_impl(address(region->base()), address(region->end()), &objs, 0, objs.length());
// Three-way comparator ordering ArchivedObjInfo entries by their buffered
// address (ascending), for sorting collected object lists.
int AOTMapLogger::compare_by_address(ArchivedObjInfo* a, ArchivedObjInfo* b) {
  const address lhs = a->_buffered_addr;
  const address rhs = b->_buffered_addr;
  if (lhs == rhs) {
    return 0;
  }
  return (lhs < rhs) ? -1 : 1;
}

View File

@ -127,7 +127,12 @@ private:
static void runtime_log(FileMapInfo* mapinfo, GrowableArrayCHeap<ArchivedObjInfo, mtClass>* objs);
static void runtime_log_metaspace_regions(FileMapInfo* mapinfo, GrowableArrayCHeap<ArchivedObjInfo, mtClass>* objs);
static void dumptime_log_metaspace_region(const char* name, DumpRegion* region,
const ArchiveBuilder::SourceObjList* src_objs);
const ArchiveBuilder::SourceObjList* rw_objs,
const ArchiveBuilder::SourceObjList* ro_objs);
static void collect_metaspace_objs(GrowableArrayCHeap<ArchivedObjInfo, mtClass>* objs,
address region_base, address region_top ,
const ArchiveBuilder::SourceObjList* src_objs);
static int compare_by_address(ArchivedObjInfo* a, ArchivedObjInfo* b);
// Common code for dumptime/runtime
static void log_file_header(FileMapInfo* mapinfo);

View File

@ -64,6 +64,11 @@ HeapRootSegments AOTMappedHeapWriter::_heap_root_segments;
address AOTMappedHeapWriter::_requested_bottom;
address AOTMappedHeapWriter::_requested_top;
static size_t _num_strings = 0;
static size_t _string_bytes = 0;
static size_t _num_packages = 0;
static size_t _num_protection_domains = 0;
GrowableArrayCHeap<AOTMappedHeapWriter::NativePointerInfo, mtClassShared>* AOTMappedHeapWriter::_native_pointers;
GrowableArrayCHeap<oop, mtClassShared>* AOTMappedHeapWriter::_source_objs;
GrowableArrayCHeap<AOTMappedHeapWriter::HeapObjOrder, mtClassShared>* AOTMappedHeapWriter::_source_objs_order;
@ -71,8 +76,6 @@ GrowableArrayCHeap<AOTMappedHeapWriter::HeapObjOrder, mtClassShared>* AOTMappedH
AOTMappedHeapWriter::BufferOffsetToSourceObjectTable*
AOTMappedHeapWriter::_buffer_offset_to_source_obj_table = nullptr;
DumpedInternedStrings *AOTMappedHeapWriter::_dumped_interned_strings = nullptr;
typedef HashTable<
size_t, // offset of a filler from AOTMappedHeapWriter::buffer_bottom()
size_t, // size of this filler (in bytes)
@ -87,7 +90,6 @@ void AOTMappedHeapWriter::init() {
Universe::heap()->collect(GCCause::_java_lang_system_gc);
_buffer_offset_to_source_obj_table = new (mtClassShared) BufferOffsetToSourceObjectTable(/*size (prime)*/36137, /*max size*/1 * M);
_dumped_interned_strings = new (mtClass)DumpedInternedStrings(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE);
_fillers = new (mtClassShared) FillersTable();
_requested_bottom = nullptr;
_requested_top = nullptr;
@ -141,9 +143,6 @@ int AOTMappedHeapWriter::narrow_oop_shift() {
void AOTMappedHeapWriter::delete_tables_with_raw_oops() {
delete _source_objs;
_source_objs = nullptr;
delete _dumped_interned_strings;
_dumped_interned_strings = nullptr;
}
void AOTMappedHeapWriter::add_source_obj(oop src_obj) {
@ -181,25 +180,6 @@ bool AOTMappedHeapWriter::is_too_large_to_archive(size_t size) {
}
}
// Keep track of the contents of the archived interned string table. This table
// is used only by CDSHeapVerifier.
void AOTMappedHeapWriter::add_to_dumped_interned_strings(oop string) {
assert_at_safepoint(); // DumpedInternedStrings uses raw oops
assert(!is_string_too_large_to_archive(string), "must be");
bool created;
_dumped_interned_strings->put_if_absent(string, true, &created);
if (created) {
// Prevent string deduplication from changing the value field to
// something not in the archive.
java_lang_String::set_deduplication_forbidden(string);
_dumped_interned_strings->maybe_grow();
}
}
bool AOTMappedHeapWriter::is_dumped_interned_string(oop o) {
return _dumped_interned_strings->get(o) != nullptr;
}
// Various lookup functions between source_obj, buffered_obj and requested_obj
bool AOTMappedHeapWriter::is_in_requested_range(oop o) {
assert(_requested_bottom != nullptr, "do not call before _requested_bottom is initialized");
@ -430,6 +410,7 @@ void AOTMappedHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtC
assert(info != nullptr, "must be");
size_t buffer_offset = copy_one_source_obj_to_buffer(src_obj);
info->set_buffer_offset(buffer_offset);
assert(buffer_offset <= 0x7fffffff, "sanity");
OopHandle handle(Universe::vm_global(), src_obj);
_buffer_offset_to_source_obj_table->put_when_absent(buffer_offset, handle);
@ -442,6 +423,9 @@ void AOTMappedHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtC
log_info(aot)("Size of heap region = %zu bytes, %d objects, %d roots, %d native ptrs",
_buffer_used, _source_objs->length() + 1, roots->length(), _num_native_ptrs);
log_info(aot)(" strings = %8zu (%zu bytes)", _num_strings, _string_bytes);
log_info(aot)(" packages = %8zu", _num_packages);
log_info(aot)(" protection domains = %8zu", _num_protection_domains);
}
size_t AOTMappedHeapWriter::filler_array_byte_size(int length) {
@ -530,7 +514,25 @@ void update_buffered_object_field(address buffered_obj, int field_offset, T valu
*field_addr = value;
}
void AOTMappedHeapWriter::update_stats(oop src_obj) {
if (java_lang_String::is_instance(src_obj)) {
_num_strings ++;
_string_bytes += src_obj->size() * HeapWordSize;
_string_bytes += java_lang_String::value(src_obj)->size() * HeapWordSize;
} else {
Klass* k = src_obj->klass();
Symbol* name = k->name();
if (name->equals("java/lang/NamedPackage") || name->equals("java/lang/Package")) {
_num_packages ++;
} else if (name->equals("java/security/ProtectionDomain")) {
_num_protection_domains ++;
}
}
}
size_t AOTMappedHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
update_stats(src_obj);
assert(!is_too_large_to_archive(src_obj), "already checked");
size_t byte_size = src_obj->size() * HeapWordSize;
assert(byte_size > 0, "no zero-size objects");
@ -896,8 +898,14 @@ void AOTMappedHeapWriter::compute_ptrmap(AOTMappedHeapInfo* heap_info) {
native_ptr = RegeneratedClasses::get_regenerated_object(native_ptr);
}
guarantee(ArchiveBuilder::current()->has_been_archived((address)native_ptr),
"Metadata %p should have been archived", native_ptr);
if (!ArchiveBuilder::current()->has_been_archived((address)native_ptr)) {
ResourceMark rm;
LogStreamHandle(Error, aot) log;
log.print("Marking native pointer for oop %p (type = %s, offset = %d)",
cast_from_oop<void*>(src_obj), src_obj->klass()->external_name(), field_offset);
src_obj->print_on(&log);
fatal("Metadata %p should have been archived", native_ptr);
}
address buffered_native_ptr = ArchiveBuilder::current()->get_buffered_addr((address)native_ptr);
address requested_native_ptr = ArchiveBuilder::current()->to_requested(buffered_native_ptr);

View File

@ -40,20 +40,6 @@
class MemRegion;
#if INCLUDE_CDS_JAVA_HEAP
class DumpedInternedStrings :
public ResizeableHashTable<oop, bool,
AnyObj::C_HEAP,
mtClassShared,
HeapShared::string_oop_hash>
{
public:
DumpedInternedStrings(unsigned size, unsigned max_size) :
ResizeableHashTable<oop, bool,
AnyObj::C_HEAP,
mtClassShared,
HeapShared::string_oop_hash>(size, max_size) {}
};
class AOTMappedHeapWriter : AllStatic {
friend class HeapShared;
friend class AOTMappedHeapLoader;
@ -131,7 +117,6 @@ private:
static GrowableArrayCHeap<NativePointerInfo, mtClassShared>* _native_pointers;
static GrowableArrayCHeap<oop, mtClassShared>* _source_objs;
static DumpedInternedStrings *_dumped_interned_strings;
// We sort _source_objs_order to minimize the number of bits in ptrmap and oopmap.
// See comments near the body of AOTMappedHeapWriter::compare_objs_by_oop_fields().
@ -190,6 +175,7 @@ private:
static void copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots);
static void copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots);
static size_t copy_one_source_obj_to_buffer(oop src_obj);
static void update_stats(oop src_obj);
static void maybe_fill_gc_region_gap(size_t required_byte_size);
static size_t filler_array_byte_size(int length);
@ -227,8 +213,6 @@ public:
static bool is_too_large_to_archive(size_t size);
static bool is_too_large_to_archive(oop obj);
static bool is_string_too_large_to_archive(oop string);
static bool is_dumped_interned_string(oop o);
static void add_to_dumped_interned_strings(oop string);
static void write(GrowableArrayCHeap<oop, mtClassShared>*, AOTMappedHeapInfo* heap_info);
static address requested_address(); // requested address of the lowest achived heap object
static size_t get_filler_size_at(address buffered_addr);

View File

@ -96,7 +96,7 @@ class KeepAliveObjectsTable : public HashTable<oop, bool,
36137, // prime number
AnyObj::C_HEAP,
mtClassShared,
HeapShared::oop_hash> {};
HeapShared::oop_address_hash> {};
static KeepAliveObjectsTable* _keep_alive_objs_table;
static OopHandle _keep_alive_objs_array;

View File

@ -242,20 +242,6 @@ void AOTStreamedHeapWriter::copy_roots_max_dfs_to_buffer(int roots_length) {
}
}
static bool is_interned_string(oop obj) {
if (!java_lang_String::is_instance(obj)) {
return false;
}
ResourceMark rm;
int len;
jchar* name = java_lang_String::as_unicode_string_or_null(obj, len);
if (name == nullptr) {
fatal("Insufficient memory for dumping");
}
return StringTable::lookup(name, len) == obj;
}
static BitMap::idx_t bit_idx_for_buffer_offset(size_t buffer_offset) {
if (UseCompressedOops) {
return BitMap::idx_t(buffer_offset / sizeof(narrowOop));
@ -264,10 +250,6 @@ static BitMap::idx_t bit_idx_for_buffer_offset(size_t buffer_offset) {
}
}
bool AOTStreamedHeapWriter::is_dumped_interned_string(oop obj) {
return is_interned_string(obj) && HeapShared::get_cached_oop_info(obj) != nullptr;
}
void AOTStreamedHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
for (int i = 0; i < _source_objs->length(); i++) {
oop src_obj = _source_objs->at(i);
@ -325,7 +307,7 @@ size_t AOTStreamedHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
ensure_buffer_space(new_used);
if (is_interned_string(src_obj)) {
if (HeapShared::is_interned_string(src_obj)) {
java_lang_String::hash_code(src_obj); // Sets the hash code field(s)
java_lang_String::set_deduplication_forbidden(src_obj); // Allows faster interning at runtime
assert(java_lang_String::hash_is_set(src_obj), "hash must be set");
@ -402,7 +384,7 @@ void AOTStreamedHeapWriter::update_header_for_buffered_addr(address buffered_add
mw = mw.copy_set_hash(src_hash);
}
if (is_interned_string(src_obj)) {
if (HeapShared::is_interned_string(src_obj)) {
// Mark the mark word of interned string so the loader knows to link these to
// the string table at runtime.
mw = mw.set_marked();

View File

@ -148,8 +148,6 @@ public:
return size_t(buffered_addr) - size_t(buffer_bottom());
}
static bool is_dumped_interned_string(oop obj);
static size_t source_obj_to_buffered_offset(oop src_obj);
static address source_obj_to_buffered_addr(oop src_obj);

View File

@ -627,6 +627,7 @@ void ArchiveBuilder::dump_ro_metadata() {
start_dump_region(&_ro_region);
make_shallow_copies(&_ro_region, &_ro_src_objs);
RegeneratedClasses::record_regenerated_objects();
DumpRegion::report_gaps(&_alloc_stats);
}
void ArchiveBuilder::make_shallow_copies(DumpRegion *dump_region,
@ -639,33 +640,10 @@ void ArchiveBuilder::make_shallow_copies(DumpRegion *dump_region,
void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* src_info) {
address src = src_info->source_addr();
int bytes = src_info->size_in_bytes(); // word-aligned
size_t alignment = SharedSpaceObjectAlignment; // alignment for the dest pointer
int bytes = src_info->size_in_bytes();
char* dest = dump_region->allocate_metaspace_obj(bytes, src, src_info->type(),
src_info->read_only(), &_alloc_stats);
char* oldtop = dump_region->top();
if (src_info->type() == MetaspaceClosureType::ClassType) {
// Allocate space for a pointer directly in front of the future InstanceKlass, so
// we can do a quick lookup from InstanceKlass* -> RunTimeClassInfo*
// without building another hashtable. See RunTimeClassInfo::get_for()
// in systemDictionaryShared.cpp.
Klass* klass = (Klass*)src;
if (klass->is_instance_klass()) {
SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass));
dump_region->allocate(sizeof(address));
}
#ifdef _LP64
// More strict alignments needed for UseCompressedClassPointers
if (UseCompressedClassPointers) {
alignment = nth_bit(ArchiveBuilder::precomputed_narrow_klass_shift());
}
#endif
} else if (src_info->type() == MetaspaceClosureType::SymbolType) {
// Symbols may be allocated by using AllocateHeap, so their sizes
// may be less than size_in_bytes() indicates.
bytes = ((Symbol*)src)->byte_size();
}
char* dest = dump_region->allocate(bytes, alignment);
memcpy(dest, src, bytes);
// Update the hash of buffered sorted symbols for static dump so that the symbols have deterministic contents
@ -692,11 +670,6 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
log_trace(aot)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(src), p2i(dest), bytes);
src_info->set_buffered_addr((address)dest);
char* newtop = dump_region->top();
_alloc_stats.record(src_info->type(), int(newtop - oldtop), src_info->read_only());
DEBUG_ONLY(_alloc_stats.verify((int)dump_region->used(), src_info->read_only()));
}
// This is used by code that hand-assembles data structures, such as the LambdaProxyClassKey, that are

View File

@ -30,6 +30,7 @@
#include "cds/cdsConfig.hpp"
#include "cds/classListParser.hpp"
#include "cds/classListWriter.hpp"
#include "cds/dumpAllocStats.hpp"
#include "cds/dynamicArchive.hpp"
#include "cds/filemap.hpp"
#include "cds/heapShared.hpp"
@ -46,6 +47,7 @@
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/rbTree.inline.hpp"
#include "utilities/spinYield.hpp"
CHeapBitMap* ArchivePtrMarker::_ptrmap = nullptr;
@ -116,13 +118,17 @@ void ArchivePtrMarker::mark_pointer(address* ptr_loc) {
if (ptr_base() <= ptr_loc && ptr_loc < ptr_end()) {
address value = *ptr_loc;
// We don't want any pointer that points to very bottom of the archive, otherwise when
// AOTMetaspace::default_base_address()==0, we can't distinguish between a pointer
// to nothing (null) vs a pointer to an objects that happens to be at the very bottom
// of the archive.
assert(value != (address)ptr_base(), "don't point to the bottom of the archive");
if (value != nullptr) {
// We don't want any pointer that points to very bottom of the AOT metaspace, otherwise
// when AOTMetaspace::default_base_address()==0, we can't distinguish between a pointer
// to nothing (null) vs a pointer to an objects that happens to be at the very bottom
// of the AOT metaspace.
//
// This should never happen because the protection zone prevents any valid objects from
// being allocated at the bottom of the AOT metaspace.
assert(AOTMetaspace::protection_zone_size() > 0, "must be");
assert(ArchiveBuilder::current()->any_to_offset(value) > 0, "cannot point to bottom of AOT metaspace");
assert(uintx(ptr_loc) % sizeof(intptr_t) == 0, "pointers must be stored in aligned addresses");
size_t idx = ptr_loc - ptr_base();
if (_ptrmap->size() <= idx) {
@ -130,7 +136,6 @@ void ArchivePtrMarker::mark_pointer(address* ptr_loc) {
}
assert(idx < _ptrmap->size(), "must be");
_ptrmap->set_bit(idx);
//tty->print_cr("Marking pointer [" PTR_FORMAT "] -> " PTR_FORMAT " @ %5zu", p2i(ptr_loc), p2i(*ptr_loc), idx);
}
}
}
@ -144,7 +149,6 @@ void ArchivePtrMarker::clear_pointer(address* ptr_loc) {
size_t idx = ptr_loc - ptr_base();
assert(idx < _ptrmap->size(), "cannot clear pointers that have not been marked");
_ptrmap->clear_bit(idx);
//tty->print_cr("Clearing pointer [" PTR_FORMAT "] -> " PTR_FORMAT " @ %5zu", p2i(ptr_loc), p2i(*ptr_loc), idx);
}
class ArchivePtrBitmapCleaner: public BitMapClosure {
@ -249,16 +253,179 @@ void DumpRegion::commit_to(char* newtop) {
which, commit, _vs->actual_committed_size(), _vs->high());
}
// Basic allocation. Any alignment gaps will be wasted.
char* DumpRegion::allocate(size_t num_bytes, size_t alignment) {
// Always align to at least minimum alignment
alignment = MAX2(SharedSpaceObjectAlignment, alignment);
char* p = (char*)align_up(_top, alignment);
char* newtop = p + align_up(num_bytes, (size_t)SharedSpaceObjectAlignment);
char* newtop = p + align_up(num_bytes, SharedSpaceObjectAlignment);
expand_top_to(newtop);
memset(p, 0, newtop - p);
return p;
}
class DumpRegion::AllocGap {
size_t _gap_bytes; // size of this gap in bytes
char* _gap_bottom; // must be SharedSpaceObjectAlignment aligned
public:
size_t gap_bytes() const { return _gap_bytes; }
char* gap_bottom() const { return _gap_bottom; }
AllocGap(size_t bytes, char* bottom) : _gap_bytes(bytes), _gap_bottom(bottom) {
precond(is_aligned(gap_bytes(), SharedSpaceObjectAlignment));
precond(is_aligned(gap_bottom(), SharedSpaceObjectAlignment));
}
};
struct DumpRegion::AllocGapCmp {
static RBTreeOrdering cmp(AllocGap a, AllocGap b) {
RBTreeOrdering order = rbtree_primitive_cmp(a.gap_bytes(), b.gap_bytes());
if (order == RBTreeOrdering::EQ) {
order = rbtree_primitive_cmp(a.gap_bottom(), b.gap_bottom());
}
return order;
}
};
struct Empty {};
using AllocGapNode = RBNode<DumpRegion::AllocGap, Empty>;
class DumpRegion::AllocGapTree : public RBTreeCHeap<AllocGap, Empty, AllocGapCmp, mtClassShared> {
public:
size_t add_gap(char* gap_bottom, char* gap_top) {
precond(gap_bottom < gap_top);
size_t gap_bytes = pointer_delta(gap_top, gap_bottom, 1);
precond(gap_bytes > 0);
_total_gap_bytes += gap_bytes;
AllocGap gap(gap_bytes, gap_bottom); // constructor checks alignment
AllocGapNode* node = allocate_node(gap, Empty{});
insert(gap, node);
log_trace(aot, alloc)("adding a gap of %zu bytes @ %p (total = %zu) in %zu blocks", gap_bytes, gap_bottom, _total_gap_bytes, size());
return gap_bytes;
}
char* allocate_from_gap(size_t num_bytes) {
// The gaps are sorted in ascending order of their sizes. When two gaps have the same
// size, the one with a lower gap_bottom comes first.
//
// Find the first gap that's big enough, with the lowest gap_bottom.
AllocGap target(num_bytes, nullptr);
AllocGapNode* node = closest_ge(target);
if (node == nullptr) {
return nullptr; // Didn't find any usable gap.
}
size_t gap_bytes = node->key().gap_bytes();
char* gap_bottom = node->key().gap_bottom();
char* result = gap_bottom;
precond(is_aligned(result, SharedSpaceObjectAlignment));
remove(node);
precond(_total_gap_bytes >= num_bytes);
_total_gap_bytes -= num_bytes;
_total_gap_bytes_used += num_bytes;
_total_gap_allocs++;
DEBUG_ONLY(node = nullptr); // Don't use it anymore!
precond(gap_bytes >= num_bytes);
if (gap_bytes > num_bytes) {
gap_bytes -= num_bytes;
gap_bottom += num_bytes;
AllocGap gap(gap_bytes, gap_bottom); // constructor checks alignment
AllocGapNode* new_node = allocate_node(gap, Empty{});
insert(gap, new_node);
}
log_trace(aot, alloc)("%zu bytes @ %p in a gap of %zu bytes (used gaps %zu times, remain gap = %zu bytes in %zu blocks)",
num_bytes, result, gap_bytes, _total_gap_allocs, _total_gap_bytes, size());
return result;
}
};
size_t DumpRegion::_total_gap_bytes = 0;
size_t DumpRegion::_total_gap_bytes_used = 0;
size_t DumpRegion::_total_gap_allocs = 0;
DumpRegion::AllocGapTree DumpRegion::_gap_tree;
// Alignment gaps happen only for the RW space. Collect the gaps into the _gap_tree so they can be
// used for future small object allocation.
char* DumpRegion::allocate_metaspace_obj(size_t num_bytes, address src, MetaspaceClosureType type, bool read_only, DumpAllocStats* stats) {
num_bytes = align_up(num_bytes, SharedSpaceObjectAlignment);
size_t alignment = SharedSpaceObjectAlignment; // alignment for the dest pointer
bool is_class = (type == MetaspaceClosureType::ClassType);
bool is_instance_class = is_class && ((Klass*)src)->is_instance_klass();
#ifdef _LP64
// More strict alignments needed for UseCompressedClassPointers
if (is_class && UseCompressedClassPointers) {
size_t klass_alignment = checked_cast<size_t>(nth_bit(ArchiveBuilder::precomputed_narrow_klass_shift()));
alignment = MAX2(alignment, klass_alignment);
precond(is_aligned(alignment, SharedSpaceObjectAlignment));
}
#endif
if (alignment == SharedSpaceObjectAlignment && type != MetaspaceClosureType::SymbolType) {
// The addresses of Symbols must be in the same order as they are in ArchiveBuilder::SourceObjList.
// If we put them in gaps, their order will change.
//
// We have enough small objects that all gaps are usually filled.
char* p = _gap_tree.allocate_from_gap(num_bytes);
if (p != nullptr) {
// Already memset to 0 when adding the gap
stats->record(type, checked_cast<int>(num_bytes), /*read_only=*/false); // all gaps are from RW space (for classes)
return p;
}
}
// Reserve space for a pointer directly in front of the buffered InstanceKlass, so
// we can do a quick lookup from InstanceKlass* -> RunTimeClassInfo*
// without building another hashtable. See RunTimeClassInfo::get_for()
// in systemDictionaryShared.cpp.
const size_t RuntimeClassInfoPtrSize = is_instance_class ? sizeof(address) : 0;
if (is_class && !is_aligned(top() + RuntimeClassInfoPtrSize, alignment)) {
// We need to add a gap to align the buffered Klass. Save the gap for future small allocations.
assert(read_only == false, "only gaps in RW region are reusable");
char* gap_bottom = top();
char* gap_top = align_up(gap_bottom + RuntimeClassInfoPtrSize, alignment) - RuntimeClassInfoPtrSize;
size_t gap_bytes = _gap_tree.add_gap(gap_bottom, gap_top);
allocate(gap_bytes);
}
char* oldtop = top();
if (is_instance_class) {
SystemDictionaryShared::validate_before_archiving((InstanceKlass*)src);
allocate(RuntimeClassInfoPtrSize);
}
precond(is_aligned(top(), alignment));
char* result = allocate(num_bytes);
log_trace(aot, alloc)("%zu bytes @ %p", num_bytes, result);
stats->record(type, pointer_delta_as_int(top(), oldtop), read_only); // includes RuntimeClassInfoPtrSize for classes
return result;
}
// Usually we have no gaps left.
void DumpRegion::report_gaps(DumpAllocStats* stats) {
_gap_tree.visit_in_order([&](const AllocGapNode* node) {
stats->record_gap(checked_cast<int>(node->key().gap_bytes()));
return true;
});
if (_gap_tree.size() > 0) {
log_warning(aot)("Unexpected %zu gaps (%zu bytes) for Klass alignment",
_gap_tree.size(), _total_gap_bytes);
}
if (_total_gap_allocs > 0) {
log_info(aot)("Allocated %zu objects of %zu bytes in gaps (remain = %zu bytes)",
_total_gap_allocs, _total_gap_bytes_used, _total_gap_bytes);
}
}
void DumpRegion::append_intptr_t(intptr_t n, bool need_to_mark) {
assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment");
intptr_t *p = (intptr_t*)_top;

View File

@ -28,7 +28,9 @@
#include "cds/cds_globals.hpp"
#include "cds/serializeClosure.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceClosureType.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/nonJavaThread.hpp"
#include "runtime/semaphore.hpp"
@ -37,6 +39,7 @@
#include "utilities/macros.hpp"
class BootstrapInfo;
class DumpAllocStats;
class ReservedSpace;
class VirtualSpace;
@ -159,6 +162,18 @@ private:
void commit_to(char* newtop);
public:
// Allocation gaps (due to Klass alignment)
class AllocGapTree;
class AllocGap;
struct AllocGapCmp;
private:
static AllocGapTree _gap_tree;
static size_t _total_gap_bytes;
static size_t _total_gap_bytes_used;
static size_t _total_gap_allocs;
public:
DumpRegion(const char* name)
: _name(name), _base(nullptr), _top(nullptr), _end(nullptr),
@ -167,6 +182,7 @@ public:
char* expand_top_to(char* newtop);
char* allocate(size_t num_bytes, size_t alignment = 0);
char* allocate_metaspace_obj(size_t num_bytes, address src, MetaspaceClosureType type, bool read_only, DumpAllocStats* stats);
void append_intptr_t(intptr_t n, bool need_to_mark = false) NOT_CDS_RETURN;
@ -191,6 +207,8 @@ public:
bool contains(char* p) {
return base() <= p && p < top();
}
static void report_gaps(DumpAllocStats* stats);
};
// Closure for serializing initialization data out to a data area to be

View File

@ -53,7 +53,7 @@ class CDSHeapVerifier : public KlassClosure {
15889, // prime number
AnyObj::C_HEAP,
mtClassShared,
HeapShared::oop_hash> _table;
HeapShared::oop_address_hash> _table;
GrowableArray<const char**> _exclusions;
GrowableArray<oop> _shared_secret_accessors;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -129,15 +129,3 @@ void DumpAllocStats::print_stats(int ro_all, int rw_all) {
_bytes [RW][MethodTrainingDataType]);
}
#ifdef ASSERT
void DumpAllocStats::verify(int expected_byte_size, bool read_only) const {
int bytes = 0;
const int what = (int)(read_only ? RO : RW);
for (int type = 0; type < int(_number_of_types); type ++) {
bytes += _bytes[what][type];
}
assert(bytes == expected_byte_size, "counter mismatch (%s: %d vs %d)",
(read_only ? "RO" : "RW"), bytes, expected_byte_size);
}
#endif // ASSERT

View File

@ -41,6 +41,7 @@ public:
f(StringHashentry) \
f(StringBucket) \
f(CppVTables) \
f(Gap) \
f(Other)
#define DUMPED_TYPE_DECLARE(name) name ## Type,
@ -111,12 +112,19 @@ public:
_bytes [which][t] += byte_size;
}
void record_gap(int byte_size) {
_counts[RW][GapType] += 1;
_bytes [RW][GapType] += byte_size;
}
void record_other_type(int byte_size, bool read_only) {
int which = (read_only) ? RO : RW;
_counts[which][OtherType] += 1;
_bytes [which][OtherType] += byte_size;
}
void record_cpp_vtables(int byte_size) {
_counts[RW][CppVTablesType] += 1;
_bytes[RW][CppVTablesType] += byte_size;
}
@ -145,9 +153,6 @@ public:
}
void print_stats(int ro_all, int rw_all);
DEBUG_ONLY(void verify(int expected_byte_size, bool read_only) const);
};
#endif // SHARE_CDS_DUMPALLOCSTATS_HPP

View File

@ -175,23 +175,39 @@ oop HeapShared::CachedOopInfo::orig_referrer() const {
return _orig_referrer.resolve();
}
unsigned HeapShared::oop_hash(oop const& p) {
// This is a simple hashing of the oop's address. This function is used
// while copying the oops into the AOT heap region. We don't want to
// have any side effects during the copying, so we avoid calling
// p->identity_hash() which can update the object header.
unsigned HeapShared::oop_address_hash(oop const& p) {
assert(SafepointSynchronize::is_at_safepoint() ||
JavaThread::current()->is_in_no_safepoint_scope(), "sanity");
// Do not call p->identity_hash() as that will update the
// object header.
return primitive_hash(cast_from_oop<intptr_t>(p));
}
unsigned int HeapShared::oop_handle_hash_raw(const OopHandle& oh) {
return oop_hash(oh.resolve());
}
unsigned int HeapShared::oop_handle_hash(const OopHandle& oh) {
// About the hashcode in the cached objects:
// - If a source object has a hashcode, it must be copied into the cache.
// That's because some cached hashtables are laid out using this hashcode.
// - If a source object doesn't have a hashcode, we avoid computing it while
// copying the objects into the cache. This will allow the hashcode to be
// dynamically and randomly computed in each production, which generally
// desirable to make the hashcodes more random between runs.
unsigned HeapShared::archived_object_cache_hash(OopHandle const& oh) {
oop o = oh.resolve();
if (o == nullptr) {
return 0;
}
if (!_use_identity_hash_for_archived_object_cache) {
// This is called while we are copying the objects. Don't call o->identity_hash()
// as that will update the object header.
return oop_address_hash(o);
} else {
// This is called after all objects are copied. It's OK to update
// the object's hashcode.
//
// This may be called after we have left the AOT dumping safepoint.
// Objects in archived_object_cache() may be moved by the GC, so we
// can't use the address of o for computing the hash.
return o->identity_hash();
}
}
@ -271,6 +287,12 @@ void HeapShared::prepare_for_archiving(TRAPS) {
HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr;
// Controls the hashing method for the _archived_object_cache.
// Changes from false to true once, after all objects are copied,
// inside make_archived_object_cache_gc_safe().
// See archived_object_cache_hash() for more details.
bool HeapShared::_use_identity_hash_for_archived_object_cache = false;
bool HeapShared::is_archived_heap_in_use() {
if (HeapShared::is_loading()) {
if (HeapShared::is_loading_streaming_mode()) {
@ -384,9 +406,8 @@ void HeapShared::materialize_thread_object() {
}
}
void HeapShared::add_to_dumped_interned_strings(oop string) {
void HeapShared::archive_interned_string(oop string) {
assert(HeapShared::is_writing_mapping_mode(), "Only used by this mode");
AOTMappedHeapWriter::add_to_dumped_interned_strings(string);
bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, string);
assert(success, "shared strings array must not point to arrays or strings that are too large to archive");
}
@ -404,6 +425,24 @@ void HeapShared::finalize_initialization(FileMapInfo* static_mapinfo) {
}
}
void HeapShared::make_archived_object_cache_gc_safe() {
ArchivedObjectCache* new_cache = new (mtClass)ArchivedObjectCache(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE);
// It's safe to change the behavior of the hash function now, because iterate_all()
// doesn't call the hash function.
// See archived_object_cache_hash() for more details.
assert(_use_identity_hash_for_archived_object_cache == false, "happens only once");
_use_identity_hash_for_archived_object_cache = true;
// Copy all CachedOopInfo into a new table using a different hashing algorithm
archived_object_cache()->iterate_all([&] (OopHandle oh, CachedOopInfo info) {
new_cache->put_when_absent(oh, info);
});
destroy_archived_object_cache();
_archived_object_cache = new_cache;
}
HeapShared::CachedOopInfo* HeapShared::get_cached_oop_info(oop obj) {
OopHandle oh(Universe::vm_global(), obj);
CachedOopInfo* result = _archived_object_cache->get(oh);
@ -417,14 +456,53 @@ bool HeapShared::has_been_archived(oop obj) {
}
int HeapShared::append_root(oop obj) {
assert(SafepointSynchronize::is_at_safepoint(), "sanity");
assert(CDSConfig::is_dumping_heap(), "dump-time only");
if (obj != nullptr) {
assert(has_been_archived(obj), "must be");
}
// No GC should happen since we aren't scanning _pending_roots.
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
assert(_pending_roots != nullptr, "sanity");
return _pending_roots->append(obj);
if (obj == nullptr) {
assert(_pending_roots->at(0) == nullptr, "root index 0 always maps to null");
return 0;
} else if (CDSConfig::is_dumping_aot_linked_classes()) {
// The AOT compiler may refer the same obj many times, so we
// should use the same index for this oop to avoid excessive entries
// in the roots array.
CachedOopInfo* obj_info = get_cached_oop_info(obj);
assert(obj_info != nullptr, "must be archived");
if (obj_info->root_index() > 0) {
return obj_info->root_index();
} else {
assert(obj_info->root_index() < 0, "must not be zero");
int i = _pending_roots->append(obj);
obj_info->set_root_index(i);
return i;
}
} else {
return _pending_roots->append(obj);
}
}
int HeapShared::get_root_index(oop obj) {
if (java_lang_Class::is_instance(obj)) {
obj = scratch_java_mirror(obj);
}
CachedOopInfo* obj_info = get_cached_oop_info(obj);
const char* error = nullptr;
if (obj_info == nullptr) {
error = "Not a cached oop";
} else if (obj_info->root_index() < 0) {
error = "Not a cached oop root";
} else {
return obj_info->root_index();
}
ResourceMark rm;
log_debug(aot, codecache, oops)("%s: " INTPTR_FORMAT " (%s)", error,
cast_from_oop<uintptr_t>(obj),
obj->klass()->external_name());
return -1;
}
oop HeapShared::get_root(int index, bool clear) {
@ -453,6 +531,13 @@ void HeapShared::finish_materialize_objects() {
}
void HeapShared::clear_root(int index) {
if (CDSConfig::is_using_aot_linked_classes()) {
// When AOT linked classes are in use, all roots will be in use all
// the time, there's no benefit for clearing the roots. Also, we
// can't clear the roots as they can be shared.
return;
}
assert(index >= 0, "sanity");
assert(CDSConfig::is_using_archive(), "must be");
if (is_archived_heap_in_use()) {
@ -600,9 +685,10 @@ objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
return (objArrayOop)_scratch_objects_table->get_oop(src);
}
void HeapShared::init_dumping() {
_scratch_objects_table = new (mtClass)MetaspaceObjToOopHandleTable();
_pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
void HeapShared::init_dumping() {
_scratch_objects_table = new (mtClass)MetaspaceObjToOopHandleTable();
_pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
_pending_roots->append(nullptr); // root index 0 represents a null oop
}
void HeapShared::init_scratch_objects_for_basic_type_mirrors(TRAPS) {
@ -883,6 +969,11 @@ void HeapShared::write_heap(AOTMappedHeapInfo* mapped_heap_info, AOTStreamedHeap
ArchiveBuilder::OtherROAllocMark mark;
write_subgraph_info_table();
delete _pending_roots;
_pending_roots = nullptr;
make_archived_object_cache_gc_safe();
}
void HeapShared::scan_java_mirror(oop orig_mirror) {
@ -1911,6 +2002,11 @@ void HeapShared::verify_subgraph_from(oop orig_obj) {
void HeapShared::verify_reachable_objects_from(oop obj) {
_num_total_verifications ++;
if (java_lang_Class::is_instance(obj)) {
Klass* k = java_lang_Class::as_Klass(obj);
if (RegeneratedClasses::has_been_regenerated(k)) {
k = RegeneratedClasses::get_regenerated_object(k);
obj = k->java_mirror();
}
obj = scratch_java_mirror(obj);
assert(obj != nullptr, "must be");
}
@ -2264,12 +2360,22 @@ void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
#endif
}
bool HeapShared::is_dumped_interned_string(oop o) {
if (is_writing_mapping_mode()) {
return AOTMappedHeapWriter::is_dumped_interned_string(o);
} else {
return AOTStreamedHeapWriter::is_dumped_interned_string(o);
bool HeapShared::is_interned_string(oop obj) {
if (!java_lang_String::is_instance(obj)) {
return false;
}
ResourceMark rm;
int len = 0;
jchar* name = java_lang_String::as_unicode_string_or_null(obj, len);
if (name == nullptr) {
fatal("Insufficient memory for dumping");
}
return StringTable::lookup(name, len) == obj;
}
bool HeapShared::is_dumped_interned_string(oop o) {
return is_interned_string(o) && has_been_archived(o);
}
// These tables should be used only within the CDS safepoint, so

View File

@ -40,7 +40,6 @@
#include "utilities/hashTable.hpp"
#if INCLUDE_CDS_JAVA_HEAP
class DumpedInternedStrings;
class FileMapInfo;
class KlassSubGraphInfo;
class MetaspaceObjToOopHandleTable;
@ -176,7 +175,7 @@ public:
static void initialize_streaming() NOT_CDS_JAVA_HEAP_RETURN;
static void enable_gc() NOT_CDS_JAVA_HEAP_RETURN;
static void materialize_thread_object() NOT_CDS_JAVA_HEAP_RETURN;
static void add_to_dumped_interned_strings(oop string) NOT_CDS_JAVA_HEAP_RETURN;
static void archive_interned_string(oop string);
static void finalize_initialization(FileMapInfo* static_mapinfo) NOT_CDS_JAVA_HEAP_RETURN;
private:
@ -195,13 +194,8 @@ private:
static void print_stats();
public:
static void debug_trace();
static unsigned oop_hash(oop const& p);
static unsigned oop_handle_hash(OopHandle const& oh);
static unsigned oop_handle_hash_raw(OopHandle const& oh);
static unsigned oop_address_hash(oop const& p);
static bool oop_handle_equals(const OopHandle& a, const OopHandle& b);
static unsigned string_oop_hash(oop const& string) {
return java_lang_String::hash_code(string);
}
class CopyKlassSubGraphInfoToArchive;
@ -217,27 +211,37 @@ public:
// One or more fields in this object are pointing to MetaspaceObj
bool _has_native_pointers;
// >= 0 if this oop has been append to the list of roots
int _root_index;
public:
CachedOopInfo(OopHandle orig_referrer, bool has_oop_pointers)
: _orig_referrer(orig_referrer),
_buffer_offset(0),
_has_oop_pointers(has_oop_pointers),
_has_native_pointers(false) {}
_has_native_pointers(false),
_root_index(-1) {}
oop orig_referrer() const;
void set_buffer_offset(size_t offset) { _buffer_offset = offset; }
size_t buffer_offset() const { return _buffer_offset; }
bool has_oop_pointers() const { return _has_oop_pointers; }
bool has_native_pointers() const { return _has_native_pointers; }
void set_has_native_pointers() { _has_native_pointers = true; }
int root_index() const { return _root_index; }
void set_root_index(int i) { _root_index = i; }
};
private:
static const int INITIAL_TABLE_SIZE = 15889; // prime number
static const int MAX_TABLE_SIZE = 1000000;
static bool _use_identity_hash_for_archived_object_cache;
static unsigned archived_object_cache_hash(OopHandle const& oh);
typedef ResizeableHashTable<OopHandle, CachedOopInfo,
AnyObj::C_HEAP,
mtClassShared,
HeapShared::oop_handle_hash_raw,
HeapShared::archived_object_cache_hash,
HeapShared::oop_handle_equals> ArchivedObjectCache;
static ArchivedObjectCache* _archived_object_cache;
@ -297,7 +301,7 @@ private:
typedef ResizeableHashTable<oop, bool,
AnyObj::C_HEAP,
mtClassShared,
HeapShared::oop_hash> SeenObjectsTable;
HeapShared::oop_address_hash> SeenObjectsTable;
static SeenObjectsTable *_seen_objects_table;
@ -394,6 +398,7 @@ private:
delete _archived_object_cache;
_archived_object_cache = nullptr;
}
static void make_archived_object_cache_gc_safe();
static ArchivedObjectCache* archived_object_cache() {
return _archived_object_cache;
}
@ -406,6 +411,7 @@ private:
KlassSubGraphInfo* subgraph_info,
oop orig_obj);
static bool is_interned_string(oop obj);
static bool is_dumped_interned_string(oop o);
// Scratch objects for archiving Klass::java_mirror()
@ -437,6 +443,11 @@ private:
// Dump-time only. Returns the index of the root, which can be used at run time to read
// the root using get_root(index, ...).
static int append_root(oop obj);
// AOT-compile time only.
// Returns -1 if obj is not in the heap root set.
static int get_root_index(oop obj) NOT_CDS_JAVA_HEAP_RETURN_(-1);
static GrowableArrayCHeap<oop, mtClassShared>* pending_roots() { return _pending_roots; }
// Dump-time and runtime
@ -445,9 +456,7 @@ private:
// Run-time only
static void clear_root(int index);
static void get_segment_indexes(int index, int& segment_index, int& internal_index);
static void setup_test_class(const char* test_class_name) PRODUCT_RETURN;
#endif // INCLUDE_CDS_JAVA_HEAP

View File

@ -946,7 +946,7 @@ void StringTable::init_shared_table() {
// so we are all good.
// - If there's a reference to it, we will report an error inside HeapShared.cpp and
// dumping will fail.
HeapShared::add_to_dumped_interned_strings(string);
HeapShared::archive_interned_string(string);
}
n++;
return true;

View File

@ -368,10 +368,10 @@ class methodHandle;
do_intrinsic(_inflateStringB, java_lang_StringLatin1, inflate_name, inflateB_signature, F_S) \
do_signature(inflateB_signature, "([BI[BII)V") \
do_intrinsic(_toBytesStringU, java_lang_StringUTF16, toBytes_name, toBytesU_signature, F_S) \
do_name( toBytes_name, "toBytes") \
do_name( toBytes_name, "toBytes0") \
do_signature(toBytesU_signature, "([CII)[B") \
do_intrinsic(_getCharsStringU, java_lang_StringUTF16, getCharsU_name, getCharsU_signature, F_S) \
do_name( getCharsU_name, "getChars") \
do_name( getCharsU_name, "getChars0") \
do_signature(getCharsU_signature, "([BII[CI)V") \
do_intrinsic(_getCharStringU, java_lang_StringUTF16, getChar_name, getCharStringU_signature, F_S) \
do_signature(getCharStringU_signature, "([BI)C") \

View File

@ -813,7 +813,10 @@ Node* BarrierSetC2::obj_allocate(PhaseMacroExpand* macro, Node* mem, Node* toobi
return old_tlab_top;
}
static const TypeFunc* clone_type() {
const TypeFunc* BarrierSetC2::_clone_type_Type = nullptr;
void BarrierSetC2::make_clone_type() {
assert(BarrierSetC2::_clone_type_Type == nullptr, "should be");
// Create input type (domain)
int argcnt = NOT_LP64(3) LP64_ONLY(4);
const Type** const domain_fields = TypeTuple::fields(argcnt);
@ -829,7 +832,12 @@ static const TypeFunc* clone_type() {
const Type** const range_fields = TypeTuple::fields(0);
const TypeTuple* const range = TypeTuple::make(TypeFunc::Parms + 0, range_fields);
return TypeFunc::make(domain, range);
BarrierSetC2::_clone_type_Type = TypeFunc::make(domain, range);
}
inline const TypeFunc* BarrierSetC2::clone_type() {
assert(BarrierSetC2::_clone_type_Type != nullptr, "should be initialized");
return BarrierSetC2::_clone_type_Type;
}
#define XTOP LP64_ONLY(COMMA phase->top())

View File

@ -270,6 +270,9 @@ public:
// various GC barrier sets inherit from the BarrierSetC2 class to sprinkle
// barriers into the accesses.
class BarrierSetC2: public CHeapObj<mtGC> {
private:
static const TypeFunc* _clone_type_Type;
protected:
virtual void resolve_address(C2Access& access) const;
virtual Node* store_at_resolved(C2Access& access, C2AccessValue& val) const;
@ -379,6 +382,9 @@ public:
static int arraycopy_payload_base_offset(bool is_array);
static void make_clone_type();
static const TypeFunc* clone_type();
#ifndef PRODUCT
virtual void dump_barrier_data(const MachNode* mach, outputStream* st) const {
st->print("%x", mach->barrier_data());

View File

@ -37,7 +37,6 @@
#include "utilities/copy.hpp"
size_t ThreadLocalAllocBuffer::_max_size = 0;
int ThreadLocalAllocBuffer::_reserve_for_allocation_prefetch = 0;
unsigned int ThreadLocalAllocBuffer::_target_refills = 0;
ThreadLocalAllocBuffer::ThreadLocalAllocBuffer() :
@ -225,30 +224,6 @@ void ThreadLocalAllocBuffer::startup_initialization() {
// abort during VM initialization.
_target_refills = MAX2(_target_refills, 2U);
#ifdef COMPILER2
// If the C2 compiler is present, extra space is needed at the end of
// TLABs, otherwise prefetching instructions generated by the C2
// compiler will fault (due to accessing memory outside of heap).
// The amount of space is the max of the number of lines to
// prefetch for array and for instance allocations. (Extra space must be
// reserved to accommodate both types of allocations.)
//
// Only SPARC-specific BIS instructions are known to fault. (Those
// instructions are generated if AllocatePrefetchStyle==3 and
// AllocatePrefetchInstr==1). To be on the safe side, however,
// extra space is reserved for all combinations of
// AllocatePrefetchStyle and AllocatePrefetchInstr.
//
// If the C2 compiler is not present, no space is reserved.
// +1 for rounding up to next cache line, +1 to be safe
if (CompilerConfig::is_c2_or_jvmci_compiler_enabled()) {
int lines = MAX2(AllocatePrefetchLines, AllocateInstancePrefetchLines) + 2;
_reserve_for_allocation_prefetch = (AllocatePrefetchDistance + AllocatePrefetchStepSize * lines) /
(int)HeapWordSize;
}
#endif
// During jvm startup, the main thread is initialized
// before the heap is initialized. So reinitialize it now.
guarantee(Thread::current()->is_Java_thread(), "tlab initialization thread not Java thread");
@ -454,8 +429,7 @@ void ThreadLocalAllocStats::publish() {
}
size_t ThreadLocalAllocBuffer::end_reserve() {
size_t reserve_size = CollectedHeap::lab_alignment_reserve();
return MAX2(reserve_size, (size_t)_reserve_for_allocation_prefetch);
return CollectedHeap::lab_alignment_reserve();
}
size_t ThreadLocalAllocBuffer::estimated_used_bytes() const {

View File

@ -57,7 +57,6 @@ private:
uint64_t _allocated_before_last_gc; // total bytes allocated up until the last gc
static size_t _max_size; // maximum size of any TLAB
static int _reserve_for_allocation_prefetch; // Reserve at the end of the TLAB
static unsigned _target_refills; // expected number of refills between GCs
unsigned _number_of_refills;

View File

@ -519,7 +519,33 @@ void ShenandoahBarrierSetC2::post_barrier(GraphKit* kit,
#undef __
const TypeFunc* ShenandoahBarrierSetC2::write_barrier_pre_Type() {
const TypeFunc* ShenandoahBarrierSetC2::_write_barrier_pre_Type = nullptr;
const TypeFunc* ShenandoahBarrierSetC2::_clone_barrier_Type = nullptr;
const TypeFunc* ShenandoahBarrierSetC2::_load_reference_barrier_Type = nullptr;
inline const TypeFunc* ShenandoahBarrierSetC2::write_barrier_pre_Type() {
assert(ShenandoahBarrierSetC2::_write_barrier_pre_Type != nullptr, "should be initialized");
return ShenandoahBarrierSetC2::_write_barrier_pre_Type;
}
inline const TypeFunc* ShenandoahBarrierSetC2::clone_barrier_Type() {
assert(ShenandoahBarrierSetC2::_clone_barrier_Type != nullptr, "should be initialized");
return ShenandoahBarrierSetC2::_clone_barrier_Type;
}
const TypeFunc* ShenandoahBarrierSetC2::load_reference_barrier_Type() {
assert(ShenandoahBarrierSetC2::_load_reference_barrier_Type != nullptr, "should be initialized");
return ShenandoahBarrierSetC2::_load_reference_barrier_Type;
}
void ShenandoahBarrierSetC2::init() {
ShenandoahBarrierSetC2::make_write_barrier_pre_Type();
ShenandoahBarrierSetC2::make_clone_barrier_Type();
ShenandoahBarrierSetC2::make_load_reference_barrier_Type();
}
void ShenandoahBarrierSetC2::make_write_barrier_pre_Type() {
assert(ShenandoahBarrierSetC2::_write_barrier_pre_Type == nullptr, "should be");
const Type **fields = TypeTuple::fields(1);
fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
@ -528,10 +554,11 @@ const TypeFunc* ShenandoahBarrierSetC2::write_barrier_pre_Type() {
fields = TypeTuple::fields(0);
const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
return TypeFunc::make(domain, range);
ShenandoahBarrierSetC2::_write_barrier_pre_Type = TypeFunc::make(domain, range);
}
const TypeFunc* ShenandoahBarrierSetC2::clone_barrier_Type() {
void ShenandoahBarrierSetC2::make_clone_barrier_Type() {
assert(ShenandoahBarrierSetC2::_clone_barrier_Type == nullptr, "should be");
const Type **fields = TypeTuple::fields(1);
fields[TypeFunc::Parms+0] = TypeOopPtr::NOTNULL; // src oop
const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
@ -540,10 +567,11 @@ const TypeFunc* ShenandoahBarrierSetC2::clone_barrier_Type() {
fields = TypeTuple::fields(0);
const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
return TypeFunc::make(domain, range);
ShenandoahBarrierSetC2::_clone_barrier_Type = TypeFunc::make(domain, range);
}
const TypeFunc* ShenandoahBarrierSetC2::load_reference_barrier_Type() {
void ShenandoahBarrierSetC2::make_load_reference_barrier_Type() {
assert(ShenandoahBarrierSetC2::_load_reference_barrier_Type == nullptr, "should be");
const Type **fields = TypeTuple::fields(2);
fields[TypeFunc::Parms+0] = TypeOopPtr::BOTTOM; // original field value
fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // original load address
@ -555,7 +583,7 @@ const TypeFunc* ShenandoahBarrierSetC2::load_reference_barrier_Type() {
fields[TypeFunc::Parms+0] = TypeOopPtr::BOTTOM;
const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
return TypeFunc::make(domain, range);
ShenandoahBarrierSetC2::_load_reference_barrier_Type = TypeFunc::make(domain, range);
}
Node* ShenandoahBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {

View File

@ -82,6 +82,13 @@ private:
static bool clone_needs_barrier(Node* src, PhaseGVN& gvn);
static const TypeFunc* _write_barrier_pre_Type;
static const TypeFunc* _clone_barrier_Type;
static const TypeFunc* _load_reference_barrier_Type;
static void make_write_barrier_pre_Type();
static void make_clone_barrier_Type();
static void make_load_reference_barrier_Type();
protected:
virtual Node* load_at_resolved(C2Access& access, const Type* val_type) const;
virtual Node* store_at_resolved(C2Access& access, C2AccessValue& val) const;
@ -106,6 +113,8 @@ public:
static const TypeFunc* write_barrier_pre_Type();
static const TypeFunc* clone_barrier_Type();
static const TypeFunc* load_reference_barrier_Type();
static void init();
virtual bool has_load_barrier_nodes() const { return true; }
// This is the entry-point for the backend to perform accesses through the Access API.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -568,6 +568,8 @@ bool ConstantPoolCache::can_archive_resolved_method(ConstantPool* src_cp, Resolv
return false;
}
int cp_index = method_entry->constant_pool_index();
if (!method_entry->is_resolved(Bytecodes::_invokevirtual)) {
if (method_entry->method() == nullptr) {
rejection_reason = "(method entry is not resolved)";
@ -577,9 +579,24 @@ bool ConstantPoolCache::can_archive_resolved_method(ConstantPool* src_cp, Resolv
rejection_reason = "(corresponding stub is generated on demand during method resolution)";
return false; // FIXME: corresponding stub is generated on demand during method resolution (see LinkResolver::resolve_static_call).
}
if (method_entry->is_resolved(Bytecodes::_invokehandle) && !CDSConfig::is_dumping_method_handles()) {
rejection_reason = "(not dumping method handles)";
return false;
if (method_entry->is_resolved(Bytecodes::_invokehandle)) {
if (!CDSConfig::is_dumping_method_handles()) {
rejection_reason = "(not dumping method handles)";
return false;
}
Symbol* sig = constant_pool()->uncached_signature_ref_at(cp_index);
Klass* k;
if (!AOTConstantPoolResolver::check_methodtype_signature(constant_pool(), sig, &k, true)) {
// invokehandles that were resolved in the training run should have been filtered in
// AOTConstantPoolResolver::maybe_resolve_fmi_ref so we shouldn't come to here.
//
// If we come here it's because the AOT assembly phase has executed an invokehandle
// that uses an excluded type like jdk.jfr.Event. This should not happen because the
// AOT assembly phase should execute only a very limited set of Java code.
ResourceMark rm;
fatal("AOT assembly phase must not resolve any invokehandles whose signatures include an excluded type");
}
}
if (method_entry->method()->is_method_handle_intrinsic() && !CDSConfig::is_dumping_method_handles()) {
rejection_reason = "(not dumping intrinsic method handles)";
@ -587,7 +604,6 @@ bool ConstantPoolCache::can_archive_resolved_method(ConstantPool* src_cp, Resolv
}
}
int cp_index = method_entry->constant_pool_index();
assert(src_cp->tag_at(cp_index).is_method() || src_cp->tag_at(cp_index).is_interface_method(), "sanity");
if (!AOTConstantPoolResolver::is_resolution_deterministic(src_cp, cp_index)) {

View File

@ -843,6 +843,22 @@ void LibraryCallKit::set_result(RegionNode* region, PhiNode* value) {
assert(value->type()->basic_type() == result()->bottom_type()->basic_type(), "sanity");
}
RegionNode* LibraryCallKit::create_bailout() {
RegionNode* bailout = new RegionNode(1);
record_for_igvn(bailout);
return bailout;
}
bool LibraryCallKit::check_bailout(RegionNode* bailout) {
if (bailout->req() > 1) {
bailout = _gvn.transform(bailout)->as_Region();
Node* frame = _gvn.transform(new ParmNode(C->start(), TypeFunc::FramePtr));
Node* halt = _gvn.transform(new HaltNode(bailout, frame, "unexpected guard failure in intrinsic"));
C->root()->add_req(halt);
}
return stopped();
}
//------------------------------generate_guard---------------------------
// Helper function for generating guarded fast-slow graph structures.
// The given 'test', if true, guards a slow path. If the test fails
@ -951,36 +967,19 @@ void LibraryCallKit::generate_string_range_check(Node* array,
Node* offset,
Node* count,
bool char_count,
bool halt_on_oob) {
RegionNode* region) {
if (stopped()) {
return; // already stopped
}
RegionNode* bailout = new RegionNode(1);
record_for_igvn(bailout);
if (char_count) {
// Convert char count to byte count
count = _gvn.transform(new LShiftINode(count, intcon(1)));
}
// Offset and count must not be negative
generate_negative_guard(offset, bailout, nullptr, halt_on_oob);
generate_negative_guard(count, bailout, nullptr, halt_on_oob);
generate_negative_guard(offset, region, nullptr, true);
generate_negative_guard(count, region, nullptr, true);
// Offset + count must not exceed length of array
generate_limit_guard(offset, count, load_array_length(array), bailout, halt_on_oob);
if (bailout->req() > 1) {
if (halt_on_oob) {
bailout = _gvn.transform(bailout)->as_Region();
Node* frame = _gvn.transform(new ParmNode(C->start(), TypeFunc::FramePtr));
Node* halt = _gvn.transform(new HaltNode(bailout, frame, "unexpected guard failure in intrinsic"));
C->root()->add_req(halt);
} else {
PreserveJVMState pjvms(this);
set_control(_gvn.transform(bailout));
uncommon_trap(Deoptimization::Reason_intrinsic,
Deoptimization::Action_maybe_recompile);
}
}
generate_limit_guard(offset, count, load_array_length(array), region, true);
}
Node* LibraryCallKit::current_thread_helper(Node*& tls_output, ByteSize handle_offset,
@ -1139,10 +1138,6 @@ bool LibraryCallKit::inline_array_equals(StrIntrinsicNode::ArgEnc ae) {
//------------------------------inline_countPositives------------------------------
// int java.lang.StringCoding#countPositives0(byte[] ba, int off, int len)
bool LibraryCallKit::inline_countPositives() {
if (too_many_traps(Deoptimization::Reason_intrinsic)) {
return false;
}
assert(callee()->signature()->size() == 3, "countPositives has 3 parameters");
// no receiver since it is static method
Node* ba = argument(0);
@ -1150,8 +1145,9 @@ bool LibraryCallKit::inline_countPositives() {
Node* len = argument(2);
ba = must_be_not_null(ba, true);
generate_string_range_check(ba, offset, len, false, true);
if (stopped()) {
RegionNode* bailout = create_bailout();
generate_string_range_check(ba, offset, len, false, bailout);
if (check_bailout(bailout)) {
return true;
}
@ -1283,9 +1279,6 @@ bool LibraryCallKit::inline_string_indexOf(StrIntrinsicNode::ArgEnc ae) {
//-----------------------------inline_string_indexOfI-----------------------
bool LibraryCallKit::inline_string_indexOfI(StrIntrinsicNode::ArgEnc ae) {
if (too_many_traps(Deoptimization::Reason_intrinsic)) {
return false;
}
if (!Matcher::match_rule_supported(Op_StrIndexOf)) {
return false;
}
@ -1307,9 +1300,10 @@ bool LibraryCallKit::inline_string_indexOfI(StrIntrinsicNode::ArgEnc ae) {
Node* tgt_start = array_element_address(tgt, intcon(0), T_BYTE);
// Range checks
generate_string_range_check(src, src_offset, src_count, ae != StrIntrinsicNode::LL, true);
generate_string_range_check(tgt, intcon(0), tgt_count, ae == StrIntrinsicNode::UU, true);
if (stopped()) {
RegionNode* bailout = create_bailout();
generate_string_range_check(src, src_offset, src_count, ae != StrIntrinsicNode::LL, bailout);
generate_string_range_check(tgt, intcon(0), tgt_count, ae == StrIntrinsicNode::UU, bailout);
if (check_bailout(bailout)) {
return true;
}
@ -1404,7 +1398,11 @@ bool LibraryCallKit::inline_string_indexOfChar(StrIntrinsicNode::ArgEnc ae) {
Node* src_count = _gvn.transform(new SubINode(max, from_index));
// Range checks
generate_string_range_check(src, src_offset, src_count, ae == StrIntrinsicNode::U, true);
RegionNode* bailout = create_bailout();
generate_string_range_check(src, src_offset, src_count, ae == StrIntrinsicNode::U, bailout);
if (check_bailout(bailout)) {
return true;
}
// Check for int_ch >= 0
Node* int_ch_cmp = _gvn.transform(new CmpINode(int_ch, intcon(0)));
@ -1454,9 +1452,6 @@ bool LibraryCallKit::inline_string_indexOfChar(StrIntrinsicNode::ArgEnc ae) {
// void StringLatin1.inflate0(byte[] src, int srcOff, char[] dst, int dstOff, int len)
// void StringLatin1.inflate0(byte[] src, int srcOff, byte[] dst, int dstOff, int len)
bool LibraryCallKit::inline_string_copy(bool compress) {
if (too_many_traps(Deoptimization::Reason_intrinsic)) {
return false;
}
int nargs = 5; // 2 oops, 3 ints
assert(callee()->signature()->size() == nargs, "string copy has 5 arguments");
@ -1495,9 +1490,10 @@ bool LibraryCallKit::inline_string_copy(bool compress) {
}
// Range checks
generate_string_range_check(src, src_offset, length, convert_src, true);
generate_string_range_check(dst, dst_offset, length, convert_dst, true);
if (stopped()) {
RegionNode* bailout = create_bailout();
generate_string_range_check(src, src_offset, length, convert_src, bailout);
generate_string_range_check(dst, dst_offset, length, convert_dst, bailout);
if (check_bailout(bailout)) {
return true;
}
@ -1545,12 +1541,10 @@ bool LibraryCallKit::inline_string_copy(bool compress) {
#endif //_LP64
//------------------------inline_string_toBytesU--------------------------
// public static byte[] StringUTF16.toBytes(char[] value, int off, int len)
// public static byte[] StringUTF16.toBytes0(char[] value, int off, int len)
bool LibraryCallKit::inline_string_toBytesU() {
if (too_many_traps(Deoptimization::Reason_intrinsic)) {
return false;
}
// Get the arguments.
assert(callee()->signature()->size() == 3, "character array encoder requires 3 arguments");
Node* value = argument(0);
Node* offset = argument(1);
Node* length = argument(2);
@ -1558,30 +1552,18 @@ bool LibraryCallKit::inline_string_toBytesU() {
Node* newcopy = nullptr;
// Set the original stack and the reexecute bit for the interpreter to reexecute
// the bytecode that invokes StringUTF16.toBytes() if deoptimization happens.
// the bytecode that invokes StringUTF16.toBytes0() if deoptimization happens.
{ PreserveReexecuteState preexecs(this);
jvms()->set_should_reexecute(true);
// Check if a null path was taken unconditionally.
value = null_check(value);
RegionNode* bailout = new RegionNode(1);
record_for_igvn(bailout);
// Range checks
generate_negative_guard(offset, bailout);
generate_negative_guard(length, bailout);
generate_limit_guard(offset, length, load_array_length(value), bailout);
value = must_be_not_null(value, true);
RegionNode* bailout = create_bailout();
generate_negative_guard(offset, bailout, nullptr, true);
generate_negative_guard(length, bailout, nullptr, true);
generate_limit_guard(offset, length, load_array_length(value), bailout, true);
// Make sure that resulting byte[] length does not overflow Integer.MAX_VALUE
generate_limit_guard(length, intcon(0), intcon(max_jint/2), bailout);
if (bailout->req() > 1) {
PreserveJVMState pjvms(this);
set_control(_gvn.transform(bailout));
uncommon_trap(Deoptimization::Reason_intrinsic,
Deoptimization::Action_maybe_recompile);
}
if (stopped()) {
generate_limit_guard(length, intcon(0), intcon(max_jint/2), bailout, true);
if (check_bailout(bailout)) {
return true;
}
@ -1640,12 +1622,9 @@ bool LibraryCallKit::inline_string_toBytesU() {
}
//------------------------inline_string_getCharsU--------------------------
// public void StringUTF16.getChars(byte[] src, int srcBegin, int srcEnd, char dst[], int dstBegin)
// public void StringUTF16.getChars0(byte[] src, int srcBegin, int srcEnd, char dst[], int dstBegin)
bool LibraryCallKit::inline_string_getCharsU() {
if (too_many_traps(Deoptimization::Reason_intrinsic)) {
return false;
}
assert(callee()->signature()->size() == 5, "StringUTF16.getChars0() has 5 arguments");
// Get the arguments.
Node* src = argument(0);
Node* src_begin = argument(1);
@ -1658,8 +1637,8 @@ bool LibraryCallKit::inline_string_getCharsU() {
AllocateArrayNode* alloc = tightly_coupled_allocation(dst);
// Check if a null path was taken unconditionally.
src = null_check(src);
dst = null_check(dst);
src = must_be_not_null(src, true);
dst = must_be_not_null(dst, true);
if (stopped()) {
return true;
}
@ -1669,51 +1648,50 @@ bool LibraryCallKit::inline_string_getCharsU() {
src_begin = _gvn.transform(new LShiftINode(src_begin, intcon(1)));
// Range checks
generate_string_range_check(src, src_begin, length, true);
generate_string_range_check(dst, dst_begin, length, false);
if (stopped()) {
RegionNode* bailout = create_bailout();
generate_string_range_check(src, src_begin, length, true, bailout);
generate_string_range_check(dst, dst_begin, length, false, bailout);
if (check_bailout(bailout)) {
return true;
}
if (!stopped()) {
// Calculate starting addresses.
Node* src_start = array_element_address(src, src_begin, T_BYTE);
Node* dst_start = array_element_address(dst, dst_begin, T_CHAR);
// Calculate starting addresses.
Node* src_start = array_element_address(src, src_begin, T_BYTE);
Node* dst_start = array_element_address(dst, dst_begin, T_CHAR);
// Check if array addresses are aligned to HeapWordSize
const TypeInt* tsrc = gvn().type(src_begin)->is_int();
const TypeInt* tdst = gvn().type(dst_begin)->is_int();
bool aligned = tsrc->is_con() && ((arrayOopDesc::base_offset_in_bytes(T_BYTE) + tsrc->get_con() * type2aelembytes(T_BYTE)) % HeapWordSize == 0) &&
tdst->is_con() && ((arrayOopDesc::base_offset_in_bytes(T_CHAR) + tdst->get_con() * type2aelembytes(T_CHAR)) % HeapWordSize == 0);
// Check if array addresses are aligned to HeapWordSize
const TypeInt* tsrc = gvn().type(src_begin)->is_int();
const TypeInt* tdst = gvn().type(dst_begin)->is_int();
bool aligned = tsrc->is_con() && ((arrayOopDesc::base_offset_in_bytes(T_BYTE) + tsrc->get_con() * type2aelembytes(T_BYTE)) % HeapWordSize == 0) &&
tdst->is_con() && ((arrayOopDesc::base_offset_in_bytes(T_CHAR) + tdst->get_con() * type2aelembytes(T_CHAR)) % HeapWordSize == 0);
// Figure out which arraycopy runtime method to call (disjoint, uninitialized).
const char* copyfunc_name = "arraycopy";
address copyfunc_addr = StubRoutines::select_arraycopy_function(T_CHAR, aligned, true, copyfunc_name, true);
Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
OptoRuntime::fast_arraycopy_Type(),
copyfunc_addr, copyfunc_name, TypeRawPtr::BOTTOM,
src_start, dst_start, ConvI2X(length) XTOP);
// Do not let reads from the cloned object float above the arraycopy.
if (alloc != nullptr) {
if (alloc->maybe_set_complete(&_gvn)) {
// "You break it, you buy it."
InitializeNode* init = alloc->initialization();
assert(init->is_complete(), "we just did this");
init->set_complete_with_arraycopy();
assert(dst->is_CheckCastPP(), "sanity");
assert(dst->in(0)->in(0) == init, "dest pinned");
}
// Do not let stores that initialize this object be reordered with
// a subsequent store that would make this object accessible by
// other threads.
// Record what AllocateNode this StoreStore protects so that
// escape analysis can go from the MemBarStoreStoreNode to the
// AllocateNode and eliminate the MemBarStoreStoreNode if possible
// based on the escape status of the AllocateNode.
insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
} else {
insert_mem_bar(Op_MemBarCPUOrder);
// Figure out which arraycopy runtime method to call (disjoint, uninitialized).
const char* copyfunc_name = "arraycopy";
address copyfunc_addr = StubRoutines::select_arraycopy_function(T_CHAR, aligned, true, copyfunc_name, true);
Node* call = make_runtime_call(RC_LEAF|RC_NO_FP,
OptoRuntime::fast_arraycopy_Type(),
copyfunc_addr, copyfunc_name, TypeRawPtr::BOTTOM,
src_start, dst_start, ConvI2X(length) XTOP);
// Do not let reads from the cloned object float above the arraycopy.
if (alloc != nullptr) {
if (alloc->maybe_set_complete(&_gvn)) {
// "You break it, you buy it."
InitializeNode* init = alloc->initialization();
assert(init->is_complete(), "we just did this");
init->set_complete_with_arraycopy();
assert(dst->is_CheckCastPP(), "sanity");
assert(dst->in(0)->in(0) == init, "dest pinned");
}
// Do not let stores that initialize this object be reordered with
// a subsequent store that would make this object accessible by
// other threads.
// Record what AllocateNode this StoreStore protects so that
// escape analysis can go from the MemBarStoreStoreNode to the
// AllocateNode and eliminate the MemBarStoreStoreNode if possible
// based on the escape status of the AllocateNode.
insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
} else {
insert_mem_bar(Op_MemBarCPUOrder);
}
C->set_has_split_ifs(true); // Has chance for split-if optimization
@ -1725,9 +1703,16 @@ bool LibraryCallKit::inline_string_getCharsU() {
// static void StringUTF16.putChar(byte[] val, int index, int c)
// static char StringUTF16.getChar(byte[] val, int index)
bool LibraryCallKit::inline_string_char_access(bool is_store) {
Node* ch;
if (is_store) {
assert(callee()->signature()->size() == 3, "StringUTF16.putChar() has 3 arguments");
ch = argument(2);
} else {
assert(callee()->signature()->size() == 2, "StringUTF16.getChar() has 2 arguments");
ch = nullptr;
}
Node* value = argument(0);
Node* index = argument(1);
Node* ch = is_store ? argument(2) : nullptr;
// This intrinsic accesses byte[] array as char[] array. Computing the offsets
// correctly requires matched array shapes.
@ -6185,9 +6170,10 @@ bool LibraryCallKit::inline_encodeISOArray(bool ascii) {
}
// Check source & target bounds
generate_string_range_check(src, src_offset, length, src_elem == T_BYTE, true);
generate_string_range_check(dst, dst_offset, length, false, true);
if (stopped()) {
RegionNode* bailout = create_bailout();
generate_string_range_check(src, src_offset, length, src_elem == T_BYTE, bailout);
generate_string_range_check(dst, dst_offset, length, false, bailout);
if (check_bailout(bailout)) {
return true;
}

View File

@ -130,6 +130,8 @@ class LibraryCallKit : public GraphKit {
virtual int reexecute_sp() { return _reexecute_sp; }
// Helper functions to inline natives
RegionNode* create_bailout();
bool check_bailout(RegionNode* bailout);
Node* generate_guard(Node* test, RegionNode* region, float true_prob);
Node* generate_slow_guard(Node* test, RegionNode* region);
Node* generate_fair_guard(Node* test, RegionNode* region);
@ -143,7 +145,7 @@ class LibraryCallKit : public GraphKit {
bool with_opaque = false);
void generate_string_range_check(Node* array, Node* offset,
Node* length, bool char_count,
bool halt_on_oob = false);
RegionNode* region);
Node* current_thread_helper(Node* &tls_output, ByteSize handle_offset,
bool is_immutable);
Node* generate_current_thread(Node* &tls_output);

View File

@ -1917,8 +1917,7 @@ Node* PhaseMacroExpand::prefetch_allocation(Node* i_o, Node*& needgc_false,
transform_later(cache_adr);
cache_adr = new CastP2XNode(needgc_false, cache_adr);
transform_later(cache_adr);
// Address is aligned to execute prefetch to the beginning of cache line size
// (it is important when BIS instruction is used on SPARC as prefetch).
// Address is aligned to execute prefetch to the beginning of cache line size.
Node* mask = _igvn.MakeConX(~(intptr_t)(step_size-1));
cache_adr = new AndXNode(cache_adr, mask);
transform_later(cache_adr);

View File

@ -49,6 +49,9 @@
#include "utilities/ostream.hpp"
#include "utilities/powerOfTwo.hpp"
#include "utilities/stringUtils.hpp"
#if INCLUDE_SHENANDOAHGC
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#endif // INCLUDE_SHENANDOAHGC
// Portions of code courtesy of Clifford Click
@ -732,6 +735,11 @@ void Type::Initialize_shared(Compile* current) {
mreg2type[Op_VecY] = TypeVect::VECTY;
mreg2type[Op_VecZ] = TypeVect::VECTZ;
#if INCLUDE_SHENANDOAHGC
ShenandoahBarrierSetC2::init();
#endif //INCLUDE_SHENANDOAHGC
BarrierSetC2::make_clone_type();
LockNode::initialize_lock_Type();
ArrayCopyNode::initialize_arraycopy_Type();
OptoRuntime::initialize_types();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2024 Red Hat, Inc.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@ -3129,16 +3129,21 @@ JNI_END
JNI_ENTRY(jobject, jni_GetModule(JNIEnv* env, jclass clazz))
return Modules::get_module(clazz, THREAD);
HOTSPOT_JNI_GETMODULE_ENTRY(env, clazz);
jobject ret = Modules::get_module(clazz, THREAD);
HOTSPOT_JNI_GETMODULE_RETURN(ret);
return ret;
JNI_END
JNI_ENTRY(jboolean, jni_IsVirtualThread(JNIEnv* env, jobject obj))
HOTSPOT_JNI_ISVIRTUALTHREAD_ENTRY(env, obj);
jboolean ret = JNI_FALSE;
oop thread_obj = JNIHandles::resolve_external_guard(obj);
if (thread_obj != nullptr && thread_obj->is_a(vmClasses::BaseVirtualThread_klass())) {
return JNI_TRUE;
} else {
return JNI_FALSE;
ret = JNI_TRUE;
}
HOTSPOT_JNI_ISVIRTUALTHREAD_RETURN(ret);
return ret;
JNI_END

View File

@ -274,6 +274,17 @@ JVMFlag::Error AVX3ThresholdConstraintFunc(int value, bool verbose) {
return JVMFlag::SUCCESS;
}
JVMFlag::Error CopyAVX3ThresholdConstraintFunc(int value, bool verbose) {
if (value != 0 && !is_power_of_2(value)) {
JVMFlag::printError(verbose,
"CopyAVX3Threshold ( %d ) must be 0 or "
"a power of two value between 0 and MAX_INT\n", value);
return JVMFlag::VIOLATES_CONSTRAINT;
}
return JVMFlag::SUCCESS;
}
JVMFlag::Error ArraycopySrcPrefetchDistanceConstraintFunc(uintx value, bool verbose) {
if (value >= 4032) {
JVMFlag::printError(verbose,

View File

@ -46,6 +46,7 @@
f(uintx, ArraycopyDstPrefetchDistanceConstraintFunc) \
f(uintx, ArraycopySrcPrefetchDistanceConstraintFunc) \
f(int, AVX3ThresholdConstraintFunc) \
f(int, CopyAVX3ThresholdConstraintFunc) \
f(uint, TypeProfileLevelConstraintFunc) \
f(uint, VerifyIterativeGVNConstraintFunc) \
f(intx, InitArrayShortSizeConstraintFunc) \

View File

@ -454,7 +454,7 @@ class os: AllStatic {
static size_t align_down_vm_page_size(size_t size) { return align_down(size, os::vm_page_size()); }
// The set of page sizes which the VM is allowed to use (may be a subset of
// the page sizes actually available on the platform).
// the page sizes actually available on the platform).
static const PageSizes& page_sizes() { return _page_sizes; }
// Returns the page size to use for a region of memory.
@ -893,6 +893,9 @@ class os: AllStatic {
static void print_date_and_time(outputStream* st, char* buf, size_t buflen);
static void print_elapsed_time(outputStream* st, double time);
// Prints the number of open file descriptors for the current process
static void print_open_file_descriptors(outputStream* st);
static void print_user_info(outputStream* st);
static void print_active_locale(outputStream* st);

View File

@ -335,7 +335,6 @@
nonstatic_field(ThreadLocalAllocBuffer, _pf_top, HeapWord*) \
nonstatic_field(ThreadLocalAllocBuffer, _desired_size, size_t) \
nonstatic_field(ThreadLocalAllocBuffer, _refill_waste_limit, size_t) \
static_field(ThreadLocalAllocBuffer, _reserve_for_allocation_prefetch, int) \
static_field(ThreadLocalAllocBuffer, _target_refills, unsigned) \
nonstatic_field(ThreadLocalAllocBuffer, _number_of_refills, unsigned) \
nonstatic_field(ThreadLocalAllocBuffer, _refill_waste, unsigned) \

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -683,6 +683,10 @@
#define HOTSPOT_JNI_GETMETHODID_ENTRY_ENABLED() 0
#define HOTSPOT_JNI_GETMETHODID_RETURN(arg0)
#define HOTSPOT_JNI_GETMETHODID_RETURN_ENABLED() 0
#define HOTSPOT_JNI_GETMODULE_ENTRY(arg0, arg1)
#define HOTSPOT_JNI_GETMODULE_ENTRY_ENABLED() 0
#define HOTSPOT_JNI_GETMODULE_RETURN(arg0)
#define HOTSPOT_JNI_GETMODULE_RETURN_ENABLED() 0
#define HOTSPOT_JNI_GETOBJECTARRAYELEMENT_ENTRY(arg0, arg1, arg2)
#define HOTSPOT_JNI_GETOBJECTARRAYELEMENT_ENTRY_ENABLED() 0
#define HOTSPOT_JNI_GETOBJECTARRAYELEMENT_RETURN(arg0)
@ -811,6 +815,10 @@
#define HOTSPOT_JNI_ISSAMEOBJECT_ENTRY_ENABLED() 0
#define HOTSPOT_JNI_ISSAMEOBJECT_RETURN(arg0)
#define HOTSPOT_JNI_ISSAMEOBJECT_RETURN_ENABLED() 0
#define HOTSPOT_JNI_ISVIRTUALTHREAD_ENTRY(arg0, arg1)
#define HOTSPOT_JNI_ISVIRTUALTHREAD_ENTRY_ENABLED() 0
#define HOTSPOT_JNI_ISVIRTUALTHREAD_RETURN(arg0)
#define HOTSPOT_JNI_ISVIRTUALTHREAD_RETURN_ENABLED() 0
#define HOTSPOT_JNI_MONITORENTER_ENTRY(arg0, arg1)
#define HOTSPOT_JNI_MONITORENTER_ENTRY_ENABLED() 0
#define HOTSPOT_JNI_MONITORENTER_RETURN(arg0)
@ -1080,12 +1088,6 @@
#define HOTSPOT_JNI_UNREGISTERNATIVES_RETURN(arg0)
#define HOTSPOT_JNI_UNREGISTERNATIVES_RETURN_ENABLED() 0
/* Modules */
#define HOTSPOT_JNI_GETMODULE_ENTRY(arg0, arg1)
#define HOTSPOT_JNI_GETMODULE_ENTRY_ENABLED() 0
#define HOTSPOT_JNI_GETMODULE_RETURN(arg0)
#define HOTSPOT_JNI_GETMODULE_RETURN_ENABLED()
#else /* !defined(DTRACE_ENABLED) */
#error This file should only be included when dtrace is not enabled
#endif /* !defined(DTRACE_ENABLED) */

View File

@ -429,7 +429,12 @@ public:
void free(void* ptr);
};
template <typename T>
RBTreeOrdering rbtree_primitive_cmp(T a, T b) { // handy function
if (a < b) return RBTreeOrdering::LT;
if (a > b) return RBTreeOrdering::GT;
return RBTreeOrdering::EQ;
}
template <typename K, typename V, typename COMPARATOR, MemTag mem_tag, AllocFailType strategy = AllocFailStrategy::EXIT_OOM>
using RBTreeCHeap = RBTree<K, V, COMPARATOR, RBTreeCHeapAllocator<mem_tag, strategy>>;

View File

@ -1329,6 +1329,13 @@ void VMError::report(outputStream* st, bool _verbose) {
STEP_IF("printing OS information", _verbose)
os::print_os_info(st);
st->cr();
#ifdef __APPLE__
// Avoid large stack allocation on Mac for FD count during signal-handling.
os::Bsd::print_open_file_descriptors(st, buf, sizeof(buf));
st->cr();
#else
os::print_open_file_descriptors(st);
#endif
STEP_IF("printing CPU info", _verbose)
os::print_cpu_info(st, buf, sizeof(buf));
@ -1550,6 +1557,8 @@ void VMError::print_vm_info(outputStream* st) {
os::print_os_info(st);
st->cr();
os::print_open_file_descriptors(st);
st->cr();
// STEP("printing CPU info")

View File

@ -67,30 +67,61 @@ final class StringUTF16 {
// Check the size of a UTF16-coded string
// Throw an exception if out of range
static int newBytesLength(int len) {
if (len < 0) {
throw new NegativeArraySizeException();
}
if (len >= MAX_LENGTH) {
throw new OutOfMemoryError("UTF16 String size is " + len +
", should be less than " + MAX_LENGTH);
}
private static int newBytesLength(int len) {
checkBytesLength(len);
return len << 1;
}
/**
* Checks if the provided length is a valid UTF-16 string byte array length.
*
* @param length a UTF-16 string byte array length
*
* @throws NegativeArraySizeException if {@code length < 0}
* @throws OutOfMemoryError if {@code length > (Integer.MAX_VALUE / 2)}
*/
private static void checkBytesLength(int length) {
if (length < 0) {
throw new NegativeArraySizeException();
}
if (length >= MAX_LENGTH) {
throw new OutOfMemoryError("UTF16 String size is " + length +
", should be less than " + MAX_LENGTH);
}
}
/**
* Writes the given code point to the specified position of the provided
* UTF-16 string byte array.
* <p>
* <b>WARNING: This method does not perform any input validations.</b>
*
* @param val a UTF-16 string byte array
* @param index the index of the character to write the code point to
* @param c a code point
*/
// vmIntrinsics::_putCharStringU
@IntrinsicCandidate
// intrinsic performs no bounds checks
static void putChar(byte[] val, int index, int c) {
assert index >= 0 && index < length(val) : "Trusted caller missed bounds check";
assert val != null && index >= 0 && index < length(val) : "Trusted caller violated input constraints";
index <<= 1;
val[index++] = (byte)(c >> HI_BYTE_SHIFT);
val[index] = (byte)(c >> LO_BYTE_SHIFT);
}
/**
* {@return the code point at the the specified position of the provided
* UTF-16 string byte array}
* <p>
* <b>WARNING: This method does not perform any input validations.</b>
*
* @param val a UTF-16 string byte array
* @param index the index of the character to get the code point from
*/
// vmIntrinsics::_getCharStringU
@IntrinsicCandidate
// intrinsic performs no bounds checks
static char getChar(byte[] val, int index) {
assert index >= 0 && index < length(val) : "Trusted caller missed bounds check";
assert val != null && index >= 0 && index < length(val) : "Trusted caller violated input constraints";
index <<= 1;
return (char)(((val[index++] & 0xff) << HI_BYTE_SHIFT) |
((val[index] & 0xff) << LO_BYTE_SHIFT));
@ -173,14 +204,27 @@ final class StringUTF16 {
}
/**
* {@return an encoded byte[] for the UTF16 characters in char[]}
* No checking is done on the characters, some may or may not be latin1.
* @param value a char array
* @param off an offset
* @param len a length
* {@return a UTF-16 string byte array produced by encoding the characters
* in the provided character array sub-range}
*
* @param value a character array to encode
* @param off the index of the character to start encoding from
* @param len the number of characters to encode
*
* @throws NegativeArraySizeException if {@code len < 0}
* @throws NullPointerException if {@code value} is null
* @throws OutOfMemoryError if {@code len > (Integer.MAX_VALUE / 2)}
* @throws StringIndexOutOfBoundsException if the sub-range is out of bounds
*/
@IntrinsicCandidate
static byte[] toBytes(char[] value, int off, int len) {
checkBytesLength(len);
String.checkBoundsOffCount(off, len, value.length); // Implicit null check on `value`
return toBytes0(value, off, len);
}
// vmIntrinsics::_toBytesStringU
@IntrinsicCandidate
private static byte[] toBytes0(char[] value, int off, int len) {
byte[] val = newBytesFor(len);
for (int i = 0; i < len; i++) {
putChar(val, i, value[off]);
@ -495,12 +539,28 @@ final class StringUTF16 {
return result;
}
@IntrinsicCandidate
/**
* Copies the specified sub-range of characters from a UTF-16 string byte
* array to the specified character array sub-range.
*
* @param value the source UTF-16 string byte array to copy from
* @param srcBegin the index (inclusive) of the first character in the source sub-range
* @param srcEnd the index (exclusive) of the last character in the source sub-range
* @param dst the target character array to copy to
* @param dstBegin the index (inclusive) of the first character in the target sub-range
*
* @throws NullPointerException if {@code value} or {@code dst} is null
* @throws StringIndexOutOfBoundsException if the sub-ranges are out of bounds
*/
static void getChars(byte[] value, int srcBegin, int srcEnd, char[] dst, int dstBegin) {
// We need a range check here because 'getChar' has no checks
if (srcBegin < srcEnd) {
String.checkBoundsOffCount(srcBegin, srcEnd - srcBegin, length(value));
}
checkBoundsBeginEnd(srcBegin, srcEnd, value); // Implicit null check on `value` via `checkBoundsBeginEnd()`
String.checkBoundsOffCount(dstBegin, srcEnd - srcBegin, dst.length); // Implicit null check on `dst`
getChars0(value, srcBegin, srcEnd, dst, dstBegin);
}
// vmIntrinsics::_getCharsStringU
@IntrinsicCandidate
private static void getChars0(byte[] value, int srcBegin, int srcEnd, char[] dst, int dstBegin) {
for (int i = srcBegin; i < srcEnd; i++) {
dst[dstBegin++] = getChar(value, i);
}
@ -721,7 +781,7 @@ final class StringUTF16 {
return -StringLatin1.compareToCI_UTF16(other, value);
}
public static int compareToFC_Latin1(byte[] value, byte[] other) {
static int compareToFC_Latin1(byte[] value, byte[] other) {
return -StringLatin1.compareToFC_UTF16(other, value);
}
@ -769,7 +829,7 @@ final class StringUTF16 {
return 0;
}
public static int compareToFC(byte[] value, byte[] other) {
static int compareToFC(byte[] value, byte[] other) {
int tlast = length(value);
int olast = length(other);
int lim = Math.min(tlast, olast);
@ -1970,13 +2030,13 @@ final class StringUTF16 {
}
}
static final int MAX_LENGTH = Integer.MAX_VALUE >> 1;
private static final int MAX_LENGTH = Integer.MAX_VALUE >> 1;
static void checkIndex(int off, byte[] val) {
private static void checkIndex(int off, byte[] val) {
String.checkIndex(off, length(val));
}
static void checkOffset(int off, byte[] val) {
private static void checkOffset(int off, byte[] val) {
String.checkOffset(off, length(val));
}

View File

@ -70,6 +70,7 @@ JNI_COCOA_ENTER(env);
dispatch_time_t timeout = dispatch_time(DISPATCH_TIME_NOW, (int64_t)(NSEC_PER_SEC)); // 1 second timeout
// Asynchronous call to openURL
dispatch_retain(semaphore);
[[NSWorkspace sharedWorkspace] openURLs:urls
withApplicationAtURL:appURI
configuration:configuration
@ -78,9 +79,11 @@ JNI_COCOA_ENTER(env);
status = (OSStatus) error.code;
}
dispatch_semaphore_signal(semaphore);
dispatch_release(semaphore);
}];
dispatch_semaphore_wait(semaphore, timeout);
dispatch_release(semaphore);
JNI_COCOA_EXIT(env);
return status;
@ -146,6 +149,7 @@ JNI_COCOA_ENTER(env);
dispatch_time_t timeout = dispatch_time(DISPATCH_TIME_NOW, (int64_t)(NSEC_PER_SEC)); // 1 second timeout
// Asynchronous call - openURLs:withApplicationAtURL
dispatch_retain(semaphore);
[[NSWorkspace sharedWorkspace] openURLs:urls
withApplicationAtURL:appURI
configuration:configuration
@ -154,9 +158,11 @@ JNI_COCOA_ENTER(env);
status = (OSStatus) error.code;
}
dispatch_semaphore_signal(semaphore);
dispatch_release(semaphore);
}];
dispatch_semaphore_wait(semaphore, timeout);
dispatch_release(semaphore);
[urlToOpen release];
JNI_COCOA_EXIT(env);

View File

@ -1070,13 +1070,13 @@ public abstract class AbstractButton extends JComponent implements ItemSelectabl
Action oldValue = getAction();
if (action==null || !action.equals(a)) {
action = a;
if (oldValue!=null) {
if (oldValue != null) {
removeActionListener(oldValue);
oldValue.removePropertyChangeListener(actionPropertyChangeListener);
actionPropertyChangeListener = null;
}
configurePropertiesFromAction(action);
if (action!=null) {
if (action != null) {
// Don't add if it is already a listener
if (!isListener(ActionListener.class, action)) {
addActionListener(action);

View File

@ -111,7 +111,7 @@ abstract class ActionPropertyChangeListener<T extends JComponent>
while ((r = (OwnedWeakReference)queue.poll()) != null) {
ActionPropertyChangeListener<?> oldPCL = r.getOwner();
Action oldAction = oldPCL.getAction();
if (oldAction!=null) {
if (oldAction != null) {
oldAction.removePropertyChangeListener(oldPCL);
}
}

View File

@ -213,7 +213,7 @@ class AncestorNotifier implements ComponentListener, PropertyChangeListener, Ser
public void propertyChange(PropertyChangeEvent evt) {
String s = evt.getPropertyName();
if (s!=null && (s.equals("parent") || s.equals("ancestor"))) {
if (s != null && (s.equals("parent") || s.equals("ancestor"))) {
JComponent component = (JComponent)evt.getSource();
if (evt.getNewValue() != null) {

View File

@ -145,7 +145,7 @@ class ArrayTable implements Cloneable {
*/
public Object get(Object key) {
Object value = null;
if (table !=null) {
if (table != null) {
if (isArray()) {
Object[] array = (Object[])table;
for (int i = 0; i<array.length-1; i+=2) {
@ -181,7 +181,7 @@ class ArrayTable implements Cloneable {
*/
public boolean containsKey(Object key) {
boolean contains = false;
if (table !=null) {
if (table != null) {
if (isArray()) {
Object[] array = (Object[])table;
for (int i = 0; i<array.length-1; i+=2) {
@ -206,7 +206,7 @@ class ArrayTable implements Cloneable {
if (key==null) {
return null;
}
if (table !=null) {
if (table != null) {
if (isArray()){
// Is key on the list?
int index = -1;

View File

@ -1103,13 +1103,13 @@ implements ItemSelectable,ListDataListener,ActionListener, Accessible {
Action oldValue = getAction();
if (action==null || !action.equals(a)) {
action = a;
if (oldValue!=null) {
if (oldValue != null) {
removeActionListener(oldValue);
oldValue.removePropertyChangeListener(actionPropertyChangeListener);
actionPropertyChangeListener = null;
}
configurePropertiesFromAction(action);
if (action!=null) {
if (action != null) {
// Don't add if it is already a listener
if (!isListener(ActionListener.class, action)) {
addActionListener(action);

View File

@ -2694,7 +2694,7 @@ public class JList<E> extends JComponent implements Scrollable, Accessible
}
Rectangle newFirstRect = getCellBounds(newFirst,newFirst);
Rectangle firstRect = getCellBounds(first,first);
if ((newFirstRect != null) && (firstRect!=null)) {
if ((newFirstRect != null) && (firstRect != null)) {
while ( (newFirstRect.y + visibleRect.height <
firstRect.y + firstRect.height) &&
(newFirstRect.y < firstRect.y) ) {

View File

@ -975,7 +975,7 @@ public class JPopupMenu extends JComponent implements Accessible,MenuElement {
if (newFrame != frame) {
// Use the invoker's frame so that events
// are propagated properly
if (newFrame!=null) {
if (newFrame != null) {
this.frame = newFrame;
if(popup != null) {
setVisible(false);
@ -1012,7 +1012,7 @@ public class JPopupMenu extends JComponent implements Accessible,MenuElement {
*/
JPopupMenu getRootPopupMenu() {
JPopupMenu mp = this;
while((mp!=null) && (mp.isPopupMenu()!=true) &&
while((mp != null) && (mp.isPopupMenu()!=true) &&
(mp.getInvoker() != null) &&
(mp.getInvoker().getParent() instanceof JPopupMenu popupMenu)
) {
@ -1182,7 +1182,7 @@ public class JPopupMenu extends JComponent implements Accessible,MenuElement {
private static Frame getFrame(Component c) {
Component w = c;
while(!(w instanceof Frame) && (w!=null)) {
while(!(w instanceof Frame) && (w != null)) {
w = w.getParent();
}
return (Frame)w;

View File

@ -581,13 +581,13 @@ public class JTextField extends JTextComponent implements SwingConstants {
Action oldValue = getAction();
if (action==null || !action.equals(a)) {
action = a;
if (oldValue!=null) {
if (oldValue != null) {
removeActionListener(oldValue);
oldValue.removePropertyChangeListener(actionPropertyChangeListener);
actionPropertyChangeListener = null;
}
configurePropertiesFromAction(action);
if (action!=null) {
if (action != null) {
// Don't add if it is already a listener
if (!isListener(ActionListener.class, action)) {
addActionListener(action);

View File

@ -2087,7 +2087,7 @@ public class JTree extends JComponent implements Scrollable, Accessible
value = expandedState.get(path);
if (value == null || !value)
return false;
} while( (path=path.getParentPath())!=null );
} while( (path=path.getParentPath()) != null );
return true;
}

View File

@ -330,7 +330,7 @@ class KeyboardManager {
return;
}
Hashtable<Object, Object> keyMap = containerMap.get(topContainer);
if (keyMap!=null) {
if (keyMap != null) {
Vector<?> v = (Vector)keyMap.get(JMenuBar.class);
if (v != null) {
v.removeElement(mb);

View File

@ -925,7 +925,7 @@ public class PopupFactory {
add to that, otherwise
add to the window. */
while (!(parent instanceof Window) &&
(parent!=null)) {
(parent != null)) {
parent = parent.getParent();
}

View File

@ -506,7 +506,7 @@ public class SwingUtilities implements SwingConstants
public static boolean isDescendingFrom(Component a,Component b) {
if(a == b)
return true;
for(Container p = a.getParent();p!=null;p=p.getParent())
for(Container p = a.getParent(); p != null; p = p.getParent())
if(p == b)
return true;
return false;

View File

@ -1169,7 +1169,7 @@ public class UIDefaults extends Hashtable<Object,Object>
*/
private Class<?>[] getClassArray(Object[] args) {
Class<?>[] types = null;
if (args!=null) {
if (args != null) {
types = new Class<?>[args.length];
for (int i = 0; i< args.length; i++) {
/* PENDING(ges): At present only the primitive types
@ -1199,7 +1199,7 @@ public class UIDefaults extends Hashtable<Object,Object>
private String printArgs(Object[] array) {
String s = "{";
if (array !=null) {
if (array != null) {
for (int i = 0 ; i < array.length-1; i++) {
s = s.concat(array[i] + ",");
}

View File

@ -1070,11 +1070,11 @@ public class BasicComboPopup extends JPopupMenu implements ComboPopup {
ComponentOrientation o =(ComponentOrientation)e.getNewValue();
JList<?> list = getList();
if (list!=null && list.getComponentOrientation()!=o) {
if (list != null && list.getComponentOrientation()!=o) {
list.setComponentOrientation(o);
}
if (scroller!=null && scroller.getComponentOrientation()!=o) {
if (scroller != null && scroller.getComponentOrientation()!=o) {
scroller.setComponentOrientation(o);
}

View File

@ -876,7 +876,7 @@ public class BasicListUI extends ListUI
}
Long l = (Long)UIManager.get("List.timeFactor");
timeFactor = (l!=null) ? l.longValue() : 1000L;
timeFactor = (l != null) ? l.longValue() : 1000L;
updateIsFileList();
}

View File

@ -125,7 +125,7 @@ public class BasicMenuBarUI extends MenuBarUI {
for (int i = 0; i < menuBar.getMenuCount(); i++) {
JMenu menu = menuBar.getMenu(i);
if (menu!=null)
if (menu != null)
menu.getModel().addChangeListener(changeListener);
}
menuBar.addContainerListener(containerListener);
@ -167,7 +167,7 @@ public class BasicMenuBarUI extends MenuBarUI {
* Uninstalls default properties.
*/
protected void uninstallDefaults() {
if (menuBar!=null) {
if (menuBar != null) {
LookAndFeel.uninstallBorder(menuBar);
}
}
@ -180,7 +180,7 @@ public class BasicMenuBarUI extends MenuBarUI {
for (int i = 0; i < menuBar.getMenuCount(); i++) {
JMenu menu = menuBar.getMenu(i);
if (menu !=null)
if (menu != null)
menu.getModel().removeChangeListener(changeListener);
}
@ -240,7 +240,7 @@ public class BasicMenuBarUI extends MenuBarUI {
int i,c;
for(i=0,c = menuBar.getMenuCount() ; i < c ; i++) {
JMenu menu = menuBar.getMenu(i);
if(menu !=null && menu.isSelected()) {
if(menu != null && menu.isSelected()) {
menuBar.getSelectionModel().setSelectedIndex(i);
break;
}
@ -277,7 +277,7 @@ public class BasicMenuBarUI extends MenuBarUI {
MenuElement[] me;
MenuElement[] subElements;
JMenu menu = menuBar.getMenu(0);
if (menu!=null) {
if (menu != null) {
me = new MenuElement[3];
me[0] = (MenuElement) menuBar;
me[1] = (MenuElement) menu;

View File

@ -908,7 +908,7 @@ public class BasicPopupMenuUI extends PopupMenuUI {
}
boolean isInPopup(Component src) {
for (Component c=src; c!=null; c=c.getParent()) {
for (Component c=src; c != null; c=c.getParent()) {
if (c instanceof Window) {
break;
} else if (c instanceof JPopupMenu) {

View File

@ -688,7 +688,7 @@ public class BasicSpinnerUI extends SpinnerUI
arrowButton = (JButton)e.getSource();
}
} else {
if (arrowButton!=null && !arrowButton.getModel().isPressed()
if (arrowButton != null && !arrowButton.getModel().isPressed()
&& autoRepeatTimer.isRunning()) {
autoRepeatTimer.stop();
spinner = null;

View File

@ -2273,7 +2273,7 @@ public class BasicSplitPaneUI extends SplitPaneUI
JSplitPane parentSplitPane =
(JSplitPane)SwingUtilities.getAncestorOfClass(
JSplitPane.class, splitPane);
if (parentSplitPane!=null) {
if (parentSplitPane != null) {
parentSplitPane.requestFocus();
}
}
@ -2307,7 +2307,7 @@ public class BasicSplitPaneUI extends SplitPaneUI
} while (splitPane.isAncestorOf(focusOn) &&
!focusFrom.contains(focusOn));
}
if ( focusOn!=null && !splitPane.isAncestorOf(focusOn) ) {
if ( focusOn != null && !splitPane.isAncestorOf(focusOn) ) {
focusOn.requestFocus();
}
}
@ -2323,7 +2323,7 @@ public class BasicSplitPaneUI extends SplitPaneUI
if (focusOn != null) {
// don't change the focus if the new focused component belongs
// to the same splitpane and the same side
if ( focus!=null &&
if ( focus != null &&
( (SwingUtilities.isDescendingFrom(focus, left) &&
SwingUtilities.isDescendingFrom(focusOn, left)) ||
(SwingUtilities.isDescendingFrom(focus, right) &&
@ -2338,15 +2338,15 @@ public class BasicSplitPaneUI extends SplitPaneUI
Component left = splitPane.getLeftComponent();
Component right = splitPane.getRightComponent();
Component next;
if (focus!=null && SwingUtilities.isDescendingFrom(focus, left) &&
right!=null) {
if (focus != null && SwingUtilities.isDescendingFrom(focus, left) &&
right != null) {
next = getFirstAvailableComponent(right);
if (next != null) {
return next;
}
}
JSplitPane parentSplitPane = (JSplitPane)SwingUtilities.getAncestorOfClass(JSplitPane.class, splitPane);
if (parentSplitPane!=null) {
if (parentSplitPane != null) {
// focus next side of the parent split pane
next = getNextSide(parentSplitPane, focus);
} else {

View File

@ -528,7 +528,7 @@ public class BasicTabbedPaneUI extends TabbedPaneUI implements SwingConstants {
}
tabPane.removeContainerListener(getHandler());
if (htmlViews!=null) {
if (htmlViews != null) {
htmlViews.removeAllElements();
htmlViews = null;
}
@ -4090,7 +4090,7 @@ public class BasicTabbedPaneUI extends TabbedPaneUI implements SwingConstants {
setHtmlView(v, inserted, index);
}
} else { // Not HTML
if (htmlViews!=null) { // Add placeholder
if (htmlViews != null) { // Add placeholder
setHtmlView(null, inserted, index);
} // else nada!
}

View File

@ -980,7 +980,7 @@ public class BasicToolBarUI extends ToolBarUI implements SwingConstants
{
toolBar.setOrientation( orientation );
if (dragWindow !=null)
if (dragWindow != null)
dragWindow.setOrientation(orientation);
}
@ -1616,7 +1616,7 @@ public class BasicToolBarUI extends ToolBarUI implements SwingConstants
this.orientation = o;
Dimension size = getSize();
setSize(new Dimension(size.height, size.width));
if (offset!=null) {
if (offset != null) {
if( BasicGraphicsUtils.isLeftToRight(toolBar) ) {
setOffset(new Point(offset.y, offset.x));
} else if( o == JToolBar.HORIZONTAL ) {

View File

@ -943,7 +943,7 @@ public class BasicTreeUI extends TreeUI
lineTypeDashed = UIManager.getBoolean("Tree.lineTypeDashed");
Long l = (Long)UIManager.get("Tree.timeFactor");
timeFactor = (l!=null) ? l.longValue() : 1000L;
timeFactor = (l != null) ? l.longValue() : 1000L;
Object showsRootHandles = UIManager.get("Tree.showsRootHandles");
if (showsRootHandles != null) {

View File

@ -660,7 +660,7 @@ public abstract class AbstractRegionPainter implements Painter<JComponent> {
ImageScalingHelper.paint(g, 0, 0, w, h, img, insets, dstInsets,
ImageScalingHelper.PaintType.PAINT9_STRETCH, ImageScalingHelper.PAINT_ALL);
g.setRenderingHint(RenderingHints.KEY_INTERPOLATION,
oldScalingHints!=null?oldScalingHints:RenderingHints.VALUE_INTERPOLATION_NEAREST_NEIGHBOR);
oldScalingHints != null ? oldScalingHints:RenderingHints.VALUE_INTERPOLATION_NEAREST_NEIGHBOR);
} else {
// render directly
paint0(g, c, w, h, extendedCacheKeys);

View File

@ -531,7 +531,7 @@ public class NimbusLookAndFeel extends SynthLookAndFeel {
public Object createValue(UIDefaults table) {
Object obj = null;
// check specified state
if (state!=null){
if (state != null){
obj = uiDefaults.get(prefix+"["+state+"]."+suffix);
}
// check enabled state

View File

@ -63,13 +63,13 @@ class SynthPainterImpl extends SynthPainter {
if (p != null) {
if (g instanceof Graphics2D){
Graphics2D gfx = (Graphics2D)g;
if (transform!=null){
if (transform != null) {
gfx.transform(transform);
}
gfx.translate(x, y);
p.paint(gfx, ctx.getComponent(), w, h);
gfx.translate(-x, -y);
if (transform!=null){
if (transform != null){
try {
gfx.transform(transform.createInverse());
} catch (NoninvertibleTransformException e) {
@ -85,7 +85,7 @@ class SynthPainterImpl extends SynthPainter {
BufferedImage img = new BufferedImage(w,h,
BufferedImage.TYPE_INT_ARGB);
Graphics2D gfx = img.createGraphics();
if (transform!=null){
if (transform != null){
gfx.transform(transform);
}
p.paint(gfx, ctx.getComponent(), w, h);

View File

@ -814,7 +814,7 @@ public class SynthComboBoxUI extends BasicComboBoxUI implements
public void propertyChange(PropertyChangeEvent evt) {
ComboBoxEditor newEditor = comboBox.getEditor();
if (editor != newEditor){
if (editorComponent!=null){
if (editorComponent != null) {
editorComponent.removeFocusListener(this);
}
editor = newEditor;

View File

@ -225,7 +225,7 @@ public class SynthScrollPaneUI extends BasicScrollPaneUI
private int getComponentState(JComponent c) {
int baseState = SynthLookAndFeel.getComponentState(c);
if (viewportViewFocusHandler!=null && viewportViewHasFocus){
if (viewportViewFocusHandler != null && viewportViewHasFocus) {
baseState = baseState | FOCUSED;
}
return baseState;

View File

@ -1181,7 +1181,7 @@ public abstract class JTextComponent extends JComponent implements Scrollable, A
Hashtable<String, Action> h = new Hashtable<String, Action>();
for (Action a : actions) {
String value = (String)a.getValue(Action.NAME);
h.put((value!=null ? value:""), a);
h.put((value != null ? value : ""), a);
}
for (KeyBinding binding : bindings) {
Action a = h.get(binding.actionName);

View File

@ -107,11 +107,11 @@ public abstract class TextAction extends AbstractAction {
Hashtable<String, Action> h = new Hashtable<String, Action>();
for (Action a : list1) {
String value = (String)a.getValue(Action.NAME);
h.put((value!=null ? value:""), a);
h.put((value != null ? value : ""), a);
}
for (Action a : list2) {
String value = (String)a.getValue(Action.NAME);
h.put((value!=null ? value:""), a);
h.put((value != null ? value : ""), a);
}
Action[] actions = new Action[h.size()];
int index = 0;

View File

@ -259,7 +259,7 @@ public class DefaultTreeCellEditor implements ActionListener, TreeCellEditor,
((MouseEvent)event).getY());
editable = (lastPath != null && path != null &&
lastPath.equals(path));
if (path!=null) {
if (path != null) {
lastRow = tree.getRowForPath(path);
Object value = path.getLastPathComponent();
boolean isSelected = tree.isRowSelected(lastRow);

View File

@ -1646,17 +1646,17 @@ public class SwingUtilities2 {
if (container.isFocusCycleRoot()) {
FocusTraversalPolicy policy = container.getFocusTraversalPolicy();
Component comp = policy.getDefaultComponent(container);
if (comp!=null) {
if (comp != null) {
comp.requestFocus(FocusEvent.Cause.TRAVERSAL);
return comp;
}
}
Container rootAncestor = container.getFocusCycleRootAncestor();
if (rootAncestor!=null) {
if (rootAncestor != null) {
FocusTraversalPolicy policy = rootAncestor.getFocusTraversalPolicy();
Component comp = policy.getComponentAfter(rootAncestor, container);
if (comp!=null && SwingUtilities.isDescendingFrom(comp, container)) {
if (comp != null && SwingUtilities.isDescendingFrom(comp, container)) {
comp.requestFocus(FocusEvent.Cause.TRAVERSAL);
return comp;
}

View File

@ -76,10 +76,9 @@ public class ThreadLocalAllocBuffer extends VMObject {
private long endReserve() {
long labAlignmentReserve = VM.getVM().getLabAlignmentReserve();
long reserveForAllocationPrefetch = VM.getVM().getReserveForAllocationPrefetch();
long heapWordSize = VM.getVM().getHeapWordSize();
return Math.max(labAlignmentReserve, reserveForAllocationPrefetch) * heapWordSize;
return labAlignmentReserve * heapWordSize;
}
/** Support for iteration over heap -- not sure how this will

View File

@ -123,7 +123,6 @@ public class VM {
private int invocationEntryBCI;
private ReversePtrs revPtrs;
private VMRegImpl vmregImpl;
private int reserveForAllocationPrefetch;
private int labAlignmentReserve;
// System.getProperties from debuggee VM
@ -447,8 +446,6 @@ public class VM {
boolType = (CIntegerType) db.lookupType("bool");
Type threadLocalAllocBuffer = db.lookupType("ThreadLocalAllocBuffer");
CIntegerField reserveForAllocationPrefetchField = threadLocalAllocBuffer.getCIntegerField("_reserve_for_allocation_prefetch");
reserveForAllocationPrefetch = (int)reserveForAllocationPrefetchField.getCInteger(intType);
Type collectedHeap = db.lookupType("CollectedHeap");
CIntegerField labAlignmentReserveField = collectedHeap.getCIntegerField("_lab_alignment_reserve");
@ -915,10 +912,6 @@ public class VM {
return vmInternalInfo;
}
public int getReserveForAllocationPrefetch() {
return reserveForAllocationPrefetch;
}
public int getLabAlignmentReserve() {
return labAlignmentReserve;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -118,6 +118,9 @@ final class Field {
// An integral type (byte, short, int, long)
boolean integralType;
// An integral type that should be treated like a symbol, e.g. PID.
boolean identifier;
// A java.time.Duration
boolean timespan;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -274,6 +274,7 @@ final class FieldBuilder {
case "int", "long", "short", "byte":
field.integralType = true;
field.alignLeft = false;
field.identifier = fieldName.equals("id") || fieldName.endsWith("Id") || field.label.endsWith("Identifier");
break;
case "float", "double":
field.fractionalType = true;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -165,7 +165,7 @@ public class FieldFormatter {
return object + " Hz";
}
}
if (object instanceof Number number) {
if (object instanceof Number number && !field.identifier) {
return ValueFormatter.formatNumber(number);
}
return object.toString();

View File

@ -49,6 +49,7 @@ import java.util.List;
import java.util.ListIterator;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ExecutorService;
@ -202,6 +203,7 @@ class ConsoleIOContext extends IOContext {
.filter(key -> key.startsWith(HISTORY_LINE_PREFIX))
.sorted()
.map(key -> repl.prefs.get(key))
.filter(Objects::nonNull)
.forEach(loadHistory::add);
for (ListIterator<String> it = loadHistory.listIterator(); it.hasNext(); ) {

View File

@ -83,6 +83,8 @@ compiler/c2/aarch64/TestStaticCallStub.java 8359963 linux-aarch64,macosx-aarch64
compiler/longcountedloops/TestLoopNestTooManyTraps.java 8376591 generic-all
compiler/unsafe/AlignmentGapAccess.java 8373487 generic-all
#############################################################################
# :hotspot_gc
@ -196,4 +198,3 @@ vmTestbase/nsk/monitoring/ThreadMXBean/findMonitorDeadlockedThreads/find006/Test
# in either implementation or test code.
#############################################################################

View File

@ -0,0 +1,79 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @summary Stress allocation prefetch with large legal AllocatePrefetch* values
* @requires vm.compiler2.enabled
*
* @run main/othervm -Xbatch -XX:-TieredCompilation -XX:+UseTLAB
* -XX:AllocatePrefetchStyle=1
* -XX:AllocatePrefetchDistance=512
* -XX:AllocatePrefetchStepSize=512
* -XX:AllocatePrefetchLines=64
* -XX:AllocateInstancePrefetchLines=64
* compiler.c2.TestAllocatePrefetchStyleLargeFlags
* @run main/othervm -Xbatch -XX:-TieredCompilation -XX:+UseTLAB
* -XX:AllocatePrefetchStyle=2
* -XX:AllocatePrefetchDistance=512
* -XX:AllocatePrefetchStepSize=512
* -XX:AllocatePrefetchLines=64
* -XX:AllocateInstancePrefetchLines=64
* compiler.c2.TestAllocatePrefetchStyleLargeFlags
* @run main/othervm -Xbatch -XX:-TieredCompilation -XX:+UseTLAB
* -XX:AllocatePrefetchStyle=3
* -XX:AllocatePrefetchDistance=512
* -XX:AllocatePrefetchStepSize=512
* -XX:AllocatePrefetchLines=64
* -XX:AllocateInstancePrefetchLines=64
* compiler.c2.TestAllocatePrefetchStyleLargeFlags
*/
package compiler.c2;
public class TestAllocatePrefetchStyleLargeFlags {
private static volatile Object sink;
private static final class Payload {
private final int value;
private Payload(int value) {
this.value = value;
}
}
private static Object allocateInstance(int value) {
return new Payload(value);
}
private static Object allocateArray(int value) {
return new int[value & 31];
}
public static void main(String[] args) {
for (int i = 0; i < 50_000; i++) {
sink = allocateInstance(i);
sink = allocateArray(i);
}
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -89,12 +89,16 @@ public class TestStringIntrinsicRangeChecks {
for (int srcOff = 0; srcOff < SIZE; ++srcOff) {
for (int dstOff = 0; dstOff < SIZE; ++dstOff) {
for (int len = 0; len < SIZE; ++len) {
int srcEnd = srcOff + len;
int dstEnd = dstOff + len;
// Check for potential overlows in source or destination array
boolean srcOverflow = (srcOff + len) > SIZE;
boolean srcOverflowB = (2*srcOff + 2*len) > SIZE;
boolean dstOverflow = (dstOff + len) > SIZE;
boolean dstOverflowB = (2*dstOff + 2*len) > SIZE;
boolean getCharsOver = (srcOff < len) && ((2*(len-1) >= SIZE) || ((dstOff + len - srcOff) > SIZE));
boolean getCharsOver = srcOff > srcEnd || (2*srcEnd) > SIZE || // src
(2*len) > SIZE || // len
dstOff > dstEnd || dstEnd > SIZE; // dst
// Check if an exception is thrown and bail out if result is inconsistent with above
// assumptions (for example, an exception was not thrown although an overflow happened).
check(compressByte, srcOverflowB || dstOverflow, byteArray, srcOff, SIZE, dstOff, len);
@ -102,7 +106,7 @@ public class TestStringIntrinsicRangeChecks {
check(inflateByte, srcOverflow || dstOverflowB, byteArray, srcOff, SIZE, dstOff, len);
check(inflateChar, srcOverflow || dstOverflow, byteArray, srcOff, SIZE, dstOff, len);
check(toBytes, srcOverflow, charArray, srcOff, len);
check(getChars, getCharsOver, byteArray, srcOff, len, SIZE, dstOff);
check(getChars, getCharsOver, byteArray, srcOff, srcEnd, SIZE, dstOff);
}
}
}

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 2024, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2026 IBM Corporation. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,13 +25,15 @@
package compiler.loopopts.superword;
import compiler.lib.ir_framework.*;
import compiler.lib.verify.Verify;
import jdk.test.lib.Utils;
import jdk.test.whitebox.WhiteBox;
import java.lang.foreign.MemorySegment;
import java.lang.foreign.ValueLayout;
import java.lang.reflect.Array;
import java.util.Map;
import java.util.HashMap;
import java.util.Random;
import java.nio.ByteOrder;
/*
* @test
@ -61,6 +64,8 @@ public class TestCompatibleUseDefTypeSize {
float[] bF;
double[] aD;
double[] bD;
MemorySegment aMSF;
MemorySegment aMSD;
// List of tests
Map<String,TestFunction> tests = new HashMap<String,TestFunction>();
@ -92,6 +97,8 @@ public class TestCompatibleUseDefTypeSize {
bF = generateF();
aD = generateD();
bD = generateD();
aMSF = generateMemorySegmentF();
aMSD = generateMemorySegmentD();
// Add all tests to list
tests.put("test0", () -> { return test0(aB.clone(), bC.clone()); });
@ -122,6 +129,10 @@ public class TestCompatibleUseDefTypeSize {
tests.put("testLongToShort", () -> { return testLongToShort(aL.clone(), bS.clone()); });
tests.put("testLongToChar", () -> { return testLongToChar(aL.clone(), bC.clone()); });
tests.put("testLongToInt", () -> { return testLongToInt(aL.clone(), bI.clone()); });
tests.put("testFloatToIntMemorySegment", () -> { return testFloatToIntMemorySegment(copyF(aMSF), bF.clone()); });
tests.put("testDoubleToLongMemorySegment", () -> { return testDoubleToLongMemorySegment(copyD(aMSD), bD.clone()); });
tests.put("testIntToFloatMemorySegment", () -> { return testIntToFloatMemorySegment(copyF(aMSF), bF.clone()); });
tests.put("testLongToDoubleMemorySegment", () -> { return testLongToDoubleMemorySegment(copyD(aMSD), bD.clone()); });
// Compute gold value for all test methods before compilation
for (Map.Entry<String,TestFunction> entry : tests.entrySet()) {
@ -160,7 +171,11 @@ public class TestCompatibleUseDefTypeSize {
"testLongToByte",
"testLongToShort",
"testLongToChar",
"testLongToInt"})
"testLongToInt",
"testFloatToIntMemorySegment",
"testDoubleToLongMemorySegment",
"testIntToFloatMemorySegment",
"testLongToDoubleMemorySegment"})
public void runTests() {
for (Map.Entry<String,TestFunction> entry : tests.entrySet()) {
String name = entry.getKey();
@ -170,7 +185,7 @@ public class TestCompatibleUseDefTypeSize {
// Compute new result
Object[] result = test.run();
// Compare gold and new result
verify(name, gold, result);
Verify.checkEQ(gold, result);
}
}
@ -230,119 +245,32 @@ public class TestCompatibleUseDefTypeSize {
return a;
}
static void verify(String name, Object[] gold, Object[] result) {
if (gold.length != result.length) {
throw new RuntimeException("verify " + name + ": not the same number of outputs: gold.length = " +
gold.length + ", result.length = " + result.length);
}
for (int i = 0; i < gold.length; i++) {
Object g = gold[i];
Object r = result[i];
if (g.getClass() != r.getClass() || !g.getClass().isArray() || !r.getClass().isArray()) {
throw new RuntimeException("verify " + name + ": must both be array of same type:" +
" gold[" + i + "].getClass() = " + g.getClass().getSimpleName() +
" result[" + i + "].getClass() = " + r.getClass().getSimpleName());
}
if (g == r) {
throw new RuntimeException("verify " + name + ": should be two separate arrays (with identical content):" +
" gold[" + i + "] == result[" + i + "]");
}
if (Array.getLength(g) != Array.getLength(r)) {
throw new RuntimeException("verify " + name + ": arrays must have same length:" +
" gold[" + i + "].length = " + Array.getLength(g) +
" result[" + i + "].length = " + Array.getLength(r));
}
Class c = g.getClass().getComponentType();
if (c == byte.class) {
verifyB(name, i, (byte[])g, (byte[])r);
} else if (c == short.class) {
verifyS(name, i, (short[])g, (short[])r);
} else if (c == char.class) {
verifyC(name, i, (char[])g, (char[])r);
} else if (c == int.class) {
verifyI(name, i, (int[])g, (int[])r);
} else if (c == long.class) {
verifyL(name, i, (long[])g, (long[])r);
} else if (c == float.class) {
verifyF(name, i, (float[])g, (float[])r);
} else if (c == double.class) {
verifyD(name, i, (double[])g, (double[])r);
} else {
throw new RuntimeException("verify " + name + ": array type not supported for verify:" +
" gold[" + i + "].getClass() = " + g.getClass().getSimpleName() +
" result[" + i + "].getClass() = " + r.getClass().getSimpleName());
}
static MemorySegment generateMemorySegmentF() {
MemorySegment a = MemorySegment.ofArray(new float[RANGE]);
for (int i = 0; i < (int) a.byteSize(); i += 8) {
a.set(ValueLayout.JAVA_LONG_UNALIGNED, i, RANDOM.nextLong());
}
return a;
}
static void verifyB(String name, int i, byte[] g, byte[] r) {
for (int j = 0; j < g.length; j++) {
if (g[j] != r[j]) {
throw new RuntimeException("verify " + name + ": arrays must have same content:" +
" gold[" + i + "][" + j + "] = " + g[j] +
" result[" + i + "][" + j + "] = " + r[j]);
}
}
MemorySegment copyF(MemorySegment src) {
MemorySegment dst = generateMemorySegmentF();
MemorySegment.copy(src, 0, dst, 0, src.byteSize());
return dst;
}
static void verifyS(String name, int i, short[] g, short[] r) {
for (int j = 0; j < g.length; j++) {
if (g[j] != r[j]) {
throw new RuntimeException("verify " + name + ": arrays must have same content:" +
" gold[" + i + "][" + j + "] = " + g[j] +
" result[" + i + "][" + j + "] = " + r[j]);
}
static MemorySegment generateMemorySegmentD() {
MemorySegment a = MemorySegment.ofArray(new double[RANGE]);
for (int i = 0; i < (int) a.byteSize(); i += 8) {
a.set(ValueLayout.JAVA_LONG_UNALIGNED, i, RANDOM.nextLong());
}
return a;
}
static void verifyC(String name, int i, char[] g, char[] r) {
for (int j = 0; j < g.length; j++) {
if (g[j] != r[j]) {
throw new RuntimeException("verify " + name + ": arrays must have same content:" +
" gold[" + i + "][" + j + "] = " + g[j] +
" result[" + i + "][" + j + "] = " + r[j]);
}
}
}
static void verifyI(String name, int i, int[] g, int[] r) {
for (int j = 0; j < g.length; j++) {
if (g[j] != r[j]) {
throw new RuntimeException("verify " + name + ": arrays must have same content:" +
" gold[" + i + "][" + j + "] = " + g[j] +
" result[" + i + "][" + j + "] = " + r[j]);
}
}
}
static void verifyL(String name, int i, long[] g, long[] r) {
for (int j = 0; j < g.length; j++) {
if (g[j] != r[j]) {
throw new RuntimeException("verify " + name + ": arrays must have same content:" +
" gold[" + i + "][" + j + "] = " + g[j] +
" result[" + i + "][" + j + "] = " + r[j]);
}
}
}
static void verifyF(String name, int i, float[] g, float[] r) {
for (int j = 0; j < g.length; j++) {
if (Float.floatToIntBits(g[j]) != Float.floatToIntBits(r[j])) {
throw new RuntimeException("verify " + name + ": arrays must have same content:" +
" gold[" + i + "][" + j + "] = " + g[j] +
" result[" + i + "][" + j + "] = " + r[j]);
}
}
}
static void verifyD(String name, int i, double[] g, double[] r) {
for (int j = 0; j < g.length; j++) {
if (Double.doubleToLongBits(g[j]) != Double.doubleToLongBits(r[j])) {
throw new RuntimeException("verify " + name + ": arrays must have same content:" +
" gold[" + i + "][" + j + "] = " + g[j] +
" result[" + i + "][" + j + "] = " + r[j]);
}
}
MemorySegment copyD(MemorySegment src) {
MemorySegment dst = generateMemorySegmentD();
MemorySegment.copy(src, 0, dst, 0, src.byteSize());
return dst;
}
@Test
@ -707,4 +635,64 @@ public class TestCompatibleUseDefTypeSize {
return new Object[] { ints, res };
}
@Test
@IR(counts = {IRNode.LOAD_VECTOR_F, IRNode.VECTOR_SIZE + "min(max_int, max_float)", "> 0",
IRNode.STORE_VECTOR, "> 0",
IRNode.VECTOR_REINTERPRET, "> 0"},
applyIf = {"AlignVector", "false"},
applyIfPlatform = {"64-bit", "true"},
applyIfCPUFeatureOr = {"sse4.1", "true", "asimd", "true", "rvv", "true"})
static Object[] testFloatToIntMemorySegment(MemorySegment a, float[] b) {
for (int i = 0; i < RANGE; i++) {
a.set(ValueLayout.JAVA_FLOAT_UNALIGNED, 4L * i, b[i]);
}
return new Object[]{ a, b };
}
@Test
@IR(counts = {IRNode.LOAD_VECTOR_D, IRNode.VECTOR_SIZE + "min(max_long, max_double)", "> 0",
IRNode.STORE_VECTOR, "> 0",
IRNode.VECTOR_REINTERPRET, "> 0"},
applyIf = {"AlignVector", "false"},
applyIfPlatform = {"64-bit", "true"},
applyIfCPUFeatureOr = {"sse4.1", "true", "asimd", "true", "rvv", "true"})
static Object[] testDoubleToLongMemorySegment(MemorySegment a, double[] b) {
for (int i = 0; i < RANGE; i++) {
a.set(ValueLayout.JAVA_DOUBLE_UNALIGNED, 8L * i, b[i]);
}
return new Object[]{ a, b };
}
@Test
@IR(counts = {IRNode.LOAD_VECTOR_I, "> 0",
IRNode.STORE_VECTOR, "> 0",
IRNode.VECTOR_REINTERPRET, "> 0"},
applyIf = {"AlignVector", "false"},
applyIfPlatform = {"64-bit", "true"},
applyIfCPUFeatureOr = {"sse4.1", "true", "asimd", "true", "rvv", "true"})
static Object[] testIntToFloatMemorySegment(MemorySegment a, float[] b) {
for (int i = 0; i < RANGE; i++) {
b[i] = a.get(ValueLayout.JAVA_FLOAT_UNALIGNED, 4L * i);
}
return new Object[]{ a, b };
}
@Test
@IR(counts = {IRNode.LOAD_VECTOR_L, "> 0",
IRNode.STORE_VECTOR, "> 0",
IRNode.VECTOR_REINTERPRET, "> 0"},
applyIf = {"AlignVector", "false"},
applyIfPlatform = {"64-bit", "true"},
applyIfCPUFeatureOr = {"sse4.1", "true", "asimd", "true", "rvv", "true"})
static Object[] testLongToDoubleMemorySegment(MemorySegment a, double[] b) {
for (int i = 0; i < RANGE; i++) {
b[i] = a.get(ValueLayout.JAVA_DOUBLE_UNALIGNED, 8L * i);
}
return new Object[]{ a, b };
}
}

View File

@ -50,7 +50,7 @@ public class VectorMaskCastIdentityTest {
}
@Test
@IR(counts = { IRNode.VECTOR_MASK_CAST, "= 2" }, applyIfCPUFeatureOr = {"asimd", "true"})
@IR(counts = { IRNode.VECTOR_MASK_CAST, "= 2" }, applyIfCPUFeatureOr = {"asimd", "true", "rvv", "true"})
public static int testTwoCastToDifferentType() {
// The types before and after the two casts are not the same, so the cast cannot be eliminated.
VectorMask<Float> mFloat64 = VectorMask.fromArray(FloatVector.SPECIES_64, mr, 0);
@ -84,7 +84,7 @@ public class VectorMaskCastIdentityTest {
}
@Test
@IR(counts = { IRNode.VECTOR_MASK_CAST, "= 0" }, applyIfCPUFeatureOr = {"avx2", "true", "asimd", "true"})
@IR(counts = { IRNode.VECTOR_MASK_CAST, "= 0" }, applyIfCPUFeatureOr = {"avx2", "true", "asimd", "true", "rvv", "true"})
public static int testTwoCastToSameType() {
// The types before and after the two casts are the same, so the cast will be eliminated.
VectorMask<Integer> mInt128 = VectorMask.fromArray(IntVector.SPECIES_128, mr, 0);
@ -101,7 +101,7 @@ public class VectorMaskCastIdentityTest {
}
@Test
@IR(counts = { IRNode.VECTOR_MASK_CAST, "= 1" }, applyIfCPUFeatureOr = {"avx2", "true", "asimd", "true"})
@IR(counts = { IRNode.VECTOR_MASK_CAST, "= 1" }, applyIfCPUFeatureOr = {"avx2", "true", "asimd", "true", "rvv", "true"})
public static int testOneCastToDifferentType() {
// The types before and after the only cast are different, the cast will not be eliminated.
VectorMask<Float> mFloat128 = VectorMask.fromArray(FloatVector.SPECIES_128, mr, 0).not();

View File

@ -74,7 +74,7 @@ public class VectorMaskCastTest {
// Byte
@Test
@IR(counts = { IRNode.VECTOR_MASK_CAST, "> 0" }, applyIfCPUFeatureOr = {"avx2", "true", "asimd", "true"})
@IR(counts = { IRNode.VECTOR_MASK_CAST, "> 0" }, applyIfCPUFeatureOr = {"avx2", "true", "asimd", "true", "rvv", "true"})
public static VectorMask<Short> testByte64ToShort128(VectorMask<Byte> v) {
return v.cast(ShortVector.SPECIES_128);
}
@ -201,7 +201,7 @@ public class VectorMaskCastTest {
// Short
@Test
@IR(counts = { IRNode.VECTOR_MASK_CAST, "> 0" }, applyIfCPUFeatureOr = {"avx2", "true", "asimd", "true"})
@IR(counts = { IRNode.VECTOR_MASK_CAST, "> 0" }, applyIfCPUFeatureOr = {"avx2", "true", "asimd", "true", "rvv", "true"})
public static VectorMask<Integer> testShort64ToInt128(VectorMask<Short> v) {
return v.cast(IntVector.SPECIES_128);
}
@ -215,7 +215,7 @@ public class VectorMaskCastTest {
}
@Test
@IR(counts = { IRNode.VECTOR_MASK_CAST, "> 0" }, applyIfCPUFeatureOr = {"avx2", "true", "asimd", "true"})
@IR(counts = { IRNode.VECTOR_MASK_CAST, "> 0" }, applyIfCPUFeatureOr = {"avx2", "true", "asimd", "true", "rvv", "true"})
public static VectorMask<Float> testShort64ToFloat128(VectorMask<Short> v) {
return v.cast(FloatVector.SPECIES_128);
}
@ -257,7 +257,7 @@ public class VectorMaskCastTest {
}
@Test
@IR(counts = { IRNode.VECTOR_MASK_CAST, "> 0" }, applyIfCPUFeatureOr = {"avx2", "true", "asimd", "true"})
@IR(counts = { IRNode.VECTOR_MASK_CAST, "> 0" }, applyIfCPUFeatureOr = {"avx2", "true", "asimd", "true", "rvv", "true"})
public static VectorMask<Byte> testShort128ToByte64(VectorMask<Short> v) {
return v.cast(ByteVector.SPECIES_64);
}
@ -384,7 +384,7 @@ public class VectorMaskCastTest {
// Int
@Test
@IR(counts = { IRNode.VECTOR_MASK_CAST, "> 0" }, applyIfCPUFeature = {"asimd", "true"})
@IR(counts = { IRNode.VECTOR_MASK_CAST, "> 0" }, applyIfCPUFeatureOr = {"asimd", "true", "rvv", "true"})
public static VectorMask<Long> testInt64ToLong128(VectorMask<Integer> v) {
return v.cast(LongVector.SPECIES_128);
}
@ -398,7 +398,7 @@ public class VectorMaskCastTest {
}
@Test
@IR(counts = { IRNode.VECTOR_MASK_CAST, "> 0" }, applyIfCPUFeature = {"asimd", "true"})
@IR(counts = { IRNode.VECTOR_MASK_CAST, "> 0" }, applyIfCPUFeatureOr = {"asimd", "true", "rvv", "true"})
public static VectorMask<Double> testInt64ToDouble128(VectorMask<Integer> v) {
return v.cast(DoubleVector.SPECIES_128);
}
@ -412,7 +412,7 @@ public class VectorMaskCastTest {
}
@Test
@IR(counts = { IRNode.VECTOR_MASK_CAST, "> 0" }, applyIfCPUFeatureOr = {"avx2", "true", "asimd", "true"})
@IR(counts = { IRNode.VECTOR_MASK_CAST, "> 0" }, applyIfCPUFeatureOr = {"avx2", "true", "asimd", "true", "rvv", "true"})
public static VectorMask<Short> testInt128ToShort64(VectorMask<Integer> v) {
return v.cast(ShortVector.SPECIES_64);
}
@ -539,7 +539,7 @@ public class VectorMaskCastTest {
// Float
@Test
@IR(counts = { IRNode.VECTOR_MASK_CAST, "> 0" }, applyIfCPUFeature = {"asimd", "true"})
@IR(counts = { IRNode.VECTOR_MASK_CAST, "> 0" }, applyIfCPUFeatureOr = {"asimd", "true", "rvv", "true"})
public static VectorMask<Long> testFloat64ToLong128(VectorMask<Float> v) {
return v.cast(LongVector.SPECIES_128);
}
@ -553,7 +553,7 @@ public class VectorMaskCastTest {
}
@Test
@IR(counts = { IRNode.VECTOR_MASK_CAST, "> 0" }, applyIfCPUFeature = {"asimd", "true"})
@IR(counts = { IRNode.VECTOR_MASK_CAST, "> 0" }, applyIfCPUFeatureOr = {"asimd", "true", "rvv", "true"})
public static VectorMask<Double> testFloat64ToDouble128(VectorMask<Float> v) {
return v.cast(DoubleVector.SPECIES_128);
}
@ -567,7 +567,7 @@ public class VectorMaskCastTest {
}
@Test
@IR(counts = { IRNode.VECTOR_MASK_CAST, "> 0" }, applyIfCPUFeatureOr = {"avx2", "true", "asimd", "true"})
@IR(counts = { IRNode.VECTOR_MASK_CAST, "> 0" }, applyIfCPUFeatureOr = {"avx2", "true", "asimd", "true", "rvv", "true"})
public static VectorMask<Short> testFloat128ToShort64(VectorMask<Float> v) {
return v.cast(ShortVector.SPECIES_64);
}
@ -694,7 +694,7 @@ public class VectorMaskCastTest {
// Long
@Test
@IR(counts = { IRNode.VECTOR_MASK_CAST, "> 0" }, applyIfCPUFeature = {"asimd", "true"})
@IR(counts = { IRNode.VECTOR_MASK_CAST, "> 0" }, applyIfCPUFeatureOr = {"asimd", "true", "rvv", "true"})
public static VectorMask<Integer> testLong128ToInt64(VectorMask<Long> v) {
return v.cast(IntVector.SPECIES_64);
}
@ -708,7 +708,7 @@ public class VectorMaskCastTest {
}
@Test
@IR(counts = { IRNode.VECTOR_MASK_CAST, "> 0" }, applyIfCPUFeature = {"asimd", "true"})
@IR(counts = { IRNode.VECTOR_MASK_CAST, "> 0" }, applyIfCPUFeatureOr = {"asimd", "true", "rvv", "true"})
public static VectorMask<Float> testLong128ToFloat64(VectorMask<Long> v) {
return v.cast(FloatVector.SPECIES_64);
}
@ -821,7 +821,7 @@ public class VectorMaskCastTest {
// Double
@Test
@IR(counts = { IRNode.VECTOR_MASK_CAST, "> 0" }, applyIfCPUFeature = {"asimd", "true"})
@IR(counts = { IRNode.VECTOR_MASK_CAST, "> 0" }, applyIfCPUFeatureOr = {"asimd", "true", "rvv", "true"})
public static VectorMask<Integer> testDouble128ToInt64(VectorMask<Double> v) {
return v.cast(IntVector.SPECIES_64);
}
@ -835,7 +835,7 @@ public class VectorMaskCastTest {
}
@Test
@IR(counts = { IRNode.VECTOR_MASK_CAST, "> 0" }, applyIfCPUFeature = {"asimd", "true"})
@IR(counts = { IRNode.VECTOR_MASK_CAST, "> 0" }, applyIfCPUFeatureOr = {"asimd", "true", "rvv", "true"})
public static VectorMask<Float> testDouble128ToFloat64(VectorMask<Double> v) {
return v.cast(FloatVector.SPECIES_64);
}

View File

@ -0,0 +1,73 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/**
* @test
* @requires vm.cds
* @requires vm.flagless
* @requires vm.bits == 64
* @bug 8376822
* @summary Allocation gaps in the RW region caused by -XX:+UseCompactObjectHeaders should be reused
* @library /test/lib
* @build MetaspaceAllocGaps
* @run driver jdk.test.lib.helpers.ClassFileInstaller -jar hello.jar Hello
* @run driver MetaspaceAllocGaps
*/
import jdk.test.lib.cds.SimpleCDSAppTester;
import jdk.test.lib.helpers.ClassFileInstaller;
import jdk.test.lib.process.OutputAnalyzer;
public class MetaspaceAllocGaps {
public static void main(String[] args) throws Exception {
String appJar = ClassFileInstaller.getJarPath("hello.jar");
for (int i = 0; i < 2; i++) {
String compressedOops = "-XX:" + (i == 0 ? "-" : "+") + "UseCompressedOops";
SimpleCDSAppTester.of("MetaspaceAllocGaps" + i)
.addVmArgs("-Xlog:aot=debug,aot+alloc=trace",
"-XX:+UseCompactObjectHeaders")
.classpath(appJar)
.appCommandLine("Hello")
.setTrainingChecker((OutputAnalyzer out) -> {
// Typically all gaps should be filled. If not, we probably have a regression in C++ class ArchiveUtils.
//
// [0.422s][debug][aot ] Detailed metadata info (excluding heap region):
// [...]
// [0.422s][debug][aot ] Gap : 0 0 0.0 | 0 0 0.0 | 0 0 0.0 <<< look for this pattern
out.shouldMatch("Allocated [1-9][0-9]+ objects of [1-9][0-9]+ bytes in gaps .remain = 0 bytes")
.shouldMatch("debug.* Gap .*0[.]0.*0[.]0.*0[.]0")
.shouldNotMatch("Unexpected .* gaps .* for Klass alignment");
})
.setProductionChecker((OutputAnalyzer out) -> {
out.shouldContain("HelloWorld");
})
.runAOTWorkflow();
}
}
}
class Hello {
public static void main(String[] args) {
System.out.println("HelloWorld");
}
}

Some files were not shown because too many files have changed in this diff Show More