Merge remote-tracking branch 'origin/master' into _8366241_nmt_consolidate_structures

This commit is contained in:
Afshin Zafari 2025-09-30 12:03:31 +02:00
commit 944b91271f
62 changed files with 1254 additions and 857 deletions

View File

@ -160,7 +160,7 @@ void VM_Version::common_initialize() {
if (FLAG_IS_DEFAULT(AvoidUnalignedAccesses)) {
FLAG_SET_DEFAULT(AvoidUnalignedAccesses,
unaligned_access.value() != MISALIGNED_FAST);
unaligned_scalar.value() != MISALIGNED_SCALAR_FAST);
}
if (!AvoidUnalignedAccesses) {
@ -175,7 +175,12 @@ void VM_Version::common_initialize() {
// This machine has fast unaligned memory accesses
if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) {
FLAG_SET_DEFAULT(UseUnalignedAccesses,
unaligned_access.value() == MISALIGNED_FAST);
(unaligned_scalar.value() == MISALIGNED_SCALAR_FAST));
}
if (FLAG_IS_DEFAULT(AlignVector)) {
FLAG_SET_DEFAULT(AlignVector,
unaligned_vector.value() != MISALIGNED_VECTOR_FAST);
}
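Taken together, and assuming no flags were set on the command line, the defaults work out like this: on a core where hwprobe reports MISALIGNED_SCALAR_FAST and MISALIGNED_VECTOR_SLOW, AvoidUnalignedAccesses defaults to false and UseUnalignedAccesses to true (scalar misaligned accesses are fast), while AlignVector defaults to true (vector misaligned accesses are not fast, so the compiler keeps vector accesses aligned).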
#ifdef __riscv_ztso
@ -234,36 +239,6 @@ void VM_Version::common_initialize() {
warning("CRC32C intrinsics are not available on this CPU.");
FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
}
// UseZvbb (depends on RVV).
if (UseZvbb && !UseRVV) {
warning("Cannot enable UseZvbb on cpu without RVV support.");
FLAG_SET_DEFAULT(UseZvbb, false);
}
// UseZvbc (depends on RVV).
if (UseZvbc && !UseRVV) {
warning("Cannot enable UseZvbc on cpu without RVV support.");
FLAG_SET_DEFAULT(UseZvbc, false);
}
// UseZvkn (depends on RVV).
if (UseZvkn && !UseRVV) {
warning("Cannot enable UseZvkn on cpu without RVV support.");
FLAG_SET_DEFAULT(UseZvkn, false);
}
// UseZvfh (depends on RVV)
if (UseZvfh) {
if (!UseRVV) {
warning("Cannot enable UseZvfh on cpu without RVV support.");
FLAG_SET_DEFAULT(UseZvfh, false);
}
if (!UseZfh) {
warning("Cannot enable UseZvfh on cpu without Zfh support.");
FLAG_SET_DEFAULT(UseZvfh, false);
}
}
}
#ifdef COMPILER2

View File

@ -70,6 +70,35 @@ class VM_Version : public Abstract_VM_Version {
int64_t value() { return _value; }
virtual bool enabled() = 0;
virtual void update_flag() = 0;
protected:
bool deps_all_enabled(RVFeatureValue* dep0, ...) {
assert(dep0 != nullptr, "must not");
va_list va;
va_start(va, dep0);
RVFeatureValue* next = dep0;
bool enabled = true;
while (next != nullptr && enabled) {
enabled = next->enabled();
next = va_arg(va, RVFeatureValue*);
}
va_end(va);
return enabled;
}
void deps_string(stringStream& ss, RVFeatureValue* dep0, ...) {
assert(dep0 != nullptr, "must not");
ss.print("%s (%s)", dep0->pretty(), dep0->enabled() ? "enabled" : "disabled");
va_list va;
va_start(va, dep0);
RVFeatureValue* next = nullptr;
while ((next = va_arg(va, RVFeatureValue*)) != nullptr) {
ss.print(", %s (%s)", next->pretty(), next->enabled() ? "enabled" : "disabled");
}
va_end(va);
}
};
#define UPDATE_DEFAULT(flag) \
@ -85,27 +114,34 @@ class VM_Version : public Abstract_VM_Version {
} \
} \
#define UPDATE_DEFAULT_DEP(flag, dep) \
void update_flag() { \
assert(enabled(), "Must be."); \
/* dep must be declared before */ \
assert((uintptr_t)(this) > \
(uintptr_t)(&dep), "Invalid"); \
if (FLAG_IS_DEFAULT(flag)) { \
if (dep.enabled()) { \
FLAG_SET_DEFAULT(flag, true); \
} else { \
FLAG_SET_DEFAULT(flag, false); \
/* Sync CPU features with flags */ \
disable_feature(); \
} \
} else { \
/* Sync CPU features with flags */ \
if (!flag) { \
disable_feature(); \
} \
} \
} \
#define UPDATE_DEFAULT_DEP(flag, dep0, ...) \
void update_flag() { \
assert(enabled(), "Must be."); \
if (FLAG_IS_DEFAULT(flag)) { \
if (this->deps_all_enabled(dep0, ##__VA_ARGS__)) { \
FLAG_SET_DEFAULT(flag, true); \
} else { \
FLAG_SET_DEFAULT(flag, false); \
stringStream ss; \
deps_string(ss, dep0, ##__VA_ARGS__); \
warning("Cannot enable " #flag ", it's missing dependent extension(s) %s", ss.as_string(true)); \
/* Sync CPU features with flags */ \
disable_feature(); \
} \
} else { \
/* Sync CPU features with flags */ \
if (!flag) { \
disable_feature(); \
} else if (!deps_all_enabled(dep0, ##__VA_ARGS__)) { \
FLAG_SET_DEFAULT(flag, false); \
stringStream ss; \
deps_string(ss, dep0, ##__VA_ARGS__); \
warning("Cannot enable " #flag ", it's missing dependent extension(s) %s", ss.as_string(true)); \
/* Sync CPU features with flags */ \
disable_feature(); \
} \
} \
} \
#define NO_UPDATE_DEFAULT \
void update_flag() {} \
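The nullptr-terminated varargs convention used by deps_all_enabled()/deps_string() is easy to get wrong at call sites, so here is a self-contained sketch of the same pattern; Feature and all_enabled are illustrative stand-ins, not the HotSpot types:

#include <cstdarg>
#include <cstdio>

struct Feature {
  const char* name;
  bool on;
  bool enabled() const { return on; }
};

// True only if every dependency in the nullptr-terminated list is
// enabled; stops at the first disabled one, like deps_all_enabled().
static bool all_enabled(const Feature* dep0, ...) {
  va_list va;
  va_start(va, dep0);
  bool ok = true;
  for (const Feature* f = dep0; f != nullptr && ok; f = va_arg(va, const Feature*)) {
    ok = f->enabled();
  }
  va_end(va);
  return ok;
}

int main() {
  Feature v{"v", true}, zfh{"Zfh", false};
  // The trailing nullptr sentinel is mandatory: without it, va_arg
  // reads past the argument list (undefined behavior).
  std::printf("%d\n", all_enabled(&v, &zfh, nullptr)); // prints 0
}

This is also why the call sites in the feature table below pass an explicit trailing nullptr, e.g. UPDATE_DEFAULT_DEP(UseZvfh, &ext_V, &ext_Zfh, nullptr).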
@ -191,7 +227,8 @@ class VM_Version : public Abstract_VM_Version {
// mvendorid Manufacturer JEDEC id encoded, ISA vol 2 3.1.2.
// marchid Id for microarch. Mvendorid plus marchid uniquely identify the microarch.
// mimpid A unique encoding of the version of the processor implementation.
// unaligned_access Unaligned memory accesses (unknown, unsupported, emulated, slow, firmware, fast)
// unaligned_scalar Performance of misaligned scalar accesses (unknown, emulated, slow, fast, unsupported)
// unaligned_vector Performance of misaligned vector accesses (unknown, unsupported, slow, fast)
// satp mode SATP bits (number of virtual addr bits) mbare, sv39, sv48, sv57, sv64
public:
@ -202,40 +239,40 @@ class VM_Version : public Abstract_VM_Version {
//
// Fields description in `decl`:
// (declaration name, extension name, bit value from linux, feature string?, mapped flag)
#define RV_EXT_FEATURE_FLAGS(decl) \
decl(ext_I , i , ('I' - 'A'), true , NO_UPDATE_DEFAULT) \
decl(ext_M , m , ('M' - 'A'), true , NO_UPDATE_DEFAULT) \
decl(ext_A , a , ('A' - 'A'), true , NO_UPDATE_DEFAULT) \
decl(ext_F , f , ('F' - 'A'), true , NO_UPDATE_DEFAULT) \
decl(ext_D , d , ('D' - 'A'), true , NO_UPDATE_DEFAULT) \
decl(ext_C , c , ('C' - 'A'), true , UPDATE_DEFAULT(UseRVC)) \
decl(ext_Q , q , ('Q' - 'A'), true , NO_UPDATE_DEFAULT) \
decl(ext_H , h , ('H' - 'A'), true , NO_UPDATE_DEFAULT) \
decl(ext_V , v , ('V' - 'A'), true , UPDATE_DEFAULT(UseRVV)) \
decl(ext_Zicbom , Zicbom , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZicbom)) \
decl(ext_Zicboz , Zicboz , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZicboz)) \
decl(ext_Zicbop , Zicbop , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZicbop)) \
decl(ext_Zba , Zba , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZba)) \
decl(ext_Zbb , Zbb , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZbb)) \
decl(ext_Zbc , Zbc , RV_NO_FLAG_BIT, true , NO_UPDATE_DEFAULT) \
decl(ext_Zbs , Zbs , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZbs)) \
decl(ext_Zbkb , Zbkb , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZbkb)) \
decl(ext_Zcb , Zcb , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZcb)) \
decl(ext_Zfa , Zfa , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZfa)) \
decl(ext_Zfh , Zfh , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZfh)) \
decl(ext_Zfhmin , Zfhmin , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZfhmin)) \
decl(ext_Zicsr , Zicsr , RV_NO_FLAG_BIT, true , NO_UPDATE_DEFAULT) \
decl(ext_Zicntr , Zicntr , RV_NO_FLAG_BIT, true , NO_UPDATE_DEFAULT) \
decl(ext_Zifencei , Zifencei , RV_NO_FLAG_BIT, true , NO_UPDATE_DEFAULT) \
decl(ext_Zic64b , Zic64b , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZic64b)) \
decl(ext_Ztso , Ztso , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZtso)) \
decl(ext_Zihintpause , Zihintpause , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZihintpause)) \
decl(ext_Zacas , Zacas , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZacas)) \
decl(ext_Zvbb , Zvbb , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT_DEP(UseZvbb, ext_V)) \
decl(ext_Zvbc , Zvbc , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT_DEP(UseZvbc, ext_V)) \
decl(ext_Zvfh , Zvfh , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT_DEP(UseZvfh, ext_V)) \
decl(ext_Zvkn , Zvkn , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT_DEP(UseZvkn, ext_V)) \
decl(ext_Zicond , Zicond , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZicond)) \
#define RV_EXT_FEATURE_FLAGS(decl) \
decl(ext_I , i , ('I' - 'A'), true , NO_UPDATE_DEFAULT) \
decl(ext_M , m , ('M' - 'A'), true , NO_UPDATE_DEFAULT) \
decl(ext_A , a , ('A' - 'A'), true , NO_UPDATE_DEFAULT) \
decl(ext_F , f , ('F' - 'A'), true , NO_UPDATE_DEFAULT) \
decl(ext_D , d , ('D' - 'A'), true , NO_UPDATE_DEFAULT) \
decl(ext_C , c , ('C' - 'A'), true , UPDATE_DEFAULT(UseRVC)) \
decl(ext_Q , q , ('Q' - 'A'), true , NO_UPDATE_DEFAULT) \
decl(ext_H , h , ('H' - 'A'), true , NO_UPDATE_DEFAULT) \
decl(ext_V , v , ('V' - 'A'), true , UPDATE_DEFAULT(UseRVV)) \
decl(ext_Zicbom , Zicbom , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZicbom)) \
decl(ext_Zicboz , Zicboz , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZicboz)) \
decl(ext_Zicbop , Zicbop , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZicbop)) \
decl(ext_Zba , Zba , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZba)) \
decl(ext_Zbb , Zbb , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZbb)) \
decl(ext_Zbc , Zbc , RV_NO_FLAG_BIT, true , NO_UPDATE_DEFAULT) \
decl(ext_Zbs , Zbs , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZbs)) \
decl(ext_Zbkb , Zbkb , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZbkb)) \
decl(ext_Zcb , Zcb , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZcb)) \
decl(ext_Zfa , Zfa , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZfa)) \
decl(ext_Zfh , Zfh , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZfh)) \
decl(ext_Zfhmin , Zfhmin , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZfhmin)) \
decl(ext_Zicsr , Zicsr , RV_NO_FLAG_BIT, true , NO_UPDATE_DEFAULT) \
decl(ext_Zicntr , Zicntr , RV_NO_FLAG_BIT, true , NO_UPDATE_DEFAULT) \
decl(ext_Zifencei , Zifencei , RV_NO_FLAG_BIT, true , NO_UPDATE_DEFAULT) \
decl(ext_Zic64b , Zic64b , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZic64b)) \
decl(ext_Ztso , Ztso , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZtso)) \
decl(ext_Zihintpause , Zihintpause , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZihintpause)) \
decl(ext_Zacas , Zacas , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZacas)) \
decl(ext_Zvbb , Zvbb , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT_DEP(UseZvbb, &ext_V, nullptr)) \
decl(ext_Zvbc , Zvbc , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT_DEP(UseZvbc, &ext_V, nullptr)) \
decl(ext_Zvfh , Zvfh , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT_DEP(UseZvfh, &ext_V, &ext_Zfh, nullptr)) \
decl(ext_Zvkn , Zvkn , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT_DEP(UseZvkn, &ext_V, nullptr)) \
decl(ext_Zicond , Zicond , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZicond)) \
#define DECLARE_RV_EXT_FEATURE(NAME, PRETTY, LINUX_BIT, FSTRING, FLAGF) \
struct NAME##RVExtFeatureValue : public RVExtFeatureValue { \
@ -251,11 +288,12 @@ class VM_Version : public Abstract_VM_Version {
// Non-extension features
//
#define RV_NON_EXT_FEATURE_FLAGS(decl) \
decl(unaligned_access , Unaligned , RV_NO_FLAG_BIT, false, NO_UPDATE_DEFAULT) \
decl(mvendorid , VendorId , RV_NO_FLAG_BIT, false, NO_UPDATE_DEFAULT) \
decl(marchid , ArchId , RV_NO_FLAG_BIT, false, NO_UPDATE_DEFAULT) \
decl(mimpid , ImpId , RV_NO_FLAG_BIT, false, NO_UPDATE_DEFAULT) \
decl(satp_mode , SATP , RV_NO_FLAG_BIT, false, NO_UPDATE_DEFAULT) \
decl(unaligned_scalar , UnalignedScalar , RV_NO_FLAG_BIT, false, NO_UPDATE_DEFAULT) \
decl(unaligned_vector , UnalignedVector , RV_NO_FLAG_BIT, false, NO_UPDATE_DEFAULT) \
decl(zicboz_block_size, ZicbozBlockSize , RV_NO_FLAG_BIT, false, NO_UPDATE_DEFAULT) \
#define DECLARE_RV_NON_EXT_FEATURE(NAME, PRETTY, LINUX_BIT, FSTRING, FLAGF) \
@ -396,12 +434,19 @@ private:
static VM_MODE parse_satp_mode(const char* vm_mode);
// Values from riscv_hwprobe()
enum UNALIGNED_ACCESS : int {
MISALIGNED_UNKNOWN = 0,
MISALIGNED_EMULATED = 1,
MISALIGNED_SLOW = 2,
MISALIGNED_FAST = 3,
MISALIGNED_UNSUPPORTED = 4
enum UNALIGNED_SCALAR_ACCESS : int {
MISALIGNED_SCALAR_UNKNOWN = 0,
MISALIGNED_SCALAR_EMULATED = 1,
MISALIGNED_SCALAR_SLOW = 2,
MISALIGNED_SCALAR_FAST = 3,
MISALIGNED_SCALAR_UNSUPPORTED = 4
};
enum UNALIGNED_VECTOR_ACCESS : int {
MISALIGNED_VECTOR_UNKNOWN = 0,
MISALIGNED_VECTOR_SLOW = 2,
MISALIGNED_VECTOR_FAST = 3,
MISALIGNED_VECTOR_UNSUPPORTED = 4
};
// Null terminated list

View File

@ -169,7 +169,7 @@ static void vmembk_print_on(outputStream* os);
////////////////////////////////////////////////////////////////////////////////
// global variables (for a description see os_aix.hpp)
size_t os::Aix::_physical_memory = 0;
physical_memory_size_type os::Aix::_physical_memory = 0;
pthread_t os::Aix::_main_thread = ((pthread_t)0);
@ -254,43 +254,43 @@ static bool is_close_to_brk(address a) {
return false;
}
bool os::free_memory(size_t& value) {
bool os::free_memory(physical_memory_size_type& value) {
return Aix::available_memory(value);
}
bool os::available_memory(size_t& value) {
bool os::available_memory(physical_memory_size_type& value) {
return Aix::available_memory(value);
}
bool os::Aix::available_memory(size_t& value) {
bool os::Aix::available_memory(physical_memory_size_type& value) {
os::Aix::meminfo_t mi;
if (os::Aix::get_meminfo(&mi)) {
value = static_cast<size_t>(mi.real_free);
value = static_cast<physical_memory_size_type>(mi.real_free);
return true;
} else {
return false;
}
}
bool os::total_swap_space(size_t& value) {
bool os::total_swap_space(physical_memory_size_type& value) {
perfstat_memory_total_t memory_info;
if (libperfstat::perfstat_memory_total(nullptr, &memory_info, sizeof(perfstat_memory_total_t), 1) == -1) {
return false;
}
value = static_cast<size_t>(memory_info.pgsp_total * 4 * K);
value = static_cast<physical_memory_size_type>(memory_info.pgsp_total * 4 * K);
return true;
}
bool os::free_swap_space(size_t& value) {
bool os::free_swap_space(physical_memory_size_type& value) {
perfstat_memory_total_t memory_info;
if (libperfstat::perfstat_memory_total(nullptr, &memory_info, sizeof(perfstat_memory_total_t), 1) == -1) {
return false;
}
value = static_cast<size_t>(memory_info.pgsp_free * 4 * K);
value = static_cast<physical_memory_size_type>(memory_info.pgsp_free * 4 * K);
return true;
}
size_t os::physical_memory() {
physical_memory_size_type os::physical_memory() {
return Aix::physical_memory();
}
@ -329,7 +329,7 @@ void os::Aix::initialize_system_info() {
if (!os::Aix::get_meminfo(&mi)) {
assert(false, "os::Aix::get_meminfo failed.");
}
_physical_memory = static_cast<size_t>(mi.real_total);
_physical_memory = static_cast<physical_memory_size_type>(mi.real_total);
}
// Helper function for tracing page sizes.
@ -2192,7 +2192,7 @@ jint os::init_2(void) {
os::Posix::init_2();
trcVerbose("processor count: %d", os::_processor_count);
trcVerbose("physical memory: %zu", Aix::_physical_memory);
trcVerbose("physical memory: " PHYS_MEM_TYPE_FORMAT, Aix::_physical_memory);
// Initially build up the loaded dll map.
LoadedLibraries::reload();
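Neither physical_memory_size_type nor PHYS_MEM_TYPE_FORMAT is defined in the hunks shown here. A plausible shape, offered purely as an assumption, pairs a fixed 64-bit type with a format macro that always matches it, so call sites no longer need %zu to agree with size_t:

#include <cinttypes>
#include <cstdio>

// Assumed definitions, not the actual ones from the patch.
typedef uint64_t physical_memory_size_type;
#define PHYS_MEM_TYPE_FORMAT "%" PRIu64

int main() {
  physical_memory_size_type mem = 16ULL << 30; // 16 GiB
  std::printf("physical " PHYS_MEM_TYPE_FORMAT "k\n", mem >> 10);
  return 0;
}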

View File

@ -35,7 +35,7 @@ class os::Aix {
private:
static size_t _physical_memory;
static physical_memory_size_type _physical_memory;
static pthread_t _main_thread;
// 0 = uninitialized, otherwise 16 bit number:
@ -54,9 +54,9 @@ class os::Aix {
// 1 - EXTSHM=ON
static int _extshm;
static bool available_memory(size_t& value);
static bool free_memory(size_t& value);
static size_t physical_memory() { return _physical_memory; }
static bool available_memory(physical_memory_size_type& value);
static bool free_memory(physical_memory_size_type& value);
static physical_memory_size_type physical_memory() { return _physical_memory; }
static void initialize_system_info();
// OS recognitions (AIX OS level) call this before calling Aix::os_version().

View File

@ -114,7 +114,7 @@
////////////////////////////////////////////////////////////////////////////////
// global variables
size_t os::Bsd::_physical_memory = 0;
physical_memory_size_type os::Bsd::_physical_memory = 0;
#ifdef __APPLE__
mach_timebase_info_data_t os::Bsd::_timebase_info = {0, 0};
@ -133,19 +133,19 @@ static volatile int processor_id_next = 0;
////////////////////////////////////////////////////////////////////////////////
// utility functions
bool os::available_memory(size_t& value) {
bool os::available_memory(physical_memory_size_type& value) {
return Bsd::available_memory(value);
}
bool os::free_memory(size_t& value) {
bool os::free_memory(physical_memory_size_type& value) {
return Bsd::available_memory(value);
}
// Available here means free. Note that this number is not of much use. As an estimate
// for future memory pressure it is far too conservative, since macOS will use a lot
// of unused memory for caches and return it willingly when needed.
bool os::Bsd::available_memory(size_t& value) {
uint64_t available = static_cast<uint64_t>(physical_memory() >> 2);
bool os::Bsd::available_memory(physical_memory_size_type& value) {
physical_memory_size_type available = physical_memory() >> 2;
#ifdef __APPLE__
mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
vm_statistics64_data_t vmstat;
@ -160,7 +160,7 @@ bool os::Bsd::available_memory(size_t& value) {
return false;
}
#endif
value = static_cast<size_t>(available);
value = available;
return true;
}
@ -180,35 +180,35 @@ void os::Bsd::print_uptime_info(outputStream* st) {
}
}
bool os::total_swap_space(size_t& value) {
bool os::total_swap_space(physical_memory_size_type& value) {
#if defined(__APPLE__)
struct xsw_usage vmusage;
size_t size = sizeof(vmusage);
if (sysctlbyname("vm.swapusage", &vmusage, &size, nullptr, 0) != 0) {
return false;
}
value = static_cast<size_t>(vmusage.xsu_total);
value = static_cast<physical_memory_size_type>(vmusage.xsu_total);
return true;
#else
return false;
#endif
}
bool os::free_swap_space(size_t& value) {
bool os::free_swap_space(physical_memory_size_type& value) {
#if defined(__APPLE__)
struct xsw_usage vmusage;
size_t size = sizeof(vmusage);
if (sysctlbyname("vm.swapusage", &vmusage, &size, nullptr, 0) != 0) {
return false;
}
value = static_cast<size_t>(vmusage.xsu_avail);
value = static_cast<physical_memory_size_type>(vmusage.xsu_avail);
return true;
#else
return false;
#endif
}
size_t os::physical_memory() {
physical_memory_size_type os::physical_memory() {
return Bsd::physical_memory();
}
@ -286,7 +286,7 @@ void os::Bsd::initialize_system_info() {
len = sizeof(mem_val);
if (sysctl(mib, 2, &mem_val, &len, nullptr, 0) != -1) {
assert(len == sizeof(mem_val), "unexpected data size");
_physical_memory = static_cast<size_t>(mem_val);
_physical_memory = static_cast<physical_memory_size_type>(mem_val);
} else {
_physical_memory = 256 * 1024 * 1024; // fallback (XXXBSD?)
}
@ -297,7 +297,7 @@ void os::Bsd::initialize_system_info() {
// datasize rlimit restricts us anyway.
struct rlimit limits;
getrlimit(RLIMIT_DATA, &limits);
_physical_memory = MIN2(_physical_memory, static_cast<size_t>(limits.rlim_cur));
_physical_memory = MIN2(_physical_memory, static_cast<physical_memory_size_type>(limits.rlim_cur));
}
#endif
}
@ -1469,12 +1469,12 @@ void os::print_memory_info(outputStream* st) {
st->print("Memory:");
st->print(" %zuk page", os::vm_page_size()>>10);
size_t phys_mem = os::physical_memory();
st->print(", physical %zuk",
physical_memory_size_type phys_mem = os::physical_memory();
st->print(", physical " PHYS_MEM_TYPE_FORMAT "k",
phys_mem >> 10);
size_t avail_mem = 0;
physical_memory_size_type avail_mem = 0;
(void)os::available_memory(avail_mem);
st->print("(%zuk free)",
st->print("(" PHYS_MEM_TYPE_FORMAT "k free)",
avail_mem >> 10);
if((sysctlbyname("vm.swapusage", &swap_usage, &size, nullptr, 0) == 0) || (errno == ENOMEM)) {

View File

@ -42,12 +42,12 @@ class os::Bsd {
protected:
static size_t _physical_memory;
static physical_memory_size_type _physical_memory;
static pthread_t _main_thread;
static bool available_memory(size_t& value);
static bool free_memory(size_t& value);
static size_t physical_memory() { return _physical_memory; }
static bool available_memory(physical_memory_size_type& value);
static bool free_memory(physical_memory_size_type& value);
static physical_memory_size_type physical_memory() { return _physical_memory; }
static void initialize_system_info();
static void rebuild_cpu_to_node_map();

View File

@ -154,7 +154,7 @@ enum CoredumpFilterBit {
////////////////////////////////////////////////////////////////////////////////
// global variables
size_t os::Linux::_physical_memory = 0;
physical_memory_size_type os::Linux::_physical_memory = 0;
address os::Linux::_initial_thread_stack_bottom = nullptr;
uintptr_t os::Linux::_initial_thread_stack_size = 0;
@ -228,15 +228,15 @@ julong os::Linux::available_memory_in_container() {
return avail_mem;
}
bool os::available_memory(size_t& value) {
bool os::available_memory(physical_memory_size_type& value) {
return Linux::available_memory(value);
}
bool os::Linux::available_memory(size_t& value) {
bool os::Linux::available_memory(physical_memory_size_type& value) {
julong avail_mem = available_memory_in_container();
if (avail_mem != static_cast<julong>(-1L)) {
log_trace(os)("available container memory: " JULONG_FORMAT, avail_mem);
value = static_cast<size_t>(avail_mem);
value = static_cast<physical_memory_size_type>(avail_mem);
return true;
}
@ -252,28 +252,28 @@ bool os::Linux::available_memory(size_t& value) {
fclose(fp);
}
if (avail_mem == static_cast<julong>(-1L)) {
size_t free_mem = 0;
physical_memory_size_type free_mem = 0;
if (!free_memory(free_mem)) {
return false;
}
avail_mem = static_cast<julong>(free_mem);
}
log_trace(os)("available memory: " JULONG_FORMAT, avail_mem);
value = static_cast<size_t>(avail_mem);
value = static_cast<physical_memory_size_type>(avail_mem);
return true;
}
bool os::free_memory(size_t& value) {
bool os::free_memory(physical_memory_size_type& value) {
return Linux::free_memory(value);
}
bool os::Linux::free_memory(size_t& value) {
bool os::Linux::free_memory(physical_memory_size_type& value) {
// values in struct sysinfo are "unsigned long"
struct sysinfo si;
julong free_mem = available_memory_in_container();
if (free_mem != static_cast<julong>(-1L)) {
log_trace(os)("free container memory: " JULONG_FORMAT, free_mem);
value = static_cast<size_t>(free_mem);
value = static_cast<physical_memory_size_type>(free_mem);
return true;
}
@ -283,16 +283,16 @@ bool os::Linux::free_memory(size_t& value) {
}
free_mem = (julong)si.freeram * si.mem_unit;
log_trace(os)("free memory: " JULONG_FORMAT, free_mem);
value = static_cast<size_t>(free_mem);
value = static_cast<physical_memory_size_type>(free_mem);
return true;
}
bool os::total_swap_space(size_t& value) {
bool os::total_swap_space(physical_memory_size_type& value) {
if (OSContainer::is_containerized()) {
jlong memory_and_swap_limit_in_bytes = OSContainer::memory_and_swap_limit_in_bytes();
jlong memory_limit_in_bytes = OSContainer::memory_limit_in_bytes();
if (memory_limit_in_bytes > 0 && memory_and_swap_limit_in_bytes > 0) {
value = static_cast<size_t>(memory_and_swap_limit_in_bytes - memory_limit_in_bytes);
value = static_cast<physical_memory_size_type>(memory_and_swap_limit_in_bytes - memory_limit_in_bytes);
return true;
}
} // fallback to the host swap space if the container did return the unbound value of -1
@ -302,30 +302,30 @@ bool os::total_swap_space(size_t& value) {
assert(false, "sysinfo failed in total_swap_space(): %s", os::strerror(errno));
return false;
}
value = static_cast<size_t>(si.totalswap * si.mem_unit);
value = static_cast<physical_memory_size_type>(si.totalswap) * si.mem_unit;
return true;
}
static bool host_free_swap_f(size_t& value) {
static bool host_free_swap_f(physical_memory_size_type& value) {
struct sysinfo si;
int ret = sysinfo(&si);
if (ret != 0) {
assert(false, "sysinfo failed in host_free_swap_f(): %s", os::strerror(errno));
return false;
}
value = static_cast<size_t>(si.freeswap * si.mem_unit);
value = static_cast<physical_memory_size_type>(si.freeswap) * si.mem_unit;
return true;
}
bool os::free_swap_space(size_t& value) {
bool os::free_swap_space(physical_memory_size_type& value) {
// os::total_swap_space() might return the containerized limit which might be
// less than host_free_swap(). The upper bound of free swap needs to be the lower of the two.
size_t total_swap_space = 0;
size_t host_free_swap = 0;
physical_memory_size_type total_swap_space = 0;
physical_memory_size_type host_free_swap = 0;
if (!os::total_swap_space(total_swap_space) || !host_free_swap_f(host_free_swap)) {
return false;
}
size_t host_free_swap_val = MIN2(total_swap_space, host_free_swap);
physical_memory_size_type host_free_swap_val = MIN2(total_swap_space, host_free_swap);
if (OSContainer::is_containerized()) {
jlong mem_swap_limit = OSContainer::memory_and_swap_limit_in_bytes();
jlong mem_limit = OSContainer::memory_limit_in_bytes();
@ -341,31 +341,31 @@ bool os::free_swap_space(size_t& value) {
jlong delta_usage = mem_swap_usage - mem_usage;
if (delta_usage >= 0) {
jlong free_swap = delta_limit - delta_usage;
value = free_swap >= 0 ? static_cast<size_t>(free_swap) : 0;
value = free_swap >= 0 ? static_cast<physical_memory_size_type>(free_swap) : 0;
return true;
}
}
}
// unlimited or not supported. Fall through to return host value
log_trace(os,container)("os::free_swap_space: container_swap_limit=" JLONG_FORMAT
" container_mem_limit=" JLONG_FORMAT " returning host value: %zu",
" container_mem_limit=" JLONG_FORMAT " returning host value: " PHYS_MEM_TYPE_FORMAT,
mem_swap_limit, mem_limit, host_free_swap_val);
}
value = host_free_swap_val;
return true;
}
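In the container branch above, free swap is the swap-only headroom: the swap portion of the limit minus the swap portion of current usage. With assumed numbers: a memory+swap limit of 3 GiB against a memory limit of 2 GiB leaves delta_limit = 1 GiB of swap; usage of 2.5 GiB memory+swap against 2.2 GiB memory gives delta_usage = 0.3 GiB, so the reported free swap is 0.7 GiB, clamped below by zero.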
size_t os::physical_memory() {
physical_memory_size_type os::physical_memory() {
if (OSContainer::is_containerized()) {
jlong mem_limit;
if ((mem_limit = OSContainer::memory_limit_in_bytes()) > 0) {
log_trace(os)("total container memory: " JLONG_FORMAT, mem_limit);
return static_cast<size_t>(mem_limit);
return static_cast<physical_memory_size_type>(mem_limit);
}
}
size_t phys_mem = Linux::physical_memory();
log_trace(os)("total system memory: %zu", phys_mem);
physical_memory_size_type phys_mem = Linux::physical_memory();
log_trace(os)("total system memory: " PHYS_MEM_TYPE_FORMAT, phys_mem);
return phys_mem;
}
@ -549,7 +549,7 @@ void os::Linux::initialize_system_info() {
fclose(fp);
}
}
_physical_memory = static_cast<size_t>(sysconf(_SC_PHYS_PAGES)) * static_cast<size_t>(sysconf(_SC_PAGESIZE));
_physical_memory = static_cast<physical_memory_size_type>(sysconf(_SC_PHYS_PAGES)) * static_cast<physical_memory_size_type>(sysconf(_SC_PAGESIZE));
assert(processor_count() > 0, "linux error");
}
@ -2603,12 +2603,12 @@ void os::print_memory_info(outputStream* st) {
// values in struct sysinfo are "unsigned long"
struct sysinfo si;
sysinfo(&si);
size_t phys_mem = physical_memory();
st->print(", physical %zuk",
physical_memory_size_type phys_mem = physical_memory();
st->print(", physical " PHYS_MEM_TYPE_FORMAT "k",
phys_mem >> 10);
size_t avail_mem = 0;
physical_memory_size_type avail_mem = 0;
(void)os::available_memory(avail_mem);
st->print("(%zuk free)",
st->print("(" PHYS_MEM_TYPE_FORMAT "k free)",
avail_mem >> 10);
st->print(", swap " UINT64_FORMAT "k",
((jlong)si.totalswap * si.mem_unit) >> 10);

View File

@ -49,11 +49,11 @@ class os::Linux {
protected:
static size_t _physical_memory;
static physical_memory_size_type _physical_memory;
static pthread_t _main_thread;
static bool available_memory(size_t& value);
static bool free_memory(size_t& value);
static bool available_memory(physical_memory_size_type& value);
static bool free_memory(physical_memory_size_type& value);
static void initialize_system_info();
@ -116,7 +116,7 @@ class os::Linux {
static address initial_thread_stack_bottom(void) { return _initial_thread_stack_bottom; }
static uintptr_t initial_thread_stack_size(void) { return _initial_thread_stack_size; }
static size_t physical_memory() { return _physical_memory; }
static physical_memory_size_type physical_memory() { return _physical_memory; }
static julong host_swap();
static intptr_t* ucontext_get_sp(const ucontext_t* uc);

View File

@ -834,22 +834,22 @@ jlong os::elapsed_frequency() {
}
bool os::available_memory(size_t& value) {
bool os::available_memory(physical_memory_size_type& value) {
return win32::available_memory(value);
}
bool os::free_memory(size_t& value) {
bool os::free_memory(physical_memory_size_type& value) {
return win32::available_memory(value);
}
bool os::win32::available_memory(size_t& value) {
bool os::win32::available_memory(physical_memory_size_type& value) {
// Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
// value if total memory is larger than 4GB
MEMORYSTATUSEX ms;
ms.dwLength = sizeof(ms);
BOOL res = GlobalMemoryStatusEx(&ms);
if (res == TRUE) {
value = static_cast<size_t>(ms.ullAvailPhys);
value = static_cast<physical_memory_size_type>(ms.ullAvailPhys);
return true;
} else {
assert(false, "GlobalMemoryStatusEx failed in os::win32::available_memory(): %lu", ::GetLastError());
@ -857,12 +857,12 @@ bool os::win32::available_memory(size_t& value) {
}
}
bool os::total_swap_space(size_t& value) {
bool os::total_swap_space(physical_memory_size_type& value) {
MEMORYSTATUSEX ms;
ms.dwLength = sizeof(ms);
BOOL res = GlobalMemoryStatusEx(&ms);
if (res == TRUE) {
value = static_cast<size_t>(ms.ullTotalPageFile);
value = static_cast<physical_memory_size_type>(ms.ullTotalPageFile);
return true;
} else {
assert(false, "GlobalMemoryStatusEx failed in os::total_swap_space(): %lu", ::GetLastError());
@ -870,12 +870,12 @@ bool os::total_swap_space(size_t& value) {
}
}
bool os::free_swap_space(size_t& value) {
bool os::free_swap_space(physical_memory_size_type& value) {
MEMORYSTATUSEX ms;
ms.dwLength = sizeof(ms);
BOOL res = GlobalMemoryStatusEx(&ms);
if (res == TRUE) {
value = static_cast<size_t>(ms.ullAvailPageFile);
value = static_cast<physical_memory_size_type>(ms.ullAvailPageFile);
return true;
} else {
assert(false, "GlobalMemoryStatusEx failed in os::free_swap_space(): %lu", ::GetLastError());
@ -883,7 +883,7 @@ bool os::free_swap_space(size_t& value) {
}
}
size_t os::physical_memory() {
physical_memory_size_type os::physical_memory() {
return win32::physical_memory();
}
@ -3947,25 +3947,25 @@ int os::current_process_id() {
return (_initial_pid ? _initial_pid : _getpid());
}
int os::win32::_processor_type = 0;
int os::win32::_processor_type = 0;
// Processor level is not available on non-NT systems, use vm_version instead
int os::win32::_processor_level = 0;
size_t os::win32::_physical_memory = 0;
int os::win32::_processor_level = 0;
physical_memory_size_type os::win32::_physical_memory = 0;
bool os::win32::_is_windows_server = false;
bool os::win32::_is_windows_server = false;
// 6573254
// Currently, the bug is observed across all the supported Windows releases,
// including the latest one (as of this writing - Windows Server 2012 R2)
bool os::win32::_has_exit_bug = true;
bool os::win32::_has_exit_bug = true;
int os::win32::_major_version = 0;
int os::win32::_minor_version = 0;
int os::win32::_build_number = 0;
int os::win32::_build_minor = 0;
int os::win32::_major_version = 0;
int os::win32::_minor_version = 0;
int os::win32::_build_number = 0;
int os::win32::_build_minor = 0;
bool os::win32::_processor_group_warning_displayed = false;
bool os::win32::_job_object_processor_group_warning_displayed = false;
bool os::win32::_processor_group_warning_displayed = false;
bool os::win32::_job_object_processor_group_warning_displayed = false;
void getWindowsInstallationType(char* buffer, int bufferSize) {
HKEY hKey;
@ -4184,7 +4184,7 @@ void os::win32::initialize_system_info() {
if (res != TRUE) {
assert(false, "GlobalMemoryStatusEx failed in os::win32::initialize_system_info(): %lu", ::GetLastError());
}
_physical_memory = static_cast<size_t>(ms.ullTotalPhys);
_physical_memory = static_cast<physical_memory_size_type>(ms.ullTotalPhys);
if (FLAG_IS_DEFAULT(MaxRAM)) {
// Adjust MaxRAM according to the maximum virtual address space available.

View File

@ -38,18 +38,18 @@ class os::win32 {
friend class os;
protected:
static int _processor_type;
static int _processor_level;
static size_t _physical_memory;
static bool _is_windows_server;
static bool _has_exit_bug;
static bool _processor_group_warning_displayed;
static bool _job_object_processor_group_warning_displayed;
static int _processor_type;
static int _processor_level;
static physical_memory_size_type _physical_memory;
static bool _is_windows_server;
static bool _has_exit_bug;
static bool _processor_group_warning_displayed;
static bool _job_object_processor_group_warning_displayed;
static int _major_version;
static int _minor_version;
static int _build_number;
static int _build_minor;
static int _major_version;
static int _minor_version;
static int _build_number;
static int _build_minor;
static void print_windows_version(outputStream* st);
static void print_uptime_info(outputStream* st);
@ -102,9 +102,9 @@ class os::win32 {
static int processor_level() {
return _processor_level;
}
static bool available_memory(size_t& value);
static bool free_memory(size_t& value);
static size_t physical_memory() { return _physical_memory; }
static bool available_memory(physical_memory_size_type& value);
static bool free_memory(physical_memory_size_type& value);
static physical_memory_size_type physical_memory() { return _physical_memory; }
// load dll from Windows system directory or Windows directory
static HINSTANCE load_Windows_dll(const char* name, char *ebuf, int ebuflen);

View File

@ -89,7 +89,24 @@
#define RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
#define RISCV_HWPROBE_MISALIGNED_MASK (7 << 0)
#define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6
#define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6
#define RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS 7
#define RISCV_HWPROBE_KEY_TIME_CSR_FREQ 8
#define RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF 9
#define RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN 0
#define RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED 1
#define RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW 2
#define RISCV_HWPROBE_MISALIGNED_SCALAR_FAST 3
#define RISCV_HWPROBE_MISALIGNED_SCALAR_UNSUPPORTED 4
#define RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF 10
#define RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN 0
#define RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW 2
#define RISCV_HWPROBE_MISALIGNED_VECTOR_FAST 3
#define RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED 4
#ifndef NR_riscv_hwprobe
#ifndef NR_arch_specific_syscall
@ -117,7 +134,11 @@ static struct riscv_hwprobe query[] = {{RISCV_HWPROBE_KEY_MVENDORID, 0},
{RISCV_HWPROBE_KEY_BASE_BEHAVIOR, 0},
{RISCV_HWPROBE_KEY_IMA_EXT_0, 0},
{RISCV_HWPROBE_KEY_CPUPERF_0, 0},
{RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE, 0}};
{RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE, 0},
{RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS, 0},
{RISCV_HWPROBE_KEY_TIME_CSR_FREQ, 0},
{RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF, 0},
{RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF, 0}};
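For readers unfamiliar with the probing interface: riscv_hwprobe is a Linux syscall (since kernel 6.4) that fills in the value for each requested key and rewrites the key to -1 when it does not recognize it, which is presumably what is_valid() checks below. A hedged, standalone sketch (not HotSpot code; needs riscv64 Linux and kernel headers providing <asm/hwprobe.h>):

#include <asm/hwprobe.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF
#define RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF 9
#endif

int main(void) {
  struct riscv_hwprobe pairs[] = {
    { RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF, 0 },
  };
  // cpusetsize = 0, cpus = NULL: ask about behavior common to all CPUs.
  if (syscall(__NR_riscv_hwprobe, pairs, 1, 0, NULL, 0) != 0) {
    perror("riscv_hwprobe");
    return 1;
  }
  if (pairs[0].key != -1) { // key survived: the kernel knows this key
    printf("misaligned scalar perf: %lld\n", (long long)pairs[0].value);
  }
  return 0;
}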
bool RiscvHwprobe::probe_features() {
assert(!rw_hwprobe_completed, "Called twice.");
@ -246,9 +267,20 @@ void RiscvHwprobe::add_features_from_query_result() {
VM_Version::ext_Zicond.enable_feature();
}
#endif
// RISCV_HWPROBE_KEY_CPUPERF_0 is deprecated and returns values similar
// to RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF. Keep querying it for
// backward compatibility with old kernels.
if (is_valid(RISCV_HWPROBE_KEY_CPUPERF_0)) {
VM_Version::unaligned_access.enable_feature(
VM_Version::unaligned_scalar.enable_feature(
query[RISCV_HWPROBE_KEY_CPUPERF_0].value & RISCV_HWPROBE_MISALIGNED_MASK);
} else if (is_valid(RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF)) {
VM_Version::unaligned_scalar.enable_feature(
query[RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF].value);
}
if (is_valid(RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF)) {
VM_Version::unaligned_vector.enable_feature(
query[RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF].value);
}
if (is_valid(RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE)) {
VM_Version::zicboz_block_size.enable_feature(query[RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE].value);

View File

@ -303,7 +303,7 @@ void VM_Version::rivos_features() {
ext_Zvfh.enable_feature();
unaligned_access.enable_feature(MISALIGNED_FAST);
unaligned_scalar.enable_feature(MISALIGNED_SCALAR_FAST);
satp_mode.enable_feature(VM_SV48);
// Features dependent on march/mimpid.

View File

@ -97,7 +97,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
};
friend class ClassLoaderDataGraph;
friend class ClassLoaderDataGraphKlassIteratorAtomic;
friend class ClassLoaderDataGraphIteratorAtomic;
friend class Klass;
friend class MetaDataFactory;
friend class Method;

View File

@ -489,62 +489,25 @@ void ClassLoaderDataGraph::purge(bool at_safepoint) {
}
}
ClassLoaderDataGraphKlassIteratorAtomic::ClassLoaderDataGraphKlassIteratorAtomic()
: _next_klass(nullptr) {
ClassLoaderDataGraphIteratorAtomic::ClassLoaderDataGraphIteratorAtomic()
: _cld(nullptr) {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
ClassLoaderData* cld = ClassLoaderDataGraph::_head;
Klass* klass = nullptr;
// Find the first klass in the CLDG.
while (cld != nullptr) {
assert_locked_or_safepoint(cld->metaspace_lock());
klass = cld->_klasses;
if (klass != nullptr) {
_next_klass = klass;
return;
}
cld = cld->next();
}
_cld = AtomicAccess::load_acquire(&ClassLoaderDataGraph::_head);
}
Klass* ClassLoaderDataGraphKlassIteratorAtomic::next_klass_in_cldg(Klass* klass) {
Klass* next = klass->next_link();
if (next != nullptr) {
return next;
}
// No more klasses in the current CLD. Time to find a new CLD.
ClassLoaderData* cld = klass->class_loader_data();
assert_locked_or_safepoint(cld->metaspace_lock());
while (next == nullptr) {
cld = cld->next();
if (cld == nullptr) {
break;
ClassLoaderData* ClassLoaderDataGraphIteratorAtomic::next() {
ClassLoaderData* cur = AtomicAccess::load(&_cld);
for (;;) {
if (cur == nullptr) {
return nullptr;
}
next = cld->_klasses;
}
return next;
}
Klass* ClassLoaderDataGraphKlassIteratorAtomic::next_klass() {
Klass* head = _next_klass;
while (head != nullptr) {
Klass* next = next_klass_in_cldg(head);
Klass* old_head = AtomicAccess::cmpxchg(&_next_klass, head, next);
if (old_head == head) {
return head; // Won the CAS.
ClassLoaderData* next = cur->next();
ClassLoaderData* old;
if ((old = AtomicAccess::cmpxchg(&_cld, cur, next)) == cur) {
return cur;
}
head = old_head;
cur = old;
}
// Nothing more for the iterator to hand out.
assert(head == nullptr, "head is " PTR_FORMAT ", expected not null:", p2i(head));
return nullptr;
}
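The replacement next() is the classic lock-free claim loop: every worker races a cmpxchg to advance the shared cursor, and whoever wins owns the node. A self-contained sketch of the same pattern using std::atomic in place of HotSpot's AtomicAccess (Node and AtomicListIterator are illustrative names):

#include <atomic>
#include <cstdio>

struct Node { int payload; Node* next; };

// Hands out each node exactly once across any number of concurrent
// callers, without a lock.
class AtomicListIterator {
  std::atomic<Node*> _cur;
public:
  explicit AtomicListIterator(Node* head) : _cur(head) {}
  Node* next() {
    Node* cur = _cur.load();
    while (cur != nullptr) {
      // On failure, compare_exchange_weak reloads cur and we retry.
      if (_cur.compare_exchange_weak(cur, cur->next)) {
        return cur; // won the race; this node belongs to this caller
      }
    }
    return nullptr;
  }
};

int main() {
  Node c{3, nullptr}, b{2, &c}, a{1, &b};
  AtomicListIterator it(&a);
  for (Node* n = it.next(); n != nullptr; n = it.next()) {
    std::printf("%d\n", n->payload); // 1 2 3
  }
}

Iterating whole CLDs instead of individual Klasses also means one cmpxchg per class loader rather than one per class, which should reduce contention on the shared cursor.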
void ClassLoaderDataGraph::verify() {

View File

@ -34,7 +34,7 @@
class ClassLoaderDataGraph : public AllStatic {
friend class ClassLoaderData;
friend class ClassLoaderDataGraphKlassIteratorAtomic;
friend class ClassLoaderDataGraphIteratorAtomic;
friend class VMStructs;
private:
class ClassLoaderDataGraphIterator;
@ -140,14 +140,14 @@ public:
}
};
// An iterator that distributes Klasses to parallel worker threads.
class ClassLoaderDataGraphKlassIteratorAtomic : public StackObj {
Klass* volatile _next_klass;
public:
ClassLoaderDataGraphKlassIteratorAtomic();
Klass* next_klass();
private:
static Klass* next_klass_in_cldg(Klass* klass);
// An iterator that distributes Klasses to parallel worker threads based on CLDs.
class ClassLoaderDataGraphIteratorAtomic : public StackObj {
ClassLoaderData* volatile _cld;
public:
ClassLoaderDataGraphIteratorAtomic();
ClassLoaderData* next();
};
#endif // SHARE_CLASSFILE_CLASSLOADERDATAGRAPH_HPP

View File

@ -72,10 +72,10 @@ CompactHashtableWriter::~CompactHashtableWriter() {
FREE_C_HEAP_ARRAY(GrowableArray<Entry>*, _buckets);
}
// Add a symbol entry to the temporary hash table
void CompactHashtableWriter::add(unsigned int hash, u4 value) {
// Add an entry to the temporary hash table
void CompactHashtableWriter::add(unsigned int hash, u4 encoded_value) {
int index = hash % _num_buckets;
_buckets[index]->append_if_missing(Entry(hash, value));
_buckets[index]->append_if_missing(Entry(hash, encoded_value));
_num_entries_written++;
}
@ -107,27 +107,28 @@ void CompactHashtableWriter::allocate_table() {
SharedSpaceObjectAlignment);
}
// Write the compact table's buckets
// Write the compact table's buckets and entries
void CompactHashtableWriter::dump_table(NumberSeq* summary) {
u4 offset = 0;
for (int index = 0; index < _num_buckets; index++) {
GrowableArray<Entry>* bucket = _buckets[index];
int bucket_size = bucket->length();
if (bucket_size == 1) {
// bucket with one entry is compacted and only has the symbol offset
_compact_buckets->at_put(index, BUCKET_INFO(offset, VALUE_ONLY_BUCKET_TYPE));
Entry ent = bucket->at(0);
_compact_entries->at_put(offset++, ent.value());
// bucket with one entry is value_only and only has the encoded_value
_compact_entries->at_put(offset++, ent.encoded_value());
_num_value_only_buckets++;
} else {
// regular bucket, each entry is a symbol (hash, offset) pair
// regular bucket, it could contain zero or more than one entry,
// each entry is a <hash, encoded_value> pair
_compact_buckets->at_put(index, BUCKET_INFO(offset, REGULAR_BUCKET_TYPE));
for (int i=0; i<bucket_size; i++) {
Entry ent = bucket->at(i);
_compact_entries->at_put(offset++, u4(ent.hash())); // write entry hash
_compact_entries->at_put(offset++, ent.value());
_compact_entries->at_put(offset++, u4(ent.hash())); // write entry hash
_compact_entries->at_put(offset++, ent.encoded_value()); // write entry encoded_value
}
if (bucket_size == 0) {
_num_empty_buckets++;
@ -189,15 +190,7 @@ void SimpleCompactHashtable::init(address base_address, u4 entry_count, u4 bucke
_entries = entries;
}
size_t SimpleCompactHashtable::calculate_header_size() {
// We have 5 fields. Each takes up sizeof(intptr_t). See WriteClosure::do_u4
size_t bytes = sizeof(intptr_t) * 5;
return bytes;
}
void SimpleCompactHashtable::serialize_header(SerializeClosure* soc) {
// NOTE: if you change this function, you MUST change the number 5 in
// calculate_header_size() accordingly.
soc->do_u4(&_entry_count);
soc->do_u4(&_bucket_count);
soc->do_ptr(&_buckets);

View File

@ -35,7 +35,7 @@
template <
typename K,
typename V,
V (*DECODE)(address base_address, u4 offset),
V (*DECODE)(address base_address, u4 encoded_value),
bool (*EQUALS)(V value, K key, int len)
>
class CompactHashtable;
@ -62,8 +62,9 @@ public:
// The compact hash table writer. Used at dump time for writing out
// the compact table to the shared archive.
//
// At dump time, the CompactHashtableWriter obtains all entries from the
// symbol/string table and adds them to a new temporary hash table. The hash
// At dump time, the CompactHashtableWriter obtains all entries from
// a table (any collection of <hash, encoded_value> pairs)
// and adds them to a new temporary hash table (_buckets). The hash
// table size (number of buckets) is calculated using
// '(num_entries + bucket_size - 1) / bucket_size'. The default bucket
// size is 4 and can be changed by -XX:SharedSymbolTableBucketSize option.
@ -76,10 +77,10 @@ public:
// above the CompactHashtable class for the table layout detail. The bucket
// offsets are written to the archive as part of the compact table. The
// bucket offset is encoded in the low 30-bit (0-29) and the bucket type
// (regular or compact) are encoded in bit[31, 30]. For buckets with more
// than one entry, both hash and entry offset are written to the
// table. For buckets with only one entry, only the entry offset is written
// to the table and the buckets are tagged as compact in their type bits.
// (regular or value_only) are encoded in bit[31, 30]. For buckets with more
// than one entry, both hash and encoded_value are written to the
// table. For buckets with only one entry, only the encoded_value is written
// to the table and the buckets are tagged as value_only in their type bits.
// Buckets without entry are skipped from the table. Their offsets are
// still written out for faster lookup.
//
@ -87,21 +88,21 @@ class CompactHashtableWriter: public StackObj {
public:
class Entry {
unsigned int _hash;
u4 _value;
u4 _encoded_value;
public:
Entry() {}
Entry(unsigned int hash, u4 val) : _hash(hash), _value(val) {}
Entry(unsigned int hash, u4 encoded_value) : _hash(hash), _encoded_value(encoded_value) {}
u4 value() {
return _value;
u4 encoded_value() {
return _encoded_value;
}
unsigned int hash() {
return _hash;
}
bool operator==(const CompactHashtableWriter::Entry& other) {
return (_value == other._value && _hash == other._hash);
return (_encoded_value == other._encoded_value && _hash == other._hash);
}
}; // class CompactHashtableWriter::Entry
@ -121,7 +122,8 @@ public:
CompactHashtableWriter(int num_entries, CompactHashtableStats* stats);
~CompactHashtableWriter();
void add(unsigned int hash, u4 value);
void add(unsigned int hash, u4 encoded_value);
void dump(SimpleCompactHashtable *cht, const char* table_name);
private:
void allocate_table();
@ -131,9 +133,6 @@ private:
// calculation of num_buckets can result in zero buckets; we need at least one
return (num_buckets < 1) ? 1 : num_buckets;
}
public:
void dump(SimpleCompactHashtable *cht, const char* table_name);
};
#endif // INCLUDE_CDS
@ -148,7 +147,8 @@ public:
/////////////////////////////////////////////////////////////////////////////
//
// CompactHashtable is used to store the CDS archive's symbol/string tables.
// CompactHashtable is used to store the CDS archive's tables.
// A table can be any collection of <hash, encoded_value> pairs.
//
// Because these tables are read-only (no entries can be added/deleted) at run-time
// and tend to have large number of entries, we try to minimize the footprint
@ -162,32 +162,47 @@ public:
// The size of buckets[] is 'num_buckets + 1'. Each entry of
// buckets[] is a 32-bit encoding of the bucket type and bucket offset,
// with the type in the left-most 2-bit and offset in the remaining 30-bit.
// The last entry is a special type. It contains the end of the last
// bucket.
//
// There are two types of buckets, regular buckets and value_only buckets. The
// value_only buckets have '01' in their highest 2-bit, and regular buckets have
// '00' in their highest 2-bit.
// There are three types of buckets: regular, value_only, and table_end.
// . The regular buckets have '00' in their highest 2-bit.
// . The value_only buckets have '01' in their highest 2-bit.
// . There is only a single table_end bucket that marks the end of buckets[].
// It has '11' in its highest 2-bit.
//
// For normal buckets, each entry is 8 bytes in the entries[]:
// u4 hash; /* symbol/string hash */
// union {
// u4 offset; /* Symbol* sym = (Symbol*)(base_address + offset) */
// narrowOop str; /* String narrowOop encoding */
// }
// For regular buckets, each entry is 8 bytes in the entries[]:
// u4 hash; // entry hash
// u4 encoded_value; // A 32-bit encoding of the template type V. The template parameter DECODE
// // converts this to type V. Many CompactHashtables encode a pointer as a 32-bit offset, where
// // V entry = (V)(base_address + offset)
// // see StringTable, SymbolTable and AdapterHandlerLibrary for examples
//
// For value_only buckets, each entry has only the 4-byte 'encoded_value' in the entries[].
//
// For value_only buckets, each entry has only the 4-byte 'offset' in the entries[].
// The single table_end bucket has no corresponding entry.
//
// Example -- note that the second bucket is a VALUE_ONLY_BUCKET_TYPE so the hash code
// is skipped.
// buckets[0, 4, 5, ....]
// | | |
// | | +---+
// | | |
// | +----+ |
// v v v
// entries[H,O,H,O,O,H,O,H,O.....]
// The number of entries in bucket <i> can be calculated like this:
// my_offset = _buckets[i] & 0x3fffffff; // mask off top 2-bit
// next_offset = _buckets[i+1] & 0x3fffffff
// For REGULAR_BUCKET_TYPE
// num_entries = (next_offset - my_offset) / 8;
// For VALUE_ONLY_BUCKET_TYPE
// num_entries = (next_offset - my_offset) / 4;
//
// If bucket <i> is empty, we have my_offset == next_offset. Empty buckets are
// always encoded as regular buckets.
//
// In the following example:
// - Bucket #0 is a REGULAR_BUCKET_TYPE with two entries
// - Bucket #1 is a VALUE_ONLY_BUCKET_TYPE with one entry.
// - Bucket #2 is a REGULAR_BUCKET_TYPE with zero entries.
//
// buckets[0, 4, 5(empty), 5, ...., N(table_end)]
// | | | | |
// | | +---+-----+ |
// | | | |
// | +----+ + |
// v v v v
// entries[H,O,H,O,O,H,O,H,O........]
//
// See CompactHashtable::lookup() for how the table is searched at runtime.
// See CompactHashtableWriter::dump() for how the table is written at CDS
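To make the 2-bit type / 30-bit offset packing concrete, a small illustrative re-creation (the real BUCKET_* macros live in HotSpot; these constants are redeclared here just for the sketch, with table_end as '11' = 3 per the comment above):

#include <cstdint>
#include <cstdio>

const uint32_t REGULAR_BUCKET_TYPE    = 0;
const uint32_t VALUE_ONLY_BUCKET_TYPE = 1;
const uint32_t TABLEEND_BUCKET_TYPE   = 3;

// Pack/unpack a bucket word: type in bits 31..30, offset in bits 29..0.
inline uint32_t bucket_info(uint32_t offset, uint32_t type) {
  return (type << 30) | (offset & 0x3fffffff);
}
inline uint32_t bucket_offset(uint32_t info) { return info & 0x3fffffff; }
inline uint32_t bucket_type(uint32_t info)   { return info >> 30; }

int main() {
  uint32_t info = bucket_info(4, VALUE_ONLY_BUCKET_TYPE);
  std::printf("type=%u offset=%u\n", bucket_type(info), bucket_offset(info)); // type=1 offset=4
}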
@ -230,21 +245,19 @@ public:
inline size_t entry_count() const {
return _entry_count;
}
static size_t calculate_header_size();
};
template <
typename K,
typename V,
V (*DECODE)(address base_address, u4 offset),
V (*DECODE)(address base_address, u4 encoded_value),
bool (*EQUALS)(V value, K key, int len)
>
class CompactHashtable : public SimpleCompactHashtable {
friend class VMStructs;
V decode(u4 offset) const {
return DECODE(_base_address, offset);
V decode(u4 encoded_value) const {
return DECODE(_base_address, encoded_value);
}
public:
@ -264,7 +277,7 @@ public:
}
} else {
// This is a regular bucket, which has more than one
// entries. Each entry is a pair of entry (hash, offset).
// entry. Each entry is a (hash, value) pair.
// Seek until the end of the bucket.
u4* entry_max = _entries + BUCKET_OFFSET(_buckets[index + 1]);
while (entry < entry_max) {

View File

@ -1064,7 +1064,7 @@ void CompileBroker::possibly_add_compiler_threads(JavaThread* THREAD) {
if (new_c2_count <= old_c2_count && new_c1_count <= old_c1_count) return;
// Now, we do the more expensive operations.
size_t free_memory = 0;
physical_memory_size_type free_memory = 0;
// Return value ignored - defaulting to 0 on failure.
(void)os::free_memory(free_memory);
// If SegmentedCodeCache is off, both values refer to the single heap (with type CodeBlobType::All).

View File

@ -203,9 +203,7 @@ uint G1NUMA::index_for_region(G1HeapRegion* hr) const {
// * G1HeapRegion #: |-#0-||-#1-||-#2-||-#3-||-#4-||-#5-||-#6-||-#7-||-#8-||-#9-||#10-||#11-||#12-||#13-||#14-||#15-|
// * NUMA node #: |----#0----||----#1----||----#2----||----#3----||----#0----||----#1----||----#2----||----#3----|
void G1NUMA::request_memory_on_node(void* aligned_address, size_t size_in_bytes, uint region_index) {
if (!is_enabled()) {
return;
}
assert(is_enabled(), "must be, check before");
if (size_in_bytes == 0) {
return;

View File

@ -96,13 +96,15 @@ class G1RegionsLargerThanCommitSizeMapper : public G1RegionToSpaceMapper {
const size_t start_page = (size_t)start_idx * _pages_per_region;
const size_t size_in_pages = num_regions * _pages_per_region;
bool zero_filled = _storage.commit(start_page, size_in_pages);
if (_memory_tag == mtJavaHeap) {
if (should_distribute_across_numa_nodes()) {
for (uint region_index = start_idx; region_index < start_idx + num_regions; region_index++ ) {
void* address = _storage.page_start(region_index * _pages_per_region);
size_t size_in_bytes = _storage.page_size() * _pages_per_region;
G1NUMA::numa()->request_memory_on_node(address, size_in_bytes, region_index);
}
}
if (AlwaysPreTouch) {
_storage.pretouch(start_page, size_in_pages, pretouch_workers);
}
@ -122,7 +124,7 @@ class G1RegionsLargerThanCommitSizeMapper : public G1RegionToSpaceMapper {
// G1RegionToSpaceMapper implementation where the region granularity is smaller
// than the commit granularity.
// Basically, the contents of one OS page span several regions.
// Basically, the contents of one OS page spans several regions.
class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
size_t _regions_per_page;
// Lock to prevent bitmap updates and the actual underlying
@ -148,13 +150,18 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
return _region_commit_map.find_first_set_bit(region, region_limit) != region_limit;
}
void numa_request_on_node(size_t page_idx) {
if (_memory_tag == mtJavaHeap) {
uint region = (uint)(page_idx * _regions_per_page);
void* address = _storage.page_start(page_idx);
size_t size_in_bytes = _storage.page_size();
G1NUMA::numa()->request_memory_on_node(address, size_in_bytes, region);
bool commit_pages(size_t start_page, size_t size_in_pages) {
bool result = _storage.commit(start_page, size_in_pages);
if (should_distribute_across_numa_nodes()) {
for (size_t page = start_page; page < start_page + size_in_pages; page++) {
uint region = checked_cast<uint>(page * _regions_per_page);
void* address = _storage.page_start(page);
size_t size_in_bytes = _storage.page_size();
G1NUMA::numa()->request_memory_on_node(address, size_in_bytes, region);
}
}
return result;
}
public:
@ -171,6 +178,21 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
guarantee((page_size * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
}
size_t find_first_uncommitted(size_t page, size_t end) {
assert(page < end, "must be");
while (page < end && is_page_committed(page)) {
page++;
}
return page;
}
size_t find_first_committed(size_t page, size_t end) {
while (page < end && !is_page_committed(page)) {
page++;
}
return MIN2(page, end);
}
virtual void commit_regions(uint start_idx, size_t num_regions, WorkerThreads* pretouch_workers) {
uint region_limit = (uint)(start_idx + num_regions);
assert(num_regions > 0, "Must commit at least one region");
@ -179,11 +201,11 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
size_t const NoPage = SIZE_MAX;
size_t first_committed = NoPage;
size_t num_committed = 0;
size_t first_newly_committed = NoPage;
size_t num_committed_pages = 0;
size_t start_page = region_idx_to_page_idx(start_idx);
size_t end_page = region_idx_to_page_idx(region_limit - 1);
size_t const start_page = region_idx_to_page_idx(start_idx);
size_t const end_page = region_idx_to_page_idx(region_limit - 1) + 1;
bool all_zero_filled = true;
@ -191,34 +213,27 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
// underlying OS page. See lock declaration for more details.
{
MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
for (size_t page = start_page; page <= end_page; page++) {
if (!is_page_committed(page)) {
// Page not committed.
if (num_committed == 0) {
first_committed = page;
}
num_committed++;
if (!_storage.commit(page, 1)) {
// Found dirty region during commit.
all_zero_filled = false;
}
size_t uncommitted_l = find_first_uncommitted(start_page, end_page);
size_t uncommitted_r = find_first_committed(uncommitted_l + 1, end_page);
// Move memory to correct NUMA node for the heap.
numa_request_on_node(page);
} else {
// Page already committed.
all_zero_filled = false;
}
first_newly_committed = uncommitted_l;
num_committed_pages = uncommitted_r - uncommitted_l;
if (num_committed_pages > 0 &&
!commit_pages(first_newly_committed, num_committed_pages)) {
all_zero_filled = false;
}
all_zero_filled &= (uncommitted_l == start_page) && (uncommitted_r == end_page);
// Update the commit map for the given range. Not using the par_set_range
// since updates to _region_commit_map for this mapper is protected by _lock.
_region_commit_map.set_range(start_idx, region_limit, BitMap::unknown_range);
}
if (AlwaysPreTouch && num_committed > 0) {
_storage.pretouch(first_committed, num_committed, pretouch_workers);
if (AlwaysPreTouch && num_committed_pages > 0) {
_storage.pretouch(first_newly_committed, num_committed_pages, pretouch_workers);
}
fire_on_commit(start_idx, num_regions, all_zero_filled);
@ -230,8 +245,8 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
assert(_region_commit_map.find_first_clear_bit(start_idx, region_limit) == region_limit,
"Should only be committed regions in the range [%u, %u)", start_idx, region_limit);
size_t start_page = region_idx_to_page_idx(start_idx);
size_t end_page = region_idx_to_page_idx(region_limit - 1);
size_t const start_page = region_idx_to_page_idx(start_idx);
size_t const end_page = region_idx_to_page_idx(region_limit - 1) + 1;
// Concurrent operations might operate on regions sharing the same
// underlying OS page. See lock declaration for more details.
@ -240,13 +255,16 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
// updates to _region_commit_map for this mapper is protected by _lock.
_region_commit_map.clear_range(start_idx, region_limit, BitMap::unknown_range);
for (size_t page = start_page; page <= end_page; page++) {
// We know all pages were committed before clearing the map. If the
// page is still marked as committed after the clear, we should
// not uncommit it.
if (!is_page_committed(page)) {
_storage.uncommit(page, 1);
}
// We know all pages were committed before clearing the map. If the
// page is still marked as committed after the clear, we should
// not uncommit it.
size_t uncommitted_l = find_first_uncommitted(start_page, end_page);
size_t uncommitted_r = find_first_committed(uncommitted_l + 1, end_page);
size_t num_uncommitted_pages_found = uncommitted_r - uncommitted_l;
if (num_uncommitted_pages_found > 0) {
_storage.uncommit(uncommitted_l, num_uncommitted_pages_found);
}
}
};
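The two find_first_* helpers let commit_regions() and uncommit_regions() operate on one contiguous run of pages instead of testing and committing page by page. A sketch of that scan over a plain bool vector; it assumes, as the code above appears to, that the range holds at most one uncommitted run:

#include <algorithm>
#include <cstdio>
#include <utility>
#include <vector>

// Returns the first run of uncommitted pages in [start, end) as
// {first, count}; count is 0 when every page is already committed.
static std::pair<size_t, size_t>
first_uncommitted_run(const std::vector<bool>& committed, size_t start, size_t end) {
  size_t l = start;
  while (l < end && committed[l]) l++;   // cf. find_first_uncommitted
  if (l == end) return {end, 0};
  size_t r = l + 1;
  while (r < end && !committed[r]) r++;  // cf. find_first_committed
  return {l, std::min(r, end) - l};
}

int main() {
  std::vector<bool> committed = {true, false, false, true};
  std::pair<size_t, size_t> run = first_uncommitted_run(committed, 0, committed.size());
  std::printf("first=%zu count=%zu\n", run.first, run.second); // first=1 count=2
}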
@ -257,6 +275,10 @@ void G1RegionToSpaceMapper::fire_on_commit(uint start_idx, size_t num_regions, b
}
}
bool G1RegionToSpaceMapper::should_distribute_across_numa_nodes() const {
return _memory_tag == mtJavaHeap && G1NUMA::numa()->is_enabled();
}
G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_mapper(ReservedSpace rs,
size_t actual_size,
size_t page_size,

View File

@ -58,6 +58,8 @@ class G1RegionToSpaceMapper : public CHeapObj<mtGC> {
G1RegionToSpaceMapper(ReservedSpace rs, size_t used_size, size_t page_size, size_t region_granularity, size_t commit_factor, MemTag mem_tag);
void fire_on_commit(uint start_idx, size_t num_regions, bool zero_filled);
bool should_distribute_across_numa_nodes() const;
public:
MemRegion reserved() { return _storage.reserved(); }


@ -62,8 +62,9 @@ void GCInitLogger::print_cpu() {
}
void GCInitLogger::print_memory() {
size_t memory = os::physical_memory();
log_info_p(gc, init)("Memory: " PROPERFMT, PROPERFMTARGS(memory));
physical_memory_size_type memory = os::physical_memory();
log_info_p(gc, init)("Memory: " PHYS_MEM_TYPE_FORMAT "%s",
byte_size_in_proper_unit(memory), proper_unit_for_byte_size(memory));
}
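The open-coded byte_size_in_proper_unit/proper_unit_for_byte_size pair replaces PROPERFMT/PROPERFMTARGS here because PROPERFMT effectively hard-codes a size_t width; with the 64-bit physical_memory_size_type the width has to come from PHYS_MEM_TYPE_FORMAT instead. The ZLargePages hunk below takes the other route and keeps PROPERFMT by casting back to size_t.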
void GCInitLogger::print_large_pages() {


@ -27,7 +27,7 @@
#include "code/codeCache.hpp"
#include "gc/shared/parallelCleaning.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/klass.inline.hpp"
#include "runtime/atomicAccess.hpp"
CodeCacheUnloadingTask::CodeCacheUnloadingTask(uint num_workers, bool unloading_occurred) :
@ -94,38 +94,26 @@ void CodeCacheUnloadingTask::work(uint worker_id) {
}
}
KlassCleaningTask::KlassCleaningTask() :
_clean_klass_tree_claimed(false),
_klass_iterator() {
}
bool KlassCleaningTask::claim_clean_klass_tree_task() {
if (_clean_klass_tree_claimed) {
return false;
}
return !AtomicAccess::cmpxchg(&_clean_klass_tree_claimed, false, true);
}
InstanceKlass* KlassCleaningTask::claim_next_klass() {
Klass* klass;
do {
klass = _klass_iterator.next_klass();
} while (klass != nullptr && !klass->is_instance_klass());
// this can be null so don't call InstanceKlass::cast
return static_cast<InstanceKlass*>(klass);
}
void KlassCleaningTask::work() {
// One worker will clean the subklass/sibling klass tree.
if (claim_clean_klass_tree_task()) {
Klass::clean_weak_klass_links(true /* class_unloading_occurred */, false /* clean_alive_klasses */);
}
for (ClassLoaderData* cur = _cld_iterator_atomic.next(); cur != nullptr; cur = _cld_iterator_atomic.next()) {
class CleanKlasses : public KlassClosure {
public:
// All workers will help cleaning the classes,
InstanceKlass* klass;
while ((klass = claim_next_klass()) != nullptr) {
Klass::clean_weak_instanceklass_links(klass);
void do_klass(Klass* klass) override {
klass->clean_subklass(true);
Klass* sibling = klass->next_sibling(true);
klass->set_next_sibling(sibling);
if (klass->is_instance_klass()) {
Klass::clean_weak_instanceklass_links(InstanceKlass::cast(klass));
}
assert(klass->subklass() == nullptr || klass->subklass()->is_loader_alive(), "must be");
assert(klass->next_sibling(false) == nullptr || klass->next_sibling(false)->is_loader_alive(), "must be");
}
} cl;
cur->classes_do(&cl);
}
}
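Disentangled from the interleaved old/new lines, the reworked task amounts to a closure applied per ClassLoaderData. A sketch of the new shape as the hunk reads, with the CleanKlasses body elided:

    void KlassCleaningTask::work() {
      CleanKlasses cl;  // unlinks dead subklass/sibling links, cleans weak links
      // Every worker claims whole CLDs from the shared atomic iterator, so the
      // old claim_clean_klass_tree_task()/claim_next_klass() protocol is gone.
      for (ClassLoaderData* cld = _cld_iterator_atomic.next();
           cld != nullptr;
           cld = _cld_iterator_atomic.next()) {
        cld->classes_do(&cl);  // visits every klass defined by this loader
      }
    }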


@ -54,14 +54,10 @@ public:
// Cleans out the Klass tree from stale data.
class KlassCleaningTask : public StackObj {
volatile bool _clean_klass_tree_claimed;
ClassLoaderDataGraphKlassIteratorAtomic _klass_iterator;
bool claim_clean_klass_tree_task();
InstanceKlass* claim_next_klass();
ClassLoaderDataGraphIteratorAtomic _cld_iterator_atomic;
public:
KlassCleaningTask();
KlassCleaningTask() : _cld_iterator_atomic() { }
void work();
};


@ -31,7 +31,7 @@ bool ZLargePages::_os_enforced_transparent_mode;
void ZLargePages::initialize() {
pd_initialize();
const size_t memory = os::physical_memory();
const size_t memory = static_cast<size_t>(os::physical_memory());
log_info_p(gc, init)("Memory: " PROPERFMT, PROPERFMTARGS(memory));
log_info_p(gc, init)("Large Page Support: %s", to_string());
}


@ -423,7 +423,7 @@ JVM_ENTRY_NO_ENV(jlong, jfr_host_total_swap_memory(JNIEnv* env, jclass jvm))
// We want the host swap memory, not the container value.
return os::Linux::host_swap();
#else
size_t total_swap_space = 0;
physical_memory_size_type total_swap_space = 0;
// Return value ignored - defaulting to 0 on failure.
(void)os::total_swap_space(total_swap_space);
return static_cast<jlong>(total_swap_space);


@ -528,23 +528,23 @@ TRACE_REQUEST_FUNC(ThreadAllocationStatistics) {
* the total memory reported is the amount of memory configured for the guest OS by the hypervisor.
*/
TRACE_REQUEST_FUNC(PhysicalMemory) {
u8 totalPhysicalMemory = static_cast<u8>(os::physical_memory());
physical_memory_size_type totalPhysicalMemory = os::physical_memory();
EventPhysicalMemory event;
event.set_totalSize(totalPhysicalMemory);
size_t avail_mem = 0;
physical_memory_size_type avail_mem = 0;
// Return value ignored - defaulting to 0 on failure.
(void)os::available_memory(avail_mem);
event.set_usedSize(totalPhysicalMemory - static_cast<u8>(avail_mem));
event.set_usedSize(totalPhysicalMemory - avail_mem);
event.commit();
}
TRACE_REQUEST_FUNC(SwapSpace) {
EventSwapSpace event;
size_t total_swap_space = 0;
physical_memory_size_type total_swap_space = 0;
// Return value ignored - defaulting to 0 on failure.
(void)os::total_swap_space(total_swap_space);
event.set_totalSize(static_cast<s8>(total_swap_space));
size_t free_swap_space = 0;
physical_memory_size_type free_swap_space = 0;
// Return value ignored - defaulting to 0 on failure.
(void)os::free_swap_space(free_swap_space);
event.set_freeSize(static_cast<s8>(free_swap_space));


@ -42,7 +42,8 @@ void MemoryFileTracker::allocate_memory(MemoryFile* file, size_t offset,
MemTag mem_tag) {
NativeCallStackStorage::StackIndex sidx = _stack_storage.push(stack);
VMATree::RegionData regiondata(sidx, mem_tag);
VMATree::SummaryDiff diff = file->_tree.commit_mapping(offset, size, regiondata);
VMATree::SummaryDiff diff;
file->_tree.commit_mapping(offset, size, regiondata, diff);
for (int i = 0; i < mt_number_of_tags; i++) {
VirtualMemory* summary = file->_summary.by_tag(NMTUtil::index_to_tag(i));
summary->reserve_memory(diff.tag[i].commit);
@ -51,7 +52,8 @@ void MemoryFileTracker::allocate_memory(MemoryFile* file, size_t offset,
}
void MemoryFileTracker::free_memory(MemoryFile* file, size_t offset, size_t size) {
VMATree::SummaryDiff diff = file->_tree.release_mapping(offset, size);
VMATree::SummaryDiff diff;
file->_tree.release_mapping(offset, size, diff);
for (int i = 0; i < mt_number_of_tags; i++) {
VirtualMemory* summary = file->_summary.by_tag(NMTUtil::index_to_tag(i));
summary->reserve_memory(diff.tag[i].commit);
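This is the same API migration that repeats through the NMT files below: register_mapping and its wrappers no longer return a VMATree::SummaryDiff by value but fill a caller-provided one, clearing it first (see the diff.clear() call in register_mapping further down). The calling convention, before and after, with base, size and rd standing in for the real arguments:

    // before: VMATree::SummaryDiff diff = tree()->reserve_mapping(base, size, rd);
    // after:
    VMATree::SummaryDiff diff;
    tree()->reserve_mapping(base, size, rd, diff);
    apply_summary_diff(diff);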


@ -25,12 +25,12 @@
#include "nmt/regionsTree.inline.hpp"
#include "nmt/virtualMemoryTracker.hpp"
VMATree::SummaryDiff RegionsTree::commit_region(address addr, size_t size, const NativeCallStack& stack) {
return commit_mapping((VMATree::position)addr, size, make_region_data(stack, mtNone), /*use tag inplace*/ true);
void RegionsTree::commit_region(address addr, size_t size, const NativeCallStack& stack, VMATree::SummaryDiff& diff) {
commit_mapping((VMATree::position)addr, size, make_region_data(stack, mtNone), diff, /*use tag inplace*/ true);
}
VMATree::SummaryDiff RegionsTree::uncommit_region(address addr, size_t size) {
return uncommit_mapping((VMATree::position)addr, size, make_region_data(NativeCallStack::empty_stack(), mtNone));
void RegionsTree::uncommit_region(address addr, size_t size, VMATree::SummaryDiff& diff) {
uncommit_mapping((VMATree::position)addr, size, make_region_data(NativeCallStack::empty_stack(), mtNone), diff);
}
#ifdef ASSERT


@ -47,8 +47,8 @@ class RegionsTree : public VMATree {
VirtualMemoryRegion find_reserved_region(address addr);
SummaryDiff commit_region(address addr, size_t size, const NativeCallStack& stack);
SummaryDiff uncommit_region(address addr, size_t size);
void commit_region(address addr, size_t size, const NativeCallStack& stack, SummaryDiff& diff);
void uncommit_region(address addr, size_t size, SummaryDiff& diff);
using Node = VMATree::TNode;


@ -69,7 +69,8 @@ void VirtualMemoryTracker::Instance::add_reserved_region(address base_addr, size
void VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
const NativeCallStack& stack, MemTag mem_tag) {
VMATree::SummaryDiff diff = tree()->reserve_mapping((size_t)base_addr, size, tree()->make_region_data(stack, mem_tag));
VMATree::SummaryDiff diff;
tree()->reserve_mapping((size_t)base_addr, size, tree()->make_region_data(stack, mem_tag), diff);
apply_summary_diff(diff);
}
@ -148,7 +149,8 @@ void VirtualMemoryTracker::Instance::add_committed_region(address addr, size_t s
void VirtualMemoryTracker::add_committed_region(address addr, size_t size,
const NativeCallStack& stack) {
VMATree::SummaryDiff diff = tree()->commit_region(addr, size, stack);
VMATree::SummaryDiff diff;
tree()->commit_region(addr, size, stack, diff);
apply_summary_diff(diff);
}
@ -159,7 +161,8 @@ void VirtualMemoryTracker::Instance::remove_uncommitted_region(address addr, siz
void VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) {
MemTracker::assert_locked();
VMATree::SummaryDiff diff = tree()->uncommit_region(addr, size);
VMATree::SummaryDiff diff;
tree()->uncommit_region(addr, size, diff);
apply_summary_diff(diff);
}
@ -169,7 +172,8 @@ void VirtualMemoryTracker::Instance::remove_released_region(address addr, size_t
}
void VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
VMATree::SummaryDiff diff = tree()->release_mapping((VMATree::position)addr, size);
VMATree::SummaryDiff diff;
tree()->release_mapping((VMATree::position)addr, size, diff);
apply_summary_diff(diff);
}


@ -242,14 +242,14 @@ void VMATree::update_region(TNode* n1, TNode* n2, const RequestInfo& req, Summar
}
VMATree::SummaryDiff VMATree::register_mapping(position _A, position _B, StateType state,
const RegionData& metadata, bool use_tag_inplace) {
void VMATree::register_mapping(position _A, position _B, StateType state,
const RegionData& metadata, VMATree::SummaryDiff& diff, bool use_tag_inplace) {
diff.clear();
if (_A == _B) {
return SummaryDiff();
return;
}
assert(_A < _B, "should be");
SummaryDiff diff;
RequestInfo req{_A, _B, state, metadata.mem_tag, metadata.stack_idx, use_tag_inplace};
IntervalChange stA{
IntervalState{StateType::Released, empty_regiondata},
@ -644,8 +644,6 @@ VMATree::SummaryDiff VMATree::register_mapping(position _A, position _B, StateTy
while(to_be_removed.length() != 0) {
_tree.remove(to_be_removed.pop());
}
return diff;
}
#ifdef ASSERT
@ -702,7 +700,8 @@ VMATree::SummaryDiff VMATree::set_tag(const position start, const size size, con
// Ignore any released ranges, these must be mtNone and have no stack
if (type != StateType::Released) {
RegionData new_data = RegionData(out.reserved_stack(), tag);
SummaryDiff result = register_mapping(from, end, type, new_data);
SummaryDiff result;
register_mapping(from, end, type, new_data, result);
diff.add(result);
}
@ -723,7 +722,8 @@ VMATree::SummaryDiff VMATree::set_tag(const position start, const size size, con
if (type != StateType::Released) {
RegionData new_data = RegionData(out.reserved_stack(), tag);
SummaryDiff result = register_mapping(from, end, type, new_data);
SummaryDiff result;
register_mapping(from, end, type, new_data, result);
diff.add(result);
}
remsize = remsize - (end - from);


@ -241,6 +241,9 @@ public:
struct SummaryDiff {
SingleDiff tag[mt_number_of_tags];
SummaryDiff() {
clear();
}
void clear() {
for (int i = 0; i < mt_number_of_tags; i++) {
tag[i] = SingleDiff{0, 0};
}
@ -283,7 +286,7 @@ public:
};
private:
SummaryDiff register_mapping(position A, position B, StateType state, const RegionData& metadata, bool use_tag_inplace = false);
void register_mapping(position A, position B, StateType state, const RegionData& metadata, SummaryDiff& diff, bool use_tag_inplace = false);
StateType get_new_state(const StateType existinting_state, const RequestInfo& req) const;
MemTag get_new_tag(const MemTag existinting_tag, const RequestInfo& req) const;
SIndex get_new_reserve_callstack(const SIndex existinting_stack, const StateType ex, const RequestInfo& req) const;
@ -298,12 +301,12 @@ public:
}
public:
SummaryDiff reserve_mapping(position from, size size, const RegionData& metadata) {
return register_mapping(from, from + size, StateType::Reserved, metadata, false);
void reserve_mapping(position from, size size, const RegionData& metadata, SummaryDiff& diff) {
register_mapping(from, from + size, StateType::Reserved, metadata, diff, false);
}
SummaryDiff commit_mapping(position from, size size, const RegionData& metadata, bool use_tag_inplace = false) {
return register_mapping(from, from + size, StateType::Committed, metadata, use_tag_inplace);
void commit_mapping(position from, size size, const RegionData& metadata, SummaryDiff& diff, bool use_tag_inplace = false) {
register_mapping(from, from + size, StateType::Committed, metadata, diff, use_tag_inplace);
}
// Given an interval and a tag, find all reserved and committed ranges at least
@ -312,12 +315,12 @@ public:
// Released regions are ignored.
SummaryDiff set_tag(position from, size size, MemTag tag);
SummaryDiff uncommit_mapping(position from, size size, const RegionData& metadata) {
return register_mapping(from, from + size, StateType::Reserved, metadata, true);
void uncommit_mapping(position from, size size, const RegionData& metadata, SummaryDiff& diff) {
register_mapping(from, from + size, StateType::Reserved, metadata, diff, true);
}
SummaryDiff release_mapping(position from, position sz) {
return register_mapping(from, from + sz, StateType::Released, VMATree::empty_regiondata);
void release_mapping(position from, position sz, SummaryDiff& diff) {
register_mapping(from, from + sz, StateType::Released, VMATree::empty_regiondata, diff);
}
public:


@ -614,8 +614,7 @@ GrowableArray<Klass*>* Klass::compute_secondary_supers(int num_extra_slots,
// subklass links. Used by the compiler (and vtable initialization)
// May be cleaned concurrently, so must use the Compile_lock.
// The log parameter is for clean_weak_klass_links to report unlinked classes.
Klass* Klass::subklass(bool log) const {
Klass* Klass::subklass() const {
// Need load_acquire on the _subklass, because it races with inserts that
// publishes freshly initialized data.
for (Klass* chain = AtomicAccess::load_acquire(&_subklass);
@ -626,11 +625,6 @@ Klass* Klass::subklass(bool log) const {
{
if (chain->is_loader_alive()) {
return chain;
} else if (log) {
if (log_is_enabled(Trace, class, unload)) {
ResourceMark rm;
log_trace(class, unload)("unlinking class (subclass): %s", chain->external_name());
}
}
}
return nullptr;
@ -701,15 +695,20 @@ void Klass::append_to_sibling_list() {
DEBUG_ONLY(verify();)
}
void Klass::clean_subklass() {
// The log parameter is for clean_weak_klass_links to report unlinked classes.
Klass* Klass::clean_subklass(bool log) {
for (;;) {
// Need load_acquire, due to contending with concurrent inserts
Klass* subklass = AtomicAccess::load_acquire(&_subklass);
if (subklass == nullptr || subklass->is_loader_alive()) {
return;
return subklass;
}
if (log && log_is_enabled(Trace, class, unload)) {
ResourceMark rm;
log_trace(class, unload)("unlinking class (subclass): %s", subklass->external_name());
}
// Try to fix _subklass until it points at something not dead.
AtomicAccess::cmpxchg(&_subklass, subklass, subklass->next_sibling());
AtomicAccess::cmpxchg(&_subklass, subklass, subklass->next_sibling(log));
}
}
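The loop above is the usual lock-free unlink: reload the head with acquire semantics and CAS it past dead nodes until it lands on a live one or null. A compact, self-contained illustration of the idiom, with std::atomic standing in for AtomicAccess (not HotSpot code):

    #include <atomic>

    struct Node {
      std::atomic<Node*> next{nullptr};
      bool alive = true;
    };

    // Returns the first live node reachable from head, unlinking dead ones.
    // A failed CAS (concurrent insert) is harmless: the loop reloads and retries.
    Node* clean_head(std::atomic<Node*>& head) {
      for (;;) {
        Node* n = head.load(std::memory_order_acquire);
        if (n == nullptr || n->alive) {
          return n;
        }
        Node* next = n->next.load(std::memory_order_acquire);
        head.compare_exchange_weak(n, next, std::memory_order_acq_rel);
      }
    }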
@ -728,8 +727,7 @@ void Klass::clean_weak_klass_links(bool unloading_occurred, bool clean_alive_kla
assert(current->is_loader_alive(), "just checking, this should be live");
// Find and set the first alive subklass
Klass* sub = current->subklass(true);
current->clean_subklass();
Klass* sub = current->clean_subklass(true);
if (sub != nullptr) {
stack.push(sub);
}


@ -300,7 +300,7 @@ protected:
// Use InstanceKlass::contains_field_offset to classify field offsets.
// sub/superklass links
Klass* subklass(bool log = false) const;
Klass* subklass() const;
Klass* next_sibling(bool log = false) const;
void append_to_sibling_list(); // add newly created receiver to superklass' subklass list
@ -413,9 +413,9 @@ protected:
virtual ModuleEntry* module() const = 0;
virtual PackageEntry* package() const = 0;
void set_next_sibling(Klass* s);
protected: // internal accessors
void set_subklass(Klass* s);
void set_next_sibling(Klass* s);
private:
static uint8_t compute_hash_slot(Symbol* s);
@ -743,7 +743,7 @@ public:
inline bool is_loader_alive() const;
inline bool is_loader_present_and_alive() const;
void clean_subklass();
Klass* clean_subklass(bool log = false);
// Clean out unnecessary weak klass links from the whole klass hierarchy.
static void clean_weak_klass_links(bool unloading_occurred, bool clean_alive_klasses = true);


@ -1206,6 +1206,11 @@ static const Type* mod_value(const PhaseGVN* phase, const Node* in1, const Node*
if (t1 == Type::TOP) { return Type::TOP; }
if (t2 == Type::TOP) { return Type::TOP; }
// Mod by zero? Throw exception at runtime!
if (t2 == TypeInteger::zero(bt)) {
return Type::TOP;
}
// We always generate the dynamic check for 0.
// 0 MOD X is 0
if (t1 == TypeInteger::zero(bt)) { return t1; }
@ -1215,11 +1220,6 @@ static const Type* mod_value(const PhaseGVN* phase, const Node* in1, const Node*
return TypeInteger::zero(bt);
}
// Mod by zero? Throw exception at runtime!
if (t2 == TypeInteger::zero(bt)) {
return Type::TOP;
}
const TypeInteger* i1 = t1->is_integer(bt);
const TypeInteger* i2 = t2->is_integer(bt);
if (i1->is_con() && i2->is_con()) {


@ -1358,11 +1358,11 @@ jvmtiError VM_RedefineClasses::load_new_class_versions() {
// constant pools
HandleMark hm(current);
InstanceKlass* the_class = get_ik(_class_defs[i].klass);
size_t avail_mem = 0;
physical_memory_size_type avail_mem = 0;
// Return value ignored - defaulting to 0 on failure.
(void)os::available_memory(avail_mem);
log_debug(redefine, class, load)
("loading name=%s kind=%d (avail_mem=%zuK)",
("loading name=%s kind=%d (avail_mem=" PHYS_MEM_TYPE_FORMAT "K)",
the_class->external_name(), _class_load_kind, avail_mem >> 10);
ClassFileStream st((u1*)_class_defs[i].class_bytes,
@ -1530,7 +1530,7 @@ jvmtiError VM_RedefineClasses::load_new_class_versions() {
// Return value ignored - defaulting to 0 on failure.
(void)os::available_memory(avail_mem);
log_debug(redefine, class, load)
("loaded name=%s (avail_mem=%zuK)", the_class->external_name(), avail_mem >> 10);
("loaded name=%s (avail_mem=" PHYS_MEM_TYPE_FORMAT "K)", the_class->external_name(), avail_mem >> 10);
}
return JVMTI_ERROR_NONE;
@ -4438,11 +4438,11 @@ void VM_RedefineClasses::redefine_single_class(Thread* current, jclass the_jclas
ResourceMark rm(current);
// increment the classRedefinedCount field in the_class and in any
// direct and indirect subclasses of the_class
size_t avail_mem = 0;
physical_memory_size_type avail_mem = 0;
// Return value ignored - defaulting to 0 on failure.
(void)os::available_memory(avail_mem);
log_info(redefine, class, load)
("redefined name=%s, count=%d (avail_mem=%zuK)",
("redefined name=%s, count=%d (avail_mem=" PHYS_MEM_TYPE_FORMAT "K)",
the_class->external_name(), java_lang_Class::classRedefinedCount(the_class->java_mirror()), avail_mem >> 10);
Events::log_redefinition(current, "redefined class name=%s, count=%d",
the_class->external_name(),


@ -2510,7 +2510,7 @@ WB_END
// Available memory of the host machine (container-aware)
WB_ENTRY(jlong, WB_HostAvailableMemory(JNIEnv* env, jobject o))
size_t avail_mem = 0;
physical_memory_size_type avail_mem = 0;
// Return value ignored - defaulting to 0 on failure.
(void)os::available_memory(avail_mem);
return static_cast<jlong>(avail_mem);


@ -1649,7 +1649,7 @@ jint Arguments::set_aggressive_heap_flags() {
// Thus, we need to make sure we're using a julong for intermediate
// calculations.
julong initHeapSize;
size_t phys_mem = os::physical_memory();
physical_memory_size_type phys_mem = os::physical_memory();
julong total_memory = static_cast<julong>(phys_mem);
if (total_memory < (julong) 256 * M) {


@ -1184,13 +1184,13 @@ void os::print_summary_info(outputStream* st, char* buf, size_t buflen) {
#endif // PRODUCT
get_summary_cpu_info(buf, buflen);
st->print("%s, ", buf);
size_t phys_mem = physical_memory();
size_t mem = phys_mem/G;
physical_memory_size_type phys_mem = physical_memory();
physical_memory_size_type mem = phys_mem/G;
if (mem == 0) { // for low memory systems
mem = phys_mem/M;
st->print("%d cores, %zuM, ", processor_count(), mem);
st->print("%d cores, " PHYS_MEM_TYPE_FORMAT "M, ", processor_count(), mem);
} else {
st->print("%d cores, %zuG, ", processor_count(), mem);
st->print("%d cores, " PHYS_MEM_TYPE_FORMAT "G, ", processor_count(), mem);
}
get_summary_os_info(buf, buflen);
st->print_raw(buf);
@ -1935,17 +1935,17 @@ bool os::is_server_class_machine() {
return true;
}
// Then actually look at the machine
bool result = false;
const unsigned int server_processors = 2;
const julong server_memory = 2UL * G;
bool result = false;
const unsigned int server_processors = 2;
const physical_memory_size_type server_memory = 2UL * G;
// We seem not to get our full complement of memory.
// We allow some part (1/8?) of the memory to be "missing",
// based on the sizes of DIMMs, and maybe graphics cards.
const julong missing_memory = 256UL * M;
size_t phys_mem = os::physical_memory();
const physical_memory_size_type missing_memory = 256UL * M;
physical_memory_size_type phys_mem = os::physical_memory();
/* Is this a server class machine? */
if ((os::active_processor_count() >= (int)server_processors) &&
(phys_mem >= (server_memory - missing_memory))) {
(phys_mem >= server_memory - missing_memory)) {
const unsigned int logical_processors =
VM_Version::logical_processors_per_package();
if (logical_processors > 1) {
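Concretely, with these constants a machine qualifies when it has at least two active processors and reports at least 2048M - 256M = 1792M, the 256M slack covering DIMM rounding and memory claimed by graphics hardware.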
@ -2204,22 +2204,22 @@ static void assert_nonempty_range(const char* addr, size_t bytes) {
p2i(addr), p2i(addr) + bytes);
}
bool os::used_memory(size_t& value) {
bool os::used_memory(physical_memory_size_type& value) {
#ifdef LINUX
if (OSContainer::is_containerized()) {
jlong mem_usage = OSContainer::memory_usage_in_bytes();
if (mem_usage > 0) {
value = static_cast<size_t>(mem_usage);
value = static_cast<physical_memory_size_type>(mem_usage);
return true;
} else {
return false;
}
}
#endif
size_t avail_mem = 0;
physical_memory_size_type avail_mem = 0;
// Return value ignored - defaulting to 0 on failure.
(void)os::available_memory(avail_mem);
size_t phys_mem = os::physical_memory();
physical_memory_size_type phys_mem = os::physical_memory();
value = phys_mem - avail_mem;
return true;
}
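So outside of containers, used memory degrades gracefully: available_memory defaults to 0 on failure, and the fallback reports physical_memory() - available, i.e. at worst the full physical memory.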


@ -332,14 +332,14 @@ class os: AllStatic {
// For example, on Linux, "available" memory (`MemAvailable` in `/proc/meminfo`) is greater
// than "free" memory (`MemFree` in `/proc/meminfo`) because Linux can free memory
// aggressively (e.g. clear caches) so that it becomes available.
[[nodiscard]] static bool available_memory(size_t& value);
[[nodiscard]] static bool used_memory(size_t& value);
[[nodiscard]] static bool free_memory(size_t& value);
[[nodiscard]] static bool available_memory(physical_memory_size_type& value);
[[nodiscard]] static bool used_memory(physical_memory_size_type& value);
[[nodiscard]] static bool free_memory(physical_memory_size_type& value);
[[nodiscard]] static bool total_swap_space(size_t& value);
[[nodiscard]] static bool free_swap_space(size_t& value);
[[nodiscard]] static bool total_swap_space(physical_memory_size_type& value);
[[nodiscard]] static bool free_swap_space(physical_memory_size_type& value);
static size_t physical_memory();
static physical_memory_size_type physical_memory();
static bool is_server_class_machine();
static size_t rss();


@ -2610,7 +2610,7 @@ int HeapDumper::dump(const char* path, outputStream* out, int compression, bool
// (DumpWriter buffer, DumperClassCacheTable, GZipCompressor buffers).
// For the OOM handling we may already be limited in memory.
// Lets ensure we have at least 20MB per thread.
size_t free_memory = 0;
physical_memory_size_type free_memory = 0;
// Return value ignored - defaulting to 0 on failure.
(void)os::free_memory(free_memory);
julong max_threads = free_memory / (20 * M);
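For example, with 2 GiB reported free this caps the dump workers at 2048 / 20 = 102 threads.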


@ -135,6 +135,7 @@ class oopDesc;
#define UINT64_FORMAT_X_0 "0x%016" PRIx64
#define UINT64_FORMAT_W(width) "%" #width PRIu64
#define UINT64_FORMAT_0 "%016" PRIx64
#define PHYS_MEM_TYPE_FORMAT "%" PRIu64
// Format jlong, if necessary
#ifndef JLONG_FORMAT
@ -417,6 +418,11 @@ const uintx max_uintx = (uintx)-1;
typedef unsigned int uint; NEEDS_CLEANUP
// This typedef addresses running a 32-bit VM, where the amount of physical
// memory may not fit in size_t, so a larger type is needed. Once 32-bit
// support is deprecated, size_t can be used instead.
typedef uint64_t physical_memory_size_type;
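A minimal usage sketch pairing the typedef with the format macro above (the log line is illustrative, not taken from the patch):

    physical_memory_size_type phys_mem = os::physical_memory();
    // "%" PRIu64 stays correct even where size_t is only 32 bits wide.
    log_info(os)("physical memory: " PHYS_MEM_TYPE_FORMAT " bytes", phys_mem);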
//----------------------------------------------------------------------------------------------------
// Java type definitions


@ -84,12 +84,11 @@ import jdk.internal.vm.annotation.AOTSafeClassInitializer;
import jdk.internal.vm.annotation.IntrinsicCandidate;
import jdk.internal.vm.annotation.Stable;
import sun.invoke.util.BytecodeDescriptor;
import sun.invoke.util.Wrapper;
import sun.reflect.generics.factory.CoreReflectionFactory;
import sun.reflect.generics.factory.GenericsFactory;
import sun.reflect.generics.repository.ClassRepository;
import sun.reflect.generics.repository.MethodRepository;
import sun.reflect.generics.repository.ConstructorRepository;
import sun.reflect.generics.scope.ClassScope;
import sun.reflect.annotation.*;
@ -1447,17 +1446,10 @@ public final class Class<T> implements java.io.Serializable,
if (!enclosingInfo.isMethod())
return null;
MethodRepository typeInfo = MethodRepository.make(enclosingInfo.getDescriptor(),
getFactory());
Class<?> returnType = toClass(typeInfo.getReturnType());
Type [] parameterTypes = typeInfo.getParameterTypes();
Class<?>[] parameterClasses = new Class<?>[parameterTypes.length];
// Convert Types to Classes; returned types *should*
// be class objects since the methodDescriptor's used
// don't have generics information
for(int i = 0; i < parameterClasses.length; i++)
parameterClasses[i] = toClass(parameterTypes[i]);
// Descriptor already validated by VM
List<Class<?>> types = BytecodeDescriptor.parseMethod(enclosingInfo.getDescriptor(), getClassLoader());
Class<?> returnType = types.removeLast();
Class<?>[] parameterClasses = types.toArray(EMPTY_CLASS_ARRAY);
final Class<?> enclosingCandidate = enclosingInfo.getEnclosingClass();
Method[] candidates = enclosingCandidate.privateGetDeclaredMethods(false);
@ -1576,17 +1568,10 @@ public final class Class<T> implements java.io.Serializable,
if (!enclosingInfo.isConstructor())
return null;
ConstructorRepository typeInfo = ConstructorRepository.make(enclosingInfo.getDescriptor(),
getFactory());
Type [] parameterTypes = typeInfo.getParameterTypes();
Class<?>[] parameterClasses = new Class<?>[parameterTypes.length];
// Convert Types to Classes; returned types *should*
// be class objects since the methodDescriptor's used
// don't have generics information
for (int i = 0; i < parameterClasses.length; i++)
parameterClasses[i] = toClass(parameterTypes[i]);
// Descriptor already validated by VM
List<Class<?>> types = BytecodeDescriptor.parseMethod(enclosingInfo.getDescriptor(), getClassLoader());
types.removeLast();
Class<?>[] parameterClasses = types.toArray(EMPTY_CLASS_ARRAY);
final Class<?> enclosingCandidate = enclosingInfo.getEnclosingClass();
Constructor<?>[] candidates = enclosingCandidate
@ -1892,7 +1877,7 @@ public final class Class<T> implements java.io.Serializable,
}
currentClass = currentClass.getSuperclass();
}
return list.toArray(new Class<?>[0]);
return list.toArray(EMPTY_CLASS_ARRAY);
}


@ -37,12 +37,33 @@ public class BytecodeDescriptor {
private BytecodeDescriptor() { } // cannot instantiate
/**
* @param loader the class loader in which to look up the types (null means
* bootstrap class loader)
*/
public static List<Class<?>> parseMethod(String bytecodeSignature, ClassLoader loader) {
return parseMethod(bytecodeSignature, 0, bytecodeSignature.length(), loader);
/// Parses and validates a field descriptor string in the {@code loader} context.
///
/// @param descriptor a field descriptor string
/// @param loader the class loader in which to look up the types (null means
/// bootstrap class loader)
/// @throws IllegalArgumentException if the descriptor is invalid
/// @throws TypeNotPresentException if the descriptor is valid, but
/// the class cannot be found by the loader
public static Class<?> parseClass(String descriptor, ClassLoader loader) {
int[] i = {0};
var ret = parseSig(descriptor, i, descriptor.length(), loader);
if (i[0] != descriptor.length() || ret == null) {
parseError("not a class descriptor", descriptor);
}
return ret;
}
/// Parses and validates a method descriptor string in the {@code loader} context.
///
/// @param descriptor a method descriptor string
/// @param loader the class loader in which to look up the types (null means
/// bootstrap class loader)
/// @throws IllegalArgumentException if the descriptor is invalid
/// @throws TypeNotPresentException if a reference type cannot be found by
/// the loader (before the descriptor is found invalid)
public static List<Class<?>> parseMethod(String descriptor, ClassLoader loader) {
return parseMethod(descriptor, 0, descriptor.length(), loader);
}
/**
@ -77,10 +98,19 @@ public class BytecodeDescriptor {
throw new IllegalArgumentException("bad signature: "+str+": "+msg);
}
/**
* @param loader the class loader in which to look up the types (null means
* bootstrap class loader)
*/
/// Parses a single type in a descriptor. The result is one of:
///
/// - a `Class` on successful parsing
/// - `null` for a malformed descriptor
/// - a thrown [TypeNotPresentException] for a well-formed class name
/// whose class cannot be found by the loader
///
/// @param str contains the string to parse
/// @param i cursor for the next token in the string, modified in-place
/// @param end the limit for parsing
/// @param loader the class loader in which to look up the types (null means
/// bootstrap class loader)
///
private static Class<?> parseSig(String str, int[] i, int end, ClassLoader loader) {
if (i[0] == end) return null;
char c = str.charAt(i[0]++);
@ -107,7 +137,14 @@ public class BytecodeDescriptor {
}
return t;
} else {
return Wrapper.forBasicType(c).primitiveType();
Wrapper w;
try {
w = Wrapper.forBasicType(c);
} catch (IllegalArgumentException ex) {
// Our reporting has a better error message
return null;
}
return w.primitiveType();
}
}
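As a usage example, parseMethod("(ILjava/lang/String;)V", loader) yields [int.class, String.class, void.class] with the return type last, which is why the Class.java callers above finish with types.removeLast().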


@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,12 +34,7 @@ import java.util.function.Supplier;
import jdk.internal.reflect.ConstantPool;
import sun.reflect.generics.parser.SignatureParser;
import sun.reflect.generics.tree.TypeSignature;
import sun.reflect.generics.factory.GenericsFactory;
import sun.reflect.generics.factory.CoreReflectionFactory;
import sun.reflect.generics.visitor.Reifier;
import sun.reflect.generics.scope.ClassScope;
import sun.invoke.util.BytecodeDescriptor;
/**
* Parser for Java programming language annotations. Translates
@ -429,19 +424,11 @@ public class AnnotationParser {
}
private static Class<?> parseSig(String sig, Class<?> container) {
if (sig.equals("V")) return void.class;
SignatureParser parser = SignatureParser.make();
TypeSignature typeSig = parser.parseTypeSig(sig);
GenericsFactory factory = CoreReflectionFactory.make(container, ClassScope.make(container));
Reifier reify = Reifier.make(factory);
typeSig.accept(reify);
Type result = reify.getResult();
return toClass(result);
}
static Class<?> toClass(Type o) {
if (o instanceof GenericArrayType gat)
return toClass(gat.getGenericComponentType()).arrayType();
return (Class<?>) o;
try {
return BytecodeDescriptor.parseClass(sig, container.getClassLoader());
} catch (IllegalArgumentException ex) {
throw new GenericSignatureFormatError(ex.getMessage());
}
}
/**


@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -41,7 +41,7 @@
/* We should also include jdk_util.h here, for the prototype of JDK_Canonicalize.
This isn't possible though, because canonicalize_md.c is also used in
different contexts within Oracle.
*/
*/
#include "io_util_md.h"
/* Copy bytes to dst, not going past dend; return dst + number of bytes copied,
@ -139,7 +139,8 @@ lastErrorReportable()
|| (errval == ERROR_ACCESS_DENIED)
|| (errval == ERROR_NETWORK_UNREACHABLE)
|| (errval == ERROR_NETWORK_ACCESS_DENIED)
|| (errval == ERROR_NO_MORE_FILES)) {
|| (errval == ERROR_NO_MORE_FILES)
|| (errval == ERROR_NETNAME_DELETED)) {
return 0;
}
return 1;
@ -183,7 +184,7 @@ wcanonicalize(WCHAR *orig_path, WCHAR *result, int size)
/* Copy prefix, assuming path is absolute */
c = src[0];
if (((c <= L'z' && c >= L'a') || (c <= L'Z' && c >= L'A'))
&& (src[1] == L':') && (src[2] == L'\\')) {
&& (src[1] == L':') && (src[2] == L'\\')) {
/* Drive specifier */
*src = towupper(*src); /* Canonicalize drive letter */
if (!(dst = wcp(dst, dend, L'\0', src, src + 2))) {
@ -244,9 +245,9 @@ wcanonicalize(WCHAR *orig_path, WCHAR *result, int size)
continue;
} else {
if (!lastErrorReportable()) {
if (!(dst = wcp(dst, dend, L'\0', src, src + wcslen(src)))){
goto err;
}
if (!(dst = wcp(dst, dend, L'\0', src, src + wcslen(src)))){
goto err;
}
break;
} else {
goto err;
@ -255,7 +256,7 @@ wcanonicalize(WCHAR *orig_path, WCHAR *result, int size)
}
if (dst >= dend) {
errno = ENAMETOOLONG;
errno = ENAMETOOLONG;
goto err;
}
*dst = L'\0';
@ -366,7 +367,7 @@ JDK_Canonicalize(const char *orig, char *out, int len) {
// Change return value to success.
ret = 0;
finish:
finish:
free(wresult);
free(wpath);


@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -457,15 +457,17 @@ static inline void doDrawGlyphsPipe_fillGlyphAndAdvanceBuffers
}
}
if (positions != NULL) {
CGAffineTransform invTx = CGAffineTransformInvert(strike->fFontTx);
CGPoint prev;
prev.x = positions[0];
prev.y = positions[1];
prev = CGPointApplyAffineTransform(prev, invTx);
// <rdar://problem/4294061> take the first point, and move the context to that location
CGContextTranslateCTM(qsdo->cgRef, prev.x, prev.y);
CGAffineTransform invTx = CGAffineTransformInvert(strike->fFontTx);
// for each position, figure out the advance (since CG won't take positions directly)
size_t i;
for (i = 0; i < length - 1; i++)
@ -476,7 +478,7 @@ static inline void doDrawGlyphsPipe_fillGlyphAndAdvanceBuffers
pt.y = positions[i2+1];
pt = CGPointApplyAffineTransform(pt, invTx);
advances[i].width = pt.x - prev.x;
advances[i].height = -(pt.y - prev.y); // negative to translate to device space
advances[i].height = pt.y - prev.y;
prev.x = pt.x;
prev.y = pt.y;
}
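Stripped of the diff interleaving, the fixed loop inverts the strike's font transform once, translates the context to the first position, and then emits plain deltas (no Y flip) as advances. A hedged reconstruction, with the buffer names assumed from the surrounding code:

    #include <CoreGraphics/CoreGraphics.h>

    static void fill_advances(CGContextRef cgRef, CGAffineTransform fontTx,
                              const CGFloat* positions, CGSize* advances,
                              size_t length) {
      CGAffineTransform invTx = CGAffineTransformInvert(fontTx);
      CGPoint prev = CGPointApplyAffineTransform(
          CGPointMake(positions[0], positions[1]), invTx);
      CGContextTranslateCTM(cgRef, prev.x, prev.y);  // move to the first glyph
      for (size_t i = 0; i + 1 < length; i++) {
        size_t i2 = (i + 1) * 2;
        CGPoint pt = CGPointApplyAffineTransform(
            CGPointMake(positions[i2], positions[i2 + 1]), invTx);
        advances[i].width  = pt.x - prev.x;
        advances[i].height = pt.y - prev.y;  // Y delta kept as-is after the fix
        prev = pt;
      }
    }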


@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -73,8 +73,8 @@ public class OutlineTextRenderer implements TextPipe {
public void drawString(SunGraphics2D g2d, String str, double x, double y) {
if ("".equals(str)) {
return; // TextLayout constructor throws IAE on "".
if (str.length() == 0) {
return;
}
TextLayout tl = new TextLayout(str, g2d.getFont(),
g2d.getFontRenderContext());


@ -222,6 +222,16 @@ public final class Float16
*/
public static final int BYTES = SIZE / Byte.SIZE;
/**
* The overflow threshold (for round to nearest) is MAX_VALUE + 1/2 ulp.
*/
private static final double OVERFLOW_THRESH = 0x1.ffcp15 + 0x0.002p15;
/**
* The underflow threshold (for round to nearest) is MIN_VALUE * 0.5.
*/
private static final double UNDERFLOW_THRESH = 0x1.0p-24d * 0.5d;
/**
* Returns a string representation of the {@code Float16}
* argument.
@ -340,51 +350,51 @@ public final class Float16
* @param d a {@code double}
*/
public static Float16 valueOf(double d) {
long doppel = Double.doubleToRawLongBits(d);
short sign_bit = (short)((doppel & 0x8000_0000_0000_0000L) >> 48);
if (Double.isNaN(d)) {
// Have existing float code handle any attempts to
// preserve NaN bits.
return valueOf((float)d);
}
long doppel = Double.doubleToRawLongBits(d);
short sign_bit = (short)((doppel & 0x8000_0000_0000_0000L) >> (64 - 16));
double abs_d = Math.abs(d);
// The overflow threshold is binary16 MAX_VALUE + 1/2 ulp
if (abs_d >= (0x1.ffcp15 + 0x0.002p15) ) {
if (abs_d >= OVERFLOW_THRESH) {
// correctly signed infinity
return new Float16((short)(sign_bit | 0x7c00));
}
// Smallest magnitude nonzero representable binary16 value
// is equal to 0x1.0p-24; half-way and smaller rounds to zero.
if (abs_d <= 0x1.0p-24d * 0.5d) { // Covers double zeros and subnormals.
return new Float16(sign_bit); // Positive or negative zero
if (abs_d <= UNDERFLOW_THRESH) { // Covers double zeros and subnormals.
// positive or negative zero
return new Float16(sign_bit);
}
// Dealing with finite values in exponent range of binary16
// (when rounding is done, could still round up)
int exp = Math.getExponent(d);
assert -25 <= exp && exp <= 15;
assert
(MIN_EXPONENT - PRECISION) <= exp &&
exp <= MAX_EXPONENT;
// For binary16 subnormals, beside forcing exp to -15, retain
// the difference expdelta = E_min - exp. This is the excess
// shift value, in addition to 42, to be used in the
// For target format subnormals, beside forcing exp to
// MIN_EXPONENT-1, retain the difference expdelta = E_min -
// exp. This is the excess shift value, in addition to the
// difference in precision bits, to be used in the
// computations below. Further the (hidden) msb with value 1
// in d must be involved as well.
int expdelta = 0;
long msb = 0x0000_0000_0000_0000L;
if (exp < -14) {
expdelta = -14 - exp; // FIXME?
exp = -15;
msb = 0x0010_0000_0000_0000L; // should be 0x0020_... ?
if (exp < MIN_EXPONENT) {
expdelta = MIN_EXPONENT - exp;
exp = MIN_EXPONENT - 1;
msb = 0x0010_0000_0000_0000L;
}
long f_signif_bits = doppel & 0x000f_ffff_ffff_ffffL | msb;
int PRECISION_DIFF = Double.PRECISION - PRECISION; // 42
// Significand bits as if using rounding to zero (truncation).
short signif_bits = (short)(f_signif_bits >> (42 + expdelta));
short signif_bits = (short)(f_signif_bits >> (PRECISION_DIFF + expdelta));
// For round to nearest even, determining whether or not to
// round up (in magnitude) is a function of the least
@ -399,9 +409,9 @@ public final class Float16
// 1 1 1
// See "Computer Arithmetic Algorithms," Koren, Table 4.9
long lsb = f_signif_bits & (1L << 42 + expdelta);
long round = f_signif_bits & (1L << 41 + expdelta);
long sticky = f_signif_bits & ((1L << 41 + expdelta) - 1);
long lsb = f_signif_bits & (1L << (PRECISION_DIFF + expdelta));
long round = f_signif_bits & (1L << (PRECISION_DIFF - 1) + expdelta);
long sticky = f_signif_bits & ((1L << (PRECISION_DIFF - 1) + expdelta) - 1);
if (round != 0 && ((lsb | sticky) != 0 )) {
signif_bits++;
@ -412,7 +422,9 @@ public final class Float16
// to implement a carry out from rounding the significand.
assert (0xf800 & signif_bits) == 0x0;
return new Float16((short)(sign_bit | ( ((exp + 15) << 10) + signif_bits ) ));
// Exponent bias adjust in the representation is equal to MAX_EXPONENT.
return new Float16((short)(sign_bit |
( ((exp + MAX_EXPONENT) << (PRECISION - 1)) + signif_bits ) ));
}
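Two concrete numbers behind the constants: OVERFLOW_THRESH is 65504 + 16 = 65520 (MAX_VALUE plus half of its 32-unit ulp) and UNDERFLOW_THRESH is 2^-25, half of MIN_VALUE = 2^-24. The lsb/round/sticky test is plain round-to-nearest-even; a tiny self-contained illustration of the decision rule (C++ for brevity, not JDK code):

    #include <cstdint>
    #include <cstdio>

    // Round up iff the first discarded bit (round) is set and either a lower
    // discarded bit (sticky) or the least significant kept bit (lsb) is set.
    static bool round_up(uint64_t lsb, uint64_t round, uint64_t sticky) {
      return round != 0 && ((lsb | sticky) != 0);
    }

    int main() {
      std::printf("%d\n", round_up(0, 1, 0));  // exact tie, even lsb: keep (0)
      std::printf("%d\n", round_up(1, 1, 0));  // exact tie, odd lsb: round up (1)
      std::printf("%d\n", round_up(0, 1, 1));  // above halfway: round up (1)
      std::printf("%d\n", round_up(1, 0, 1));  // below halfway: truncate (0)
    }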
/**


@ -41,46 +41,69 @@ TEST_VM_F(NMTRegionsTreeTest, ReserveCommitTwice) {
NativeCallStack ncs;
VMATree::RegionData rd = rt.make_region_data(ncs, mtTest);
VMATree::RegionData rd2 = rt.make_region_data(ncs, mtGC);
VMATree::SummaryDiff diff;
diff = rt.reserve_mapping(0, 100, rd);
EXPECT_EQ(100, diff.tag[NMTUtil::tag_to_index(mtTest)].reserve);
diff = rt.commit_region(0, 50, ncs);
diff = rt.reserve_mapping(0, 100, rd);
EXPECT_EQ(0, diff.tag[NMTUtil::tag_to_index(mtTest)].reserve);
EXPECT_EQ(-50, diff.tag[NMTUtil::tag_to_index(mtTest)].commit);
diff = rt.reserve_mapping(0, 100, rd2);
EXPECT_EQ(-100, diff.tag[NMTUtil::tag_to_index(mtTest)].reserve);
EXPECT_EQ(100, diff.tag[NMTUtil::tag_to_index(mtGC)].reserve);
diff = rt.commit_region(0, 50, ncs);
EXPECT_EQ(0, diff.tag[NMTUtil::tag_to_index(mtGC)].reserve);
EXPECT_EQ(50, diff.tag[NMTUtil::tag_to_index(mtGC)].commit);
diff = rt.commit_region(0, 50, ncs);
EXPECT_EQ(0, diff.tag[NMTUtil::tag_to_index(mtTest)].reserve);
EXPECT_EQ(0, diff.tag[NMTUtil::tag_to_index(mtTest)].commit);
{
VMATree::SummaryDiff diff;
rt.reserve_mapping(0, 100, rd, diff);
EXPECT_EQ(100, diff.tag[NMTUtil::tag_to_index(mtTest)].reserve);
}
{
VMATree::SummaryDiff diff, not_used;
rt.commit_region(0, 50, ncs, not_used);
rt.reserve_mapping(0, 100, rd, diff);
EXPECT_EQ(0, diff.tag[NMTUtil::tag_to_index(mtTest)].reserve);
EXPECT_EQ(-50, diff.tag[NMTUtil::tag_to_index(mtTest)].commit);
}
{
VMATree::SummaryDiff diff;
rt.reserve_mapping(0, 100, rd2, diff);
EXPECT_EQ(-100, diff.tag[NMTUtil::tag_to_index(mtTest)].reserve);
EXPECT_EQ(100, diff.tag[NMTUtil::tag_to_index(mtGC)].reserve);
}
{
VMATree::SummaryDiff diff1, diff2;
rt.commit_region(0, 50, ncs, diff1);
EXPECT_EQ(0, diff1.tag[NMTUtil::tag_to_index(mtGC)].reserve);
EXPECT_EQ(50, diff1.tag[NMTUtil::tag_to_index(mtGC)].commit);
rt.commit_region(0, 50, ncs, diff2);
EXPECT_EQ(0, diff2.tag[NMTUtil::tag_to_index(mtTest)].reserve);
EXPECT_EQ(0, diff2.tag[NMTUtil::tag_to_index(mtTest)].commit);
}
}
TEST_VM_F(NMTRegionsTreeTest, CommitUncommitRegion) {
NativeCallStack ncs;
VMATree::RegionData rd = rt.make_region_data(ncs, mtTest);
rt.reserve_mapping(0, 100, rd);
VMATree::SummaryDiff diff = rt.commit_region(0, 50, ncs);
EXPECT_EQ(0, diff.tag[NMTUtil::tag_to_index(mtTest)].reserve);
EXPECT_EQ(50, diff.tag[NMTUtil::tag_to_index(mtTest)].commit);
diff = rt.commit_region((address)60, 10, ncs);
EXPECT_EQ(0, diff.tag[NMTUtil::tag_to_index(mtTest)].reserve);
EXPECT_EQ(10, diff.tag[NMTUtil::tag_to_index(mtTest)].commit);
diff = rt.uncommit_region(0, 50);
EXPECT_EQ(0, diff.tag[NMTUtil::tag_to_index(mtTest)].reserve);
EXPECT_EQ(-50, diff.tag[NMTUtil::tag_to_index(mtTest)].commit);
VMATree::SummaryDiff not_used;
rt.reserve_mapping(0, 100, rd, not_used);
{
VMATree::SummaryDiff diff;
rt.commit_region(0, 50, ncs, diff);
EXPECT_EQ(0, diff.tag[NMTUtil::tag_to_index(mtTest)].reserve);
EXPECT_EQ(50, diff.tag[NMTUtil::tag_to_index(mtTest)].commit);
}
{
VMATree::SummaryDiff diff;
rt.commit_region((address)60, 10, ncs, diff);
EXPECT_EQ(0, diff.tag[NMTUtil::tag_to_index(mtTest)].reserve);
EXPECT_EQ(10, diff.tag[NMTUtil::tag_to_index(mtTest)].commit);
}
{
VMATree::SummaryDiff diff;
rt.uncommit_region(0, 50, diff);
EXPECT_EQ(0, diff.tag[NMTUtil::tag_to_index(mtTest)].reserve);
EXPECT_EQ(-50, diff.tag[NMTUtil::tag_to_index(mtTest)].commit);
}
}
TEST_VM_F(NMTRegionsTreeTest, FindReservedRegion) {
NativeCallStack ncs;
VMATree::RegionData rd = rt.make_region_data(ncs, mtTest);
rt.reserve_mapping(1000, 50, rd);
rt.reserve_mapping(1200, 50, rd);
rt.reserve_mapping(1300, 50, rd);
rt.reserve_mapping(1400, 50, rd);
VMATree::SummaryDiff not_used;
rt.reserve_mapping(1000, 50, rd, not_used);
rt.reserve_mapping(1200, 50, rd, not_used);
rt.reserve_mapping(1300, 50, rd, not_used);
rt.reserve_mapping(1400, 50, rd, not_used);
VirtualMemoryRegion rmr;
rmr = rt.find_reserved_region((address)1205);
EXPECT_EQ(rmr.base(), (address)1200);
@ -95,10 +118,11 @@ TEST_VM_F(NMTRegionsTreeTest, FindReservedRegion) {
TEST_VM_F(NMTRegionsTreeTest, VisitReservedRegions) {
NativeCallStack ncs;
VMATree::RegionData rd = rt.make_region_data(ncs, mtTest);
rt.reserve_mapping(1000, 50, rd);
rt.reserve_mapping(1200, 50, rd);
rt.reserve_mapping(1300, 50, rd);
rt.reserve_mapping(1400, 50, rd);
VMATree::SummaryDiff not_used;
rt.reserve_mapping(1000, 50, rd, not_used);
rt.reserve_mapping(1200, 50, rd, not_used);
rt.reserve_mapping(1300, 50, rd, not_used);
rt.reserve_mapping(1400, 50, rd, not_used);
rt.visit_reserved_regions([&](const VirtualMemoryRegion& rgn) {
EXPECT_EQ(((size_t)rgn.base()) % 100, 0UL);
@ -110,15 +134,16 @@ TEST_VM_F(NMTRegionsTreeTest, VisitReservedRegions) {
TEST_VM_F(NMTRegionsTreeTest, VisitCommittedRegions) {
NativeCallStack ncs;
VMATree::RegionData rd = rt.make_region_data(ncs, mtTest);
rt.reserve_mapping(1000, 50, rd);
rt.reserve_mapping(1200, 50, rd);
rt.reserve_mapping(1300, 50, rd);
rt.reserve_mapping(1400, 50, rd);
VMATree::SummaryDiff not_used;
rt.reserve_mapping(1000, 50, rd, not_used);
rt.reserve_mapping(1200, 50, rd, not_used);
rt.reserve_mapping(1300, 50, rd, not_used);
rt.reserve_mapping(1400, 50, rd, not_used);
rt.commit_region((address)1010, 5UL, ncs);
rt.commit_region((address)1020, 5UL, ncs);
rt.commit_region((address)1030, 5UL, ncs);
rt.commit_region((address)1040, 5UL, ncs);
rt.commit_region((address)1010, 5UL, ncs, not_used);
rt.commit_region((address)1020, 5UL, ncs, not_used);
rt.commit_region((address)1030, 5UL, ncs, not_used);
rt.commit_region((address)1040, 5UL, ncs, not_used);
VirtualMemoryRegion rmr((address)1000, 50);
size_t count = 0;
rt.visit_committed_regions(rmr, [&](VirtualMemoryRegion& crgn) {


@ -92,14 +92,15 @@ public:
// Adjacent reservations are merged if the properties match.
void adjacent_2_nodes(const VMATree::RegionData& rd) {
Tree tree;
VMATree::SummaryDiff not_used;
for (int i = 0; i < 10; i++) {
tree.reserve_mapping(i * 100, 100, rd);
tree.reserve_mapping(i * 100, 100, rd, not_used);
}
EXPECT_EQ(2, count_nodes(tree));
// Reserving the exact same space again should result in still having only 2 nodes
for (int i = 0; i < 10; i++) {
tree.reserve_mapping(i * 100, 100, rd);
tree.reserve_mapping(i * 100, 100, rd, not_used);
}
EXPECT_EQ(2, count_nodes(tree));
@ -111,7 +112,7 @@ public:
// ...
// 0--100
for (int i = 9; i >= 0; i--) {
tree2.reserve_mapping(i * 100, 100, rd);
tree2.reserve_mapping(i * 100, 100, rd, not_used);
}
EXPECT_EQ(2, count_nodes(tree2));
}
@ -119,16 +120,17 @@ public:
// After removing all ranges we should be left with an entirely empty tree
void remove_all_leaves_empty_tree(const VMATree::RegionData& rd) {
Tree tree;
tree.reserve_mapping(0, 100 * 10, rd);
VMATree::SummaryDiff not_used;
tree.reserve_mapping(0, 100 * 10, rd, not_used);
for (int i = 0; i < 10; i++) {
tree.release_mapping(i * 100, 100);
tree.release_mapping(i * 100, 100, not_used);
}
EXPECT_EQ(nullptr, rbtree_root(tree));
// Other way around
tree.reserve_mapping(0, 100 * 10, rd);
tree.reserve_mapping(0, 100 * 10, rd, not_used);
for (int i = 9; i >= 0; i--) {
tree.release_mapping(i * 100, 100);
tree.release_mapping(i * 100, 100, not_used);
}
EXPECT_EQ(nullptr, rbtree_root(tree));
}
@ -136,9 +138,10 @@ public:
// Committing in a whole reserved range results in 2 nodes
void commit_whole(const VMATree::RegionData& rd) {
Tree tree;
tree.reserve_mapping(0, 100 * 10, rd);
VMATree::SummaryDiff not_used;
tree.reserve_mapping(0, 100 * 10, rd, not_used);
for (int i = 0; i < 10; i++) {
tree.commit_mapping(i * 100, 100, rd);
tree.commit_mapping(i * 100, 100, rd, not_used);
}
rbtree(tree).visit_in_order([&](TNode* x) {
VMATree::StateType in = in_type_of(x);
@ -153,8 +156,9 @@ public:
// Committing in middle of reservation ends with a sequence of 4 nodes
void commit_middle(const VMATree::RegionData& rd) {
Tree tree;
tree.reserve_mapping(0, 100, rd);
tree.commit_mapping(50, 25, rd);
VMATree::SummaryDiff not_used;
tree.reserve_mapping(0, 100, rd, not_used);
tree.commit_mapping(50, 25, rd, not_used);
size_t found[16];
size_t wanted[4] = {0, 50, 75, 100};
@ -342,8 +346,9 @@ public:
TEST_VM_F(NMTVMATreeTest, OverlappingReservationsResultInTwoNodes) {
VMATree::RegionData rd{si[0], mtTest};
Tree tree;
VMATree::SummaryDiff not_used;
for (int i = 99; i >= 0; i--) {
tree.reserve_mapping(i * 100, 101, rd);
tree.reserve_mapping(i * 100, 101, rd, not_used);
}
EXPECT_EQ(2, count_nodes(tree));
}
@ -351,8 +356,9 @@ TEST_VM_F(NMTVMATreeTest, OverlappingReservationsResultInTwoNodes) {
TEST_VM_F(NMTVMATreeTest, DuplicateReserve) {
VMATree::RegionData rd{si[0], mtTest};
Tree tree;
tree.reserve_mapping(100, 100, rd);
tree.reserve_mapping(100, 100, rd);
VMATree::SummaryDiff not_used;
tree.reserve_mapping(100, 100, rd, not_used);
tree.reserve_mapping(100, 100, rd, not_used);
EXPECT_EQ(2, count_nodes(tree));
VMATree::VMARBTree::Range r = tree.tree().find_enclosing_range(110);
EXPECT_EQ(100, (int)(r.end->key() - r.start->key()));
@ -360,15 +366,16 @@ TEST_VM_F(NMTVMATreeTest, DuplicateReserve) {
TEST_VM_F(NMTVMATreeTest, UseTagInplace) {
Tree tree;
VMATree::SummaryDiff not_used;
VMATree::RegionData rd_Test_cs0(si[0], mtTest);
VMATree::RegionData rd_None_cs1(si[1], mtNone);
tree.reserve_mapping(0, 100, rd_Test_cs0);
tree.reserve_mapping(0, 100, rd_Test_cs0, not_used);
// reserve: 0---------------------100
// commit: 20**********70
// uncommit: 30--40
// post-cond: 0---20**30--40**70----100
tree.commit_mapping(20, 50, rd_None_cs1, true);
tree.uncommit_mapping(30, 10, rd_None_cs1);
tree.commit_mapping(20, 50, rd_None_cs1, not_used, true);
tree.uncommit_mapping(30, 10, rd_None_cs1, not_used);
tree.visit_in_order([&](const TNode* node) {
if (node->key() != 100) {
EXPECT_EQ(mtTest, node->val().out.mem_tag()) << "failed at: " << node->key();
@ -395,10 +402,11 @@ TEST_VM_F(NMTVMATreeTest, LowLevel) {
{ // Identical operation but different metadata should not merge
Tree tree;
VMATree::SummaryDiff not_used;
VMATree::RegionData rd_Test_cs0{si[0], mtTest};
VMATree::RegionData rd_NMT_cs1{si[1], mtNMT};
tree.reserve_mapping(0, 100, rd_Test_cs0);
tree.reserve_mapping(100, 100, rd_NMT_cs1);
tree.reserve_mapping(0, 100, rd_Test_cs0, not_used);
tree.reserve_mapping(100, 100, rd_NMT_cs1, not_used);
EXPECT_EQ(3, count_nodes(tree));
int found_nodes = 0;
@ -406,10 +414,11 @@ TEST_VM_F(NMTVMATreeTest, LowLevel) {
{ // Reserving after commit should overwrite commit
Tree tree;
VMATree::SummaryDiff not_used;
VMATree::RegionData rd_Test_cs0{si[0], mtTest};
VMATree::RegionData rd_NMT_cs1{si[1], mtNMT};
tree.commit_mapping(50, 50, rd_NMT_cs1);
tree.reserve_mapping(0, 100, rd_Test_cs0);
tree.commit_mapping(50, 50, rd_NMT_cs1, not_used);
tree.reserve_mapping(0, 100, rd_Test_cs0, not_used);
rbtree(tree).visit_in_order([&](const TNode* x) {
EXPECT_TRUE(x->key() == 0 || x->key() == 100);
if (x->key() == 0UL) {
@ -423,20 +432,22 @@ TEST_VM_F(NMTVMATreeTest, LowLevel) {
{ // Split a reserved region into two different reserved regions
Tree tree;
VMATree::SummaryDiff not_used;
VMATree::RegionData rd_Test_cs0{si[0], mtTest};
VMATree::RegionData rd_NMT_cs1{si[1], mtNMT};
VMATree::RegionData rd_None_cs0{si[0], mtNone};
tree.reserve_mapping(0, 100, rd_Test_cs0);
tree.reserve_mapping(0, 50, rd_NMT_cs1);
tree.reserve_mapping(50, 50, rd_None_cs0);
tree.reserve_mapping(0, 100, rd_Test_cs0, not_used);
tree.reserve_mapping(0, 50, rd_NMT_cs1, not_used);
tree.reserve_mapping(50, 50, rd_None_cs0, not_used);
EXPECT_EQ(3, count_nodes(tree));
}
{ // One big reserve + release leaves an empty tree
VMATree::RegionData rd_NMT_cs0{si[0], mtNMT};
Tree tree;
tree.reserve_mapping(0, 500000, rd_NMT_cs0);
tree.release_mapping(0, 500000);
VMATree::SummaryDiff not_used;
tree.reserve_mapping(0, 500000, rd_NMT_cs0, not_used);
tree.release_mapping(0, 500000, not_used);
EXPECT_EQ(nullptr, rbtree_root(tree));
}
@ -446,8 +457,9 @@ TEST_VM_F(NMTVMATreeTest, LowLevel) {
VMATree::RegionData rd_NMT_cs0{si[0], mtNMT};
VMATree::RegionData rd_Test_cs1{si[1], mtTest};
Tree tree;
tree.reserve_mapping(0, 100, rd_NMT_cs0);
tree.commit_mapping(0, 100, rd_Test_cs1);
VMATree::SummaryDiff not_used;
tree.reserve_mapping(0, 100, rd_NMT_cs0, not_used);
tree.commit_mapping(0, 100, rd_Test_cs1, not_used);
rbtree(tree).visit_range_in_order(0, 99999, [&](TNode* x) {
if (x->key() == 0) {
EXPECT_EQ(mtTest, x->val().out.reserved_regiondata().mem_tag);
@ -461,10 +473,11 @@ TEST_VM_F(NMTVMATreeTest, LowLevel) {
{ // Attempting to reserve or commit an empty region should not change the tree.
Tree tree;
VMATree::SummaryDiff not_used;
VMATree::RegionData rd_NMT_cs0{si[0], mtNMT};
tree.reserve_mapping(0, 0, rd_NMT_cs0);
tree.reserve_mapping(0, 0, rd_NMT_cs0, not_used);
EXPECT_EQ(nullptr, rbtree_root(tree));
tree.commit_mapping(0, 0, rd_NMT_cs0);
tree.commit_mapping(0, 0, rd_NMT_cs0, not_used);
EXPECT_EQ(nullptr, rbtree_root(tree));
}
}
@ -520,8 +533,9 @@ TEST_VM_F(NMTVMATreeTest, SetTag) {
{500, 600, mtClassShared, si, State::Reserved}
};
VMATree tree;
VMATree::SummaryDiff not_used;
tree.reserve_mapping(0, 600, rd);
tree.reserve_mapping(0, 600, rd, not_used);
tree.set_tag(0, 500, mtGC);
tree.set_tag(500, 100, mtClassShared);
@ -540,6 +554,7 @@ TEST_VM_F(NMTVMATreeTest, SetTag) {
{575, 600, mtClassShared, si, State::Reserved}
};
VMATree tree;
VMATree::SummaryDiff not_used;
// 0---------------------------------------------------600
// 100****225
@ -548,11 +563,11 @@ TEST_VM_F(NMTVMATreeTest, SetTag) {
// 0------100****225---------550***560---565***575-----600
// 0------100****225---500---550***560---565***575-----600
// <-------mtGC---------><-----------mtClassShared------->
tree.reserve_mapping(0, 600, rd);
tree.reserve_mapping(0, 600, rd, not_used);
// The committed areas
tree.commit_mapping(100, 125, rd);
tree.commit_mapping(550, 10, rd);
tree.commit_mapping(565, 10, rd);
tree.commit_mapping(100, 125, rd, not_used);
tree.commit_mapping(550, 10, rd, not_used);
tree.commit_mapping(565, 10, rd, not_used);
// OK, set tag
tree.set_tag(0, 500, mtGC);
tree.set_tag(500, 100, mtClassShared);
@ -564,10 +579,11 @@ TEST_VM_F(NMTVMATreeTest, SetTag) {
{0, 200, mtGC, si, State::Reserved}
};
VMATree tree;
VMATree::SummaryDiff not_used;
Tree::RegionData gc(si, mtGC);
Tree::RegionData compiler(si, mtCompiler);
tree.reserve_mapping(0, 100, gc);
tree.reserve_mapping(100, 100, compiler);
tree.reserve_mapping(0, 100, gc, not_used);
tree.reserve_mapping(100, 100, compiler, not_used);
tree.set_tag(0, 200, mtGC);
expect_equivalent_form(expected, tree, __LINE__);
}
@ -580,10 +596,11 @@ TEST_VM_F(NMTVMATreeTest, SetTag) {
{100, 200, mtGC, si2, State::Reserved}
};
VMATree tree;
VMATree::SummaryDiff not_used;
Tree::RegionData gc(si1, mtGC);
Tree::RegionData compiler(si2, mtCompiler);
tree.reserve_mapping(0, 100, gc);
tree.reserve_mapping(100, 100, compiler);
tree.reserve_mapping(0, 100, gc, not_used);
tree.reserve_mapping(100, 100, compiler, not_used);
tree.set_tag(0, 200, mtGC);
expect_equivalent_form(expected, tree, __LINE__);
}
@ -595,8 +612,9 @@ TEST_VM_F(NMTVMATreeTest, SetTag) {
{150, 200, mtCompiler, si, State::Reserved}
};
VMATree tree;
VMATree::SummaryDiff not_used;
Tree::RegionData compiler(si, mtCompiler);
tree.reserve_mapping(0, 200, compiler);
tree.reserve_mapping(0, 200, compiler, not_used);
tree.set_tag(100, 50, mtGC);
expect_equivalent_form(expected, tree, __LINE__);
}
@ -608,10 +626,11 @@ TEST_VM_F(NMTVMATreeTest, SetTag) {
{125, 200, mtCompiler, si, State::Reserved},
};
VMATree tree;
VMATree::SummaryDiff not_used;
Tree::RegionData gc(si, mtGC);
Tree::RegionData compiler(si, mtCompiler);
tree.reserve_mapping(0, 100, gc);
tree.reserve_mapping(100, 100, compiler);
tree.reserve_mapping(0, 100, gc, not_used);
tree.reserve_mapping(100, 100, compiler, not_used);
tree.set_tag(75, 50, mtClass);
expect_equivalent_form(expected, tree, __LINE__);
}
@ -624,9 +643,10 @@ TEST_VM_F(NMTVMATreeTest, SetTag) {
{80, 100, mtClassShared, si, State::Reserved}
};
VMATree tree;
VMATree::SummaryDiff not_used;
Tree::RegionData class_shared(si, mtClassShared);
tree.reserve_mapping(0, 50, class_shared);
tree.reserve_mapping(75, 25, class_shared);
tree.reserve_mapping(0, 50, class_shared, not_used);
tree.reserve_mapping(75, 25, class_shared, not_used);
tree.set_tag(0, 80, mtGC);
expect_equivalent_form(expected, tree, __LINE__);
}
@ -636,8 +656,9 @@ TEST_VM_F(NMTVMATreeTest, SetTag) {
{10, 20, mtCompiler, si, State::Reserved}
};
VMATree tree;
VMATree::SummaryDiff not_used;
Tree::RegionData class_shared(si, mtClassShared);
tree.reserve_mapping(10, 10, class_shared);
tree.reserve_mapping(10, 10, class_shared, not_used);
tree.set_tag(0, 100, mtCompiler);
expect_equivalent_form(expected, tree, __LINE__);
}
@ -651,10 +672,11 @@ TEST_VM_F(NMTVMATreeTest, SetTag) {
{99, 100, mtGC, si, State::Reserved}
};
VMATree tree;
VMATree::SummaryDiff not_used;
Tree::RegionData class_shared(si, mtClassShared);
tree.reserve_mapping(0, 100, class_shared);
tree.release_mapping(1, 49);
tree.release_mapping(75, 24);
tree.reserve_mapping(0, 100, class_shared, not_used);
tree.release_mapping(1, 49, not_used);
tree.release_mapping(75, 24, not_used);
tree.set_tag(0, 100, mtGC);
expect_equivalent_form(expected, tree, __LINE__);
}
@ -666,7 +688,8 @@ TEST_VM_F(NMTVMATreeTest, SummaryAccounting) {
Tree::RegionData rd_Test_cs0(NCS::StackIndex(), mtTest);
Tree::RegionData rd_NMT_cs0(NCS::StackIndex(), mtNMT);
Tree tree;
VMATree::SummaryDiff all_diff = tree.reserve_mapping(0, 100, rd_Test_cs0);
VMATree::SummaryDiff all_diff;
tree.reserve_mapping(0, 100, rd_Test_cs0, all_diff);
// 1 2 3 4 5 6 7 8 9 10 11
// 01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
// AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA..........
@ -675,7 +698,7 @@ TEST_VM_F(NMTVMATreeTest, SummaryAccounting) {
// . - free
VMATree::SingleDiff diff = all_diff.tag[NMTUtil::tag_to_index(mtTest)];
EXPECT_EQ(100, diff.reserve);
all_diff = tree.reserve_mapping(50, 25, rd_NMT_cs0);
tree.reserve_mapping(50, 25, rd_NMT_cs0, all_diff);
// 1 2 3 4 5 6 7 8 9 10 11
// 01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
// AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABBBBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCCCCCCCC..........
@ -692,7 +715,8 @@ TEST_VM_F(NMTVMATreeTest, SummaryAccounting) {
{ // Fully release reserved mapping
Tree::RegionData rd_Test_cs0(NCS::StackIndex(), mtTest);
Tree tree;
VMATree::SummaryDiff all_diff = tree.reserve_mapping(0, 100, rd_Test_cs0);
VMATree::SummaryDiff all_diff;
tree.reserve_mapping(0, 100, rd_Test_cs0, all_diff);
// 1 2 3 4 5 6 7 8 9 10 11
// 01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
// AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA..........
@ -701,7 +725,7 @@ TEST_VM_F(NMTVMATreeTest, SummaryAccounting) {
// . - free
VMATree::SingleDiff diff = all_diff.tag[NMTUtil::tag_to_index(mtTest)];
EXPECT_EQ(100, diff.reserve);
all_diff = tree.release_mapping(0, 100);
tree.release_mapping(0, 100, all_diff);
// 1 2 3 4 5 6 7 8 9 10 11
// 01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
// ..............................................................................................................
@ -712,7 +736,8 @@ TEST_VM_F(NMTVMATreeTest, SummaryAccounting) {
{ // Convert some of a released mapping to a committed one
Tree::RegionData rd_Test_cs0(NCS::StackIndex(), mtTest);
Tree tree;
VMATree::SummaryDiff all_diff = tree.reserve_mapping(0, 100, rd_Test_cs0);
VMATree::SummaryDiff all_diff;
tree.reserve_mapping(0, 100, rd_Test_cs0, all_diff);
// 1 2 3 4 5 6 7 8 9 10 11
// 01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
// AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA..........
@ -721,7 +746,7 @@ TEST_VM_F(NMTVMATreeTest, SummaryAccounting) {
// . - free
VMATree::SingleDiff diff = all_diff.tag[NMTUtil::tag_to_index(mtTest)];
EXPECT_EQ(diff.reserve, 100);
all_diff = tree.commit_mapping(0, 100, rd_Test_cs0);
tree.commit_mapping(0, 100, rd_Test_cs0, all_diff);
// 1 2 3 4 5 6 7 8 9 10 11
// 01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
// aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa..........
@ -735,7 +760,8 @@ TEST_VM_F(NMTVMATreeTest, SummaryAccounting) {
{ // Adjacent reserved mappings with same type
Tree::RegionData rd_Test_cs0(NCS::StackIndex(), mtTest);
Tree tree;
VMATree::SummaryDiff all_diff = tree.reserve_mapping(0, 10, rd_Test_cs0);
VMATree::SummaryDiff all_diff;
tree.reserve_mapping(0, 10, rd_Test_cs0, all_diff);
// 1 2
// 01234567890123456789
// AAAAAAAAAA..........
@ -744,7 +770,7 @@ TEST_VM_F(NMTVMATreeTest, SummaryAccounting) {
// . - free
VMATree::SingleDiff diff = all_diff.tag[NMTUtil::tag_to_index(mtTest)];
EXPECT_EQ(diff.reserve, 10);
all_diff = tree.reserve_mapping(10, 10, rd_Test_cs0);
tree.reserve_mapping(10, 10, rd_Test_cs0, all_diff);
// 1 2 3
// 012345678901234567890123456789
// AAAAAAAAAAAAAAAAAAAA..........
@ -758,7 +784,8 @@ TEST_VM_F(NMTVMATreeTest, SummaryAccounting) {
Tree::RegionData rd_Test_cs0(NCS::StackIndex(), mtTest);
Tree::RegionData rd_NMT_cs0(NCS::StackIndex(), mtNMT);
Tree tree;
VMATree::SummaryDiff all_diff = tree.reserve_mapping(0, 10, rd_Test_cs0);
VMATree::SummaryDiff all_diff;
tree.reserve_mapping(0, 10, rd_Test_cs0, all_diff);
// 1 2
// 01234567890123456789
// AAAAAAAAAA..........
@ -767,7 +794,7 @@ TEST_VM_F(NMTVMATreeTest, SummaryAccounting) {
// . - free
VMATree::SingleDiff diff = all_diff.tag[NMTUtil::tag_to_index(mtTest)];
EXPECT_EQ(diff.reserve, 10);
all_diff = tree.reserve_mapping(10, 10, rd_NMT_cs0);
tree.reserve_mapping(10, 10, rd_NMT_cs0, all_diff);
// 1 2 3
// 012345678901234567890123456789
// AAAAAAAAAABBBBBBBBBB..........
@ -784,22 +811,23 @@ TEST_VM_F(NMTVMATreeTest, SummaryAccounting) {
{ // A commit with two previous commits inside of it should only register
// the new memory in the commit diff.
Tree tree;
VMATree::SummaryDiff diff;
Tree::RegionData rd_Test_cs0(NCS::StackIndex(), mtTest);
tree.commit_mapping(16, 16, rd_Test_cs0);
tree.commit_mapping(16, 16, rd_Test_cs0, diff);
// 1 2 3 4
// 0123456789012345678901234567890123456789
// ................aaaaaaaaaaaaaaaa..........
// Legend:
// a - Test (committed)
// . - free
tree.commit_mapping(32, 32, rd_Test_cs0);
tree.commit_mapping(32, 32, rd_Test_cs0, diff);
// 1 2 3 4 5 6 7
// 0123456789012345678901234567890123456789012345678901234567890123456789
// ................aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa..........
// Legend:
// a - Test (committed)
// . - free
VMATree::SummaryDiff diff = tree.commit_mapping(0, 64, rd_Test_cs0);
tree.commit_mapping(0, 64, rd_Test_cs0, diff);
// 1 2 3 4 5 6 7
// 0123456789012345678901234567890123456789012345678901234567890123456789
// aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa..........
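The comment at the top of this block states the invariant: a commit spanning earlier commits must only report the newly committed bytes. Concretely, with [16, 32) and [32, 64) already committed (48 bytes), committing [0, 64) should add only 16. A minimal sketch of that arithmetic, using a hypothetical helper that is not part of the patch:

#include <algorithm>
#include <cstdint>

struct Range { uint64_t start, size; };

// Bytes of 'req' not yet covered by the (disjoint) committed ranges;
// newly_committed({0,64}, ...) over {16,16} and {32,32} yields 64 - 48 = 16.
static uint64_t newly_committed(Range req, const Range* committed, int n) {
  uint64_t covered = 0;
  for (int i = 0; i < n; i++) {
    uint64_t lo = std::max(req.start, committed[i].start);
    uint64_t hi = std::min(req.start + req.size, committed[i].start + committed[i].size);
    if (hi > lo) covered += hi - lo;
  }
  return req.size - covered;
}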
@ -814,11 +842,12 @@ TEST_VM_F(NMTVMATreeTest, SummaryAccounting) {
TEST_VM_F(NMTVMATreeTest, SummaryAccountingReserveAsUncommit) {
Tree tree;
Tree::RegionData rd(NCS::StackIndex(), mtTest);
VMATree::SummaryDiff diff1 = tree.reserve_mapping(1200, 100, rd);
VMATree::SummaryDiff diff2 = tree.commit_mapping(1210, 50, rd);
VMATree::SummaryDiff diff1, diff2, diff3;
tree.reserve_mapping(1200, 100, rd, diff1);
tree.commit_mapping(1210, 50, rd, diff2);
EXPECT_EQ(100, diff1.tag[NMTUtil::tag_to_index(mtTest)].reserve);
EXPECT_EQ(50, diff2.tag[NMTUtil::tag_to_index(mtTest)].commit);
VMATree::SummaryDiff diff3 = tree.reserve_mapping(1220, 20, rd);
tree.reserve_mapping(1220, 20, rd, diff3);
EXPECT_EQ(-20, diff3.tag[NMTUtil::tag_to_index(mtTest)].commit);
EXPECT_EQ(0, diff3.tag[NMTUtil::tag_to_index(mtTest)].reserve);
}
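The mechanical change running through every hunk in this file is a signature change: the VMATree mapping operations no longer return a VMATree::SummaryDiff; the caller now declares one and passes it by reference as a trailing out-parameter (before the optional use_tag_inplace flag on commit_mapping). A before/after sketch, with the shapes inferred from the call sites rather than copied from vmatree.hpp:

// Before (old API, returning the diff):
//   VMATree::SummaryDiff diff = tree.reserve_mapping(0, 100, rd);
// After (new API, diff as a caller-supplied out-parameter):
//   VMATree::SummaryDiff diff;
//   tree.reserve_mapping(0, 100, rd, diff);
//   tree.commit_mapping(0, 25, rd, diff, true);  // optional flag remains last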
@ -951,13 +980,13 @@ TEST_VM_F(NMTVMATreeTest, TestConsistencyWithSimpleTracker) {
VMATree::SummaryDiff simple_diff;
if (kind == SimpleVMATracker::Reserved) {
simple_diff = tr->reserve(start, size, stack, mem_tag);
tree_diff = tree.reserve_mapping(start, size, data);
tree.reserve_mapping(start, size, data, tree_diff);
} else if (kind == SimpleVMATracker::Committed) {
simple_diff = tr->commit(start, size, stack, mem_tag);
tree_diff = tree.commit_mapping(start, size, data);
tree.commit_mapping(start, size, data, tree_diff);
} else {
simple_diff = tr->release(start, size);
tree_diff = tree.release_mapping(start, size);
tree.release_mapping(start, size, tree_diff);
}
for (int j = 0; j < mt_number_of_tags; j++) {
@ -1024,33 +1053,34 @@ TEST_VM_F(NMTVMATreeTest, TestConsistencyWithSimpleTracker) {
TEST_VM_F(NMTVMATreeTest, SummaryAccountingWhenUseTagInplace) {
Tree tree;
VMATree::SummaryDiff diff;
VMATree::RegionData rd_Test_cs0(si[0], mtTest);
VMATree::RegionData rd_None_cs1(si[1], mtNone);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// ..................................................
tree.reserve_mapping(0, 50, rd_Test_cs0);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// rrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr
VMATree::SummaryDiff diff = tree.commit_mapping(0, 25, rd_None_cs1, true);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// CCCCCCCCCCCCCCCCCCCCCCCCCrrrrrrrrrrrrrrrrrrrrrrrrr
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// ..................................................
tree.reserve_mapping(0, 50, rd_Test_cs0, diff);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// rrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr
tree.commit_mapping(0, 25, rd_None_cs1, diff, true);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// CCCCCCCCCCCCCCCCCCCCCCCCCrrrrrrrrrrrrrrrrrrrrrrrrr
EXPECT_EQ(0, diff.tag[NMTUtil::tag_to_index(mtTest)].reserve);
EXPECT_EQ(25, diff.tag[NMTUtil::tag_to_index(mtTest)].commit);
diff = tree.commit_mapping(30, 5, rd_None_cs1, true);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// CCCCCCCCCCCCCCCCCCCCCCCCCrrrrrCCCCCrrrrrrrrrrrrrrr
tree.commit_mapping(30, 5, rd_None_cs1, diff, true);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// CCCCCCCCCCCCCCCCCCCCCCCCCrrrrrCCCCCrrrrrrrrrrrrrrr
EXPECT_EQ(0, diff.tag[NMTUtil::tag_to_index(mtTest)].reserve);
EXPECT_EQ(5, diff.tag[NMTUtil::tag_to_index(mtTest)].commit);
diff = tree.uncommit_mapping(0, 25, rd_None_cs1);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// rrrrrrrrrrrrrrrrrrrrrrrrrrrrrrCCCCCrrrrrrrrrrrrrrr
tree.uncommit_mapping(0, 25, rd_None_cs1, diff);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// rrrrrrrrrrrrrrrrrrrrrrrrrrrrrrCCCCCrrrrrrrrrrrrrrr
EXPECT_EQ(0, diff.tag[NMTUtil::tag_to_index(mtTest)].reserve);
EXPECT_EQ(-25, diff.tag[NMTUtil::tag_to_index(mtTest)].commit);
}
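Two details of the accounting are visible in the expectations above. First, committing with rd_None_cs1 but use_tag_inplace == true charges the commit to the tag already on the region (mtTest), which is why all deltas land on mtTest even though the request carried mtNone, and why the reserve delta stays 0 throughout. Second, the same diff object is reused across calls yet the second commit reports 5 rather than 25 + 5, so each call evidently overwrites the out-parameter rather than accumulating into it. A comment-form summary of the shown calls:

// reserve_mapping(0, 50, mtTest, diff)          -> mtTest: reserve +50
// commit_mapping(0, 25, mtNone, diff, inplace)  -> mtTest: commit  +25 (tag kept in place)
// commit_mapping(30, 5, mtNone, diff, inplace)  -> mtTest: commit   +5 (diff overwritten per call)
// uncommit_mapping(0, 25, mtNone, diff)         -> mtTest: commit  -25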
@ -1079,7 +1109,8 @@ TEST_VM_F(NMTVMATreeTest, SeparateStacksForCommitAndReserve) {
{// Check committing into a reserved region inherits the call stacks
Tree tree;
tree.reserve_mapping(0, 50, rd_Test_cs1); // reserve in an empty tree
VMATree::SummaryDiff diff;
tree.reserve_mapping(0, 50, rd_Test_cs1, diff); // reserve in an empty tree
// Pre: empty tree.
// Post:
// 1 2 3 4 5
@ -1091,7 +1122,7 @@ TEST_VM_F(NMTVMATreeTest, SeparateStacksForCommitAndReserve) {
{-1 , si_1 , -1 },
{-1 , -1 , -1 }};
check_tree(tree, et1, __LINE__);
tree.commit_mapping(25, 10, rd_None_cs2, true); // commit at the middle of the region
tree.commit_mapping(25, 10, rd_None_cs2, diff, true); // commit at the middle of the region
// Post:
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
@ -1103,7 +1134,7 @@ TEST_VM_F(NMTVMATreeTest, SeparateStacksForCommitAndReserve) {
{-1 , -1 , si_2 , -1 , -1 }};
check_tree(tree, et2, __LINE__);
tree.commit_mapping(0, 20, rd_None_cs2, true); // commit at the beginning of the region
tree.commit_mapping(0, 20, rd_None_cs2, diff, true); // commit at the beginning of the region
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// CCCCCCCCCCCCCCCCCCCCrrrrrCCCCCCCCCCrrrrrrrrrrrrrrr.
@ -1114,7 +1145,7 @@ TEST_VM_F(NMTVMATreeTest, SeparateStacksForCommitAndReserve) {
{-1 , si_2 , -1 , si_2 , -1 , -1 }};
check_tree(tree, et3, __LINE__);
tree.commit_mapping(40, 10, rd_None_cs2, true); // commit at the end of the region
tree.commit_mapping(40, 10, rd_None_cs2, diff, true); // commit at the end of the region
// Post:
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
@ -1128,7 +1159,8 @@ TEST_VM_F(NMTVMATreeTest, SeparateStacksForCommitAndReserve) {
}
{// committing overlapped regions does not destroy the old call-stacks
Tree tree;
tree.reserve_mapping(0, 50, rd_Test_cs1); // reserving in an empty tree
VMATree::SummaryDiff diff;
tree.reserve_mapping(0, 50, rd_Test_cs1, diff); // reserving in an empty tree
// Pre: empty tree.
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
@ -1140,7 +1172,7 @@ TEST_VM_F(NMTVMATreeTest, SeparateStacksForCommitAndReserve) {
{-1 , -1 , -1 }};
check_tree(tree, et1, __LINE__);
tree.commit_mapping(10, 10, rd_None_cs2, true);
tree.commit_mapping(10, 10, rd_None_cs2, diff, true);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// rrrrrrrrrrCCCCCCCCCCrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr
@ -1154,7 +1186,7 @@ TEST_VM_F(NMTVMATreeTest, SeparateStacksForCommitAndReserve) {
SIndex si_3 = si[2];
VMATree::RegionData rd_Test_cs3(si_3, mtTest);
// commit with overlap at the region's start
tree.commit_mapping(5, 10, rd_Test_cs3);
tree.commit_mapping(5, 10, rd_Test_cs3, diff);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// rrrrrCCCCCCCCCCCCCCCrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr
@ -1168,7 +1200,7 @@ TEST_VM_F(NMTVMATreeTest, SeparateStacksForCommitAndReserve) {
SIndex si_4 = si[3];
VMATree::RegionData call_stack_4(si_4, mtTest);
// commit with overlap at the region's end
tree.commit_mapping(15, 10, call_stack_4);
tree.commit_mapping(15, 10, call_stack_4, diff);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// rrrrrCCCCCCCCCCCCCCCCCCCCrrrrrrrrrrrrrrrrrrrrrrrrr
@ -1181,13 +1213,14 @@ TEST_VM_F(NMTVMATreeTest, SeparateStacksForCommitAndReserve) {
}
{// uncommit should not store any call-stack
Tree tree;
tree.reserve_mapping(0, 50, rd_Test_cs1);
VMATree::SummaryDiff diff;
tree.reserve_mapping(0, 50, rd_Test_cs1, diff);
tree.commit_mapping(10, 10, rd_None_cs2, true);
tree.commit_mapping(10, 10, rd_None_cs2, diff, true);
tree.commit_mapping(0, 5, rd_None_cs2, true);
tree.commit_mapping(0, 5, rd_None_cs2, diff, true);
tree.uncommit_mapping(0, 3, rd_None_cs2);
tree.uncommit_mapping(0, 3, rd_None_cs2, diff);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// rrrCCrrrrrCCCCCCCCCCrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr
@ -1198,7 +1231,7 @@ TEST_VM_F(NMTVMATreeTest, SeparateStacksForCommitAndReserve) {
{-1 , -1 , si_2 , -1 , si_2 , -1 , -1 }};
check_tree(tree, et1, __LINE__);
tree.uncommit_mapping(5, 10, rd_None_cs2);
tree.uncommit_mapping(5, 10, rd_None_cs2, diff);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// rrrCCrrrrrrrrrrCCCCCrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr.
@ -1214,8 +1247,9 @@ TEST_VM_F(NMTVMATreeTest, SeparateStacksForCommitAndReserve) {
VMATree::RegionData call_stack_4(si_4, mtTest);
Tree tree;
tree.reserve_mapping(0, 50, rd_Test_cs1);
tree.reserve_mapping(10, 10, call_stack_4);
VMATree::SummaryDiff diff;
tree.reserve_mapping(0, 50, rd_Test_cs1, diff);
tree.reserve_mapping(10, 10, call_stack_4, diff);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// rrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr
@ -1228,7 +1262,8 @@ TEST_VM_F(NMTVMATreeTest, SeparateStacksForCommitAndReserve) {
}
{// commit without reserve
Tree tree;
tree.commit_mapping(0, 50, rd_Test_cs1);
VMATree::SummaryDiff diff;
tree.commit_mapping(0, 50, rd_Test_cs1, diff);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
@ -1241,8 +1276,9 @@ TEST_VM_F(NMTVMATreeTest, SeparateStacksForCommitAndReserve) {
}
{// reserve after commit
Tree tree;
tree.commit_mapping(0, 50, rd_None_cs2);
tree.reserve_mapping(0, 50, rd_Test_cs1);
VMATree::SummaryDiff diff;
tree.commit_mapping(0, 50, rd_None_cs2, diff);
tree.reserve_mapping(0, 50, rd_Test_cs1, diff);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// rrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr
@ -1287,7 +1323,8 @@ TEST_VM_F(NMTVMATreeTest, OverlapTableRows0To3) {
{-1 , -1 , -1 , -1 , -1 , -1 }
};
create_tree(tree, pre, __LINE__);
VMATree::SummaryDiff diff = tree.commit_mapping(5, 20, rd_Test_cs2, false);
VMATree::SummaryDiff diff;
tree.commit_mapping(5, 20, rd_Test_cs2, diff, false);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// .....CCCCCCCCCCCCCCCCCCCC..........................
@ -1314,7 +1351,8 @@ TEST_VM_F(NMTVMATreeTest, OverlapTableRows0To3) {
{-1 , -1 , -1 , -1 , -1 , -1 }
};
create_tree(tree, pre, __LINE__);
VMATree::SummaryDiff diff = tree.commit_mapping(5, 15, rd_Test_cs2, false);
VMATree::SummaryDiff diff;
tree.commit_mapping(5, 15, rd_Test_cs2, diff, false);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// .....CCCCCCCCCCCCCCC...............................
@ -1359,7 +1397,8 @@ TEST_VM_F(NMTVMATreeTest, OverlapTableRows4to7) {
{-1 , -1 , -1 }
};
create_tree(tree, pre, __LINE__);
VMATree::SummaryDiff diff = tree.commit_mapping(20, 20, rd_Test_cs2, false);
VMATree::SummaryDiff diff;
tree.commit_mapping(20, 20, rd_Test_cs2, diff, false);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// rrrrrrrrrr..........CCCCCCCCCCCCCCCCCCCC...........
@ -1386,7 +1425,8 @@ TEST_VM_F(NMTVMATreeTest, OverlapTableRows4to7) {
{-1 , -1 , -1 }
};
create_tree(tree, pre, __LINE__);
VMATree::SummaryDiff diff = tree.commit_mapping(10, 10, rd_Test_cs2, false);
VMATree::SummaryDiff diff;
tree.commit_mapping(10, 10, rd_Test_cs2, diff, false);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// .....rrrrrCCCCCCCCCC...............................
@ -1413,7 +1453,8 @@ TEST_VM_F(NMTVMATreeTest, OverlapTableRows4to7) {
{-1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 }
};
create_tree(tree, pre, __LINE__);
VMATree::SummaryDiff diff = tree.commit_mapping(7, 20, rd_Test_cs2, false);
VMATree::SummaryDiff diff;
tree.commit_mapping(7, 20, rd_Test_cs2, diff, false);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// rrrrr..CCCCCCCCCCCCCCCCCCCC........................
@ -1440,7 +1481,8 @@ TEST_VM_F(NMTVMATreeTest, OverlapTableRows4to7) {
{-1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 }
};
create_tree(tree, pre, __LINE__);
VMATree::SummaryDiff diff = tree.commit_mapping(7, 13, rd_Test_cs2, false);
VMATree::SummaryDiff diff;
tree.commit_mapping(7, 13, rd_Test_cs2, diff, false);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// rrrrr..CCCCCCCCCCCCC...............................
@ -1492,7 +1534,8 @@ TEST_VM_F(NMTVMATreeTest, OverlapTableRows8to11) {
{-1 , -1 , -1 }
};
create_tree(tree, pre, __LINE__);
VMATree::SummaryDiff diff = tree.commit_mapping(10, 20, rd_Test_cs2, false);
VMATree::SummaryDiff diff;
tree.commit_mapping(10, 20, rd_Test_cs2, diff, false);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// rrrrrrrrrrCCCCCCCCCCCCCCCCCCCC.....................
@ -1519,7 +1562,8 @@ TEST_VM_F(NMTVMATreeTest, OverlapTableRows8to11) {
{-1 , -1 , -1 }
};
create_tree(tree, pre, __LINE__);
VMATree::SummaryDiff diff = tree.commit_mapping(0, 20, rd_Test_cs2, false);
VMATree::SummaryDiff diff;
tree.commit_mapping(0, 20, rd_Test_cs2, diff, false);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// CCCCCCCCCCCCCCCCCCCC...............................
@ -1546,7 +1590,8 @@ TEST_VM_F(NMTVMATreeTest, OverlapTableRows8to11) {
{-1 , -1 , -1 , -1 , -1 , -1 , -1 }
};
create_tree(tree, pre, __LINE__);
VMATree::SummaryDiff diff = tree.commit_mapping(5, 20, rd_Test_cs2, false);
VMATree::SummaryDiff diff;
tree.commit_mapping(5, 20, rd_Test_cs2, diff, false);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// .....CCCCCCCCCCCCCCCCCCCC..........................
@ -1573,7 +1618,8 @@ TEST_VM_F(NMTVMATreeTest, OverlapTableRows8to11) {
{-1 , -1 , -1 , -1 , -1 , -1 , -1 }
};
create_tree(tree, pre, __LINE__);
VMATree::SummaryDiff diff = tree.commit_mapping(5, 15, rd_Test_cs2, false);
VMATree::SummaryDiff diff;
tree.commit_mapping(5, 15, rd_Test_cs2, diff, false);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// .....CCCCCCCCCCCCCCC...............................
@ -1619,7 +1665,8 @@ TEST_VM_F(NMTVMATreeTest, OverlapTableRows12to15) {
{-1 , -1 , -1 }
};
create_tree(tree, pre, __LINE__);
VMATree::SummaryDiff diff = tree.commit_mapping(5, 20, rd_Test_cs2, false);
VMATree::SummaryDiff diff;
tree.commit_mapping(5, 20, rd_Test_cs2, diff, false);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// .....CCCCCCCCCCCCCCCCCCCC.....rrrrrrrrrr...........
@ -1646,7 +1693,8 @@ TEST_VM_F(NMTVMATreeTest, OverlapTableRows12to15) {
{-1 , -1 , -1 }
};
create_tree(tree, pre, __LINE__);
VMATree::SummaryDiff diff = tree.commit_mapping(5, 20, rd_Test_cs2, false);
VMATree::SummaryDiff diff;
tree.commit_mapping(5, 20, rd_Test_cs2, diff, false);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// .....CCCCCCCCCCCCCCCCCCCCrrrrr.....................
@ -1673,7 +1721,8 @@ TEST_VM_F(NMTVMATreeTest, OverlapTableRows12to15) {
{-1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 }
};
create_tree(tree, pre, __LINE__);
VMATree::SummaryDiff diff = tree.commit_mapping(5, 20, rd_Test_cs2, false);
VMATree::SummaryDiff diff;
tree.commit_mapping(5, 20, rd_Test_cs2, diff, false);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// .....CCCCCCCCCCCCCCCCCCCC.....rrrrrrrrrr...........
@ -1700,7 +1749,8 @@ TEST_VM_F(NMTVMATreeTest, OverlapTableRows12to15) {
{-1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 }
};
create_tree(tree, pre, __LINE__);
VMATree::SummaryDiff diff = tree.commit_mapping(5, 15, rd_Test_cs2, false);
VMATree::SummaryDiff diff;
tree.commit_mapping(5, 15, rd_Test_cs2, diff, false);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// .....CCCCCCCCCCCCCCC..........rrrrrrrrrr...........
@ -1745,7 +1795,8 @@ TEST_VM_F(NMTVMATreeTest, OverlapTableRows16to19) {
{-1 , -1 , -1 , -1 , -1 }
};
create_tree(tree, pre, __LINE__);
VMATree::SummaryDiff diff = tree.commit_mapping(15, 10, rd_Test_cs2, false);
VMATree::SummaryDiff diff;
tree.commit_mapping(15, 10, rd_Test_cs2, diff, false);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// rrrrrrrrrr.....CCCCCCCCCC.....rrrrrrrrrr...........
@ -1772,7 +1823,8 @@ TEST_VM_F(NMTVMATreeTest, OverlapTableRows16to19) {
{-1 , -1 , -1 , -1 , -1 }
};
create_tree(tree, pre, __LINE__);
VMATree::SummaryDiff diff = tree.commit_mapping(15, 10, rd_Test_cs2, false);
VMATree::SummaryDiff diff;
tree.commit_mapping(15, 10, rd_Test_cs2, diff, false);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// rrrrrrrrrr.....CCCCCCCCCCrrrrr.....................
@ -1799,7 +1851,8 @@ TEST_VM_F(NMTVMATreeTest, OverlapTableRows16to19) {
{-1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 }
};
create_tree(tree, pre, __LINE__);
VMATree::SummaryDiff diff = tree.commit_mapping(7, 20, rd_Test_cs2, false);
VMATree::SummaryDiff diff;
tree.commit_mapping(7, 20, rd_Test_cs2, diff, false);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// rrrrr..CCCCCCCCCCCCCCCCCCCC...rrrrrrrrrr...........
@ -1826,7 +1879,8 @@ TEST_VM_F(NMTVMATreeTest, OverlapTableRows16to19) {
{-1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 }
};
create_tree(tree, pre, __LINE__);
VMATree::SummaryDiff diff = tree.commit_mapping(7, 13, rd_Test_cs2, false);
VMATree::SummaryDiff diff;
tree.commit_mapping(7, 13, rd_Test_cs2, diff, false);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// rrrrr..CCCCCCCCCCCCC..........rrrrrrrrrr...........
@ -1872,7 +1926,8 @@ TEST_VM_F(NMTVMATreeTest, OverlapTableRows20to23) {
{-1 , -1 , -1 , -1 , -1 }
};
create_tree(tree, pre, __LINE__);
VMATree::SummaryDiff diff = tree.commit_mapping(10, 15, rd_Test_cs2, false);
VMATree::SummaryDiff diff;
tree.commit_mapping(10, 15, rd_Test_cs2, diff, false);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// rrrrrrrrrrCCCCCCCCCCCCCCC.....rrrrrrrrrr...........
@ -1899,7 +1954,8 @@ TEST_VM_F(NMTVMATreeTest, OverlapTableRows20to23) {
{-1 , -1 , -1 , -1 , -1 }
};
create_tree(tree, pre, __LINE__);
VMATree::SummaryDiff diff = tree.commit_mapping(10, 15, rd_Test_cs2, false);
VMATree::SummaryDiff diff;
tree.commit_mapping(10, 15, rd_Test_cs2, diff, false);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// rrrrrrrrrrCCCCCCCCCCCCCCCrrrrr.....................
@ -1926,7 +1982,8 @@ TEST_VM_F(NMTVMATreeTest, OverlapTableRows20to23) {
{-1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 }
};
create_tree(tree, pre, __LINE__);
VMATree::SummaryDiff diff = tree.commit_mapping(5, 20, rd_Test_cs2, false);
VMATree::SummaryDiff diff;
tree.commit_mapping(5, 20, rd_Test_cs2, diff, false);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// rrrrrCCCCCCCCCCCCCCCCCCCC.....rrrrrrrrrr...........
@ -1953,7 +2010,8 @@ TEST_VM_F(NMTVMATreeTest, OverlapTableRows20to23) {
{-1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 }
};
create_tree(tree, pre, __LINE__);
VMATree::SummaryDiff diff = tree.commit_mapping(5, 15, rd_Test_cs2, false);
VMATree::SummaryDiff diff;
tree.commit_mapping(5, 15, rd_Test_cs2, diff, false);
// 1 2 3 4 5
// 012345678901234567890123456789012345678901234567890
// rrrrrCCCCCCCCCCCCCCC..........rrrrrrrrrr...........
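The four OverlapTableRows groups enumerate, row by row, how a commit request can sit relative to pre-existing regions: detached, touching a region at its low or high end, overlapping its start or its end, nested inside it, or spanning it together with a neighbor. A generic classifier over half-open intervals, purely illustrative and not part of the patch:

#include <cstdint>

enum class Overlap { Disjoint, TouchesLow, TouchesHigh, Inside, Contains, Partial };

// Classify request [s, s+len) against existing region [rs, rs+rlen).
static Overlap classify(uint64_t s, uint64_t len, uint64_t rs, uint64_t rlen) {
  uint64_t e = s + len, re = rs + rlen;
  if (e < rs || re < s)   return Overlap::Disjoint;
  if (e == rs)            return Overlap::TouchesLow;   // request ends where the region starts
  if (re == s)            return Overlap::TouchesHigh;  // request begins where the region ends
  if (s >= rs && e <= re) return Overlap::Inside;
  if (s <= rs && e >= re) return Overlap::Contains;
  return Overlap::Partial;                              // overlaps one end only
}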

View File

@ -94,6 +94,7 @@ public:
RegionsTree* rtree = vmt.tree();
size_t size = 0x01000000;
const address addr = (address)0x0000A000;
VMATree::SummaryDiff diff;
vmt.add_reserved_region(addr, size, CALLER_PC, mtTest);
@ -115,45 +116,45 @@ public:
// Commit adjacent regions with same stack
{ // Commit one region
rtree->commit_region(addr + cs, cs, stack);
rtree->commit_region(addr + cs, cs, stack, diff);
R r[] = { {addr + cs, cs} };
check(vmt, rmr, r);
}
{ // Commit adjacent - lower address
rtree->commit_region(addr, cs, stack);
rtree->commit_region(addr, cs, stack, diff);
R r[] = { {addr, 2 * cs} };
check(vmt, rmr, r);
}
{ // Commit adjacent - higher address
rtree->commit_region(addr + 2 * cs, cs, stack);
rtree->commit_region(addr + 2 * cs, cs, stack, diff);
R r[] = { {addr, 3 * cs} };
check(vmt,rmr, r);
}
// Cleanup
rtree->uncommit_region(addr, 3 * cs);
rtree->uncommit_region(addr, 3 * cs, diff);
ASSERT_EQ(vmt.committed_size(&rmr), 0u);
// Commit adjacent regions with different stacks
{ // Commit one region
rtree->commit_region(addr + cs, cs, stack);
rtree->commit_region(addr + cs, cs, stack, diff);
R r[] = { {addr + cs, cs} };
check(vmt, rmr, r);
}
{ // Commit adjacent - lower address
rtree->commit_region(addr, cs, stack2);
rtree->commit_region(addr, cs, stack2, diff);
R r[] = { {addr, cs},
{addr + cs, cs} };
check(vmt, rmr, r);
}
{ // Commit adjacent - higher address
rtree->commit_region(addr + 2 * cs, cs, stack2);
rtree->commit_region(addr + 2 * cs, cs, stack2, diff);
R r[] = { {addr, cs},
{addr + cs, cs},
{addr + 2 * cs, cs} };
@ -161,12 +162,13 @@ public:
}
// Cleanup
rtree->uncommit_region(addr, 3 * cs);
rtree->uncommit_region(addr, 3 * cs, diff);
ASSERT_EQ(vmt.committed_size(&rmr), 0u);
}
static void test_add_committed_region_adjacent_overlapping() {
VirtualMemoryTracker vmt(true);
VMATree::SummaryDiff diff;
RegionsTree* rtree = vmt.tree();
size_t size = 0x01000000;
const address addr = (address)0x0000A000;
@ -190,46 +192,46 @@ public:
// Commit adjacent and overlapping regions with same stack
{ // Commit two non-adjacent regions
rtree->commit_region(addr, 2 * cs, stack);
rtree->commit_region(addr + 3 * cs, 2 * cs, stack);
rtree->commit_region(addr, 2 * cs, stack, diff);
rtree->commit_region(addr + 3 * cs, 2 * cs, stack, diff);
R r[] = { {addr, 2 * cs},
{addr + 3 * cs, 2 * cs} };
check(vmt, rmr, r);
}
{ // Commit adjacent and overlapping
rtree->commit_region(addr + 2 * cs, 2 * cs, stack);
rtree->commit_region(addr + 2 * cs, 2 * cs, stack, diff);
R r[] = { {addr, 5 * cs} };
check(vmt, rmr, r);
}
// revert to two non-adjacent regions
rtree->uncommit_region(addr + 2 * cs, cs);
rtree->uncommit_region(addr + 2 * cs, cs, diff);
ASSERT_EQ(vmt.committed_size(&rmr), 4 * cs);
{ // Commit overlapping and adjacent
rtree->commit_region(addr + cs, 2 * cs, stack);
rtree->commit_region(addr + cs, 2 * cs, stack, diff);
R r[] = { {addr, 5 * cs} };
check(vmt, rmr, r);
}
// Cleanup
rtree->uncommit_region(addr, 5 * cs);
rtree->uncommit_region(addr, 5 * cs, diff);
ASSERT_EQ(vmt.committed_size(&rmr), 0u);
// Commit adjacent and overlapping regions with different stacks
{ // Commit two non-adjacent regions
rtree->commit_region(addr, 2 * cs, stack);
rtree->commit_region(addr + 3 * cs, 2 * cs, stack);
rtree->commit_region(addr, 2 * cs, stack, diff);
rtree->commit_region(addr + 3 * cs, 2 * cs, stack, diff);
R r[] = { {addr, 2 * cs},
{addr + 3 * cs, 2 * cs} };
check(vmt, rmr, r);
}
{ // Commit adjacent and overlapping
rtree->commit_region(addr + 2 * cs, 2 * cs, stack2);
rtree->commit_region(addr + 2 * cs, 2 * cs, stack2, diff);
R r[] = { {addr, 2 * cs},
{addr + 2 * cs, 2 * cs},
{addr + 4 * cs, cs} };
@ -237,12 +239,12 @@ public:
}
// revert to two non-adjacent regions
rtree->commit_region(addr, 5 * cs, stack);
rtree->uncommit_region(addr + 2 * cs, cs);
rtree->commit_region(addr, 5 * cs, stack, diff);
rtree->uncommit_region(addr + 2 * cs, cs, diff);
ASSERT_EQ(vmt.committed_size(&rmr), 4 * cs);
{ // Commit overlapping and adjacent
rtree->commit_region(addr + cs, 2 * cs, stack2);
rtree->commit_region(addr + cs, 2 * cs, stack2, diff);
R r[] = { {addr, cs},
{addr + cs, 2 * cs},
{addr + 3 * cs, 2 * cs} };
@ -254,6 +256,7 @@ public:
static void test_add_committed_region_overlapping() {
VirtualMemoryTracker vmt(true);
VMATree::SummaryDiff diff;
RegionsTree* rtree = vmt.tree();
size_t size = 0x01000000;
const address addr = (address)0x0000A000;
@ -279,77 +282,77 @@ public:
// With same stack
{ // Commit one region
rtree->commit_region(addr, cs, stack);
rtree->commit_region(addr, cs, stack, diff);
R r[] = { {addr, cs} };
check(vmt, rmr, r);
}
{ // Commit the same region
rtree->commit_region(addr, cs, stack);
rtree->commit_region(addr, cs, stack, diff);
R r[] = { {addr, cs} };
check(vmt, rmr, r);
}
{ // Commit a succeeding region
rtree->commit_region(addr + cs, cs, stack);
rtree->commit_region(addr + cs, cs, stack, diff);
R r[] = { {addr, 2 * cs} };
check(vmt, rmr, r);
}
{ // Commit over two regions
rtree->commit_region(addr, 2 * cs, stack);
rtree->commit_region(addr, 2 * cs, stack, diff);
R r[] = { {addr, 2 * cs} };
check(vmt, rmr, r);
}
{// Commit first part of a region
rtree->commit_region(addr, cs, stack);
rtree->commit_region(addr, cs, stack, diff);
R r[] = { {addr, 2 * cs} };
check(vmt, rmr, r);
}
{ // Commit second part of a region
rtree->commit_region(addr + cs, cs, stack);
rtree->commit_region(addr + cs, cs, stack, diff);
R r[] = { {addr, 2 * cs} };
check(vmt, rmr, r);
}
{ // Commit a third part
rtree->commit_region(addr + 2 * cs, cs, stack);
rtree->commit_region(addr + 2 * cs, cs, stack, diff);
R r[] = { {addr, 3 * cs} };
check(vmt, rmr, r);
}
{ // Commit in the middle of a region
rtree->commit_region(addr + 1 * cs, cs, stack);
rtree->commit_region(addr + 1 * cs, cs, stack, diff);
R r[] = { {addr, 3 * cs} };
check(vmt, rmr, r);
}
// Cleanup
rtree->uncommit_region(addr, 3 * cs);
rtree->uncommit_region(addr, 3 * cs, diff);
ASSERT_EQ(vmt.committed_size(&rmr), 0u);
// With preceding region
rtree->commit_region(addr, cs, stack);
rtree->commit_region(addr + 2 * cs, 3 * cs, stack);
rtree->commit_region(addr, cs, stack, diff);
rtree->commit_region(addr + 2 * cs, 3 * cs, stack, diff);
rtree->commit_region(addr + 2 * cs, cs, stack);
rtree->commit_region(addr + 2 * cs, cs, stack, diff);
{
R r[] = { {addr, cs},
{addr + 2 * cs, 3 * cs} };
check(vmt, rmr, r);
}
rtree->commit_region(addr + 3 * cs, cs, stack);
rtree->commit_region(addr + 3 * cs, cs, stack, diff);
{
R r[] = { {addr, cs},
{addr + 2 * cs, 3 * cs} };
check(vmt, rmr, r);
}
rtree->commit_region(addr + 4 * cs, cs, stack);
rtree->commit_region(addr + 4 * cs, cs, stack, diff);
{
R r[] = { {addr, cs},
{addr + 2 * cs, 3 * cs} };
@ -357,57 +360,57 @@ public:
}
// Cleanup
rtree->uncommit_region(addr, 5 * cs);
rtree->uncommit_region(addr, 5 * cs, diff);
ASSERT_EQ(vmt.committed_size(&rmr), 0u);
// With different stacks
{ // Commit one region
rtree->commit_region(addr, cs, stack);
rtree->commit_region(addr, cs, stack, diff);
R r[] = { {addr, cs} };
check(vmt, rmr, r);
}
{ // Commit the same region
rtree->commit_region(addr, cs, stack2);
rtree->commit_region(addr, cs, stack2, diff);
R r[] = { {addr, cs} };
check(vmt, rmr, r);
}
{ // Commit a succeeding region
rtree->commit_region(addr + cs, cs, stack);
rtree->commit_region(addr + cs, cs, stack, diff);
R r[] = { {addr, cs},
{addr + cs, cs} };
check(vmt, rmr, r);
}
{ // Commit over two regions
rtree->commit_region(addr, 2 * cs, stack);
rtree->commit_region(addr, 2 * cs, stack, diff);
R r[] = { {addr, 2 * cs} };
check(vmt, rmr, r);
}
{// Commit first part of a region
rtree->commit_region(addr, cs, stack2);
rtree->commit_region(addr, cs, stack2, diff);
R r[] = { {addr, cs},
{addr + cs, cs} };
check(vmt, rmr, r);
}
{ // Commit second part of a region
rtree->commit_region(addr + cs, cs, stack2);
rtree->commit_region(addr + cs, cs, stack2, diff);
R r[] = { {addr, 2 * cs} };
check(vmt, rmr, r);
}
{ // Commit a third part
rtree->commit_region(addr + 2 * cs, cs, stack2);
rtree->commit_region(addr + 2 * cs, cs, stack2, diff);
R r[] = { {addr, 3 * cs} };
check(vmt, rmr, r);
}
{ // Commit in the middle of a region
rtree->commit_region(addr + 1 * cs, cs, stack);
rtree->commit_region(addr + 1 * cs, cs, stack, diff);
R r[] = { {addr, cs},
{addr + cs, cs},
{addr + 2 * cs, cs} };
@ -430,6 +433,7 @@ public:
static void test_remove_uncommitted_region() {
VirtualMemoryTracker vmt(true);
VMATree::SummaryDiff diff;
RegionsTree* rtree = vmt.tree();
size_t size = 0x01000000;
const address addr = (address)0x0000A000;
@ -451,105 +455,105 @@ public:
const size_t cs = 0x1000;
{ // Commit regions
rtree->commit_region(addr, 3 * cs, stack);
rtree->commit_region(addr, 3 * cs, stack, diff);
R r[] = { {addr, 3 * cs} };
check(vmt, rmr, r);
// Remove only existing
rtree->uncommit_region(addr, 3 * cs);
rtree->uncommit_region(addr, 3 * cs, diff);
check_empty(vmt, rmr);
}
{
rtree->commit_region(addr + 0 * cs, cs, stack);
rtree->commit_region(addr + 2 * cs, cs, stack);
rtree->commit_region(addr + 4 * cs, cs, stack);
rtree->commit_region(addr + 0 * cs, cs, stack, diff);
rtree->commit_region(addr + 2 * cs, cs, stack, diff);
rtree->commit_region(addr + 4 * cs, cs, stack, diff);
{ // Remove first
rtree->uncommit_region(addr, cs);
rtree->uncommit_region(addr, cs, diff);
R r[] = { {addr + 2 * cs, cs},
{addr + 4 * cs, cs} };
check(vmt, rmr, r);
}
// add back
rtree->commit_region(addr, cs, stack);
rtree->commit_region(addr, cs, stack, diff);
{ // Remove middle
rtree->uncommit_region(addr + 2 * cs, cs);
rtree->uncommit_region(addr + 2 * cs, cs, diff);
R r[] = { {addr + 0 * cs, cs},
{addr + 4 * cs, cs} };
check(vmt, rmr, r);
}
// add back
rtree->commit_region(addr + 2 * cs, cs, stack);
rtree->commit_region(addr + 2 * cs, cs, stack, diff);
{ // Remove end
rtree->uncommit_region(addr + 4 * cs, cs);
rtree->uncommit_region(addr + 4 * cs, cs, diff);
R r[] = { {addr + 0 * cs, cs},
{addr + 2 * cs, cs} };
check(vmt, rmr, r);
}
rtree->uncommit_region(addr, 5 * cs);
rtree->uncommit_region(addr, 5 * cs, diff);
check_empty(vmt, rmr);
}
{ // Remove larger region
rtree->commit_region(addr + 1 * cs, cs, stack);
rtree->uncommit_region(addr, 3 * cs);
rtree->commit_region(addr + 1 * cs, cs, stack, diff);
rtree->uncommit_region(addr, 3 * cs, diff);
check_empty(vmt, rmr);
}
{ // Remove smaller region - in the middle
rtree->commit_region(addr, 3 * cs, stack);
rtree->uncommit_region(addr + 1 * cs, cs);
rtree->commit_region(addr, 3 * cs, stack, diff);
rtree->uncommit_region(addr + 1 * cs, cs, diff);
R r[] = { { addr + 0 * cs, cs},
{ addr + 2 * cs, cs} };
check(vmt, rmr, r);
rtree->uncommit_region(addr, 3 * cs);
rtree->uncommit_region(addr, 3 * cs, diff);
check_empty(vmt, rmr);
}
{ // Remove smaller region - at the beginning
rtree->commit_region(addr, 3 * cs, stack);
rtree->uncommit_region(addr + 0 * cs, cs);
rtree->commit_region(addr, 3 * cs, stack, diff);
rtree->uncommit_region(addr + 0 * cs, cs, diff);
R r[] = { { addr + 1 * cs, 2 * cs} };
check(vmt, rmr, r);
rtree->uncommit_region(addr, 3 * cs);
rtree->uncommit_region(addr, 3 * cs, diff);
check_empty(vmt, rmr);
}
{ // Remove smaller region - at the end
rtree->commit_region(addr, 3 * cs, stack);
rtree->uncommit_region(addr + 2 * cs, cs);
rtree->commit_region(addr, 3 * cs, stack, diff);
rtree->uncommit_region(addr + 2 * cs, cs, diff);
R r[] = { { addr, 2 * cs} };
check(vmt, rmr, r);
rtree->uncommit_region(addr, 3 * cs);
rtree->uncommit_region(addr, 3 * cs, diff);
check_empty(vmt, rmr);
}
{ // Remove smaller, overlapping region - at the beginning
rtree->commit_region(addr + 1 * cs, 4 * cs, stack);
rtree->uncommit_region(addr, 2 * cs);
rtree->commit_region(addr + 1 * cs, 4 * cs, stack, diff);
rtree->uncommit_region(addr, 2 * cs, diff);
R r[] = { { addr + 2 * cs, 3 * cs} };
check(vmt, rmr, r);
rtree->uncommit_region(addr + 1 * cs, 4 * cs);
rtree->uncommit_region(addr + 1 * cs, 4 * cs, diff);
check_empty(vmt, rmr);
}
{ // Remove smaller, overlapping region - at the end
rtree->commit_region(addr, 3 * cs, stack);
rtree->uncommit_region(addr + 2 * cs, 2 * cs);
rtree->commit_region(addr, 3 * cs, stack, diff);
rtree->uncommit_region(addr + 2 * cs, 2 * cs, diff);
R r[] = { { addr, 2 * cs} };
check(vmt, rmr, r);
rtree->uncommit_region(addr, 3 * cs);
rtree->uncommit_region(addr, 3 * cs, diff);
check_empty(vmt, rmr);
}
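The VirtualMemoryTracker tests above exercise the same out-parameter change on RegionsTree::commit_region and uncommit_region, and pin down the coalescing rule: adjacent committed regions merge into one only when they were committed with the same call stack; with different stacks they remain separate entries. The rule reduces to a predicate like the following sketch (hypothetical types, not the RegionsTree implementation):

#include <cstdint>

struct Committed { uint64_t start, size; int stack_id; };

// Two committed regions can fold into one iff they touch and share a stack.
static bool can_merge(const Committed& a, const Committed& b) {
  return a.start + a.size == b.start && a.stack_id == b.stack_id;
}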

View File

@ -0,0 +1,66 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8367967
* @summary Ensure ModI/LNode::Value is monotonic with potential division by 0
* @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:CompileOnly=compiler.ccp.TestModValueMonotonic::test*
* -XX:+StressCCP -XX:RepeatCompilation=100 -Xcomp compiler.ccp.TestModValueMonotonic
* @run main compiler.ccp.TestModValueMonotonic
*/
package compiler.ccp;
public class TestModValueMonotonic {
static int iFld;
static long lFld;
static int limit = 1000;
static boolean flag;
public static void main(String[] args) {
testInt();
testLong();
}
static void testInt() {
int zero = 0;
// Make sure the loop is not counted so that it is not removed. This creates a more complex graph for CCP.
for (int i = 1; i < limit; i*=4) {
zero = 34;
}
int three = flag ? 0 : 3;
iFld = three % zero; // phi[0..3] % phi[0..34]
}
static void testLong() {
long zero = 0;
// Make sure the loop is not counted so that it is not removed. This creates a more complex graph for CCP.
for (int i = 1; i < limit; i*=4) {
zero = 34;
}
long three = flag ? 0 : 3;
lFld = three % zero; // phi[0..3] % phi[0..34]
}
}
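The test forces CCP to evaluate three % zero while the operands still carry the phi ranges noted in the comments, [0..3] % [0..34], so the divisor's type includes 0. ModINode::Value and ModLNode::Value must then produce a type that never narrows on a later CCP pass relative to an earlier one; -XX:+StressCCP with repeated compilations is what shakes out a non-monotonic answer. The shape of the interval reasoning, as a generic C++ sketch rather than C2's actual Value() code:

struct Range { long lo, hi; };

// Result range of a % b for non-negative operand ranges. When the divisor
// range contains 0 the operation may trap, so stay conservative and return
// the dividend's range (sound, since 0 <= a % b <= a for a >= 0, b > 0).
static Range mod_range(Range a, Range b) {
  if (b.lo <= 0 && 0 <= b.hi) {
    return a;
  }
  long max_rem = b.hi - 1;  // for b > 0: 0 <= a % b <= b - 1
  return Range{0, a.hi < max_rem ? a.hi : max_rem};
}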

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2022 SAP SE. All rights reserved.
* Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -47,7 +47,7 @@
* @requires vm.flagless
* @modules java.base/jdk.internal.misc
* @library /test/lib
* @run driver MallocLimitTest compiler-limit-fatal
* @run driver/timeout=480 MallocLimitTest compiler-limit-fatal
*/
/*

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -345,12 +345,10 @@ public class EventHandler implements Runnable {
public boolean eventReceived(Event event) {
if (event instanceof VMDisconnectEvent) {
display("receieved VMDisconnect");
synchronized(EventHandler.this) {
vmDisconnected = true;
status = 0; // OK finish
EventHandler.this.notifyAll();
removeListener(this);
}
vmDisconnected = true;
status = 0; // OK finish
EventHandler.this.notifyAll();
removeListener(this);
return true;
}
return false;
@ -431,6 +429,10 @@ public class EventHandler implements Runnable {
}
public boolean eventReceived(Event event) {
if (en.event != null) {
// If we already got the requested event, don't handle this one.
return false;
}
EventSet set = en.set;
en.set = null; // We'll reset it below if the event matches a request.
for (int i = 0; i < requests.length; i++) {
@ -441,11 +443,9 @@ public class EventHandler implements Runnable {
if (request.equals(event.request())) {
display("waitForRequestedEventCommon: Received event(" + event +
") for request(" + request + ")");
synchronized (EventHandler.this) {
en.event = event;
en.set = set;
EventHandler.this.notifyAll();
}
en.event = event;
en.set = set;
EventHandler.this.notifyAll();
return true; // event was handled
}
}

View File

@ -204,11 +204,9 @@ public abstract class TestDebuggerType1 {
new EventHandler.EventListener() {
public boolean eventReceived(Event event) {
if (event instanceof BreakpointEvent && bpRequest.equals(event.request())) {
synchronized(eventHandler) {
display("Received communication breakpoint event.");
bpCount++;
eventHandler.notifyAll();
}
display("Received communication breakpoint event.");
bpCount++;
eventHandler.notifyAll();
return true;
}
return false;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,7 +23,7 @@
/*
* @test
* @bug 6425068 7157659 8029204 8132890 8148334 8344637
* @bug 6425068 7156751 7157659 8029204 8132890 8148334 8344637
* @key printer
* @summary Confirm that text prints where we expect to the length we expect.
* @library /java/awt/regtesthelpers

View File

@ -28,7 +28,6 @@
* @run main/othervm -esa RacingSBThreads read
* @run main/othervm -esa RacingSBThreads insert
* @run main/othervm -esa RacingSBThreads append
* @run main/othervm -Xcomp RacingSBThreads
*/
import java.nio.CharBuffer;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,9 +22,31 @@
*/
/*
* @test
* @test id=dialog_double
* @bug 4269775 8341535
* @summary Check that different text rendering APIs agree
* @run main/othervm TestDevTransform DIALOG DOUBLE
*/
/*
* @test id=dialog_float
* @bug 4269775 8341535
* @summary Check that different text rendering APIs agree
* @run main/othervm TestDevTransform DIALOG FLOAT
*/
/*
* @test id=monospaced_double
* @bug 4269775 8341535
* @summary Check that different text rendering APIs agree
* @run main/othervm TestDevTransform MONOSPACED DOUBLE
*/
/*
* @test id=monospaced_float
* @bug 4269775 8341535
* @summary Check that different text rendering APIs agree
* @run main/othervm TestDevTransform MONOSPACED FLOAT
*/
/**
@ -66,6 +88,8 @@ public class TestDevTransform {
static String test = "This is only a test";
static double angle = Math.PI / 6.0; // Rotate 30 degrees
static final int W = 400, H = 400;
static boolean useDialog;
static boolean useDouble;
static void draw(Graphics2D g2d, TextLayout layout,
float x, float y, float scalex) {
@ -101,9 +125,19 @@ public class TestDevTransform {
g2d.setColor(Color.white);
g2d.fillRect(0, 0, W, H);
g2d.setColor(Color.black);
g2d.scale(1.481f, 1.481); // Convert to 108 dpi
if (useDouble) {
g2d.scale(1.481, 1.481); // Convert to 108 dpi
} else {
g2d.scale(1.481f, 1.481f); // Convert to 108 dpi
}
g2d.addRenderingHints(hints);
Font font = new Font(Font.DIALOG, Font.PLAIN, 12);
String name;
if (useDialog) {
name = Font.DIALOG;
} else {
name = Font.MONOSPACED;
}
Font font = new Font(name, Font.PLAIN, 12);
g2d.setFont(font);
}
@ -135,6 +169,12 @@ public class TestDevTransform {
}
public static void main(String args[]) throws Exception {
if (args[0].equals("DIALOG")) {
useDialog = true;
}
if (args[1].equals("DOUBLE")) {
useDouble = true;
}
BufferedImage tl_Image = new BufferedImage(W, H, BufferedImage.TYPE_INT_RGB);
{

View File

@ -0,0 +1,128 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8356022
* @summary Tests for sun.invoke.util.BytecodeDescriptor
* @library /test/lib
* @modules java.base/sun.invoke.util
* @run junit BytecodeDescriptorTest
*/
import java.lang.classfile.ClassFile;
import java.lang.constant.ClassDesc;
import java.util.List;
import java.util.Map;
import jdk.test.lib.ByteCodeLoader;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import sun.invoke.util.BytecodeDescriptor;
import static org.junit.jupiter.api.Assertions.*;
class BytecodeDescriptorTest {
private static final String FOO_NAME = "dummy.Foo";
private static final String BAR_NAME = "dummy.Bar";
private static final String FOO_DESC = "L" + FOO_NAME.replace('.', '/') + ";";
private static final String BAR_DESC = "L" + BAR_NAME.replace('.', '/') + ";";
private static final String DOES_NOT_EXIST_DESC = "Ldoes/not/Exist;";
static Class<?> foo1, foo2, bar1;
static ClassLoader cl1, cl2;
@BeforeAll
static void setup() throws Throwable {
var fooBytes = ClassFile.of().build(ClassDesc.of(FOO_NAME), _ -> {});
var barBytes = ClassFile.of().build(ClassDesc.of(BAR_NAME), _ -> {});
cl1 = new ByteCodeLoader(Map.of(FOO_NAME, fooBytes, BAR_NAME, barBytes), ClassLoader.getSystemClassLoader());
foo1 = cl1.loadClass(FOO_NAME);
bar1 = cl1.loadClass(BAR_NAME);
foo2 = ByteCodeLoader.load(FOO_NAME, fooBytes);
cl2 = foo2.getClassLoader();
// Sanity
assertNotSame(foo1, foo2);
assertNotSame(cl1, cl2);
assertSame(cl1, foo1.getClassLoader());
assertSame(cl1, bar1.getClassLoader());
assertNotSame(cl1, foo2.getClassLoader());
assertEquals(FOO_DESC, foo1.descriptorString());
assertEquals(FOO_DESC, foo2.descriptorString());
assertEquals(BAR_DESC, bar1.descriptorString());
}
@Test
void testParseClass() throws ReflectiveOperationException {
assertSame(void.class, BytecodeDescriptor.parseClass("V", null), "void");
assertSame(int.class, BytecodeDescriptor.parseClass("I", null), "primitive");
assertSame(long[][].class, BytecodeDescriptor.parseClass("[[J", null), "array");
assertSame(Object.class, BytecodeDescriptor.parseClass("Ljava/lang/Object;", null), "class or interface");
assertThrows(IllegalArgumentException.class, () -> BytecodeDescriptor.parseClass("java/lang/Object", null), "internal name");
assertThrows(IllegalArgumentException.class, () -> BytecodeDescriptor.parseClass("[V", null), "bad array");
assertSame(Class.forName("[".repeat(255) + "I"), BytecodeDescriptor.parseClass("[".repeat(255) + "I", null), "good array");
assertThrows(IllegalArgumentException.class, () -> BytecodeDescriptor.parseClass("[".repeat(256) + "I", null), "bad array");
assertSame(foo2, BytecodeDescriptor.parseClass(FOO_DESC, cl2), "class loader");
assertThrows(TypeNotPresentException.class, () -> BytecodeDescriptor.parseClass(DOES_NOT_EXIST_DESC, null), "not existent");
assertThrows(TypeNotPresentException.class, () -> BytecodeDescriptor.parseClass(BAR_DESC, cl2), "cross loader");
}
@Test
void testParseMethod() {
assertEquals(List.of(void.class),
BytecodeDescriptor.parseMethod("()V", null),
"no-arg");
assertEquals(List.of(int.class, Object.class, long[].class, void.class),
BytecodeDescriptor.parseMethod("(ILjava/lang/Object;[J)V", null),
"sanity");
assertThrows(IllegalArgumentException.class,
() -> BytecodeDescriptor.parseMethod("()", null),
"no return");
assertThrows(IllegalArgumentException.class,
() -> BytecodeDescriptor.parseMethod("(V)V", null),
"bad arg");
var voidInMsgIAE = assertThrows(IllegalArgumentException.class,
() -> BytecodeDescriptor.parseMethod("([V)I", null),
"bad arg");
assertTrue(voidInMsgIAE.getMessage().contains("[V"), () -> "missing [V type in: '%s'".formatted(voidInMsgIAE.getMessage()));
assertThrows(IllegalArgumentException.class,
() -> BytecodeDescriptor.parseClass("([".repeat(256) + "I)J", null),
"bad arg");
assertEquals(List.of(foo1, bar1),
BytecodeDescriptor.parseMethod("(" + FOO_DESC + ")" + BAR_DESC, cl1),
"class loader");
assertThrows(TypeNotPresentException.class,
() -> BytecodeDescriptor.parseMethod("(" + FOO_DESC + ")" + BAR_DESC, cl2),
"no bar");
assertThrows(TypeNotPresentException.class,
() -> BytecodeDescriptor.parseMethod("(" + FOO_DESC + "V)V", null),
"first encounter TNPE");
assertThrows(IllegalArgumentException.class,
() -> BytecodeDescriptor.parseMethod("(V" + FOO_DESC + ")V", null),
"first encounter IAE");
}
}
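The cases above track the JVMS descriptor grammar: primitives are single letters, V is accepted for void but [V is rejected, class types are L<internal name>;, at most 255 array dimensions are allowed, and class resolution honors the supplied loader (hence the cross-loader TypeNotPresentException cases). A minimal validity check covering just the syntactic cases tested, as a generic sketch and not the BytecodeDescriptor implementation:

#include <string>

static bool valid_descriptor(const std::string& d) {
  size_t i = 0, dims = 0;
  while (i < d.size() && d[i] == '[') { i++; dims++; }
  if (dims > 255 || i >= d.size()) return false;          // JVMS: at most 255 dimensions
  char c = d[i];
  if (c == 'V') return dims == 0 && i + 1 == d.size();    // "V" ok, "[V" is not
  if (std::string("BCDFIJSZ").find(c) != std::string::npos) return i + 1 == d.size();
  if (c == 'L') return d.size() > i + 2 && d.back() == ';';
  return false;
}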

View File

@ -32,6 +32,7 @@ import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
@ -74,10 +75,23 @@ public final class MacHelper {
Path mountPoint = null;
try {
// The first "dict" item of "system-entities" array property contains "mount-point" string property.
var plist = readPList(attachExecutor.getOutput()).queryArrayValue("system-entities", false).findFirst().map(PListReader.class::cast).orElseThrow();
mountPoint = Path.of(plist.queryValue("mount-point"));
// One of "dict" items of "system-entities" array property should contain "mount-point" string property.
mountPoint = readPList(attachExecutor.getOutput()).queryArrayValue("system-entities", false).map(PListReader.class::cast).map(dict -> {
try {
return dict.queryValue("mount-point");
} catch (NoSuchElementException ex) {
return (String)null;
}
}).filter(Objects::nonNull).map(Path::of).findFirst().orElseThrow();
} finally {
if (mountPoint == null) {
TKit.trace("Unexpected plist file missing `system-entities` array:");
attachExecutor.getOutput().forEach(TKit::trace);
TKit.trace("Done");
}
}
try {
// code here used to copy just <runtime name> or <app name>.app
// We now have option to include arbitrary content, so we copy
// everything in the mounted image.