8363998: Implement Compressed Class Pointers for 32-bit
Reviewed-by: rkennke, coleenp
parent f40381e41d
commit 819de07117
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -95,8 +95,6 @@
 }

 static int adjust_reg_range(int range) {
-  // Reduce the number of available regs (to free Rheap_base) in case of compressed oops
-  if (UseCompressedOops || UseCompressedClassPointers) return range - 1;
   return range;
 }

@@ -2229,16 +2229,9 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
     // We don't know the array types are compatible
     if (basic_type != T_OBJECT) {
       // Simple test for basic type arrays
-      if (UseCompressedClassPointers) {
-        // We don't need decode because we just need to compare
-        __ ldr_u32(tmp, Address(src, oopDesc::klass_offset_in_bytes()));
-        __ ldr_u32(tmp2, Address(dst, oopDesc::klass_offset_in_bytes()));
-        __ cmp_32(tmp, tmp2);
-      } else {
-        __ load_klass(tmp, src);
-        __ load_klass(tmp2, dst);
-        __ cmp(tmp, tmp2);
-      }
+      __ load_klass(tmp, src);
+      __ load_klass(tmp2, dst);
+      __ cmp(tmp, tmp2);
       __ b(*stub->entry(), ne);
     } else {
       // For object arrays, if src is a sub class of dst then we can
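The single compare path works because, with this change, a 32-bit narrowKlass is just the full 32-bit Klass address: base is null and shift is zero, so load_klass is a plain load whether or not class pointers are "compressed". A self-contained sketch of that identity encoding (local names, not HotSpot's):

#include <cassert>
#include <cstdint>

// Identity encoding: with base == 0 and shift == 0, encode/decode are no-ops,
// so comparing two encoded class pointers equals comparing the raw pointers.
typedef uint32_t narrowKlass;

static narrowKlass encode(uint32_t klass_addr) { return klass_addr >> 0; } // shift == 0
static uint32_t decode(narrowKlass nk)         { return (nk << 0) + 0;  } // base == 0

int main() {
  uint32_t k = 0x08040000u;         // some plausible 32-bit Klass address
  assert(decode(encode(k)) == k);   // round-trip is the identity
  assert(encode(k) == encode(k));   // compare without decoding
  return 0;
}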
@@ -2461,12 +2454,7 @@ void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
   if (info != nullptr) {
     add_debug_info_for_null_check_here(info);
   }
-
-  if (UseCompressedClassPointers) { // On 32 bit arm??
-    __ ldr_u32(result, Address(obj, oopDesc::klass_offset_in_bytes()));
-  } else {
-    __ ldr(result, Address(obj, oopDesc::klass_offset_in_bytes()));
-  }
+  __ ldr(result, Address(obj, oopDesc::klass_offset_in_bytes()));
 }

 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
@ -245,7 +245,7 @@ static bool shared_base_too_high(char* specified_base, char* aligned_base, size_
|
||||
static char* compute_shared_base(size_t cds_max) {
|
||||
char* specified_base = (char*)SharedBaseAddress;
|
||||
size_t alignment = MetaspaceShared::core_region_alignment();
|
||||
if (UseCompressedClassPointers) {
|
||||
if (UseCompressedClassPointers && CompressedKlassPointers::needs_class_space()) {
|
||||
alignment = MAX2(alignment, Metaspace::reserve_alignment());
|
||||
}
|
||||
|
||||
|
||||
@@ -834,14 +834,20 @@ void Metaspace::global_initialize() {

   }
-
-#endif // _LP64
+#else
+  // +UseCompressedClassPointers on 32-bit: does not need class space. Klass can live wherever.
+  if (UseCompressedClassPointers) {
+    const address start = (address)os::vm_min_address(); // but not in the zero page
+    const address end = (address)CompressedKlassPointers::max_klass_range_size();
+    CompressedKlassPointers::initialize(start, end - start);
+  }
+#endif // _LP64

   // Initialize non-class virtual space list, and its chunk manager:
   MetaspaceContext::initialize_nonclass_space_context();

   _tracer = new MetaspaceTracer();

 #ifdef _LP64
   if (UseCompressedClassPointers) {
     // Note: "cds" would be a better fit but keep this for backward compatibility.
     LogTarget(Info, gc, metaspace) lt;
@@ -852,8 +858,6 @@ void Metaspace::global_initialize() {
       CompressedKlassPointers::print_mode(&ls);
     }
   }
 #endif

 }

 void Metaspace::post_initialize() {
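For orientation, the range the new 32-bit branch hands to CompressedKlassPointers::initialize() can be computed by hand. A sketch with assumed values (4 KB minimum user address and 4 KB pages; HotSpot takes the real numbers from os::vm_min_address() and os::vm_page_size()):

#include <cstdint>
#include <cstdio>

int main() {
  // Assumed platform values; HotSpot queries the OS for these.
  const uint32_t vm_min_address = 4096;  // stand-in for os::vm_min_address()
  const uint32_t page_size     = 4096;   // stand-in for os::vm_page_size()

  // align_down(UINT_MAX, page_size), as in max_klass_range_size() further down
  const uint32_t range_end = (UINT32_MAX / page_size) * page_size;

  const uint32_t start = vm_min_address;    // skip the unmapped zero page(s)
  const uint32_t len   = range_end - start; // the 'end - start' from the hunk
  printf("initialize(0x%x, %u) -> encoding covers [0x%x, 0x%x)\n",
         start, len, start, range_end);
  return 0;
}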
@@ -260,6 +260,16 @@ VirtualSpaceNode* VirtualSpaceNode::create_node(size_t word_size,
   if (!rs.is_reserved()) {
     vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to reserve memory for metaspace");
   }
+
+#ifndef _LP64
+  // On 32-bit, with +UseCompressedClassPointers, the whole address space is the encoding range. We therefore
+  // don't need a class space. However, as a pragmatic workaround for pesky overflow problems on 32-bit, we leave
+  // a small area at the end of the address space out of the encoding range. We just assume no Klass will ever live
+  // there (it won't, for no OS we support on 32-bit has user-addressable memory up there).
+  assert(!UseCompressedClassPointers ||
+         rs.end() <= (char*)CompressedKlassPointers::max_klass_range_size(), "Weirdly high address");
+#endif // _LP64
+
   MemTracker::record_virtual_memory_tag(rs, mtMetaspace);
   assert_is_aligned(rs.base(), chunklevel::MAX_CHUNK_BYTE_SIZE);
   InternalStats::inc_num_vsnodes_births();
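The "overflow problems" the comment refers to are plain unsigned wraparound: a full 2^32-byte encoding range has no representable end address in a 32-bit size_t. A minimal demonstration (standalone C++, using uint32_t to model a 32-bit size_t):

#include <cstdint>
#include <cstdio>

int main() {
  // With a full 4 GB range, 'base + size' wraps to 0, so a range check like
  // 'addr < base + size' silently becomes 'addr < 0', i.e. always false.
  uint32_t base = 0;
  uint32_t full_range = (uint32_t)(4ull * 1024 * 1024 * 1024); // wraps to 0
  printf("base + 4G = %u (wrapped)\n", base + full_range);

  // Leaving the last page out of the range keeps the end representable:
  uint32_t page = 4096;
  uint32_t capped_end = (UINT32_MAX / page) * page; // align_down(UINT_MAX, page)
  printf("capped end = 0x%x\n", capped_end);
  return 0;
}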
@@ -44,15 +44,19 @@ narrowKlass CompressedKlassPointers::_lowest_valid_narrow_klass_id = (narrowKlas
 narrowKlass CompressedKlassPointers::_highest_valid_narrow_klass_id = (narrowKlass)-1;
 size_t CompressedKlassPointers::_protection_zone_size = 0;

-#ifdef _LP64
-
 size_t CompressedKlassPointers::max_klass_range_size() {
-  // We disallow klass range sizes larger than 4GB even if the encoding
-  // range would allow for a larger Klass range (e.g. Base=zero, shift=3 -> 32GB).
-  // That is because many CPU-specific compiler decodings do not want the
-  // shifted narrow Klass to spill over into the third quadrant of the 64-bit target
-  // address, e.g. to use a 16-bit move for a simplified base addition.
-  return MIN2(4 * G, max_encoding_range_size());
+#ifdef _LP64
+  const size_t encoding_allows = nth_bit(narrow_klass_pointer_bits() + max_shift());
+  constexpr size_t cap = 4 * G;
+  return MIN2(encoding_allows, cap);
+#else
+  // 32-bit: only 32-bit "narrow" Klass pointers allowed. If we ever support smaller narrow
+  // Klass pointers here, coding needs to be revised.
+  // We keep one page safety zone free to guard against size_t overflows on 32-bit. In practice
+  // this is irrelevant because these upper address space parts are not user-addressable on
+  // any of our 32-bit platforms.
+  return align_down(UINT_MAX, os::vm_page_size());
+#endif
 }

 void CompressedKlassPointers::pre_initialize() {
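Worked numbers for both branches, assuming the traditional 64-bit geometry of 32-bit narrowKlass IDs with a maximum shift of 3 (the *_noncoh constants used in pre_initialize() below; the hunk itself does not show their values, so treat them as assumptions):

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  // 64-bit branch: cap the encoding-derived range at 4 GB.
  const int bits = 32, max_shift = 3;                          // assumed geometry
  const uint64_t encoding_allows = 1ull << (bits + max_shift); // 32 GB
  const uint64_t cap = 4ull * 1024 * 1024 * 1024;              // 4 GB
  printf("64-bit: max_klass_range_size = %llu\n",
         (unsigned long long)std::min(encoding_allows, cap));

  // 32-bit branch: the whole address space minus a one-page safety zone.
  const uint32_t page = 4096;
  printf("32-bit: max_klass_range_size = 0x%x\n",
         (UINT32_MAX / page) * page);
  return 0;
}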
@@ -60,8 +64,13 @@ void CompressedKlassPointers::pre_initialize() {
     _narrow_klass_pointer_bits = narrow_klass_pointer_bits_coh;
     _max_shift = max_shift_coh;
   } else {
+#ifdef _LP64
     _narrow_klass_pointer_bits = narrow_klass_pointer_bits_noncoh;
     _max_shift = max_shift_noncoh;
+#else
+    _narrow_klass_pointer_bits = 32;
+    _max_shift = 0;
+#endif
   }
 }

@@ -84,6 +93,10 @@ void CompressedKlassPointers::sanity_check_after_initialization() {
   ASSERT_HERE(_base != (address)-1);
   ASSERT_HERE(_shift != -1);

+  // We should need a class space if address space is larger than what narrowKlass can address
+  const bool should_need_class_space = (BytesPerWord * BitsPerByte) > narrow_klass_pointer_bits();
+  ASSERT_HERE(should_need_class_space == needs_class_space());
+
   const size_t klass_align = klass_alignment_in_bytes();

   // must be aligned enough hold 64-bit data
@@ -96,7 +109,9 @@ void CompressedKlassPointers::sanity_check_after_initialization() {

   // Check that Klass range is fully engulfed in the encoding range
   const address encoding_start = _base;
-  const address encoding_end = (address)(p2u(_base) + (uintptr_t)nth_bit(narrow_klass_pointer_bits() + _shift));
+  const address encoding_end = (address)
+      LP64_ONLY(p2u(_base) + (uintptr_t)nth_bit(narrow_klass_pointer_bits() + _shift))
+      NOT_LP64(max_klass_range_size());
   ASSERT_HERE_2(_klass_range_start >= _base && _klass_range_end <= encoding_end,
                 "Resulting encoding range does not fully cover the class range");

@@ -239,6 +254,7 @@ void CompressedKlassPointers::initialize(address addr, size_t len) {

   } else {

+#ifdef _LP64
     // Traditional (non-compact) header mode
     const uintptr_t unscaled_max = nth_bit(narrow_klass_pointer_bits());
     const uintptr_t zerobased_max = nth_bit(narrow_klass_pointer_bits() + max_shift());
@@ -250,6 +266,7 @@ void CompressedKlassPointers::initialize(address addr, size_t len) {
     address const end = addr + len;
     _base = (end <= (address)unscaled_max) ? nullptr : addr;
 #else
+
     // We try, in order of preference:
     // -unscaled (base=0 shift=0)
     // -zero-based (base=0 shift>0)
@@ -270,11 +287,19 @@ void CompressedKlassPointers::initialize(address addr, size_t len) {
       }
     }
 #endif // AARCH64
+#else
+    // 32-bit "compressed class pointer" mode
+    _base = nullptr;
+    _shift = 0;
+    // as our "protection zone", we just assume the lowest protected parts of
+    // the user address space.
+    _protection_zone_size = os::vm_min_address();
+#endif // _LP64
   }

   calc_lowest_highest_narrow_klass_id();

-  // Initialize klass decode mode and check compatibility with decode instructions
+  // Initialize JIT-specific decoding settings
   if (!set_klass_decode_mode()) {

     // Give fatal error if this is a specified address
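With base null and shift zero, the only guard against dereferencing a null-ish narrowKlass is the low, never-mappable part of the address space that the code declares as the protection zone. A sketch of the resulting check (zone size assumed 4 KB; HotSpot takes it from os::vm_min_address()):

#include <cassert>
#include <cstdint>

const uintptr_t protection_zone_size = 4096; // assumed os::vm_min_address()

// Mirrors the shape of is_in_protection_zone() when base == 0.
static bool is_in_protection_zone(uintptr_t addr) {
  return addr < protection_zone_size;
}

int main() {
  assert(is_in_protection_zone(0x10));        // a "null-ish" narrowKlass
  assert(!is_in_protection_zone(0x08040000)); // a normal Klass address
  return 0;
}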
@@ -288,9 +313,8 @@ void CompressedKlassPointers::initialize(address addr, size_t len) {
               p2i(_base), _shift);
     }
   }
-#ifdef ASSERT
-  sanity_check_after_initialization();
-#endif
+
+  DEBUG_ONLY(sanity_check_after_initialization();)
 }

 void CompressedKlassPointers::print_mode(outputStream* st) {
@@ -341,4 +365,3 @@ bool CompressedKlassPointers::is_in_protection_zone(address addr) {
          (addr >= base() && addr < base() + _protection_zone_size) : false;
 }

-#endif // _LP64
@@ -143,6 +143,7 @@ class CompressedKlassPointers : public AllStatic {
   static char* reserve_address_space_for_unscaled_encoding(size_t size, bool aslr);
   static char* reserve_address_space_for_zerobased_encoding(size_t size, bool aslr);
   static char* reserve_address_space_for_16bit_move(size_t size, bool aslr);
+
   static void calc_lowest_highest_narrow_klass_id();

 #ifdef ASSERT
@@ -187,12 +188,16 @@ public:
   // The maximum possible shift; the actual shift employed later can be smaller (see initialize())
   static int max_shift() { check_init(_max_shift); return _max_shift; }

-  // Returns the maximum encoding range, given the current geometry (narrow klass bit size and shift)
-  static size_t max_encoding_range_size() { return nth_bit(narrow_klass_pointer_bits() + max_shift()); }
-
-  // Returns the maximum allowed klass range size.
+  // Returns the maximum allowed klass range size. It is calculated from the length of the encoding range
+  // resulting from the current encoding settings (base, shift), capped to a certain max. value.
   static size_t max_klass_range_size();

+  // On 64-bit, we need the class space to confine Klass structures to the encoding range, which is determined
+  // by bit size of narrowKlass IDs and the shift. On 32-bit, we support compressed class pointers only
+  // "pro-forma": narrowKlass IDs have the same size as addresses (32 bits), and therefore the encoding range is
+  // equal to the address space size. Here, we don't need a class space.
+  static constexpr bool needs_class_space() { return LP64_ONLY(true) NOT_LP64(false); }
+
   // Reserve a range of memory that is to contain Klass structures which are referenced by narrow Klass IDs.
   // If optimize_for_zero_base is true, the implementation will attempt to reserve optimized for zero-based encoding.
   static char* reserve_address_space_for_compressed_classes(size_t size, bool aslr, bool optimize_for_zero_base);
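The needs_class_space() rule restated: a class space is needed exactly when pointers are wider than a narrowKlass, which is also what sanity_check_after_initialization() asserts above. In sketch form:

#include <cstdio>

int main() {
  const int address_bits = (int)(sizeof(void*) * 8); // BytesPerWord * BitsPerByte
  const int narrow_klass_pointer_bits = 32;          // assumed non-COH value
  // 64-bit: 64 > 32 -> true (confine Klass structures to a class space).
  // 32-bit: 32 > 32 -> false (the whole address space is the encoding range).
  printf("needs_class_space = %s\n",
         address_bits > narrow_klass_pointer_bits ? "true" : "false");
  return 0;
}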
@@ -201,6 +206,7 @@ public:
   // set this encoding scheme. Used by CDS at runtime to re-instate the scheme used to pre-compute klass ids for
   // archived heap objects. In this case, we don't have the freedom to choose base and shift; they are handed to
   // us from CDS.
+  // Note: CDS with +UCCP for 32-bit currently unsupported.
   static void initialize_for_given_encoding(address addr, size_t len, address requested_base, int requested_shift);

   // Given an address range [addr, addr+len) which the encoding is supposed to
@@ -99,8 +99,12 @@ inline bool CompressedKlassPointers::is_valid_narrow_klass_id(narrowKlass nk) {
 }

 inline address CompressedKlassPointers::encoding_range_end() {
+#ifdef _LP64
   const int max_bits = narrow_klass_pointer_bits() + _shift;
   return (address)((uintptr_t)_base + nth_bit(max_bits));
+#else
+  return (address)SIZE_MAX;
+#endif
 }

 #endif // SHARE_OOPS_COMPRESSEDKLASS_INLINE_HPP
@@ -32,6 +32,7 @@ int ObjLayout::_oop_base_offset_in_bytes = 0;
 bool ObjLayout::_oop_has_klass_gap = false;

 void ObjLayout::initialize() {
+#ifdef _LP64
   assert(_klass_mode == Undefined, "ObjLayout initialized twice");
   if (UseCompactObjectHeaders) {
     _klass_mode = Compact;
@@ -46,4 +47,13 @@ void ObjLayout::initialize() {
     _oop_base_offset_in_bytes = sizeof(markWord) + sizeof(Klass*);
     _oop_has_klass_gap = false;
   }
+#else
+  assert(_klass_mode == Undefined, "ObjLayout initialized twice");
+  assert(!UseCompactObjectHeaders, "COH unsupported on 32-bit");
+  // We support +-UseCompressedClassPointers on 32-bit, but the layout
+  // is exactly the same as it was with uncompressed klass pointers
+  _klass_mode = UseCompressedClassPointers ? Compressed : Uncompressed;
+  _oop_base_offset_in_bytes = sizeof(markWord) + sizeof(Klass*);
+  _oop_has_klass_gap = false;
+#endif
 }
@@ -38,11 +38,7 @@ inline ObjLayout::Mode ObjLayout::klass_mode() {
     assert(_klass_mode == Uncompressed, "Klass mode does not match flags");
   }
 #endif
-#ifdef _LP64
   return _klass_mode;
-#else
-  return Uncompressed;
-#endif
 }

 #endif // SHARE_OOPS_OBJLAYOUT_INLINE_HPP
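What the ObjLayout hunks say about the 32-bit header, in struct form: with or without +UseCompressedClassPointers, the object starts with a markWord followed by a full-width Klass slot, so offsets do not change and there is no klass gap. A sketch (field widths assumed for a 32-bit VM):

#include <cstdint>
#include <cstdio>

struct Header32 {          // model of a 32-bit object header
  uint32_t mark;           // markWord
  uint32_t klass;          // Klass* and narrowKlass are both 32 bits here
};

int main() {
  // Matches '_oop_base_offset_in_bytes = sizeof(markWord) + sizeof(Klass*)'
  printf("oop_base_offset_in_bytes = %zu, klass gap = none\n", sizeof(Header32));
  return 0;
}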
@@ -124,10 +124,6 @@ const size_t minimumSymbolTableSize = 1024;
           "Use 32-bit object references in 64-bit VM. "              \
           "lp64_product means flag is always constant in 32 bit VM") \
                                                                      \
-  product(bool, UseCompressedClassPointers, true,                    \
-          "(Deprecated) Use 32-bit class pointers in 64-bit VM. "    \
-          "lp64_product means flag is always constant in 32 bit VM") \
-                                                                     \
   product(bool, UseCompactObjectHeaders, false,                      \
           "Use compact 64-bit object headers in 64-bit VM")          \
                                                                      \
@@ -146,7 +142,6 @@ const size_t minimumSymbolTableSize = 1024;
         range,                                                       \
         constraint)
 const bool UseCompressedOops = false;
-const bool UseCompressedClassPointers = false;
 const bool UseCompactObjectHeaders = false;
 const int ObjectAlignmentInBytes = 8;

@@ -1398,6 +1393,9 @@ const int ObjectAlignmentInBytes = 8;
           "Maximum size of Metaspaces (in bytes)")                   \
           constraint(MaxMetaspaceSizeConstraintFunc,AfterErgo)       \
                                                                      \
+  product(bool, UseCompressedClassPointers, true,                    \
+          "(Deprecated) Use 32-bit class pointers.")                 \
+                                                                     \
   product(size_t, CompressedClassSpaceSize, 1*G,                     \
           "Maximum size of class area in Metaspace when compressed " \
           "class pointers are used")                                 \
@@ -1061,6 +1061,15 @@ const intptr_t OneBit = 1; // only right_most bit set in a word
 #define nth_bit(n)        (((n) >= BitsPerWord) ? 0 : (OneBit << (n)))
 #define right_n_bits(n)   (nth_bit(n) - 1)

+// same as nth_bit(n), but allows handing in a type as template parameter. Allows
+// us to use nth_bit with 64-bit types on 32-bit platforms
+template<class T> inline T nth_bit_typed(int n) {
+  return ((T)1) << n;
+}
+template<class T> inline T right_n_bits_typed(int n) {
+  return nth_bit_typed<T>(n) - 1;
+}
+
 // bit-operations using a mask m
 inline void set_bits    (intptr_t& x, intptr_t m) { x |= m; }
 inline void clear_bits  (intptr_t& x, intptr_t m) { x &= ~m; }
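Why the typed variants are needed: the nth_bit macro computes in word width, so on a 32-bit platform it cannot form a bit at position >= 32 (it deliberately yields 0). With an explicit 64-bit type the same expression works. A self-contained copy of the templates above, with a usage example:

#include <cstdint>
#include <cstdio>

template<class T> inline T nth_bit_typed(int n)      { return ((T)1) << n; }
template<class T> inline T right_n_bits_typed(int n) { return nth_bit_typed<T>(n) - 1; }

int main() {
  // On 32-bit, nth_bit(35) would be 0; the typed form gives the real value.
  printf("nth_bit_typed<uint64_t>(35)      = 0x%llx\n",
         (unsigned long long)nth_bit_typed<uint64_t>(35));
  printf("right_n_bits_typed<uint64_t>(35) = 0x%llx\n",
         (unsigned long long)right_n_bits_typed<uint64_t>(35));
  return 0;
}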
@@ -1382,6 +1382,7 @@ void VMError::print_vm_info(outputStream* st) {
     CompressedOops::print_mode(st);
     st->cr();
   }
+#endif

   // STEP("printing compressed class ptrs mode")
   if (UseCompressedClassPointers) {
|
||||
CompressedKlassPointers::print_mode(st);
|
||||
st->cr();
|
||||
}
|
||||
#endif
|
||||
|
||||
// Take heap lock over heap, GC and metaspace printing so that information
|
||||
// is consistent.
|
||||
|
||||
@@ -25,6 +25,7 @@

 #include "memory/metaspace.hpp"
 #include "memory/metaspaceUtils.hpp"
+#include "oops/compressedKlass.hpp"
 #include "unittest.hpp"

 TEST_VM(metaspace, MetaspaceUtils_reserved) {
@@ -37,15 +38,13 @@ TEST_VM(metaspace, MetaspaceUtils_reserved) {
 }

 TEST_VM(metaspace, MetaspaceUtils_reserved_compressed_class_pointers) {
-  if (!UseCompressedClassPointers) {
-    return;
+  if (UseCompressedClassPointers && CompressedKlassPointers::needs_class_space()) {
+    size_t reserved = MetaspaceUtils::reserved_bytes();
+    EXPECT_GT(reserved, 0UL);
+    size_t reserved_class = MetaspaceUtils::reserved_bytes(Metaspace::ClassType);
+    EXPECT_GT(reserved_class, 0UL);
+    EXPECT_LE(reserved_class, reserved);
   }
-  size_t reserved = MetaspaceUtils::reserved_bytes();
-  EXPECT_GT(reserved, 0UL);
-
-  size_t reserved_class = MetaspaceUtils::reserved_bytes(Metaspace::ClassType);
-  EXPECT_GT(reserved_class, 0UL);
-  EXPECT_LE(reserved_class, reserved);
 }

 TEST_VM(metaspace, MetaspaceUtils_committed) {
@@ -61,15 +60,13 @@ TEST_VM(metaspace, MetaspaceUtils_committed) {
 }

 TEST_VM(metaspace, MetaspaceUtils_committed_compressed_class_pointers) {
-  if (!UseCompressedClassPointers) {
-    return;
+  if (UseCompressedClassPointers && CompressedKlassPointers::needs_class_space()) {
+    size_t committed = MetaspaceUtils::committed_bytes();
+    EXPECT_GT(committed, 0UL);
+    size_t committed_class = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
+    EXPECT_GT(committed_class, 0UL);
+    EXPECT_LE(committed_class, committed);
   }
-  size_t committed = MetaspaceUtils::committed_bytes();
-  EXPECT_GT(committed, 0UL);
-
-  size_t committed_class = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
-  EXPECT_GT(committed_class, 0UL);
-  EXPECT_LE(committed_class, committed);
 }

 TEST_VM(metaspace, MetaspaceUtils_non_compressed_class_pointers) {
@@ -105,7 +102,7 @@ TEST_VM(MetaspaceUtils, MetaspaceUtils_get_statistics) {
   check_metaspace_stats_are_not_null(combined_stats.non_class_space_stats());
   check_metaspace_stats_are_consistent(combined_stats.non_class_space_stats());

-  if (UseCompressedClassPointers) {
+  if (CompressedKlassPointers::needs_class_space() && UseCompressedClassPointers) {
     check_metaspace_stats_are_not_null(combined_stats.class_space_stats());
     check_metaspace_stats_are_consistent(combined_stats.class_space_stats());
   } else {
@@ -37,6 +37,7 @@ TEST_VM(CompressedKlass, basics) {
   ASSERT_LT(CompressedKlassPointers::klass_range_start(), CompressedKlassPointers::klass_range_end());
   ASSERT_LE(CompressedKlassPointers::klass_range_end(), CompressedKlassPointers::encoding_range_end());

+#ifdef _LP64
   switch (CompressedKlassPointers::shift()) {
     case 0:
       ASSERT_EQ(CompressedKlassPointers::encoding_range_end() - CompressedKlassPointers::base(), (ptrdiff_t)(4 * G));
@@ -48,6 +49,10 @@ TEST_VM(CompressedKlass, basics) {
       const size_t expected_size = nth_bit(CompressedKlassPointers::narrow_klass_pointer_bits() + CompressedKlassPointers::shift());
       ASSERT_EQ(CompressedKlassPointers::encoding_range_end() - CompressedKlassPointers::base(), (ptrdiff_t)expected_size);
   }
+#else
+  ASSERT_EQ(CompressedKlassPointers::base(), (address)0);
+  ASSERT_EQ(CompressedKlassPointers::encoding_range_end(), (address)(UINT_MAX));
+#endif // _LP64
 }

 TEST_VM(CompressedKlass, ccp_off) {
@@ -33,6 +33,7 @@

 /* @test id=use-zero-based-encoding
  * @library /test/lib
+ * @requires vm.bits == "64"
  * @modules java.base/jdk.internal.misc
  *          java.xml
  * @run main/native GTestWrapper --gtest_filter=CompressedKlass* -XX:-UseCompactObjectHeaders -Xlog:metaspace* -Xmx6g -Xms128m -Xshare:off -XX:CompressedClassSpaceSize=128m
@@ -40,6 +41,7 @@

 /* @test id=ccp_off
  * @library /test/lib
+ * @requires vm.bits == "64"
  * @modules java.base/jdk.internal.misc
  *          java.xml
  * @run main/native GTestWrapper --gtest_filter=CompressedKlass* -XX:-UseCompressedClassPointers -Xlog:metaspace* -Xmx6g -Xms128m
@@ -47,6 +49,7 @@

 /* @test id=use-zero-based-encoding-coh
  * @library /test/lib
+ * @requires vm.bits == "64"
  * @modules java.base/jdk.internal.misc
  *          java.xml
  * @run main/native GTestWrapper --gtest_filter=CompressedKlass* -XX:+UseCompactObjectHeaders -Xlog:metaspace* -Xmx6g -Xms128m -Xshare:off -XX:CompressedClassSpaceSize=128m
@@ -54,7 +57,20 @@

 /* @test id=use-zero-based-encoding-coh-large-class-space
  * @library /test/lib
+ * @requires vm.bits == "64"
  * @modules java.base/jdk.internal.misc
  *          java.xml
  * @run main/native GTestWrapper --gtest_filter=CompressedKlass* -XX:+UseCompactObjectHeaders -Xlog:metaspace* -Xmx6g -Xms128m -Xshare:off -XX:CompressedClassSpaceSize=4g
  */
+
+/* Very basic test on 32-bit, where we only support a pro-forma Compressed Class Pointers implementation without
+ * class space.
+ */
+
+/* @test id=32-bit
+ * @library /test/lib
+ * @requires vm.bits == "32"
+ * @modules java.base/jdk.internal.misc
+ *          java.xml
+ * @run main/native GTestWrapper --gtest_filter=CompressedKlass* -Xlog:metaspace* -Xmx128m -Xms128m -Xshare:off
+ */
@@ -23,14 +23,26 @@


 /*
- * @test
+ * @test id=coh-off
  * @summary Test that we see VM configs reported correctly in hs_err file
  * @library /test/lib
  * @requires vm.flagless
  * @requires vm.debug
  * @modules java.base/jdk.internal.misc
  *          java.management
- * @run driver TestVMConfigInHsErrFile
+ * @run driver TestVMConfigInHsErrFile coh-off
+ */
+
+/*
+ * @test id=coh-on
+ * @summary Test that we see VM configs reported correctly in hs_err file
+ * @library /test/lib
+ * @requires vm.bits == "64"
+ * @requires vm.flagless
+ * @requires vm.debug
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ * @run driver TestVMConfigInHsErrFile coh-on
  */

 import jdk.test.lib.process.OutputAnalyzer;
@@ -42,8 +54,10 @@ import java.util.regex.Pattern;
 public class TestVMConfigInHsErrFile {

   public static void main(String[] args) throws Exception {
-    testCompactObjectHeaders();
-    testCompressedClassPointers();
+    switch (args[0]) {
+      case "coh-on" -> testCompactObjectHeaders();
+      case "coh-off" -> testCompressedClassPointers();
+    }
   }

   private static void testCompactObjectHeaders() throws Exception {