8350770: [BACKOUT] Protection zone for easier detection of accidental zero-nKlass use

Reviewed-by: mdoerr, rkennke

Parent: bd112c4fab
Commit: 3e46480dcf
@@ -153,6 +153,7 @@ void ArchiveBuilder::SourceObjList::relocate(int i, ArchiveBuilder* builder) {
 ArchiveBuilder::ArchiveBuilder() :
   _current_dump_region(nullptr),
   _buffer_bottom(nullptr),
+  _num_dump_regions_used(0),
   _requested_static_archive_bottom(nullptr),
   _requested_static_archive_top(nullptr),
   _requested_dynamic_archive_bottom(nullptr),
@@ -160,7 +161,6 @@ ArchiveBuilder::ArchiveBuilder() :
   _mapped_static_archive_bottom(nullptr),
   _mapped_static_archive_top(nullptr),
   _buffer_to_requested_delta(0),
-  _pz_region("pz", MAX_SHARED_DELTA), // protection zone -- used only during dumping; does NOT exist in cds archive.
   _rw_region("rw", MAX_SHARED_DELTA),
   _ro_region("ro", MAX_SHARED_DELTA),
   _ptrmap(mtClassShared),
@@ -323,14 +323,9 @@ address ArchiveBuilder::reserve_buffer() {
   _shared_rs = rs;

   _buffer_bottom = buffer_bottom;

-  if (CDSConfig::is_dumping_static_archive()) {
-    _current_dump_region = &_pz_region;
-    _current_dump_region->init(&_shared_rs, &_shared_vs);
-  } else {
-    _current_dump_region = &_rw_region;
-    _current_dump_region->init(&_shared_rs, &_shared_vs);
-  }
+  _current_dump_region = &_rw_region;
+  _num_dump_regions_used = 1;
+  _current_dump_region->init(&_shared_rs, &_shared_vs);

   ArchivePtrMarker::initialize(&_ptrmap, &_shared_vs);

@@ -371,8 +366,7 @@ address ArchiveBuilder::reserve_buffer() {
   if (CDSConfig::is_dumping_static_archive()) {
     // We don't want any valid object to be at the very bottom of the archive.
     // See ArchivePtrMarker::mark_pointer().
-    _pz_region.allocate(MetaspaceShared::protection_zone_size());
-    start_dump_region(&_rw_region);
+    rw_region()->allocate(16);
   }

   return buffer_bottom;
@@ -550,6 +544,7 @@ ArchiveBuilder::FollowMode ArchiveBuilder::get_follow_mode(MetaspaceClosure::Ref
 void ArchiveBuilder::start_dump_region(DumpRegion* next) {
   current_dump_region()->pack(next);
   _current_dump_region = next;
+  _num_dump_regions_used ++;
 }

 char* ArchiveBuilder::ro_strdup(const char* s) {
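For orientation, a minimal standalone sketch of the layout difference these hunks revert (illustrative only, not JDK code; the 4096-byte zone is a hypothetical stand-in for MetaspaceShared::protection_zone_size()):

#include <cstdio>

int main() {
  const long zone = 4096; // hypothetical protection_zone_size()
  // Backed-out layout: [pz][rw][ro] -- the rw region starts one zone above
  // SharedBaseAddress, so no archived object can sit at the encoding base.
  long first_object_with_pz = zone;
  // Restored layout: [rw][ro] -- rw starts at the bottom, but its first
  // 16 bytes stay unused (see ArchivePtrMarker::mark_pointer()).
  long first_object_without_pz = 16;
  std::printf("first object: +%ld (pz) vs +%ld (restored)\n",
              first_object_with_pz, first_object_without_pz);
}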
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -96,6 +96,7 @@ class ArchiveBuilder : public StackObj {
 protected:
   DumpRegion* _current_dump_region;
   address _buffer_bottom; // for writing the contents of rw/ro regions
+  int _num_dump_regions_used;

   // These are the addresses where we will request the static and dynamic archives to be
   // mapped at run time. If the request fails (due to ASLR), we will map the archives at
@@ -209,12 +210,6 @@ private:
   ReservedSpace _shared_rs;
   VirtualSpace _shared_vs;

-  // The "pz" region is used only during static dumps to reserve an unused space between SharedBaseAddress and
-  // the bottom of the rw region. During runtime, this space will be filled with a reserved area that disallows
-  // read/write/exec, so we can track for bad CompressedKlassPointers encoding.
-  // Note: this region does NOT exist in the cds archive.
-  DumpRegion _pz_region;
-
   DumpRegion _rw_region;
   DumpRegion _ro_region;

@@ -275,6 +270,9 @@ private:

 protected:
   virtual void iterate_roots(MetaspaceClosure* it) = 0;

+  static const int _total_dump_regions = 2;
+
   void start_dump_region(DumpRegion* next);

 public:
@@ -369,7 +367,6 @@ public:
   void remember_embedded_pointer_in_enclosing_obj(MetaspaceClosure::Ref* ref);
   static void serialize_dynamic_archivable_items(SerializeClosure* soc);

-  DumpRegion* pz_region() { return &_pz_region; }
   DumpRegion* rw_region() { return &_rw_region; }
   DumpRegion* ro_region() { return &_ro_region; }
@@ -73,39 +73,34 @@ void ArchivePtrMarker::initialize(CHeapBitMap* ptrmap, VirtualSpace* vs) {
 }

 void ArchivePtrMarker::initialize_rw_ro_maps(CHeapBitMap* rw_ptrmap, CHeapBitMap* ro_ptrmap) {
-  address* buff_bottom = (address*)ArchiveBuilder::current()->buffer_bottom();
-  address* rw_bottom = (address*)ArchiveBuilder::current()->rw_region()->base();
-  address* ro_bottom = (address*)ArchiveBuilder::current()->ro_region()->base();
-
-  // The bit in _ptrmap that cover the very first word in the rw/ro regions.
-  size_t rw_start = rw_bottom - buff_bottom;
-  size_t ro_start = ro_bottom - buff_bottom;
-
-  // The number of bits used by the rw/ro ptrmaps. We might have lots of zero
-  // bits at the bottom and top of rw/ro ptrmaps, but these zeros will be
-  // removed by FileMapInfo::write_bitmap_region().
-  size_t rw_size = ArchiveBuilder::current()->rw_region()->used() / sizeof(address);
-  size_t ro_size = ArchiveBuilder::current()->ro_region()->used() / sizeof(address);
-
-  // The last (exclusive) bit in _ptrmap that covers the rw/ro regions.
-  // Note: _ptrmap is dynamically expanded only when an actual pointer is written, so
-  // it may not be as large as we want.
-  size_t rw_end = MIN2<size_t>(rw_start + rw_size, _ptrmap->size());
-  size_t ro_end = MIN2<size_t>(ro_start + ro_size, _ptrmap->size());
-
-  rw_ptrmap->initialize(rw_size);
-  ro_ptrmap->initialize(ro_size);
-
-  for (size_t rw_bit = rw_start; rw_bit < rw_end; rw_bit++) {
-    rw_ptrmap->at_put(rw_bit - rw_start, _ptrmap->at(rw_bit));
-  }
-
-  for (size_t ro_bit = ro_start; ro_bit < ro_end; ro_bit++) {
-    ro_ptrmap->at_put(ro_bit - ro_start, _ptrmap->at(ro_bit));
-  }
+  address* rw_bottom = (address*)ArchiveBuilder::current()->rw_region()->base();
+  address* ro_bottom = (address*)ArchiveBuilder::current()->ro_region()->base();
+
+  _rw_ptrmap = rw_ptrmap;
+  _ro_ptrmap = ro_ptrmap;
+
+  size_t rw_size = ArchiveBuilder::current()->rw_region()->used() / sizeof(address);
+  size_t ro_size = ArchiveBuilder::current()->ro_region()->used() / sizeof(address);
+  // ro_start is the first bit in _ptrmap that covers the pointer that would sit at ro_bottom.
+  // E.g., if rw_bottom = (address*)100
+  //          ro_bottom = (address*)116
+  //       then for 64-bit platform:
+  //          ro_start = ro_bottom - rw_bottom = (116 - 100) / sizeof(address) = 2;
+  size_t ro_start = ro_bottom - rw_bottom;
+
+  // Note: ptrmap is big enough only to cover the last pointer in ro_region.
+  // See ArchivePtrMarker::compact()
+  _rw_ptrmap->initialize(rw_size);
+  _ro_ptrmap->initialize(_ptrmap->size() - ro_start);
+
+  for (size_t rw_bit = 0; rw_bit < _rw_ptrmap->size(); rw_bit++) {
+    _rw_ptrmap->at_put(rw_bit, _ptrmap->at(rw_bit));
+  }
+
+  for (size_t ro_bit = ro_start; ro_bit < _ptrmap->size(); ro_bit++) {
+    _ro_ptrmap->at_put(ro_bit - ro_start, _ptrmap->at(ro_bit));
+  }
+  assert(_ptrmap->size() - ro_start == _ro_ptrmap->size(), "must be");
 }

 void ArchivePtrMarker::mark_pointer(address* ptr_loc) {
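The restored arithmetic is easiest to check against the worked example in its own comment. A standalone sketch (not JDK code) reproducing that calculation:

#include <cstdio>
#include <cstdint>

int main() {
  // Addresses from the restored comment's example; hypothetical values.
  uintptr_t rw_bottom = 100;
  uintptr_t ro_bottom = 116;
  // The JDK code subtracts two address* pointers, which counts
  // sizeof(address)-sized slots on a 64-bit platform; equivalently:
  long ro_start = (long)((ro_bottom - rw_bottom) / sizeof(void*));
  std::printf("ro_start = %ld (expected 2)\n", ro_start);
}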
@@ -170,6 +170,7 @@ public:

   post_dump();

+  assert(_num_dump_regions_used == _total_dump_regions, "must be");
   verify_universe("After CDS dynamic dump");
 }
@@ -400,7 +400,7 @@ public:
   // The offset of the (exclusive) end of the last core region in this archive, relative to SharedBaseAddress
   size_t mapping_end_offset() const { return last_core_region()->mapping_end_offset(); }

-  char* mapped_base() const { return header()->mapped_base_address(); }
+  char* mapped_base() const { return first_core_region()->mapped_base(); }
   char* mapped_end() const { return last_core_region()->mapped_end(); }

   // Non-zero if the archive needs to be mapped a non-default location due to ASLR.
@@ -147,10 +147,6 @@ size_t MetaspaceShared::core_region_alignment() {
   return os::cds_core_region_alignment();
 }

-size_t MetaspaceShared::protection_zone_size() {
-  return os::cds_core_region_alignment();
-}
-
 static bool shared_base_valid(char* shared_base) {
   // We check user input for SharedBaseAddress at dump time.
@@ -1284,7 +1280,6 @@ MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, File

   ReservedSpace total_space_rs, archive_space_rs, class_space_rs;
   MapArchiveResult result = MAP_ARCHIVE_OTHER_FAILURE;
-  size_t prot_zone_size = 0;
   char* mapped_base_address = reserve_address_space_for_archives(static_mapinfo,
                                                                  dynamic_mapinfo,
                                                                  use_requested_addr,
@@ -1296,29 +1291,14 @@ MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, File
     log_debug(cds)("Failed to reserve spaces (use_requested_addr=%u)", (unsigned)use_requested_addr);
   } else {

-    if (Metaspace::using_class_space()) {
-      prot_zone_size = protection_zone_size();
-#ifdef ASSERT
-      // Before mapping the core regions into the newly established address space, we mark
-      // start and the end of the future protection zone with canaries. That way we easily
-      // catch mapping errors (accidentally mapping data into the future protection zone).
-      os::commit_memory(mapped_base_address, prot_zone_size, false);
-      *(mapped_base_address) = 'P';
-      *(mapped_base_address + prot_zone_size - 1) = 'P';
-#endif
-    }
-
#ifdef ASSERT
     // Some sanity checks after reserving address spaces for archives
     // and class space.
     assert(archive_space_rs.is_reserved(), "Sanity");
     if (Metaspace::using_class_space()) {
-      assert(archive_space_rs.base() == mapped_base_address &&
-             archive_space_rs.size() > protection_zone_size(),
-             "Archive space must lead and include the protection zone");
       // Class space must closely follow the archive space. Both spaces
       // must be aligned correctly.
-      assert(class_space_rs.is_reserved() && class_space_rs.size() > 0,
+      assert(class_space_rs.is_reserved(),
              "A class space should have been reserved");
       assert(class_space_rs.base() >= archive_space_rs.end(),
              "class space should follow the cds archive space");
@@ -1331,9 +1311,8 @@ MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, File
     }
#endif // ASSERT

-    log_info(cds)("Reserved archive_space_rs [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (%zu) bytes%s",
-                  p2i(archive_space_rs.base()), p2i(archive_space_rs.end()), archive_space_rs.size(),
-                  (prot_zone_size > 0 ? " (includes protection zone)" : ""));
+    log_info(cds)("Reserved archive_space_rs [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (%zu) bytes",
+                  p2i(archive_space_rs.base()), p2i(archive_space_rs.end()), archive_space_rs.size());
     log_info(cds)("Reserved class_space_rs   [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (%zu) bytes",
                   p2i(class_space_rs.base()), p2i(class_space_rs.end()), class_space_rs.size());

@@ -1405,40 +1384,38 @@ MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, File
   if (result == MAP_ARCHIVE_SUCCESS) {
     SharedBaseAddress = (size_t)mapped_base_address;
#ifdef _LP64
-    if (Metaspace::using_class_space()) {
-      assert(*(mapped_base_address) == 'P' &&
-             *(mapped_base_address + prot_zone_size - 1) == 'P',
-             "Protection zone was overwritten?");
-      // Set up ccs in metaspace.
-      Metaspace::initialize_class_space(class_space_rs);
-
-      // Set up compressed Klass pointer encoding: the encoding range must
-      // cover both archive and class space.
-      const address encoding_base = (address)mapped_base_address;
-      const address klass_range_start = encoding_base + prot_zone_size;
-      const size_t klass_range_size = (address)class_space_rs.end() - klass_range_start;
-      if (INCLUDE_CDS_JAVA_HEAP || UseCompactObjectHeaders) {
-        // The CDS archive may contain narrow Klass IDs that were precomputed at archive generation time:
-        // - every archived java object header (only if INCLUDE_CDS_JAVA_HEAP)
-        // - every archived Klass' prototype (only if +UseCompactObjectHeaders)
-        //
-        // In order for those IDs to still be valid, we need to dictate base and shift: base should be the
-        // mapping start (including protection zone), shift should be the shift used at archive generation time.
-        CompressedKlassPointers::initialize_for_given_encoding(
-          klass_range_start, klass_range_size,
-          encoding_base, ArchiveBuilder::precomputed_narrow_klass_shift() // precomputed encoding, see ArchiveBuilder
-        );
-      } else {
-        // Let JVM freely chose encoding base and shift
-        CompressedKlassPointers::initialize(klass_range_start, klass_range_size);
-      }
-      CompressedKlassPointers::establish_protection_zone(encoding_base, prot_zone_size);
-
-      // map_or_load_heap_region() compares the current narrow oop and klass encodings
-      // with the archived ones, so it must be done after all encodings are determined.
-      static_mapinfo->map_or_load_heap_region();
-    }
+    if (Metaspace::using_class_space()) {
+      // Set up ccs in metaspace.
+      Metaspace::initialize_class_space(class_space_rs);
+
+      // Set up compressed Klass pointer encoding: the encoding range must
+      // cover both archive and class space.
+      address cds_base = (address)static_mapinfo->mapped_base();
+      address ccs_end = (address)class_space_rs.end();
+      assert(ccs_end > cds_base, "Sanity check");
+      if (INCLUDE_CDS_JAVA_HEAP || UseCompactObjectHeaders) {
+        // The CDS archive may contain narrow Klass IDs that were precomputed at archive generation time:
+        // - every archived java object header (only if INCLUDE_CDS_JAVA_HEAP)
+        // - every archived Klass' prototype (only if +UseCompactObjectHeaders)
+        //
+        // In order for those IDs to still be valid, we need to dictate base and shift: base should be the
+        // mapping start, shift the shift used at archive generation time.
+        address precomputed_narrow_klass_base = cds_base;
+        const int precomputed_narrow_klass_shift = ArchiveBuilder::precomputed_narrow_klass_shift();
+        CompressedKlassPointers::initialize_for_given_encoding(
+          cds_base, ccs_end - cds_base, // Klass range
+          precomputed_narrow_klass_base, precomputed_narrow_klass_shift // precomputed encoding, see ArchiveBuilder
+        );
+      } else {
+        // Let JVM freely chose encoding base and shift
+        CompressedKlassPointers::initialize(
+          cds_base, ccs_end - cds_base // Klass range
+        );
+      }
+      // map_or_load_heap_region() compares the current narrow oop and klass encodings
+      // with the archived ones, so it must be done after all encodings are determined.
+      static_mapinfo->map_or_load_heap_region();
+    }
#endif // _LP64
     log_info(cds)("initial optimized module handling: %s", CDSConfig::is_using_optimized_module_handling() ? "enabled" : "disabled");
     log_info(cds)("initial full module graph: %s", CDSConfig::is_using_full_module_graph() ? "enabled" : "disabled");
@@ -1520,6 +1497,7 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
   const size_t archive_space_alignment = core_region_alignment();

   // Size and requested location of the archive_space_rs (for both static and dynamic archives)
+  assert(static_mapinfo->mapping_base_offset() == 0, "Must be");
   size_t archive_end_offset = (dynamic_mapinfo == nullptr) ? static_mapinfo->mapping_end_offset() : dynamic_mapinfo->mapping_end_offset();
   size_t archive_space_size = align_up(archive_end_offset, archive_space_alignment);
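The reverted canary scheme above is straightforward to sketch in isolation: commit the future protection zone, write marker bytes at both ends, verify them after the archive regions are mapped, then protect the zone. A minimal POSIX illustration (assumptions: Linux-style mmap/mprotect, a hypothetical one-page zone; not JDK code):

#include <sys/mman.h>
#include <cassert>
#include <cstdio>

int main() {
  const size_t zone = 4096; // hypothetical zone size, one page
  char* base = (char*)mmap(nullptr, zone, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) return 1;
  base[0] = 'P';            // canaries at both ends of the zone
  base[zone - 1] = 'P';
  // ... mapping of the archive regions would happen here ...
  assert(base[0] == 'P' && base[zone - 1] == 'P'); // "Protection zone was overwritten?"
  // Finally make the zone inaccessible so any decode(0) dereference faults:
  mprotect(base, zone, PROT_NONE);
  std::printf("zone protected at %p\n", (void*)base);
  munmap(base, zone);
  return 0;
}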
@@ -139,7 +139,6 @@ public:
   // Alignment for the 2 core CDS regions (RW/RO) only.
   // (Heap region alignments are decided by GC).
   static size_t core_region_alignment();
-  static size_t protection_zone_size();
   static void rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread* thread, InstanceKlass* ik);
   // print loaded classes names to file.
   static void dump_loaded_classes(const char* file_name, TRAPS);
@@ -70,10 +70,7 @@ typedef struct CDSFileMapRegion {
   size_t _ptrmap_offset;     // Bitmap for relocating native pointer fields in archived heap objects.
                              // (The base address is the bottom of the BM region).
   size_t _ptrmap_size_in_bits;
-  char*  _mapped_base;       // Actually mapped address used for mapping the core regions. At that address the
-                             // zero nklass protection zone is established; following that (at offset
-                             // MetaspaceShared::protection_zone_size()) the lowest core region (rw for the
-                             // static archive) is mapped.
+  char*  _mapped_base;       // Actually mapped address (null if this region is not mapped).
   bool   _in_reserved_space; // Is this region in a ReservedSpace
 } CDSFileMapRegion;
@@ -1,7 +1,7 @@
 /*
  * Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2017, 2021 SAP SE. All rights reserved.
- * Copyright (c) 2023, 2025, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2023, 2024, Red Hat, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,7 +37,6 @@
 #include "memory/metaspace/chunkManager.hpp"
 #include "memory/metaspace/commitLimiter.hpp"
 #include "memory/metaspace/internalStats.hpp"
-#include "memory/metaspace/metachunk.hpp"
 #include "memory/metaspace/metaspaceCommon.hpp"
 #include "memory/metaspace/metaspaceContext.hpp"
 #include "memory/metaspace/metaspaceReporter.hpp"
@@ -805,37 +804,29 @@ void Metaspace::global_initialize() {
       // Set up compressed class pointer encoding.
       // In CDS=off mode, we give the JVM some leeway to choose a favorable base/shift combination.
       CompressedKlassPointers::initialize((address)rs.base(), rs.size());
-
-      // After narrowKlass encoding scheme is decided: if the encoding base points to class space start,
-      // establish a protection zone. Accidentally decoding a zero nKlass ID and then using it will result
-      // in an immediate segmentation fault instead of a delayed error much later.
-      if (CompressedKlassPointers::base() == (address)rs.base()) {
-        // Let the protection zone be a whole commit granule. Otherwise, buddy allocator may later place neighboring
-        // chunks in the same granule, see that the granule is not yet committed, and commit it, which would replace
-        // the protection mapping and make the zone readable.
-        // Alternatively, we could commit the chunk right now, but that is a tiny bit more fiddly, since we are not
-        // fully set up yet at this point.
-        const size_t protzone_size = metaspace::Settings::commit_granule_bytes(); // granule size >= page size
-        const size_t protzone_wordsize = protzone_size / BytesPerWord;
-        const metaspace::chunklevel_t lvl = metaspace::chunklevel::level_fitting_word_size(protzone_wordsize);
-        metaspace::Metachunk* const chunk = MetaspaceContext::context_class()->cm()->get_chunk(lvl);
-        const address protzone = (address) chunk->base();
-        assert(protzone == (address)rs.base(), "The very first chunk should be located at the class space start?");
-        assert(chunk->word_size() == protzone_wordsize, "Weird chunk size");
-        CompressedKlassPointers::establish_protection_zone(protzone, protzone_size);
-      } else {
-        assert(CompressedKlassPointers::base() == nullptr, "Zero-based encoding expected");
-      }
     }
-#endif // _LP64
+#endif

   // Initialize non-class virtual space list, and its chunk manager:
   MetaspaceContext::initialize_nonclass_space_context();

   _tracer = new MetaspaceTracer();

+  // We must prevent the very first address of the ccs from being used to store
+  // metadata, since that address would translate to a narrow pointer of 0, and the
+  // VM does not distinguish between "narrow 0 as in null" and "narrow 0 as in start
+  // of ccs".
+  // Before Elastic Metaspace that did not happen due to the fact that every Metachunk
+  // had a header and therefore could not allocate anything at offset 0.
+#ifdef _LP64
+  if (using_class_space()) {
+    // The simplest way to fix this is to allocate a tiny dummy chunk right at the
+    // start of ccs and do not use it for anything.
+    MetaspaceContext::context_class()->cm()->get_chunk(metaspace::chunklevel::HIGHEST_CHUNK_LEVEL);
+  }
+#endif

 #ifdef _LP64
   if (UseCompressedClassPointers) {
     // Note: "cds" would be a better fit but keep this for backward compatibility.
@@ -42,7 +42,6 @@ address CompressedKlassPointers::_klass_range_start = nullptr;
 address CompressedKlassPointers::_klass_range_end = nullptr;
 narrowKlass CompressedKlassPointers::_lowest_valid_narrow_klass_id = (narrowKlass)-1;
 narrowKlass CompressedKlassPointers::_highest_valid_narrow_klass_id = (narrowKlass)-1;
-size_t CompressedKlassPointers::_protection_zone_size = 0;

 #ifdef _LP64

@@ -163,6 +162,11 @@ void CompressedKlassPointers::initialize_for_given_encoding(address addr, size_t
     vm_exit_during_initialization(ss.base());
   }

+  // Note: While it would be technically valid for the encoding base to precede the start of the Klass range,
+  // we never do this here. This is used at CDS runtime to re-instate the scheme used to precompute the
+  // narrow Klass IDs in the archive, and the requested base should point to the start of the Klass range.
+  assert(requested_base == addr, "Invalid requested base");
+
   // Remember Klass range:
   _klass_range_start = addr;
   _klass_range_end = addr + len;
@@ -300,32 +304,9 @@ void CompressedKlassPointers::print_mode(outputStream* st) {
     st->print_cr("Klass Range: " RANGE2FMT, RANGE2FMTARGS(_klass_range_start, _klass_range_end));
     st->print_cr("Klass ID Range: [%u - %u) (%u)", _lowest_valid_narrow_klass_id, _highest_valid_narrow_klass_id + 1,
                  _highest_valid_narrow_klass_id + 1 - _lowest_valid_narrow_klass_id);
-    if (_protection_zone_size > 0) {
-      st->print_cr("Protection zone: " RANGEFMT, RANGEFMTARGS(_base, _protection_zone_size));
-    } else {
-      st->print_cr("No protection zone.");
-    }
   } else {
     st->print_cr("UseCompressedClassPointers off");
   }
 }

-// Protect a zone a the start of the encoding range
-void CompressedKlassPointers::establish_protection_zone(address addr, size_t size) {
-  assert(_protection_zone_size == 0, "just once");
-  assert(addr == base(), "Protection zone not at start of encoding range?");
-  assert(size > 0 && is_aligned(size, os::vm_page_size()), "Protection zone not page sized");
-  const bool rc = os::protect_memory((char*)addr, size, os::MEM_PROT_NONE, false);
-  assert(rc, "Failed to protect the Class space protection zone");
-  log_info(metaspace)("%s Narrow Klass Protection zone " RANGEFMT,
-                      (rc ? "Established" : "FAILED to establish "),
-                      RANGEFMTARGS(addr, size));
-  _protection_zone_size = size;
-}
-
-bool CompressedKlassPointers::is_in_protection_zone(address addr) {
-  return _protection_zone_size > 0 ?
-      (addr >= base() && addr < base() + _protection_zone_size) : false;
-}
-
 #endif // _LP64
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -134,8 +134,6 @@ class CompressedKlassPointers : public AllStatic {
   static narrowKlass _lowest_valid_narrow_klass_id;
   static narrowKlass _highest_valid_narrow_klass_id;

-  // Protection zone size (0 if not set up)
-  static size_t _protection_zone_size;

   // Helper function for common cases.
   static char* reserve_address_space_X(uintptr_t from, uintptr_t to, size_t size, size_t alignment, bool aslr);
@@ -233,7 +231,6 @@ public:
   static bool is_null(narrowKlass v) { return v == 0; }

   // Versions without asserts
-  static inline Klass* decode_not_null_without_asserts(narrowKlass v);
   static inline Klass* decode_without_asserts(narrowKlass v);
   static inline Klass* decode_not_null(narrowKlass v);
   static inline Klass* decode(narrowKlass v);
@@ -261,12 +258,6 @@ public:
          is_aligned(addr, klass_alignment_in_bytes());
   }

-  // Protect a zone a the start of the encoding range
-  static void establish_protection_zone(address addr, size_t size);
-
-  // Returns true if address points into protection zone (for error reporting)
-  static bool is_in_protection_zone(address addr);
-
 #if defined(AARCH64) && !defined(ZERO)
   // Check that with the given base, shift and range, aarch64 code can encode and decode the klass pointer.
   static bool check_klass_decode_mode(address base, int shift, const size_t range);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -40,10 +40,6 @@ inline narrowKlass CompressedKlassPointers::encode_not_null_without_asserts(Klas
   return (narrowKlass)(pointer_delta(k, narrow_base, 1) >> shift);
 }

-inline Klass* CompressedKlassPointers::decode_not_null_without_asserts(narrowKlass v) {
-  return decode_not_null_without_asserts(v, base(), shift());
-}
-
 inline Klass* CompressedKlassPointers::decode_without_asserts(narrowKlass v) {
   return is_null(v) ? nullptr : decode_not_null_without_asserts(v, base(), shift());
 }
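The removed one-argument decode_not_null_without_asserts is the path the protection zone was defending: decoding a zero narrowKlass without asserts yields exactly the encoding base. A sketch with hypothetical base and shift (not JDK code):

#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t base = 0x800000000; // hypothetical narrow-klass base
  const int shift = 3;                // hypothetical shift
  auto decode_not_null_without_asserts = [&](uint32_t v) {
    return base + ((uintptr_t)v << shift);
  };
  // decode(0) lands exactly on the encoding base, which is why protecting
  // the first page(s) there turns a zero-nKlass bug into a clean fault:
  std::printf("decode(0) = 0x%lx (== base)\n",
              (unsigned long)decode_not_null_without_asserts(0));
}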
@@ -325,17 +325,6 @@ WB_ENTRY(void, WB_ReadFromNoaccessArea(JNIEnv* env, jobject o))
           *(vs.low_boundary() - rhs.noaccess_prefix() / 2 ));
 WB_END

-WB_ENTRY(void, WB_DecodeNKlassAndAccessKlass(JNIEnv* env, jobject o, jint nKlass))
-  assert(UseCompressedClassPointers, "Should only call for UseCompressedClassPointers");
-  const narrowKlass nk = (narrowKlass)nKlass;
-  const Klass* const k = CompressedKlassPointers::decode_not_null_without_asserts(nKlass);
-  printf("WB_DecodeNKlassAndAccessKlass: nk %u k " PTR_FORMAT "\n", nk, p2i(k));
-  printf("Will attempt to crash now...\n");
-  fflush(stdout); // flush now - we will crash below
-  // Access k by calling a virtual function - will result in loading the vtable from *k
-  k->print_on(tty);
-WB_END
-
 static jint wb_stress_virtual_space_resize(size_t reserved_space_size,
                                            size_t magnitude, size_t iterations) {
   size_t granularity = os::vm_allocation_granularity();
@@ -2744,7 +2733,6 @@ static JNINativeMethod methods[] = {
                                                       (void*)&WB_GetCompressedOopsMaxHeapSize},
   {CC"printHeapSizes", CC"()V", (void*)&WB_PrintHeapSizes },
   {CC"readFromNoaccessArea",CC"()V", (void*)&WB_ReadFromNoaccessArea},
-  {CC"decodeNKlassAndAccessKlass",CC"(I)V", (void*)&WB_DecodeNKlassAndAccessKlass},
  {CC"stressVirtualSpaceResize",CC"(JJJ)I", (void*)&WB_StressVirtualSpaceResize},
 #if INCLUDE_CDS
   {CC"getCDSOffsetForName0", CC"(Ljava/lang/String;)I", (void*)&WB_GetCDSOffsetForName},
@@ -1291,12 +1291,6 @@ void os::print_location(outputStream* st, intptr_t x, bool verbose) {

   bool accessible = is_readable_pointer(addr);

-  // Check if addr points into the narrow Klass protection zone
-  if (UseCompressedClassPointers && CompressedKlassPointers::is_in_protection_zone(addr)) {
-    st->print_cr(PTR_FORMAT " points into nKlass protection zone", p2i(addr));
-    return;
-  }
-
   // Check if addr is a JNI handle.
   if (align_down((intptr_t)addr, sizeof(intptr_t)) != 0 && accessible) {
     if (JNIHandles::is_global_handle((jobject) addr)) {
@@ -1,203 +0,0 @@
-/*
- * Copyright (c) 2025, Red Hat, Inc.
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * @test id=no_coh_no_cds
- * @summary Test that dereferencing a Klass that is the result of a decode(0) crashes accessing the nKlass guard zone
- * @library /test/lib
- * @requires vm.bits == 64 & vm.debug == true
- * @requires vm.flagless
- * @modules java.base/jdk.internal.misc
- *          java.management
- * @build jdk.test.whitebox.WhiteBox
- * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox
- * @run driver AccessZeroNKlassHitsProtectionZone no_coh_no_cds
- */
-
-/*
- * @test id=no_coh_cds
- * @summary Test that dereferencing a Klass that is the result of a decode(0) crashes accessing the nKlass guard zone
- * @requires vm.bits == 64 & vm.debug == true & vm.flagless
- * @library /test/lib
- * @modules java.base/jdk.internal.misc
- *          java.management
- * @build jdk.test.whitebox.WhiteBox
- * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox
- * @run driver AccessZeroNKlassHitsProtectionZone no_coh_cds
- */
-
-/*
- * @test id=coh_no_cds
- * @summary Test that dereferencing a Klass that is the result of a decode(0) crashes accessing the nKlass guard zone
- * @requires vm.bits == 64 & vm.debug == true & vm.flagless
- * @library /test/lib
- * @modules java.base/jdk.internal.misc
- *          java.management
- * @build jdk.test.whitebox.WhiteBox
- * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox
- * @run driver AccessZeroNKlassHitsProtectionZone coh_no_cds
- */
-
-/*
- * @test id=coh_cds
- * @summary Test that dereferencing a Klass that is the result of a decode(0) crashes accessing the nKlass guard zone
- * @requires vm.bits == 64 & vm.debug == true & vm.flagless
- * @library /test/lib
- * @modules java.base/jdk.internal.misc
- *          java.management
- * @build jdk.test.whitebox.WhiteBox
- * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox
- * @run driver AccessZeroNKlassHitsProtectionZone coh_cds
- */
-
-import jdk.test.lib.HsErrFileUtils;
-import jdk.test.lib.process.OutputAnalyzer;
-import jdk.test.lib.process.ProcessTools;
-import jdk.test.whitebox.WhiteBox;
-import jtreg.SkippedException;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.regex.Pattern;
-
-// Test that dereferencing a Klass that is the result of a narrowKlass=0 will give us immediate crashes
-// that hit the protection zone at encoding base.
-public class AccessZeroNKlassHitsProtectionZone {
-
-    private static OutputAnalyzer run_test(boolean COH, boolean CDS, String forceBaseString) throws IOException, SkippedException {
-        ArrayList<String> args = new ArrayList<>();
-        args.add("-Xbootclasspath/a:.");
-        args.add("-XX:+UnlockDiagnosticVMOptions");
-        args.add("-XX:+WhiteBoxAPI");
-        args.add("-XX:CompressedClassSpaceSize=128m");
-        args.add("-Xmx128m");
-        args.add("-XX:-CreateCoredumpOnCrash");
-        args.add("-Xlog:metaspace*");
-        if (COH) {
-            args.add("-XX:+UnlockExperimentalVMOptions");
-            args.add("-XX:+UseCompactObjectHeaders");
-        }
-        if (CDS) {
-            args.add("-Xshare:on");
-        } else {
-            args.add("-Xshare:off");
-            args.add("-XX:CompressedClassSpaceBaseAddress=" + forceBaseString);
-        }
-        args.add(AccessZeroNKlassHitsProtectionZone.class.getName());
-        args.add("runwb");
-        ProcessBuilder pb = ProcessTools.createLimitedTestJavaProcessBuilder(args.toArray(new String[0]));
-
-        OutputAnalyzer output = new OutputAnalyzer(pb.start());
-        output.reportDiagnosticSummary();
-        return output;
-    }
-
-    private static void run_test(boolean COH, boolean CDS) throws IOException, SkippedException {
-        // Notes:
-        // We want to enforce non-zero-based encoding, to test the protection page in that case. For zero-based encoding,
-        // the protection page is at address zero, no need to test that.
-        // If CDS is on, we never use zero-based, forceBase is ignored.
-        // If CDS is off, we use forceBase to (somewhat) reliably force the encoding base to beyond 32G,
-        // in order to prevent zero-based encoding. Since that may fail, we try several times.
-        OutputAnalyzer output = null;
-        long forceBase = -1;
-        if (CDS) {
-            output = run_test(COH, CDS, "");
-        } else {
-            long g4 = 0x1_0000_0000L;
-            long start = g4 * 8; // 32g
-            long step = g4;
-            long end = start + step * 16;
-            for (forceBase = start; forceBase < end; forceBase += step) {
-                String thisBaseString = String.format("0x%016X", forceBase).toLowerCase();
-                output = run_test(COH, CDS, thisBaseString);
-                if (output.contains("CompressedClassSpaceBaseAddress=" + thisBaseString + " given, but reserving class space failed.")) {
-                    // try next one
-                } else if (output.contains("Successfully forced class space address to " + thisBaseString)) {
-                    break;
-                } else {
-                    throw new RuntimeException("Unexpected");
-                }
-            }
-            if (forceBase >= end) {
-                throw new SkippedException("Failed to force ccs to any of the given bases. Skipping test.");
-            }
-        }
-
-        // Parse the encoding base from the output. In case of CDS, it depends on ASLR. Even in case of CDS=off, we want
-        // to double-check it is the force address.
-        String nKlassBaseString = output.firstMatch("Narrow klass base: 0x([0-9a-f]+)", 1);
-        if (nKlassBaseString == null) {
-            throw new RuntimeException("did not find Narrow klass base in log output");
-        }
-        long nKlassBase = Long.valueOf(nKlassBaseString, 16);
-
-        if (!CDS && nKlassBase != forceBase) {
-            throw new RuntimeException("Weird - we should have mapped at force base"); // .. otherwise we would have skipped out above
-        }
-        if (nKlassBase == 0) {
-            throw new RuntimeException("We should not be running zero-based at this point.");
-        }
-
-        // Calculate the expected crash address pattern. The precise crash address is unknown, but should be located
-        // in the lower part of the guard page following the encoding base. We just accept any address matching the
-        // upper 52 bits (leaving 4K = 12 bits = 3 nibbles of wiggle room)
-        String expectedCrashAddressString = nKlassBaseString.substring(0, nKlassBaseString.length() - 3);
-
-        // output from whitebox function: Klass* should point to encoding base
-        output.shouldMatch("WB_DecodeNKlassAndAccessKlass: nk 0 k 0x" + nKlassBaseString);
-
-        // Then, we should have crashed
-        output.shouldNotHaveExitValue(0);
-        output.shouldContain("# A fatal error has been detected");
-
-        // The hs-err file should contain a reference to the nKlass protection zone, like this:
-        // "RDI=0x0000000800000000 points into nKlass protection zone"
-        File hsErrFile = HsErrFileUtils.openHsErrFileFromOutput(output);
-
-        ArrayList<Pattern> hsErrPatternList = new ArrayList<>();
-        hsErrPatternList.add(Pattern.compile(".*(SIGBUS|SIGSEGV|EXCEPTION_ACCESS_VIOLATION).*"));
-
-        hsErrPatternList.add(Pattern.compile(".*siginfo:.*" + expectedCrashAddressString + ".*"));
-        hsErrPatternList.add(Pattern.compile(".*" + expectedCrashAddressString + ".*points into nKlass protection zone.*"));
-        Pattern[] hsErrPattern = hsErrPatternList.toArray(new Pattern[0]);
-        HsErrFileUtils.checkHsErrFileContent(hsErrFile, hsErrPattern, true);
-    }
-
-    enum Argument { runwb, no_coh_no_cds, no_coh_cds, coh_no_cds, coh_cds };
-
-    public static void main(String[] args) throws Exception {
-        if (args.length != 1) {
-            throw new RuntimeException("Expecting one argument");
-        }
-        Argument arg = Argument.valueOf(args[0]);
-        System.out.println(arg);
-        switch (arg) {
-            case runwb -> WhiteBox.getWhiteBox().decodeNKlassAndAccessKlass(0);
-            case no_coh_no_cds -> run_test(false, false);
-            case no_coh_cds -> run_test(false, true);
-            case coh_no_cds -> run_test(true, false);
-            case coh_cds -> run_test(true, true);
-        }
-    }
-}
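The deleted test matches the crash address only by prefix: it keeps all but the last three hex digits of the narrow-klass base, accepting a fault anywhere in the 4 KiB above the base. A sketch of that substring logic (hypothetical base value; C++ rather than the test's Java, to match the other sketches here):

#include <cstdio>
#include <string>

int main() {
  // Hypothetical narrow-klass base, as a 16-digit hex string like the test parses.
  std::string nKlassBase = "0000000800000000";
  // Keep all but the last 3 hex digits: 12 bits, i.e. a 4 KiB window above the base.
  std::string prefix = nKlassBase.substr(0, nKlassBase.size() - 3);
  std::printf("accept crash addresses matching 0x%s...\n", prefix.c_str());
}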
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -642,8 +642,6 @@ public class WhiteBox {
   // Tests on ReservedSpace/VirtualSpace classes
   public native int stressVirtualSpaceResize(long reservedSpaceSize, long magnitude, long iterations);
   public native void readFromNoaccessArea();
-
-  public native void decodeNKlassAndAccessKlass(int nKlass);
   public native long getThreadStackSize();
   public native long getThreadRemainingStackSize();