From 59629f88e6fad9c1ff91be4cfea83f78f0ea503c Mon Sep 17 00:00:00 2001
From: Thomas Stuefe <stuefe@openjdk.org>
Date: Sun, 30 Mar 2025 16:42:38 +0000
Subject: [PATCH] 8351040: [REDO] Protection zone for easier detection of
 accidental zero-nKlass use

Reviewed-by: mbaesken, iklam
---
 src/hotspot/share/cds/archiveBuilder.cpp      |  14 +-
 src/hotspot/share/cds/archiveBuilder.hpp      |  13 +-
 src/hotspot/share/cds/archiveUtils.cpp        |  55 +++--
 src/hotspot/share/cds/dynamicArchive.cpp      |   1 -
 src/hotspot/share/cds/filemap.hpp             |   2 +-
 src/hotspot/share/cds/metaspaceShared.cpp     | 111 ++++++---
 src/hotspot/share/cds/metaspaceShared.hpp     |   1 +
 src/hotspot/share/include/cds.h               |   5 +-
 src/hotspot/share/memory/metaspace.cpp        |  41 ++--
 src/hotspot/share/oops/compressedKlass.cpp    |  37 ++-
 src/hotspot/share/oops/compressedKlass.hpp    |  11 +-
 .../share/oops/compressedKlass.inline.hpp     |   6 +-
 src/hotspot/share/prims/whitebox.cpp          |  12 +
 src/hotspot/share/runtime/os.cpp              |   6 +
 .../AccessZeroNKlassHitsProtectionZone.java   | 214 ++++++++++++++++++
 test/lib/jdk/test/whitebox/WhiteBox.java      |   4 +-
 16 files changed, 436 insertions(+), 97 deletions(-)
 create mode 100644 test/hotspot/jtreg/runtime/ErrorHandling/AccessZeroNKlassHitsProtectionZone.java

diff --git a/src/hotspot/share/cds/archiveBuilder.cpp b/src/hotspot/share/cds/archiveBuilder.cpp
index 48e843e473f..ba823afa238 100644
--- a/src/hotspot/share/cds/archiveBuilder.cpp
+++ b/src/hotspot/share/cds/archiveBuilder.cpp
@@ -153,7 +153,6 @@ void ArchiveBuilder::SourceObjList::relocate(int i, ArchiveBuilder* builder) {
 ArchiveBuilder::ArchiveBuilder() :
   _current_dump_region(nullptr),
   _buffer_bottom(nullptr),
-  _num_dump_regions_used(0),
   _requested_static_archive_bottom(nullptr),
   _requested_static_archive_top(nullptr),
   _requested_dynamic_archive_bottom(nullptr),
@@ -161,6 +160,7 @@ ArchiveBuilder::ArchiveBuilder() :
   _mapped_static_archive_bottom(nullptr),
   _mapped_static_archive_top(nullptr),
   _buffer_to_requested_delta(0),
+  _pz_region("pz", MAX_SHARED_DELTA), // protection zone -- used only during dumping; does NOT exist in cds archive.
   _rw_region("rw", MAX_SHARED_DELTA),
   _ro_region("ro", MAX_SHARED_DELTA),
   _ptrmap(mtClassShared),
@@ -323,8 +323,12 @@ address ArchiveBuilder::reserve_buffer() {
   _shared_rs = rs;
 
   _buffer_bottom = buffer_bottom;
-  _current_dump_region = &_rw_region;
-  _num_dump_regions_used = 1;
+
+  if (CDSConfig::is_dumping_static_archive()) {
+    _current_dump_region = &_pz_region;
+  } else {
+    _current_dump_region = &_rw_region;
+  }
   _current_dump_region->init(&_shared_rs, &_shared_vs);
 
   ArchivePtrMarker::initialize(&_ptrmap, &_shared_vs);
@@ -366,7 +370,8 @@ address ArchiveBuilder::reserve_buffer() {
   if (CDSConfig::is_dumping_static_archive()) {
     // We don't want any valid object to be at the very bottom of the archive.
     // See ArchivePtrMarker::mark_pointer().
-    rw_region()->allocate(16);
+    _pz_region.allocate(MetaspaceShared::protection_zone_size());
+    start_dump_region(&_rw_region);
   }
 
   return buffer_bottom;
@@ -544,7 +549,6 @@ ArchiveBuilder::FollowMode ArchiveBuilder::get_follow_mode(MetaspaceClosure::Ref
 void ArchiveBuilder::start_dump_region(DumpRegion* next) {
   current_dump_region()->pack(next);
   _current_dump_region = next;
-  _num_dump_regions_used ++;
 }
 
 char* ArchiveBuilder::ro_strdup(const char* s) {
diff --git a/src/hotspot/share/cds/archiveBuilder.hpp b/src/hotspot/share/cds/archiveBuilder.hpp
index e3efedd46f1..5913ae29c78 100644
--- a/src/hotspot/share/cds/archiveBuilder.hpp
+++ b/src/hotspot/share/cds/archiveBuilder.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -96,7 +96,6 @@ class ArchiveBuilder : public StackObj {
 protected:
   DumpRegion* _current_dump_region;
   address _buffer_bottom;                      // for writing the contents of rw/ro regions
-  int _num_dump_regions_used;
 
   // These are the addresses where we will request the static and dynamic archives to be
   // mapped at run time. If the request fails (due to ASLR), we will map the archives at
@@ -210,6 +209,12 @@ private:
   ReservedSpace _shared_rs;
   VirtualSpace _shared_vs;
 
+  // The "pz" region is used only during static dumps to reserve unused space between SharedBaseAddress and
+  // the bottom of the rw region. At runtime, this space will be covered by a reserved area that disallows
+  // read/write/exec access, so we can catch bad CompressedKlassPointers encodings.
+  // Note: this region does NOT exist in the cds archive.
+  DumpRegion _pz_region;
+
   DumpRegion _rw_region;
   DumpRegion _ro_region;
 
@@ -270,9 +275,6 @@ private:
 
 protected:
   virtual void iterate_roots(MetaspaceClosure* it) = 0;
-
-  static const int _total_dump_regions = 2;
-
   void start_dump_region(DumpRegion* next);
 
 public:
@@ -367,6 +369,7 @@ public:
   void remember_embedded_pointer_in_enclosing_obj(MetaspaceClosure::Ref* ref);
   static void serialize_dynamic_archivable_items(SerializeClosure* soc);
 
+  DumpRegion* pz_region() { return &_pz_region; }
   DumpRegion* rw_region() { return &_rw_region; }
   DumpRegion* ro_region() { return &_ro_region; }
 
diff --git a/src/hotspot/share/cds/archiveUtils.cpp b/src/hotspot/share/cds/archiveUtils.cpp
index 605a19a224c..217b30e401d 100644
--- a/src/hotspot/share/cds/archiveUtils.cpp
+++ b/src/hotspot/share/cds/archiveUtils.cpp
@@ -74,34 +74,39 @@ void ArchivePtrMarker::initialize(CHeapBitMap* ptrmap, VirtualSpace* vs) {
 }
 
 void ArchivePtrMarker::initialize_rw_ro_maps(CHeapBitMap* rw_ptrmap, CHeapBitMap* ro_ptrmap) {
-  address* rw_bottom = (address*)ArchiveBuilder::current()->rw_region()->base();
-  address* ro_bottom = (address*)ArchiveBuilder::current()->ro_region()->base();
+  address* buff_bottom = (address*)ArchiveBuilder::current()->buffer_bottom();
+  address* rw_bottom   = (address*)ArchiveBuilder::current()->rw_region()->base();
+  address* ro_bottom   = (address*)ArchiveBuilder::current()->ro_region()->base();
+
+  // The bits in _ptrmap that cover the very first word of the rw/ro regions, respectively.
+  size_t rw_start = rw_bottom - buff_bottom;
+  size_t ro_start = ro_bottom - buff_bottom;
+
+  // The number of bits used by the rw/ro ptrmaps. We might have lots of zero
+  // bits at the bottom and top of rw/ro ptrmaps, but these zeros will be
+  // removed by FileMapInfo::write_bitmap_region().
+  size_t rw_size = ArchiveBuilder::current()->rw_region()->used() / sizeof(address);
+  size_t ro_size = ArchiveBuilder::current()->ro_region()->used() / sizeof(address);
+
+  // The last (exclusive) bit in _ptrmap that covers the rw/ro regions.
+  // Note: _ptrmap is dynamically expanded only when an actual pointer is written, so
+  // it may not be as large as we want.
+  size_t rw_end = MIN2(rw_start + rw_size, _ptrmap->size());
+  size_t ro_end = MIN2(ro_start + ro_size, _ptrmap->size());
+
+  rw_ptrmap->initialize(rw_size);
+  ro_ptrmap->initialize(ro_size);
+
+  for (size_t rw_bit = rw_start; rw_bit < rw_end; rw_bit++) {
+    rw_ptrmap->at_put(rw_bit - rw_start, _ptrmap->at(rw_bit));
+  }
+
+  for (size_t ro_bit = ro_start; ro_bit < ro_end; ro_bit++) {
+    ro_ptrmap->at_put(ro_bit - ro_start, _ptrmap->at(ro_bit));
+  }
 
   _rw_ptrmap = rw_ptrmap;
   _ro_ptrmap = ro_ptrmap;
-
-  size_t rw_size = ArchiveBuilder::current()->rw_region()->used() / sizeof(address);
-  size_t ro_size = ArchiveBuilder::current()->ro_region()->used() / sizeof(address);
-  // ro_start is the first bit in _ptrmap that covers the pointer that would sit at ro_bottom.
-  // E.g., if rw_bottom = (address*)100
-  //          ro_bottom = (address*)116
-  //        then for 64-bit platform:
-  //          ro_start = ro_bottom - rw_bottom = (116 - 100) / sizeof(address) = 2;
-  size_t ro_start = ro_bottom - rw_bottom;
-
-  // Note: ptrmap is big enough only to cover the last pointer in ro_region.
-  // See ArchivePtrMarker::compact()
-  _rw_ptrmap->initialize(rw_size);
-  _ro_ptrmap->initialize(_ptrmap->size() - ro_start);
-
-  for (size_t rw_bit = 0; rw_bit < _rw_ptrmap->size(); rw_bit++) {
-    _rw_ptrmap->at_put(rw_bit, _ptrmap->at(rw_bit));
-  }
-
-  for(size_t ro_bit = ro_start; ro_bit < _ptrmap->size(); ro_bit++) {
-    _ro_ptrmap->at_put(ro_bit-ro_start, _ptrmap->at(ro_bit));
-  }
-  assert(_ptrmap->size() - ro_start == _ro_ptrmap->size(), "must be");
 }
 
 void ArchivePtrMarker::mark_pointer(address* ptr_loc) {
diff --git a/src/hotspot/share/cds/dynamicArchive.cpp b/src/hotspot/share/cds/dynamicArchive.cpp
index 9b1e99512c6..3cf1019d9ea 100644
--- a/src/hotspot/share/cds/dynamicArchive.cpp
+++ b/src/hotspot/share/cds/dynamicArchive.cpp
@@ -173,7 +173,6 @@ public:
 
     post_dump();
 
-    assert(_num_dump_regions_used == _total_dump_regions, "must be");
     verify_universe("After CDS dynamic dump");
   }
 
diff --git a/src/hotspot/share/cds/filemap.hpp b/src/hotspot/share/cds/filemap.hpp
index 095faba69f3..50af87a5da7 100644
--- a/src/hotspot/share/cds/filemap.hpp
+++ b/src/hotspot/share/cds/filemap.hpp
@@ -399,7 +399,7 @@ public:
   // The offset of the (exclusive) end of the last core region in this archive, relative to SharedBaseAddress
   size_t mapping_end_offset()    const { return last_core_region()->mapping_end_offset(); }
 
-  char* mapped_base()    const { return first_core_region()->mapped_base(); }
+  char* mapped_base()    const { return header()->mapped_base_address(); }
   char* mapped_end()     const { return last_core_region()->mapped_end();  }
 
   // Non-zero if the archive needs to be mapped a non-default location due to ASLR.
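[Editorial aside, not part of the patch: the initialize_rw_ro_maps() rewrite above boils
down to slicing one whole-buffer pointer bitmap into per-region bitmaps. A minimal
standalone sketch of that index arithmetic, with illustrative names only (the real code
uses CHeapBitMap, and the full map may be shorter than expected since it only grows when
a pointer is actually marked):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // Bit i of the full map covers the pointer slot at buff_bottom + i.
    // A region occupying slots [start, start + size) gets its own map of
    // 'size' bits, copied from wherever the full map actually has bits.
    std::vector<bool> slice_region_bitmap(const std::vector<bool>& full_map,
                                          size_t region_start, size_t region_size) {
      std::vector<bool> region_map(region_size, false);
      const size_t end = std::min(region_start + region_size, full_map.size());
      for (size_t bit = region_start; bit < end; bit++) {
        region_map[bit - region_start] = full_map[bit];
      }
      return region_map;
    }

Since the new pz region precedes rw in the buffer, the rw slice now starts at a bit
index greater than zero, which is why the old rw-relative arithmetic had to become
buffer-relative.]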
diff --git a/src/hotspot/share/cds/metaspaceShared.cpp b/src/hotspot/share/cds/metaspaceShared.cpp
index 7ce423e6f87..3ca107b81ed 100644
--- a/src/hotspot/share/cds/metaspaceShared.cpp
+++ b/src/hotspot/share/cds/metaspaceShared.cpp
@@ -149,6 +149,10 @@ size_t MetaspaceShared::core_region_alignment() {
   return os::cds_core_region_alignment();
 }
 
+size_t MetaspaceShared::protection_zone_size() {
+  return os::cds_core_region_alignment();
+}
+
 static bool shared_base_valid(char* shared_base) {
   // We check user input for SharedBaseAddress at dump time.
 
@@ -1253,6 +1257,7 @@ MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, File
 
   ReservedSpace total_space_rs, archive_space_rs, class_space_rs;
   MapArchiveResult result = MAP_ARCHIVE_OTHER_FAILURE;
+  size_t prot_zone_size = 0;
   char* mapped_base_address = reserve_address_space_for_archives(static_mapinfo,
                                                                  dynamic_mapinfo,
                                                                  use_requested_addr,
@@ -1264,14 +1269,21 @@ MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, File
     log_debug(cds)("Failed to reserve spaces (use_requested_addr=%u)", (unsigned)use_requested_addr);
   } else {
 
+    if (Metaspace::using_class_space()) {
+      prot_zone_size = protection_zone_size();
+    }
+
 #ifdef ASSERT
     // Some sanity checks after reserving address spaces for archives
     //  and class space.
     assert(archive_space_rs.is_reserved(), "Sanity");
     if (Metaspace::using_class_space()) {
+      assert(archive_space_rs.base() == mapped_base_address &&
+             archive_space_rs.size() > protection_zone_size(),
+             "Archive space must lead and include the protection zone");
       // Class space must closely follow the archive space. Both spaces
       //  must be aligned correctly.
-      assert(class_space_rs.is_reserved(),
+      assert(class_space_rs.is_reserved() && class_space_rs.size() > 0,
              "A class space should have been reserved");
       assert(class_space_rs.base() >= archive_space_rs.end(),
              "class space should follow the cds archive space");
@@ -1284,8 +1296,9 @@ MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, File
     }
 #endif // ASSERT
 
-    log_info(cds)("Reserved archive_space_rs [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (%zu) bytes",
-                  p2i(archive_space_rs.base()), p2i(archive_space_rs.end()), archive_space_rs.size());
+    log_info(cds)("Reserved archive_space_rs [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (%zu) bytes%s",
+                  p2i(archive_space_rs.base()), p2i(archive_space_rs.end()), archive_space_rs.size(),
+                  (prot_zone_size > 0 ? " (includes protection zone)" : ""));
     log_info(cds)("Reserved class_space_rs   [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (%zu) bytes",
                   p2i(class_space_rs.base()), p2i(class_space_rs.end()), class_space_rs.size());
 
@@ -1312,8 +1325,35 @@ MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, File
         MemoryReserver::release(archive_space_rs);
         // Mark as not reserved
         archive_space_rs = {};
+        // The protection zone is part of the archive:
+        // See the comment above: the Windows way of loading CDS is to mmap the individual
+        // parts of the archive into the address region we just vacated. The protection
+        // zone will not be mapped (and, in fact, does not exist as a physical region in
+        // the archive). Therefore, after removing the archive space above, we must
+        // re-reserve the protection zone part lest something else gets mapped into that
+        // area later.
+        if (prot_zone_size > 0) {
+          assert(prot_zone_size >= os::vm_allocation_granularity(), "must be"); // not just page size!
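+          // (Editorial note, an assumption added for clarity: the zone is checked against
+          //  allocation granularity rather than page size because reservations can only be
+          //  placed on allocation-granularity boundaries, e.g. 64K granules on Windows, so
+          //  a merely page-sized zone could not be re-reserved by itself.)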
+          char* p = os::attempt_reserve_memory_at(mapped_base_address, prot_zone_size,
+                                                  false, MemTag::mtClassShared);
+          assert(p == mapped_base_address || p == nullptr, "must be");
+          if (p == nullptr) {
+            log_debug(cds)("Failed to re-reserve protection zone");
+            return MAP_ARCHIVE_MMAP_FAILURE;
+          }
+        }
       }
     }
+
+    if (prot_zone_size > 0) {
+      os::commit_memory(mapped_base_address, prot_zone_size, false); // will later be protected
+      // Before mapping the core regions into the newly established address space, we mark
+      // the start and the end of the future protection zone with canaries. That way we easily
+      // catch mapping errors (accidentally mapping data into the future protection zone).
+      *(mapped_base_address) = 'P';
+      *(mapped_base_address + prot_zone_size - 1) = 'P';
+    }
+
     MapArchiveResult static_result = map_archive(static_mapinfo, mapped_base_address, archive_space_rs);
     MapArchiveResult dynamic_result = (static_result == MAP_ARCHIVE_SUCCESS) ?
                                       map_archive(dynamic_mapinfo, mapped_base_address, archive_space_rs) : MAP_ARCHIVE_OTHER_FAILURE;
@@ -1357,38 +1397,40 @@ MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, File
 
   if (result == MAP_ARCHIVE_SUCCESS) {
     SharedBaseAddress = (size_t)mapped_base_address;
 #ifdef _LP64
-    if (Metaspace::using_class_space()) {
-      // Set up ccs in metaspace.
-      Metaspace::initialize_class_space(class_space_rs);
+      if (Metaspace::using_class_space()) {
+        assert(prot_zone_size > 0 &&
+               *(mapped_base_address) == 'P' &&
+               *(mapped_base_address + prot_zone_size - 1) == 'P',
+               "Protection zone was overwritten?");
+        // Set up ccs in metaspace.
+        Metaspace::initialize_class_space(class_space_rs);
 
-      // Set up compressed Klass pointer encoding: the encoding range must
-      // cover both archive and class space.
-      address cds_base = (address)static_mapinfo->mapped_base();
-      address ccs_end = (address)class_space_rs.end();
-      assert(ccs_end > cds_base, "Sanity check");
-      if (INCLUDE_CDS_JAVA_HEAP || UseCompactObjectHeaders) {
-        // The CDS archive may contain narrow Klass IDs that were precomputed at archive generation time:
-        // - every archived java object header (only if INCLUDE_CDS_JAVA_HEAP)
-        // - every archived Klass' prototype   (only if +UseCompactObjectHeaders)
-        //
-        // In order for those IDs to still be valid, we need to dictate base and shift: base should be the
-        // mapping start, shift the shift used at archive generation time.
-        address precomputed_narrow_klass_base = cds_base;
-        const int precomputed_narrow_klass_shift = ArchiveBuilder::precomputed_narrow_klass_shift();
-        CompressedKlassPointers::initialize_for_given_encoding(
-          cds_base, ccs_end - cds_base, // Klass range
-          precomputed_narrow_klass_base, precomputed_narrow_klass_shift // precomputed encoding, see ArchiveBuilder
-        );
-      } else {
-        // Let JVM freely chose encoding base and shift
-        CompressedKlassPointers::initialize (
-          cds_base, ccs_end - cds_base // Klass range
-        );
-      }
-      // map_or_load_heap_region() compares the current narrow oop and klass encodings
-      // with the archived ones, so it must be done after all encodings are determined.
-      static_mapinfo->map_or_load_heap_region();
-    }
+        // Set up compressed Klass pointer encoding: the encoding range must
+        // cover both archive and class space.
+        const address encoding_base = (address)mapped_base_address;
+        const address klass_range_start = encoding_base + prot_zone_size;
+        const size_t klass_range_size = (address)class_space_rs.end() - klass_range_start;
+        if (INCLUDE_CDS_JAVA_HEAP || UseCompactObjectHeaders) {
+          // The CDS archive may contain narrow Klass IDs that were precomputed at archive generation time:
+          // - every archived java object header (only if INCLUDE_CDS_JAVA_HEAP)
+          // - every archived Klass' prototype   (only if +UseCompactObjectHeaders)
+          //
+          // In order for those IDs to still be valid, we need to dictate base and shift: base should be the
+          // mapping start (including protection zone), shift should be the shift used at archive generation time.
+          CompressedKlassPointers::initialize_for_given_encoding(
+            klass_range_start, klass_range_size,
+            encoding_base, ArchiveBuilder::precomputed_narrow_klass_shift() // precomputed encoding, see ArchiveBuilder
+          );
+        } else {
+          // Let JVM freely choose encoding base and shift
+          CompressedKlassPointers::initialize(klass_range_start, klass_range_size);
+        }
+        CompressedKlassPointers::establish_protection_zone(encoding_base, prot_zone_size);
+
+        // map_or_load_heap_region() compares the current narrow oop and klass encodings
+        // with the archived ones, so it must be done after all encodings are determined.
+        static_mapinfo->map_or_load_heap_region();
+      }
 #endif // _LP64
     log_info(cds)("initial optimized module handling: %s", CDSConfig::is_using_optimized_module_handling() ? "enabled" : "disabled");
     log_info(cds)("initial full module graph: %s", CDSConfig::is_using_full_module_graph() ? "enabled" : "disabled");
@@ -1470,7 +1512,6 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
   const size_t archive_space_alignment = core_region_alignment();
 
   // Size and requested location of the archive_space_rs (for both static and dynamic archives)
-  assert(static_mapinfo->mapping_base_offset() == 0, "Must be");
   size_t archive_end_offset = (dynamic_mapinfo == nullptr) ? static_mapinfo->mapping_end_offset() : dynamic_mapinfo->mapping_end_offset();
   size_t archive_space_size = align_up(archive_end_offset, archive_space_alignment);
 
diff --git a/src/hotspot/share/cds/metaspaceShared.hpp b/src/hotspot/share/cds/metaspaceShared.hpp
index 6d5f273041a..27df816833c 100644
--- a/src/hotspot/share/cds/metaspaceShared.hpp
+++ b/src/hotspot/share/cds/metaspaceShared.hpp
@@ -139,6 +139,7 @@ public:
   // Alignment for the 2 core CDS regions (RW/RO) only.
   // (Heap region alignments are decided by GC).
   static size_t core_region_alignment();
+  static size_t protection_zone_size();
   static void rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread* thread, InstanceKlass* ik);
   // print loaded classes names to file.
   static void dump_loaded_classes(const char* file_name, TRAPS);
diff --git a/src/hotspot/share/include/cds.h b/src/hotspot/share/include/cds.h
index 2a80dd68abd..8819e4c00be 100644
--- a/src/hotspot/share/include/cds.h
+++ b/src/hotspot/share/include/cds.h
@@ -70,7 +70,10 @@ typedef struct CDSFileMapRegion {
   size_t _ptrmap_offset;      // Bitmap for relocating native pointer fields in archived heap objects.
                               // (The base address is the bottom of the BM region).
   size_t _ptrmap_size_in_bits;
-  char*  _mapped_base;        // Actually mapped address (null if this region is not mapped).
+  char*  _mapped_base;        // Actually mapped address used for mapping the core regions. At that address the
+                              // zero-nKlass protection zone is established; following that (at offset
+                              // MetaspaceShared::protection_zone_size()) the lowest core region (rw for the
+                              // static archive) is mapped.
   bool   _in_reserved_space;  // Is this region in a ReservedSpace
 } CDSFileMapRegion;
 
diff --git a/src/hotspot/share/memory/metaspace.cpp b/src/hotspot/share/memory/metaspace.cpp
index e80e5b8a1e6..3615c3dc7ee 100644
--- a/src/hotspot/share/memory/metaspace.cpp
+++ b/src/hotspot/share/memory/metaspace.cpp
@@ -1,7 +1,7 @@
 /*
  * Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2017, 2021 SAP SE. All rights reserved.
- * Copyright (c) 2023, 2024, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2023, 2025, Red Hat, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -37,6 +37,7 @@
 #include "memory/metaspace/chunkManager.hpp"
 #include "memory/metaspace/commitLimiter.hpp"
 #include "memory/metaspace/internalStats.hpp"
+#include "memory/metaspace/metachunk.hpp"
 #include "memory/metaspace/metaspaceCommon.hpp"
 #include "memory/metaspace/metaspaceContext.hpp"
 #include "memory/metaspace/metaspaceReporter.hpp"
@@ -804,29 +805,37 @@ void Metaspace::global_initialize() {
     // Set up compressed class pointer encoding.
     // In CDS=off mode, we give the JVM some leeway to choose a favorable base/shift combination.
     CompressedKlassPointers::initialize((address)rs.base(), rs.size());
+
+    // After the narrowKlass encoding scheme is decided: if the encoding base points to the class space start,
+    // establish a protection zone. Accidentally decoding a zero nKlass ID and then using it will result
+    // in an immediate segmentation fault instead of a delayed error much later.
+    if (CompressedKlassPointers::base() == (address)rs.base()) {
+      // Let the protection zone be a whole commit granule. Otherwise, the buddy allocator may later place
+      // neighboring chunks in the same granule, see that the granule is not yet committed, and commit it,
+      // which would replace the protection mapping and make the zone readable.
+      // Alternatively, we could commit the chunk right now, but that is a tiny bit more fiddly, since we are not
+      // fully set up yet at this point.
+      const size_t protzone_size = metaspace::Settings::commit_granule_bytes(); // granule size >= page size
+      const size_t protzone_wordsize = protzone_size / BytesPerWord;
+      const metaspace::chunklevel_t lvl = metaspace::chunklevel::level_fitting_word_size(protzone_wordsize);
+      metaspace::Metachunk* const chunk = MetaspaceContext::context_class()->cm()->get_chunk(lvl);
+      const address protzone = (address) chunk->base();
+      assert(protzone == (address)rs.base(), "The very first chunk should be located at the class space start?");
+      assert(chunk->word_size() == protzone_wordsize, "Weird chunk size");
+      CompressedKlassPointers::establish_protection_zone(protzone, protzone_size);
+    } else {
+      assert(CompressedKlassPointers::base() == nullptr, "Zero-based encoding expected");
+    }
+
   }
-#endif
+#endif // _LP64
 
   // Initialize non-class virtual space list, and its chunk manager:
   MetaspaceContext::initialize_nonclass_space_context();
 
   _tracer = new MetaspaceTracer();
 
-  // We must prevent the very first address of the ccs from being used to store
-  // metadata, since that address would translate to a narrow pointer of 0, and the
-  // VM does not distinguish between "narrow 0 as in null" and "narrow 0 as in start
-  // of ccs".
-  // Before Elastic Metaspace that did not happen due to the fact that every Metachunk
-  // had a header and therefore could not allocate anything at offset 0.
-#ifdef _LP64
-  if (using_class_space()) {
-    // The simplest way to fix this is to allocate a tiny dummy chunk right at the
-    // start of ccs and do not use it for anything.
-    MetaspaceContext::context_class()->cm()->get_chunk(metaspace::chunklevel::HIGHEST_CHUNK_LEVEL);
-  }
-#endif
-
 #ifdef _LP64
   if (UseCompressedClassPointers) {
     // Note: "cds" would be a better fit but keep this for backward compatibility.
diff --git a/src/hotspot/share/oops/compressedKlass.cpp b/src/hotspot/share/oops/compressedKlass.cpp
index fc8c0513ca0..df639388a5e 100644
--- a/src/hotspot/share/oops/compressedKlass.cpp
+++ b/src/hotspot/share/oops/compressedKlass.cpp
@@ -42,6 +42,7 @@ address CompressedKlassPointers::_klass_range_start = nullptr;
 address CompressedKlassPointers::_klass_range_end = nullptr;
 narrowKlass CompressedKlassPointers::_lowest_valid_narrow_klass_id = (narrowKlass)-1;
 narrowKlass CompressedKlassPointers::_highest_valid_narrow_klass_id = (narrowKlass)-1;
+size_t CompressedKlassPointers::_protection_zone_size = 0;
 
 #ifdef _LP64
 
@@ -162,11 +163,6 @@ void CompressedKlassPointers::initialize_for_given_encoding(address addr, size_t
     vm_exit_during_initialization(ss.base());
   }
 
-  // Note: While it would be technically valid for the encoding base to precede the start of the Klass range,
-  // we never do this here. This is used at CDS runtime to re-instate the scheme used to precompute the
-  // narrow Klass IDs in the archive, and the requested base should point to the start of the Klass range.
-  assert(requested_base == addr, "Invalid requested base");
-
   // Remember Klass range:
   _klass_range_start = addr;
   _klass_range_end = addr + len;
@@ -304,9 +300,40 @@ void CompressedKlassPointers::print_mode(outputStream* st) {
     st->print_cr("Klass Range: " RANGE2FMT, RANGE2FMTARGS(_klass_range_start, _klass_range_end));
     st->print_cr("Klass ID Range:  [%u - %u) (%u)", _lowest_valid_narrow_klass_id,
                  _highest_valid_narrow_klass_id + 1, _highest_valid_narrow_klass_id + 1 - _lowest_valid_narrow_klass_id);
+    if (_protection_zone_size > 0) {
+      st->print_cr("Protection zone: " RANGEFMT, RANGEFMTARGS(_base, _protection_zone_size));
+    } else {
+      st->print_cr("No protection zone.");
+    }
   } else {
     st->print_cr("UseCompressedClassPointers off");
   }
 }
 
+// On AIX, we cannot mprotect archive space or class space since they are reserved with SystemV shm.
+static constexpr bool can_mprotect_archive_space = NOT_AIX(true) AIX_ONLY(false);
+
+// Protect a zone at the start of the encoding range
+void CompressedKlassPointers::establish_protection_zone(address addr, size_t size) {
+  assert(_protection_zone_size == 0, "just once");
+  assert(addr == base(), "Protection zone not at start of encoding range?");
+  assert(size > 0 && is_aligned(size, os::vm_page_size()), "Protection zone not page sized");
+  const bool rc = can_mprotect_archive_space &&
+                  os::protect_memory((char*)addr, size, os::MEM_PROT_NONE, false);
+  log_info(metaspace)("%s Narrow Klass Protection zone " RANGEFMT,
+                      (rc ? "Established" : "FAILED to establish"),
+                      RANGEFMTARGS(addr, size));
+  if (!rc) {
+    // If we fail to establish the protection zone, we fill it with a clear pattern to make it
+    // stick out in register values (0x50 aka 'P', repeated)
+    os::commit_memory((char*)addr, size, false);
+    memset(addr, 'P', size);
+  }
+  _protection_zone_size = size;
+}
+
+bool CompressedKlassPointers::is_in_protection_zone(address addr) {
+  return _protection_zone_size > 0 ?
+         (addr >= base() && addr < base() + _protection_zone_size) : false;
+}
+
 #endif // _LP64
diff --git a/src/hotspot/share/oops/compressedKlass.hpp b/src/hotspot/share/oops/compressedKlass.hpp
index dd54c8130eb..4ce644d9cef 100644
--- a/src/hotspot/share/oops/compressedKlass.hpp
+++ b/src/hotspot/share/oops/compressedKlass.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -134,6 +134,8 @@ class CompressedKlassPointers : public AllStatic {
   static narrowKlass _lowest_valid_narrow_klass_id;
   static narrowKlass _highest_valid_narrow_klass_id;
 
+  // Protection zone size (0 if not set up)
+  static size_t _protection_zone_size;
   // Helper function for common cases.
   static char* reserve_address_space_X(uintptr_t from, uintptr_t to, size_t size, size_t alignment, bool aslr);
 
@@ -231,6 +233,7 @@ public:
   static bool is_null(narrowKlass v)      { return v == 0; }
 
   // Versions without asserts
+  static inline Klass* decode_not_null_without_asserts(narrowKlass v);
   static inline Klass* decode_without_asserts(narrowKlass v);
   static inline Klass* decode_not_null(narrowKlass v);
   static inline Klass* decode(narrowKlass v);
@@ -258,6 +261,12 @@ public:
            is_aligned(addr, klass_alignment_in_bytes());
   }
 
+  // Protect a zone at the start of the encoding range
+  static void establish_protection_zone(address addr, size_t size);
+
+  // Returns true if address points into the protection zone (for error reporting)
+  static bool is_in_protection_zone(address addr);
+
 #if defined(AARCH64) && !defined(ZERO)
   // Check that with the given base, shift and range, aarch64 code can encode and decode the klass pointer.
   static bool check_klass_decode_mode(address base, int shift, const size_t range);
diff --git a/src/hotspot/share/oops/compressedKlass.inline.hpp b/src/hotspot/share/oops/compressedKlass.inline.hpp
index 7c5da48a494..94a78ed1894 100644
--- a/src/hotspot/share/oops/compressedKlass.inline.hpp
+++ b/src/hotspot/share/oops/compressedKlass.inline.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -40,6 +40,10 @@ inline narrowKlass CompressedKlassPointers::encode_not_null_without_asserts(Klas
   return (narrowKlass)(pointer_delta(k, narrow_base, 1) >> shift);
 }
 
+inline Klass* CompressedKlassPointers::decode_not_null_without_asserts(narrowKlass v) {
+  return decode_not_null_without_asserts(v, base(), shift());
+}
+
 inline Klass* CompressedKlassPointers::decode_without_asserts(narrowKlass v) {
   return is_null(v) ? nullptr : decode_not_null_without_asserts(v, base(), shift());
 }
diff --git a/src/hotspot/share/prims/whitebox.cpp b/src/hotspot/share/prims/whitebox.cpp
index 4f57718f200..e6aa4b90ac4 100644
--- a/src/hotspot/share/prims/whitebox.cpp
+++ b/src/hotspot/share/prims/whitebox.cpp
@@ -325,6 +325,17 @@ WB_ENTRY(void, WB_ReadFromNoaccessArea(JNIEnv* env, jobject o))
             *(vs.low_boundary() - rhs.noaccess_prefix() / 2 ));
 WB_END
 
+WB_ENTRY(void, WB_DecodeNKlassAndAccessKlass(JNIEnv* env, jobject o, jint nKlass))
+  assert(UseCompressedClassPointers, "Should only call for UseCompressedClassPointers");
+  const narrowKlass nk = (narrowKlass)nKlass;
+  const Klass* const k = CompressedKlassPointers::decode_not_null_without_asserts(nk);
+  printf("WB_DecodeNKlassAndAccessKlass: nk %u k " PTR_FORMAT "\n", nk, p2i(k));
+  printf("Will attempt to crash now...\n");
+  fflush(stdout); // flush now - we will crash below
+  // Access k by calling a virtual function - will result in loading the vtable from *k
+  k->print_on(tty);
+WB_END
+
 static jint wb_stress_virtual_space_resize(size_t reserved_space_size,
                                            size_t magnitude, size_t iterations) {
   size_t granularity = os::vm_allocation_granularity();
@@ -2733,6 +2744,7 @@ static JNINativeMethod methods[] = {
       (void*)&WB_GetCompressedOopsMaxHeapSize},
   {CC"printHeapSizes",     CC"()V",                   (void*)&WB_PrintHeapSizes    },
   {CC"readFromNoaccessArea",CC"()V",                  (void*)&WB_ReadFromNoaccessArea},
+  {CC"decodeNKlassAndAccessKlass",CC"(I)V",           (void*)&WB_DecodeNKlassAndAccessKlass},
  {CC"stressVirtualSpaceResize",CC"(JJJ)I",            (void*)&WB_StressVirtualSpaceResize},
 #if INCLUDE_CDS
   {CC"getCDSOffsetForName0", CC"(Ljava/lang/String;)I",  (void*)&WB_GetCDSOffsetForName},
diff --git a/src/hotspot/share/runtime/os.cpp b/src/hotspot/share/runtime/os.cpp
index 4eb0621ff8a..cbe40ca214b 100644
--- a/src/hotspot/share/runtime/os.cpp
+++ b/src/hotspot/share/runtime/os.cpp
@@ -1291,6 +1291,12 @@ void os::print_location(outputStream* st, intptr_t x, bool verbose) {
 
   bool accessible = is_readable_pointer(addr);
 
+  // Check if addr points into the narrow Klass protection zone
+  if (UseCompressedClassPointers && CompressedKlassPointers::is_in_protection_zone(addr)) {
+    st->print_cr(PTR_FORMAT " points into nKlass protection zone", p2i(addr));
+    return;
+  }
+
   // Check if addr is a JNI handle.
   if (align_down((intptr_t)addr, sizeof(intptr_t)) != 0 && accessible) {
     if (JNIHandles::is_global_handle((jobject) addr)) {
diff --git a/test/hotspot/jtreg/runtime/ErrorHandling/AccessZeroNKlassHitsProtectionZone.java b/test/hotspot/jtreg/runtime/ErrorHandling/AccessZeroNKlassHitsProtectionZone.java
new file mode 100644
index 00000000000..189f1c08916
--- /dev/null
+++ b/test/hotspot/jtreg/runtime/ErrorHandling/AccessZeroNKlassHitsProtectionZone.java
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 2025, Red Hat, Inc.
+ * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test id=no_coh_no_cds
+ * @summary Test that dereferencing a Klass that is the result of a decode(0) crashes accessing the nKlass guard zone
+ * @library /test/lib
+ * @requires vm.bits == 64 & vm.debug == true & vm.flagless
+ * @requires os.family != "aix"
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ * @build jdk.test.whitebox.WhiteBox
+ * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox
+ * @run driver AccessZeroNKlassHitsProtectionZone no_coh_no_cds
+ */
+
+/*
+ * @test id=no_coh_cds
+ * @summary Test that dereferencing a Klass that is the result of a decode(0) crashes accessing the nKlass guard zone
+ * @requires vm.bits == 64 & vm.debug == true & vm.flagless
+ * @requires os.family != "aix"
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ * @build jdk.test.whitebox.WhiteBox
+ * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox
+ * @run driver AccessZeroNKlassHitsProtectionZone no_coh_cds
+ */
+
+/*
+ * @test id=coh_no_cds
+ * @summary Test that dereferencing a Klass that is the result of a decode(0) crashes accessing the nKlass guard zone
+ * @requires vm.bits == 64 & vm.debug == true & vm.flagless
+ * @requires os.family != "aix"
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ * @build jdk.test.whitebox.WhiteBox
+ * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox
+ * @run driver AccessZeroNKlassHitsProtectionZone coh_no_cds
+ */
+
+/*
+ * @test id=coh_cds
+ * @summary Test that dereferencing a Klass that is the result of a decode(0) crashes accessing the nKlass guard zone
+ * @requires vm.bits == 64 & vm.debug == true & vm.flagless
+ * @requires os.family != "aix"
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ * @build jdk.test.whitebox.WhiteBox
+ * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox
+ * @run driver AccessZeroNKlassHitsProtectionZone coh_cds
+ */
+
+import jdk.test.lib.HsErrFileUtils;
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.whitebox.WhiteBox;
+import jtreg.SkippedException;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.regex.Pattern;
+
+// Test that dereferencing a Klass that is the result of a narrowKlass=0 will give us immediate crashes
+// that hit the protection zone at the encoding base.
+public class AccessZeroNKlassHitsProtectionZone {
+
+    private static OutputAnalyzer run_test(boolean COH, boolean CDS, String forceBaseString) throws IOException, SkippedException {
+        ArrayList<String> args = new ArrayList<>();
+        args.add("-Xbootclasspath/a:.");
+        args.add("-XX:+UnlockDiagnosticVMOptions");
+        args.add("-XX:+WhiteBoxAPI");
+        args.add("-XX:CompressedClassSpaceSize=128m");
+        args.add("-Xmx128m");
+        args.add("-XX:-CreateCoredumpOnCrash");
+        args.add("-Xlog:metaspace*");
+        args.add("-Xlog:cds");
+        if (COH) {
+            args.add("-XX:+UnlockExperimentalVMOptions");
+            args.add("-XX:+UseCompactObjectHeaders");
+        }
+        if (CDS) {
+            args.add("-Xshare:on");
+        } else {
+            args.add("-Xshare:off");
+            args.add("-XX:CompressedClassSpaceBaseAddress=" + forceBaseString);
+        }
+        args.add(AccessZeroNKlassHitsProtectionZone.class.getName());
+        args.add("runwb");
+        ProcessBuilder pb = ProcessTools.createLimitedTestJavaProcessBuilder(args.toArray(new String[0]));
+
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.reportDiagnosticSummary();
+        return output;
+    }
+
+    private static void run_test(boolean COH, boolean CDS) throws IOException, SkippedException {
+        // Notes:
+        // We want to enforce a non-zero-based encoding, to test the protection page in that case. For zero-based
+        // encoding, the protection page would be at address zero; no need to test that.
+        // If CDS is on, we never use zero-based, forceBase is ignored.
+        // If CDS is off, we use forceBase to (somewhat) reliably force the encoding base to beyond 32G,
+        // in order to prevent zero-based encoding. Since that may fail, we try several times.
+        OutputAnalyzer output = null;
+        long forceBase = -1;
+        if (CDS) {
+            output = run_test(COH, CDS, "");
+            // Not all distributions build COH archives. We tolerate that.
+            if (COH) {
+                String s = output.firstMatch("Specified shared archive file not found .*coh.jsa");
+                if (s != null) {
+                    throw new SkippedException("Failed to find COH archive, was it not built? Skipping test.");
+                }
+            }
+        } else {
+            long g4 = 0x1_0000_0000L;
+            long start = g4 * 8; // 32g
+            long step = g4;
+            long end = start + step * 16;
+            for (forceBase = start; forceBase < end; forceBase += step) {
+                String thisBaseString = String.format("0x%016X", forceBase).toLowerCase();
+                output = run_test(COH, CDS, thisBaseString);
+                if (output.contains("CompressedClassSpaceBaseAddress=" + thisBaseString + " given, but reserving class space failed.")) {
+                    // try next one
+                } else if (output.contains("Successfully forced class space address to " + thisBaseString)) {
+                    break;
+                } else {
+                    throw new RuntimeException("Unexpected");
+                }
+            }
+            if (forceBase >= end) {
+                throw new SkippedException("Failed to force ccs to any of the given bases. Skipping test.");
+            }
+        }
+
+        // Parse the encoding base from the output. In case of CDS, it depends on ASLR. Even in case of CDS=off, we want
+        // to double-check it is the force address.
+        String nKlassBaseString = output.firstMatch("Narrow klass base: 0x([0-9a-f]+)", 1);
+        if (nKlassBaseString == null) {
+            throw new RuntimeException("did not find Narrow klass base in log output");
+        }
+        long nKlassBase = Long.valueOf(nKlassBaseString, 16);
+
+        if (!CDS && nKlassBase != forceBase) {
+            throw new RuntimeException("Weird - we should have mapped at force base"); // .. otherwise we would have skipped out above
+        }
+        if (nKlassBase == 0) {
+            throw new RuntimeException("We should not be running zero-based at this point.");
+        }
+
+        // Calculate the expected crash address pattern. The precise crash address is unknown, but should be located
+        // in the lower part of the guard page following the encoding base. We just accept any address matching the
+        // upper 52 bits (leaving 4K = 12 bits = 3 nibbles of wiggle room).
+        String expectedCrashAddressString = nKlassBaseString.substring(0, nKlassBaseString.length() - 3);
+
+        // output from whitebox function: Klass* should point to encoding base
+        output.shouldMatch("WB_DecodeNKlassAndAccessKlass: nk 0 k 0x" + nKlassBaseString);
+
+        // Then, we should have crashed
+        output.shouldNotHaveExitValue(0);
+        output.shouldContain("# A fatal error has been detected");
+
+        // The hs-err file should contain a reference to the nKlass protection zone, like this:
+        // "RDI=0x0000000800000000 points into nKlass protection zone"
+        File hsErrFile = HsErrFileUtils.openHsErrFileFromOutput(output);
+
+        ArrayList<Pattern> hsErrPatternList = new ArrayList<>();
+        hsErrPatternList.add(Pattern.compile(".*(SIGBUS|SIGSEGV|EXCEPTION_ACCESS_VIOLATION).*"));
+
+        hsErrPatternList.add(Pattern.compile(".*siginfo:.*" + expectedCrashAddressString + ".*"));
+        hsErrPatternList.add(Pattern.compile(".*" + expectedCrashAddressString + ".*points into nKlass protection zone.*"));
+        Pattern[] hsErrPattern = hsErrPatternList.toArray(new Pattern[0]);
+        HsErrFileUtils.checkHsErrFileContent(hsErrFile, hsErrPattern, true);
+    }
+
+    enum Argument { runwb, no_coh_no_cds, no_coh_cds, coh_no_cds, coh_cds };
+
+    public static void main(String[] args) throws Exception {
+        if (args.length != 1) {
+            throw new RuntimeException("Expecting one argument");
+        }
+        Argument arg = Argument.valueOf(args[0]);
+        System.out.println(arg);
+        switch (arg) {
+            case runwb         -> WhiteBox.getWhiteBox().decodeNKlassAndAccessKlass(0);
+            case no_coh_no_cds -> run_test(false, false);
+            case no_coh_cds    -> run_test(false, true);
+            case coh_no_cds    -> run_test(true, false);
+            case coh_cds       -> run_test(true, true);
+        }
+    }
+}
diff --git a/test/lib/jdk/test/whitebox/WhiteBox.java b/test/lib/jdk/test/whitebox/WhiteBox.java
index be9bc646ce4..c45aecbced5 100644
--- a/test/lib/jdk/test/whitebox/WhiteBox.java
+++ b/test/lib/jdk/test/whitebox/WhiteBox.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -642,6 +642,8 @@ public class WhiteBox {
   // Tests on ReservedSpace/VirtualSpace classes
   public native int stressVirtualSpaceResize(long reservedSpaceSize, long magnitude, long iterations);
   public native void readFromNoaccessArea();
+
+  public native void decodeNKlassAndAccessKlass(int nKlass);
   public native long getThreadStackSize();
   public native long getThreadRemainingStackSize();
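
[Editorial aside, not part of the patch: the failure mode this change targets falls out of
the narrow Klass decode arithmetic. A hedged sketch; decode_raw() is illustrative and not a
HotSpot function (the real decode lives in CompressedKlassPointers::decode_not_null_without_asserts()):

    #include <cstdint>

    typedef uint32_t narrowKlass;

    // Decoding maps a narrow Klass ID back to a Klass* as base + (nk << shift).
    // For nk == 0 the result is exactly the encoding base. With this patch, the
    // first MetaspaceShared::protection_zone_size() bytes at the base are mapped
    // inaccessible, so the first dereference of such a bogus Klass* (typically a
    // vtable load) faults immediately, instead of silently reading whatever
    // metadata happens to live at the bottom of the class space or archive.
    inline uintptr_t decode_raw(narrowKlass nk, uintptr_t base, int shift) {
      return base + ((uintptr_t)nk << shift);
    }

This is also why, in map_archives(), the encoding base and the Klass range start now differ
by prot_zone_size: valid Klass structures begin only above the zone, while narrowKlass 0
keeps decoding to the now-protected base.]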