Mirror of https://github.com/openjdk/jdk.git, synced 2026-01-28 12:09:14 +00:00

8353637: ZGC: Discontiguous memory reservation is broken on Windows

Co-authored-by: Axel Boldt-Christmas <aboldtch@openjdk.org>
Reviewed-by: jsikstro, aboldtch, eosterlund

parent: c494a00a66
commit: 6ab1647af2
@@ -32,7 +32,7 @@ void ZVirtualMemoryManager::pd_initialize_before_reserve() {
   // Does nothing
 }
 
-void ZVirtualMemoryManager::pd_initialize_after_reserve() {
+void ZVirtualMemoryManager::pd_register_callbacks(ZMemoryManager* manager) {
   // Does nothing
 }
 
@@ -33,7 +33,7 @@
 class ZVirtualMemoryManagerImpl : public CHeapObj<mtGC> {
 public:
   virtual void initialize_before_reserve() {}
-  virtual void initialize_after_reserve(ZMemoryManager* manager) {}
+  virtual void register_callbacks(ZMemoryManager* manager) {}
   virtual bool reserve(zaddress_unsafe addr, size_t size) = 0;
   virtual void unreserve(zaddress_unsafe addr, size_t size) = 0;
 };
@@ -47,7 +47,7 @@ public:
 class ZVirtualMemoryManagerSmallPages : public ZVirtualMemoryManagerImpl {
 private:
   class PlaceholderCallbacks : public AllStatic {
-  public:
+  private:
     static void split_placeholder(zoffset start, size_t size) {
       ZMapper::split_placeholder(ZOffset::address_unsafe(start), size);
     }
@@ -79,99 +79,93 @@ private:
       }
     }
 
-    // Called when a memory area is returned to the memory manager but can't
-    // be merged with an already existing area. Make sure this area is covered
-    // by a single placeholder.
-    static void create_callback(const ZMemory* area) {
-      assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned");
+    // Callback implementations
 
-      coalesce_into_one_placeholder(area->start(), area->size());
+    // Called when a memory area is going to be handed out to be used.
+    //
+    // Splits the memory area into granule-sized placeholders.
+    static void prepare_for_hand_out_callback(const ZMemory& area) {
+      assert(is_aligned(area.size(), ZGranuleSize), "Must be granule aligned");
+
+      split_into_granule_sized_placeholders(area.start(), area.size());
     }
 
-    // Called when a complete memory area in the memory manager is allocated.
-    // Create granule sized placeholders for the entire area.
-    static void destroy_callback(const ZMemory* area) {
-      assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned");
+    // Called when a memory area is handed back to the memory manager.
+    //
+    // Combines the granule-sized placeholders into one placeholder.
+    static void prepare_for_hand_back_callback(const ZMemory& area) {
+      assert(is_aligned(area.size(), ZGranuleSize), "Must be granule aligned");
 
-      split_into_granule_sized_placeholders(area->start(), area->size());
+      coalesce_into_one_placeholder(area.start(), area.size());
     }
 
-    // Called when a memory area is allocated at the front of an exising memory area.
-    // Turn the first part of the memory area into granule sized placeholders.
-    static void shrink_from_front_callback(const ZMemory* area, size_t size) {
-      assert(area->size() > size, "Must be larger than what we try to split out");
-      assert(is_aligned(size, ZGranuleSize), "Must be granule aligned");
+    // Called when inserting a memory area and it can be merged with an
+    // existing, adjacent memory area.
+    //
+    // Coalesces the underlying placeholders into one.
+    static void grow_callback(const ZMemory& from, const ZMemory& to) {
+      assert(is_aligned(from.size(), ZGranuleSize), "Must be granule aligned");
+      assert(is_aligned(to.size(), ZGranuleSize), "Must be granule aligned");
+      assert(from != to, "Must have grown");
+      assert(to.contains(from), "Must be within");
 
-      // Split the area into two placeholders
-      split_placeholder(area->start(), size);
+      coalesce_into_one_placeholder(to.start(), to.size());
+    }
 
-      // Split the first part into granule sized placeholders
-      split_into_granule_sized_placeholders(area->start(), size);
-    }
+    // Called when a memory area is removed from the front or back of an existing
+    // memory area.
+    //
+    // Splits the memory into two placeholders.
+    static void shrink_callback(const ZMemory& from, const ZMemory& to) {
+      assert(is_aligned(from.size(), ZGranuleSize), "Must be granule aligned");
+      assert(is_aligned(to.size(), ZGranuleSize), "Must be granule aligned");
+      assert(from != to, "Must have shrunk");
+      assert(from.contains(to), "Must be larger than what we try to split out");
+      assert(from.start() == to.start() || from.end() == to.end(),
+             "Only verified to work if we split a placeholder into two placeholders");
 
-    // Called when a memory area is allocated at the end of an existing memory area.
-    // Turn the second part of the memory area into granule sized placeholders.
-    static void shrink_from_back_callback(const ZMemory* area, size_t size) {
-      assert(area->size() > size, "Must be larger than what we try to split out");
-      assert(is_aligned(size, ZGranuleSize), "Must be granule aligned");
-
-      // Split the area into two placeholders
-      const zoffset start = to_zoffset(area->end() - size);
-      split_placeholder(start, size);
-
-      // Split the second part into granule sized placeholders
-      split_into_granule_sized_placeholders(start, size);
-    }
-
-    // Called when freeing a memory area and it can be merged at the start of an
-    // existing area. Coalesce the underlying placeholders into one.
-    static void grow_from_front_callback(const ZMemory* area, size_t size) {
-      assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned");
-
-      const zoffset start = area->start() - size;
-      coalesce_into_one_placeholder(start, area->size() + size);
-    }
-
-    // Called when freeing a memory area and it can be merged at the end of an
-    // existing area. Coalesce the underlying placeholders into one.
-    static void grow_from_back_callback(const ZMemory* area, size_t size) {
-      assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned");
-
-      coalesce_into_one_placeholder(area->start(), area->size() + size);
-    }
-
-    static void register_with(ZMemoryManager* manager) {
+      // Split the area into two placeholders
+      split_placeholder(to.start(), to.size());
+    }
+
+  public:
+    static ZMemoryManager::Callbacks callbacks() {
       // Each reserved virtual memory address area registered in _manager is
       // exactly covered by a single placeholder. Callbacks are installed so
       // that whenever a memory area changes, the corresponding placeholder
       // is adjusted.
       //
-      // The create and grow callbacks are called when virtual memory is
-      // returned to the memory manager. The new memory area is then covered
-      // by a new single placeholder.
+      // The prepare_for_hand_out callback is called when virtual memory is
+      // handed out to callers. The memory area is split into granule-sized
+      // placeholders.
       //
-      // The destroy and shrink callbacks are called when virtual memory is
-      // allocated from the memory manager. The memory area is then split
-      // into granule-sized placeholders.
+      // The prepare_for_hand_back callback is called when previously handed
+      // out virtual memory is handed back to the memory manager. The
+      // returned memory area is then covered by a new single placeholder.
+      //
+      // The grow callback is called when a virtual memory area grows. The
+      // resulting memory area is then covered by a single placeholder.
+      //
+      // The shrink callback is called when a virtual memory area is split into
+      // two parts. The two resulting memory areas are then covered by two
+      // separate placeholders.
       //
       // See comment in zMapper_windows.cpp explaining why placeholders are
       // split into ZGranuleSize sized placeholders.
 
       ZMemoryManager::Callbacks callbacks;
 
-      callbacks._create = &create_callback;
-      callbacks._destroy = &destroy_callback;
-      callbacks._shrink_from_front = &shrink_from_front_callback;
-      callbacks._shrink_from_back = &shrink_from_back_callback;
-      callbacks._grow_from_front = &grow_from_front_callback;
-      callbacks._grow_from_back = &grow_from_back_callback;
+      callbacks._prepare_for_hand_out = &prepare_for_hand_out_callback;
+      callbacks._prepare_for_hand_back = &prepare_for_hand_back_callback;
+      callbacks._grow = &grow_callback;
+      callbacks._shrink = &shrink_callback;
 
-      manager->register_callbacks(callbacks);
+      return callbacks;
     }
   };
 
-  virtual void initialize_after_reserve(ZMemoryManager* manager) {
-    PlaceholderCallbacks::register_with(manager);
+  virtual void register_callbacks(ZMemoryManager* manager) {
+    manager->register_callbacks(PlaceholderCallbacks::callbacks());
   }
 
   virtual bool reserve(zaddress_unsafe addr, size_t size) {
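The callbacks above bottom out in the Win32 placeholder primitives. As context, here is a minimal standalone sketch, not part of this change, of the split/coalesce round-trip they perform, using the documented VirtualAlloc2/VirtualFree placeholder flags. The 2 MB granule constant is an assumption for illustration (ZGC's real ZGranuleSize is defined elsewhere); this requires Windows 10 version 1803 or later and linking against mincore/onecore.

#include <windows.h>
#include <cstdio>

// Assumed granule size, for illustration only.
static const SIZE_T kGranule = 2 * 1024 * 1024;

int main() {
  // Reserve one placeholder covering four granules.
  void* base = VirtualAlloc2(nullptr, nullptr, 4 * kGranule,
                             MEM_RESERVE | MEM_RESERVE_PLACEHOLDER,
                             PAGE_NOACCESS, nullptr, 0);
  if (base == nullptr) {
    return 1;
  }

  // "prepare_for_hand_out": split the single placeholder into granule-sized
  // placeholders so each granule can be mapped and unmapped independently.
  // The last granule needs no split; it is already a placeholder boundary.
  char* const p = static_cast<char*>(base);
  for (SIZE_T i = 0; i < 3 * kGranule; i += kGranule) {
    VirtualFree(p + i, kGranule, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);
  }

  // "prepare_for_hand_back": coalesce the adjacent granule-sized
  // placeholders back into one placeholder covering the whole range.
  VirtualFree(base, 4 * kGranule, MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS);

  // Finally release the single placeholder for real.
  VirtualFree(base, 0, MEM_RELEASE);
  printf("placeholder split/coalesce round-trip done\n");
  return 0;
}

Handed-out granules are later mapped over their placeholders with MEM_REPLACE_PLACEHOLDER, which only works if the placeholder exactly covers the target range; that is why each memory area in the manager must be covered by exactly one placeholder.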
@@ -220,8 +214,8 @@ void ZVirtualMemoryManager::pd_initialize_before_reserve() {
   _impl->initialize_before_reserve();
 }
 
-void ZVirtualMemoryManager::pd_initialize_after_reserve() {
-  _impl->initialize_after_reserve(&_manager);
+void ZVirtualMemoryManager::pd_register_callbacks(ZMemoryManager* manager) {
+  _impl->register_callbacks(manager);
 }
 
 bool ZVirtualMemoryManager::pd_reserve(zaddress_unsafe addr, size_t size) {
@@ -29,6 +29,8 @@
 class CollectedHeap;
 
 class ZArguments : public GCArguments {
+  friend class ZTest;
+
 private:
   static void select_max_gc_threads();
@@ -37,6 +37,8 @@ public:
 };
 
 class ZInitialize : public AllStatic {
+  friend class ZTest;
+
 private:
   static constexpr size_t ErrorMessageLength = 256;
@@ -25,56 +25,47 @@
 #include "gc/z/zLock.inline.hpp"
 #include "gc/z/zMemory.inline.hpp"
 
-ZMemory* ZMemoryManager::create(zoffset start, size_t size) {
-  ZMemory* const area = new ZMemory(start, size);
-  if (_callbacks._create != nullptr) {
-    _callbacks._create(area);
-  }
-  return area;
-}
-
-void ZMemoryManager::destroy(ZMemory* area) {
-  if (_callbacks._destroy != nullptr) {
-    _callbacks._destroy(area);
-  }
-  delete area;
-}
-
 void ZMemoryManager::shrink_from_front(ZMemory* area, size_t size) {
-  if (_callbacks._shrink_from_front != nullptr) {
-    _callbacks._shrink_from_front(area, size);
+  if (_callbacks._shrink != nullptr) {
+    const ZMemory* from = area;
+    const ZMemory to(area->start() + size, area->size() - size);
+    _callbacks._shrink(*from, to);
   }
   area->shrink_from_front(size);
 }
 
 void ZMemoryManager::shrink_from_back(ZMemory* area, size_t size) {
-  if (_callbacks._shrink_from_back != nullptr) {
-    _callbacks._shrink_from_back(area, size);
+  if (_callbacks._shrink != nullptr) {
+    const ZMemory* from = area;
+    const ZMemory to(area->start(), area->size() - size);
+    _callbacks._shrink(*from, to);
   }
   area->shrink_from_back(size);
 }
 
 void ZMemoryManager::grow_from_front(ZMemory* area, size_t size) {
-  if (_callbacks._grow_from_front != nullptr) {
-    _callbacks._grow_from_front(area, size);
+  if (_callbacks._grow != nullptr) {
+    const ZMemory* from = area;
+    const ZMemory to(area->start() - size, area->size() + size);
+    _callbacks._grow(*from, to);
   }
   area->grow_from_front(size);
 }
 
 void ZMemoryManager::grow_from_back(ZMemory* area, size_t size) {
-  if (_callbacks._grow_from_back != nullptr) {
-    _callbacks._grow_from_back(area, size);
+  if (_callbacks._grow != nullptr) {
+    const ZMemory* from = area;
+    const ZMemory to(area->start(), area->size() + size);
+    _callbacks._grow(*from, to);
  }
   area->grow_from_back(size);
 }
 
 ZMemoryManager::Callbacks::Callbacks()
-  : _create(nullptr),
-    _destroy(nullptr),
-    _shrink_from_front(nullptr),
-    _shrink_from_back(nullptr),
-    _grow_from_front(nullptr),
-    _grow_from_back(nullptr) {}
+  : _prepare_for_hand_out(nullptr),
+    _prepare_for_hand_back(nullptr),
+    _grow(nullptr),
+    _shrink(nullptr) {}
 
 ZMemoryManager::ZMemoryManager()
   : _freelist(),
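To make the new from/to convention concrete, here is a toy model, with invented names and no HotSpot dependencies, of how shrink_from_front computes the callback arguments before mutating the area: the callback always sees the whole area as "from" and what will remain as "to".

#include <cassert>
#include <cstddef>
#include <cstdio>

// Toy stand-ins for ZMemory and the resize callback type, illustration only.
struct Memory {
  size_t _start;
  size_t _size;
  size_t start() const { return _start; }
  size_t size() const { return _size; }
};

typedef void (*CallbackResize)(const Memory& from, const Memory& to);

static void shrink_logger(const Memory& from, const Memory& to) {
  printf("shrink: [%zu, +%zu) -> [%zu, +%zu)\n",
         from.start(), from.size(), to.start(), to.size());
}

// Mirrors the shape of the patched shrink_from_front: compute "from" and
// "to" first, invoke the callback, then mutate the area.
static void shrink_from_front(Memory* area, size_t size, CallbackResize cb) {
  if (cb != nullptr) {
    const Memory from = *area;
    const Memory to = { area->start() + size, area->size() - size };
    cb(from, to);
  }
  area->_start += size;
  area->_size -= size;
}

int main() {
  Memory area = { 0, 8 };
  shrink_from_front(&area, 2, &shrink_logger);  // prints [0, +8) -> [2, +6)
  assert(area.start() == 2 && area.size() == 6);
  return 0;
}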
@@ -118,18 +109,24 @@ zoffset ZMemoryManager::alloc_low_address(size_t size) {
   ZListIterator<ZMemory> iter(&_freelist);
   for (ZMemory* area; iter.next(&area);) {
     if (area->size() >= size) {
+      zoffset start;
+
       if (area->size() == size) {
         // Exact match, remove area
-        const zoffset start = area->start();
+        start = area->start();
         _freelist.remove(area);
-        destroy(area);
-        return start;
+        delete area;
       } else {
         // Larger than requested, shrink area
-        const zoffset start = area->start();
+        start = area->start();
         shrink_from_front(area, size);
-        return start;
       }
+
+      if (_callbacks._prepare_for_hand_out != nullptr) {
+        _callbacks._prepare_for_hand_out(ZMemory(start, size));
+      }
+
+      return start;
     }
   }
 
@@ -142,20 +139,24 @@ zoffset ZMemoryManager::alloc_low_address_at_most(size_t size, size_t* allocated
 
   ZMemory* const area = _freelist.first();
   if (area != nullptr) {
+    const zoffset start = area->start();
+
     if (area->size() <= size) {
       // Smaller than or equal to requested, remove area
-      const zoffset start = area->start();
-      *allocated = area->size();
       _freelist.remove(area);
-      destroy(area);
-      return start;
+      *allocated = area->size();
+      delete area;
     } else {
       // Larger than requested, shrink area
-      const zoffset start = area->start();
       shrink_from_front(area, size);
       *allocated = size;
-      return start;
     }
+
+    if (_callbacks._prepare_for_hand_out != nullptr) {
+      _callbacks._prepare_for_hand_out(ZMemory(start, *allocated));
+    }
+
+    return start;
   }
 
   // Out of memory
@@ -169,17 +170,24 @@ zoffset ZMemoryManager::alloc_high_address(size_t size) {
   ZListReverseIterator<ZMemory> iter(&_freelist);
   for (ZMemory* area; iter.next(&area);) {
     if (area->size() >= size) {
+      zoffset start;
+
       if (area->size() == size) {
         // Exact match, remove area
-        const zoffset start = area->start();
+        start = area->start();
         _freelist.remove(area);
-        destroy(area);
-        return start;
+        delete area;
       } else {
         // Larger than requested, shrink area
         shrink_from_back(area, size);
-        return to_zoffset(area->end());
+        start = to_zoffset(area->end());
       }
+
+      if (_callbacks._prepare_for_hand_out != nullptr) {
+        _callbacks._prepare_for_hand_out(ZMemory(start, size));
+      }
+
+      return start;
     }
   }
 
@@ -187,12 +195,10 @@ zoffset ZMemoryManager::alloc_high_address(size_t size) {
   return zoffset(UINTPTR_MAX);
 }
 
-void ZMemoryManager::free(zoffset start, size_t size) {
+void ZMemoryManager::move_into(zoffset start, size_t size) {
   assert(start != zoffset(UINTPTR_MAX), "Invalid address");
   const zoffset_end end = to_zoffset_end(start, size);
 
-  ZLocker<ZLock> locker(&_lock);
-
   ZListIterator<ZMemory> iter(&_freelist);
   for (ZMemory* area; iter.next(&area);) {
     if (start < area->start()) {
@@ -213,7 +219,7 @@ void ZMemoryManager::free(zoffset start, size_t size) {
     } else {
       // Insert new area before current area
      assert(end < area->start(), "Areas must not overlap");
-      ZMemory* const new_area = create(start, size);
+      ZMemory* const new_area = new ZMemory(start, size);
       _freelist.insert_before(area, new_area);
     }
 
@@ -229,7 +235,50 @@ void ZMemoryManager::free(zoffset start, size_t size) {
     grow_from_back(last, size);
   } else {
     // Insert new area last
-    ZMemory* const new_area = create(start, size);
+    ZMemory* const new_area = new ZMemory(start, size);
     _freelist.insert_last(new_area);
   }
 }
+
+void ZMemoryManager::free(zoffset start, size_t size) {
+  ZLocker<ZLock> locker(&_lock);
+
+  if (_callbacks._prepare_for_hand_back != nullptr) {
+    _callbacks._prepare_for_hand_back(ZMemory(start, size));
+  }
+
+  move_into(start, size);
+}
+
+void ZMemoryManager::register_range(zoffset start, size_t size) {
+  // Note that there's no need to call the _prepare_for_hand_back when memory
+  // is added the first time. We don't have to undo the effects of a previous
+  // _prepare_for_hand_out callback.
+
+  // No need to lock during initialization.
+
+  move_into(start, size);
+}
+
+bool ZMemoryManager::unregister_first(zoffset* start_out, size_t* size_out) {
+  // Note that this doesn't hand out memory to be used, so we don't call the
+  // _prepare_for_hand_out callback.
+
+  ZLocker<ZLock> locker(&_lock);
+
+  if (_freelist.is_empty()) {
+    return false;
+  }
+
+  // Don't invoke the _prepare_for_hand_out callback
+
+  ZMemory* const area = _freelist.remove_first();
+
+  // Return the range
+  *start_out = area->start();
+  *size_out = area->size();
+
+  delete area;
+
+  return true;
+}
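The hand-out/hand-back split gives the manager a clean life cycle: register_range at initialization with no callbacks, _prepare_for_hand_out on every allocation, _prepare_for_hand_back on every free, and unregister_first to drain the free list at teardown, again with no callbacks. A toy single-threaded model of that life cycle follows; the names mirror the patch but the implementation is invented for illustration.

#include <cstddef>
#include <cstdio>
#include <deque>

struct Range { size_t start; size_t size; };

struct ToyManager {
  std::deque<Range> freelist;

  // Initialization: insert without firing any callback.
  void register_range(size_t start, size_t size) {
    freelist.push_back({start, size});
  }

  // Hand out: in the real code the prepare_for_hand_out callback splits
  // the covering placeholder into granule-sized placeholders here.
  size_t alloc(size_t size) {
    Range& r = freelist.front();
    const size_t start = r.start;
    r.start += size;
    r.size -= size;
    printf("prepare_for_hand_out [%zu, +%zu)\n", start, size);
    return start;
  }

  // Hand back: the prepare_for_hand_back callback coalesces placeholders
  // before the range rejoins the free list (real code merges neighbors).
  void free(size_t start, size_t size) {
    printf("prepare_for_hand_back [%zu, +%zu)\n", start, size);
    freelist.push_front({start, size});
  }

  // Teardown: pop ranges without any callback at all.
  bool unregister_first(Range* out) {
    if (freelist.empty()) {
      return false;
    }
    *out = freelist.front();
    freelist.pop_front();
    return true;
  }
};

int main() {
  ToyManager m;
  m.register_range(0, 8);
  const size_t a = m.alloc(2);
  m.free(a, 2);
  for (Range r; m.unregister_first(&r);) {
    printf("unreserve [%zu, +%zu)\n", r.start, r.size);
  }
  return 0;
}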
@@ -44,6 +44,11 @@ public:
   zoffset_end end() const;
   size_t size() const;
 
+  bool operator==(const ZMemory& other) const;
+  bool operator!=(const ZMemory& other) const;
+
+  bool contains(const ZMemory& other) const;
+
   void shrink_from_front(size_t size);
   void shrink_from_back(size_t size);
   void grow_from_front(size_t size);
@@ -51,17 +56,17 @@ public:
 };
 
 class ZMemoryManager {
+  friend class ZVirtualMemoryManagerTest;
+
 public:
-  typedef void (*CreateDestroyCallback)(const ZMemory* area);
-  typedef void (*ResizeCallback)(const ZMemory* area, size_t size);
+  typedef void (*CallbackPrepare)(const ZMemory& area);
+  typedef void (*CallbackResize)(const ZMemory& from, const ZMemory& to);
 
   struct Callbacks {
-    CreateDestroyCallback _create;
-    CreateDestroyCallback _destroy;
-    ResizeCallback _shrink_from_front;
-    ResizeCallback _shrink_from_back;
-    ResizeCallback _grow_from_front;
-    ResizeCallback _grow_from_back;
+    CallbackPrepare _prepare_for_hand_out;
+    CallbackPrepare _prepare_for_hand_back;
+    CallbackResize _grow;
+    CallbackResize _shrink;
 
     Callbacks();
   };
@@ -71,13 +76,13 @@ private:
   ZList<ZMemory> _freelist;
   Callbacks _callbacks;
 
-  ZMemory* create(zoffset start, size_t size);
-  void destroy(ZMemory* area);
   void shrink_from_front(ZMemory* area, size_t size);
   void shrink_from_back(ZMemory* area, size_t size);
   void grow_from_front(ZMemory* area, size_t size);
   void grow_from_back(ZMemory* area, size_t size);
 
+  void move_into(zoffset start, size_t size);
+
 public:
   ZMemoryManager();
 
@@ -92,6 +97,8 @@ public:
   zoffset alloc_high_address(size_t size);
 
   void free(zoffset start, size_t size);
+  void register_range(zoffset start, size_t size);
+  bool unregister_first(zoffset* start_out, size_t* size_out);
 };
 
 #endif // SHARE_GC_Z_ZMEMORY_HPP
@@ -46,6 +46,18 @@ inline size_t ZMemory::size() const {
   return end() - start();
 }
 
+inline bool ZMemory::operator==(const ZMemory& other) const {
+  return _start == other._start && _end == other._end;
+}
+
+inline bool ZMemory::operator!=(const ZMemory& other) const {
+  return !operator==(other);
+}
+
+inline bool ZMemory::contains(const ZMemory& other) const {
+  return _start <= other._start && other.end() <= end();
+}
+
 inline void ZMemory::shrink_from_front(size_t size) {
   assert(this->size() > size, "Too small");
   _start += size;
@@ -40,6 +40,26 @@ void ZNMT::reserve(zaddress_unsafe start, size_t size) {
   MemTracker::record_virtual_memory_reserve((address)untype(start), size, CALLER_PC, mtJavaHeap);
 }
 
+void ZNMT::unreserve(zaddress_unsafe start, size_t size) {
+  precond(is_aligned(untype(start), ZGranuleSize));
+  precond(is_aligned(size, ZGranuleSize));
+
+  if (MemTracker::enabled()) {
+    // We are the owner of the reserved memory, and any failure to unreserve
+    // is fatal, so we don't need to hold a lock while unreserving memory.
+    MemTracker::NmtVirtualMemoryLocker nvml;
+
+    // The current NMT implementation does not support unreserving a memory
+    // region that was built up from smaller memory reservations. Work around
+    // this problem by splitting the work up into granule-sized chunks, which
+    // is the smallest unit we ever reserve.
+    for (size_t i = 0; i < size; i += ZGranuleSize) {
+      MemTracker::record_virtual_memory_release((address)untype(start + i), ZGranuleSize);
+    }
+  }
+}
+
 void ZNMT::commit(zoffset offset, size_t size) {
   MemTracker::allocate_memory_in(ZNMT::_device, untype(offset), size, CALLER_PC, mtJavaHeap);
 }
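The chunked release is the essential workaround pattern here: a region assembled from many small reservations is released, tracking-wise, in fixed granule-sized steps. A minimal standalone sketch of the pattern, with an assumed 2M granule and an invented stand-in for the tracker call (the real call in the patch is MemTracker::record_virtual_memory_release):

#include <cstddef>
#include <cstdio>

// Hypothetical stand-in for the tracker's per-region release hook.
static void record_release(size_t addr, size_t size) {
  printf("release [0x%zx, +0x%zx)\n", addr, size);
}

// Release a region in granule-sized chunks, the smallest unit ever
// reserved, because the tracker cannot unreserve a region spanning
// multiple original reservations in one call.
static void unreserve_in_chunks(size_t start, size_t size, size_t granule) {
  for (size_t i = 0; i < size; i += granule) {
    record_release(start + i, granule);
  }
}

int main() {
  const size_t granule = 0x200000;  // assumed 2M granule for illustration
  unreserve_in_chunks(0x10000000, 3 * granule, granule);
  return 0;
}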
@@ -42,6 +42,8 @@ public:
   static void initialize();
 
   static void reserve(zaddress_unsafe start, size_t size);
+  static void unreserve(zaddress_unsafe start, size_t size);
+
   static void commit(zoffset offset, size_t size);
   static void uncommit(zoffset offset, size_t size);
@@ -238,7 +238,7 @@ ZPhysicalMemory ZPhysicalMemory::split_committed() {
 ZPhysicalMemoryManager::ZPhysicalMemoryManager(size_t max_capacity)
   : _backing(max_capacity) {
   // Make the whole range free
-  _manager.free(zoffset(0), max_capacity);
+  _manager.register_range(zoffset(0), max_capacity);
 }
 
 bool ZPhysicalMemoryManager::is_initialized() const {
@@ -42,6 +42,9 @@ ZVirtualMemoryManager::ZVirtualMemoryManager(size_t max_capacity)
   // Initialize platform specific parts before reserving address space
   pd_initialize_before_reserve();
 
+  // Register the Windows callbacks
+  pd_register_callbacks(&_manager);
+
   // Reserve address space
   if (!reserve(max_capacity)) {
     ZInitialize::error_d("Failed to reserve enough address space for Java heap");
@@ -51,9 +54,6 @@ ZVirtualMemoryManager::ZVirtualMemoryManager(size_t max_capacity)
   // Set ZAddressOffsetMax to the highest address end available after reservation
   ZAddressOffsetMax = untype(highest_available_address_end());
 
-  // Initialize platform specific parts after reserving address space
-  pd_initialize_after_reserve();
-
   // Successfully initialized
   _initialized = true;
 }
@@ -154,7 +154,7 @@ bool ZVirtualMemoryManager::reserve_contiguous(zoffset start, size_t size) {
   ZNMT::reserve(addr, size);
 
   // Make the address range free
-  _manager.free(start, size);
+  _manager.register_range(start, size);
 
   return true;
 }
@@ -211,6 +211,25 @@ bool ZVirtualMemoryManager::reserve(size_t max_capacity) {
   return reserved >= max_capacity;
 }
 
+void ZVirtualMemoryManager::unreserve(zoffset start, size_t size) {
+  const zaddress_unsafe addr = ZOffset::address_unsafe(start);
+
+  // Unregister the reserved memory from NMT
+  ZNMT::unreserve(addr, size);
+
+  // Unreserve address space
+  pd_unreserve(addr, size);
+}
+
+void ZVirtualMemoryManager::unreserve_all() {
+  zoffset start;
+  size_t size;
+
+  while (_manager.unregister_first(&start, &size)) {
+    unreserve(start, size);
+  }
+}
+
 bool ZVirtualMemoryManager::is_initialized() const {
   return _initialized;
 }
@@ -48,6 +48,7 @@ public:
 
 class ZVirtualMemoryManager {
   friend class ZMapperTest;
+  friend class ZVirtualMemoryManagerTest;
 
 private:
   static size_t calculate_min_range(size_t size);
@@ -58,7 +59,7 @@ private:
 
   // Platform specific implementation
   void pd_initialize_before_reserve();
-  void pd_initialize_after_reserve();
+  void pd_register_callbacks(ZMemoryManager* manager);
   bool pd_reserve(zaddress_unsafe addr, size_t size);
   void pd_unreserve(zaddress_unsafe addr, size_t size);
 
@@ -68,6 +69,8 @@ private:
   size_t reserve_discontiguous(size_t size);
   bool reserve(size_t max_capacity);
 
+  void unreserve(zoffset start, size_t size);
+
   DEBUG_ONLY(size_t force_reserve_discontiguous(size_t size);)
 
 public:
@@ -81,6 +84,8 @@ public:
 
   ZVirtualMemory alloc(size_t size, bool force_low_address);
   void free(const ZVirtualMemory& vmem);
+
+  void unreserve_all();
 };
 
 #endif // SHARE_GC_Z_ZVIRTUALMEMORY_HPP
@@ -29,182 +29,71 @@
 #include "gc/z/zMapper_windows.hpp"
 #include "gc/z/zMemory.inline.hpp"
 #include "gc/z/zSyscall_windows.hpp"
-#include "gc/z/zVirtualMemory.hpp"
+#include "gc/z/zVirtualMemory.inline.hpp"
 #include "runtime/os.hpp"
-#include "unittest.hpp"
+#include "zunittest.hpp"
 
 using namespace testing;
 
-#define EXPECT_ALLOC_OK(offset) EXPECT_NE(offset, zoffset(UINTPTR_MAX))
-
-class ZMapperTest : public Test {
+class ZMapperTest : public ZTest {
 private:
-  static constexpr size_t ZMapperTestReservationSize = 32 * M;
+  static constexpr size_t ReservationSize = 32 * M;
 
-  static bool _initialized;
-  static ZMemoryManager* _va;
-  static ZVirtualMemoryManager* _vmm;
-  static bool _has_unreserved;
+  ZVirtualMemoryManager* _vmm;
+  ZMemoryManager* _va;
 
 public:
-  bool reserve_for_test() {
-    // Initialize platform specific parts before reserving address space
-    _vmm->pd_initialize_before_reserve();
-
-    // Reserve address space
-    if (!_vmm->pd_reserve(ZOffset::address_unsafe(zoffset(0)), ZMapperTestReservationSize)) {
-      return false;
-    }
-
-    // Make the address range free before setting up callbacks below
-    _va->free(zoffset(0), ZMapperTestReservationSize);
-
-    // Initialize platform specific parts after reserving address space
-    _vmm->pd_initialize_after_reserve();
-
-    return true;
-  }
-
   virtual void SetUp() {
     // Only run test on supported Windows versions
-    if (!ZSyscall::is_supported()) {
+    if (!is_os_supported()) {
       GTEST_SKIP() << "Requires Windows version 1803 or later";
       return;
     }
 
-    ZSyscall::initialize();
-    ZGlobalsPointers::initialize();
-
     // Fake a ZVirtualMemoryManager
     _vmm = (ZVirtualMemoryManager*)os::malloc(sizeof(ZVirtualMemoryManager), mtTest);
+    _vmm = ::new (_vmm) ZVirtualMemoryManager(ReservationSize);
 
-    // Construct its internal ZMemoryManager
-    _va = new (&_vmm->_manager) ZMemoryManager();
-
-    // Reserve address space for the test
-    if (!reserve_for_test()) {
+    if (_vmm->reserved() != ReservationSize) {
       GTEST_SKIP() << "Failed to reserve address space";
       return;
     }
-
-    _initialized = true;
-    _has_unreserved = false;
   }
 
   virtual void TearDown() {
-    if (!ZSyscall::is_supported()) {
+    if (!is_os_supported()) {
       // Test skipped, nothing to cleanup
       return;
     }
 
-    if (_initialized && !_has_unreserved) {
-      _vmm->pd_unreserve(ZOffset::address_unsafe(zoffset(0)), 0);
-    }
+    // Best-effort cleanup
+    _vmm->unreserve_all();
+    _vmm->~ZVirtualMemoryManager();
     os::free(_vmm);
   }
 
-  static void test_unreserve() {
+  void test_unreserve() {
     zoffset bottom = _va->alloc_low_address(ZGranuleSize);
-    zoffset top = _va->alloc_high_address(ZGranuleSize);
+    zoffset middle = _va->alloc_low_address(ZGranuleSize);
+    zoffset top = _va->alloc_low_address(ZGranuleSize);
+
+    ASSERT_EQ(bottom, zoffset(0));
+    ASSERT_EQ(middle, bottom + 1 * ZGranuleSize);
+    ASSERT_EQ(top, bottom + 2 * ZGranuleSize);
 
     // Unreserve the middle part
-    ZMapper::unreserve(ZOffset::address_unsafe(bottom + ZGranuleSize), ZGranuleSize);
+    ZMapper::unreserve(ZOffset::address_unsafe(middle), ZGranuleSize);
 
     // Make sure that we still can unreserve the memory before and after
     ZMapper::unreserve(ZOffset::address_unsafe(bottom), ZGranuleSize);
     ZMapper::unreserve(ZOffset::address_unsafe(top), ZGranuleSize);
-
-    _has_unreserved = true;
   }
-
-  static void test_alloc_low_address() {
-    // Verify that we get placeholder for first granule
-    zoffset bottom = _va->alloc_low_address(ZGranuleSize);
-    EXPECT_ALLOC_OK(bottom);
-
-    _va->free(bottom, ZGranuleSize);
-
-    // Alloc something larger than a granule and free it
-    bottom = _va->alloc_low_address(ZGranuleSize * 3);
-    EXPECT_ALLOC_OK(bottom);
-
-    _va->free(bottom, ZGranuleSize * 3);
-
-    // Free with more memory allocated
-    bottom = _va->alloc_low_address(ZGranuleSize);
-    EXPECT_ALLOC_OK(bottom);
-
-    zoffset next = _va->alloc_low_address(ZGranuleSize);
-    EXPECT_ALLOC_OK(next);
-
-    _va->free(bottom, ZGranuleSize);
-    _va->free(next, ZGranuleSize);
-  }
-
-  static void test_alloc_high_address() {
-    // Verify that we get placeholder for last granule
-    zoffset high = _va->alloc_high_address(ZGranuleSize);
-    EXPECT_ALLOC_OK(high);
-
-    zoffset prev = _va->alloc_high_address(ZGranuleSize);
-    EXPECT_ALLOC_OK(prev);
-
-    _va->free(high, ZGranuleSize);
-    _va->free(prev, ZGranuleSize);
-
-    // Alloc something larger than a granule and return it
-    high = _va->alloc_high_address(ZGranuleSize * 2);
-    EXPECT_ALLOC_OK(high);
-
-    _va->free(high, ZGranuleSize * 2);
-  }
-
-  static void test_alloc_whole_area() {
-    // Alloc the whole reservation
-    zoffset bottom = _va->alloc_low_address(ZMapperTestReservationSize);
-    EXPECT_ALLOC_OK(bottom);
-
-    // Free two chunks and then allocate them again
-    _va->free(bottom, ZGranuleSize * 4);
-    _va->free(bottom + ZGranuleSize * 6, ZGranuleSize * 6);
-
-    zoffset offset = _va->alloc_low_address(ZGranuleSize * 4);
-    EXPECT_ALLOC_OK(offset);
-
-    offset = _va->alloc_low_address(ZGranuleSize * 6);
-    EXPECT_ALLOC_OK(offset);
-
-    // Now free it all, and verify it can be re-allocated
-    _va->free(bottom, ZMapperTestReservationSize);
-
-    bottom = _va->alloc_low_address(ZMapperTestReservationSize);
-    EXPECT_ALLOC_OK(bottom);
-
-    _va->free(bottom, ZMapperTestReservationSize);
-  }
 };
 
-bool ZMapperTest::_initialized = false;
-ZMemoryManager* ZMapperTest::_va = nullptr;
-ZVirtualMemoryManager* ZMapperTest::_vmm = nullptr;
-bool ZMapperTest::_has_unreserved;
-
 TEST_VM_F(ZMapperTest, test_unreserve) {
   test_unreserve();
 }
-
-TEST_VM_F(ZMapperTest, test_alloc_low_address) {
-  test_alloc_low_address();
-}
-
-TEST_VM_F(ZMapperTest, test_alloc_high_address) {
-  test_alloc_high_address();
-}
-
-TEST_VM_F(ZMapperTest, test_alloc_whole_area) {
-  test_alloc_whole_area();
-}
 
 #endif // _WINDOWS
@@ -23,28 +23,10 @@
 
 #include "gc/z/zGlobals.hpp"
 #include "gc/z/zMemory.inline.hpp"
-#include "unittest.hpp"
-
-class ZAddressOffsetMaxSetter {
-private:
-  const size_t _old_max;
-  const size_t _old_mask;
-
-public:
-  ZAddressOffsetMaxSetter()
-    : _old_max(ZAddressOffsetMax),
-      _old_mask(ZAddressOffsetMask) {
-    ZAddressOffsetMax = size_t(16) * G * 1024;
-    ZAddressOffsetMask = ZAddressOffsetMax - 1;
-  }
-  ~ZAddressOffsetMaxSetter() {
-    ZAddressOffsetMax = _old_max;
-    ZAddressOffsetMask = _old_mask;
-  }
-};
+#include "zunittest.hpp"
 
 TEST(ZMemory, accessors) {
-  ZAddressOffsetMaxSetter setter;
+  ZAddressOffsetMaxSetter setter(size_t(16) * G * 1024);
 
   {
     ZMemory mem(zoffset(0), ZGranuleSize);
@@ -74,7 +56,7 @@ TEST(ZMemory, accessors) {
 }
 
 TEST(ZMemory, resize) {
-  ZAddressOffsetMaxSetter setter;
+  ZAddressOffsetMaxSetter setter(size_t(16) * G * 1024);
 
   ZMemory mem(zoffset(ZGranuleSize * 2), ZGranuleSize * 2);
 
@@ -22,9 +22,11 @@
  */
 
 #include "gc/z/zVirtualMemory.inline.hpp"
-#include "unittest.hpp"
+#include "zunittest.hpp"
 
 TEST(ZVirtualMemory, split) {
+  ZAddressOffsetMaxSetter setter(size_t(16) * G * 1024);
+
   ZVirtualMemory vmem(zoffset(0), 10);
 
   ZVirtualMemory vmem0 = vmem.split(0);
test/hotspot/gtest/gc/z/test_zVirtualMemoryManager.cpp (new file, 269 lines)

@@ -0,0 +1,269 @@
/*
 * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zArguments.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zInitialize.hpp"
#include "gc/z/zList.inline.hpp"
#include "gc/z/zMemory.inline.hpp"
#include "gc/z/zNUMA.inline.hpp"
#include "gc/z/zValue.inline.hpp"
#include "gc/z/zVirtualMemory.hpp"
#include "runtime/os.hpp"
#include "zunittest.hpp"

using namespace testing;

#define ASSERT_ALLOC_OK(offset) ASSERT_NE(offset, zoffset(UINTPTR_MAX))

class ZCallbacksResetter {
private:
  ZMemoryManager::Callbacks* _callbacks;
  ZMemoryManager::Callbacks  _saved;

public:
  ZCallbacksResetter(ZMemoryManager::Callbacks* callbacks)
    : _callbacks(callbacks),
      _saved(*callbacks) {
    *_callbacks = {};
  }
  ~ZCallbacksResetter() {
    *_callbacks = _saved;
  }
};

class ZVirtualMemoryManagerTest : public ZTest {
private:
  static constexpr size_t ReservationSize = 32 * M;

  ZMemoryManager* _va;
  ZVirtualMemoryManager* _vmm;

public:
  virtual void SetUp() {
    // Only run test on supported Windows versions
    if (!is_os_supported()) {
      GTEST_SKIP() << "OS not supported";
    }

    void* vmr_mem = os::malloc(sizeof(ZVirtualMemoryManager), mtTest);
    _vmm = ::new (vmr_mem) ZVirtualMemoryManager(ReservationSize);
    _va = &_vmm->_manager;
  }

  virtual void TearDown() {
    if (!is_os_supported()) {
      // Test skipped, nothing to cleanup
      return;
    }

    // Best-effort cleanup
    _vmm->unreserve_all();
    _vmm->~ZVirtualMemoryManager();
    os::free(_vmm);
  }

  void test_reserve_discontiguous_and_coalesce() {
    // Start by ensuring that we have 3 unreserved granules, and then let the
    // fourth granule be pre-reserved and therefore blocking subsequent requests
    // to reserve memory.
    //
    // +----+----+----+----+
    //                ----- pre-reserved - to block contiguous reservation
    // --------------- unreserved - to allow reservation of 3 granules
    //
    // If we then ask for 4 granules starting at the first granule above,
    // then we won't be able to allocate 4 consecutive granules and the code
    // reverts into the discontiguous mode. This mode uses interval halving
    // to find the limits of memory areas that have already been reserved.
    // This will lead to the first 2 granules being reserved, then the third
    // granule will be reserved.
    //
    // The problem we had with this is that this would yield two separate
    // placeholder reservations, even though they are adjacent. The callbacks
    // are supposed to fix that by coalescing the placeholders, *but* the
    // callbacks used to be only turned on *after* the reservation call. So,
    // we end up with one 3 granule large memory area in the manager, which
    // unexpectedly was covered by two placeholders (instead of the expected
    // one placeholder).
    //
    // Later when the callbacks had been installed and we tried to fetch memory
    // from the manager, the callbacks would try to split off the placeholder
    // to separate the fetched memory from the memory left in the manager. This
    // used to fail because the memory was already split into two placeholders.

    if (_vmm->reserved() < 4 * ZGranuleSize || !_va->free_is_contiguous()) {
      GTEST_SKIP() << "Fixture failed to reserve adequate memory, reserved "
                   << (_vmm->reserved() >> ZGranuleSizeShift) << " * ZGranuleSize";
    }

    // Start at the offset we reserved.
    const zoffset base_offset = _vmm->lowest_available_address();

    // Empty the reserved memory in preparation for the rest of the test.
    _vmm->unreserve_all();

    const zaddress_unsafe base = ZOffset::address_unsafe(base_offset);
    const zaddress_unsafe blocked = base + 3 * ZGranuleSize;

    // Reserve the memory that is acting as a blocking reservation.
    {
      char* const result = os::attempt_reserve_memory_at((char*)untype(blocked), ZGranuleSize, !ExecMem, mtTest);
      if (uintptr_t(result) != untype(blocked)) {
        GTEST_SKIP() << "Failed to reserve requested memory at " << untype(blocked);
      }
    }

    {
      // This ends up reserving 2 granules and then 1 granule adjacent to the
      // first. In previous implementations this resulted in two separate
      // placeholders (4MB and 2MB). This was a bug, because the manager is
      // designed to have one placeholder per memory area. This in turn would
      // lead to a subsequent failure when _vmm->alloc tried to split off the
      // 4MB that is already covered by its own placeholder. You can't place
      // a placeholder over an already existing placeholder.

      // To reproduce this, the test needed to mimic the initializing memory
      // reservation code which had the placeholders turned off. This was done
      // with this helper:
      //
      //   ZCallbacksResetter resetter(&_va->_callbacks);
      //
      // After the fix, we always have the callbacks turned on, so we don't
      // need this to mimic the initializing memory reservation.

      const size_t reserved = _vmm->reserve_discontiguous(base_offset, 4 * ZGranuleSize, ZGranuleSize);
      ASSERT_LE(reserved, 3 * ZGranuleSize);
      if (reserved < 3 * ZGranuleSize) {
        GTEST_SKIP() << "Failed reserve_discontiguous"
                        ", expected 3 * ZGranuleSize, got " << (reserved >> ZGranuleSizeShift)
                     << " * ZGranuleSize";
      }
    }

    {
      // The test used to crash here because the 3 granule memory area was
      // inadvertently covered by two placeholders (2 granules + 1 granule).
      const ZVirtualMemory vmem = _vmm->alloc(2 * ZGranuleSize, true);
      ASSERT_EQ(vmem.start(), base_offset);
      ASSERT_EQ(vmem.size(), 2 * ZGranuleSize);

      // Cleanup - Must happen in granule-sizes because of how Windows hands
      // out memory in granule-sized placeholder reservations.
      _vmm->unreserve(base_offset, ZGranuleSize);
      _vmm->unreserve(base_offset + ZGranuleSize, ZGranuleSize);
    }

    // Final cleanup
    const ZVirtualMemory vmem = _vmm->alloc(ZGranuleSize, true);
    ASSERT_EQ(vmem.start(), base_offset + 2 * ZGranuleSize);
    ASSERT_EQ(vmem.size(), ZGranuleSize);
    _vmm->unreserve(vmem.start(), vmem.size());

    const bool released = os::release_memory((char*)untype(blocked), ZGranuleSize);
    ASSERT_TRUE(released);
  }

  void test_alloc_low_address() {
    // Verify that we get a placeholder for the first granule
    zoffset bottom = _va->alloc_low_address(ZGranuleSize);
    ASSERT_ALLOC_OK(bottom);

    _va->free(bottom, ZGranuleSize);

    // Alloc something larger than a granule and free it
    bottom = _va->alloc_low_address(ZGranuleSize * 3);
    ASSERT_ALLOC_OK(bottom);

    _va->free(bottom, ZGranuleSize * 3);

    // Free with more memory allocated
    bottom = _va->alloc_low_address(ZGranuleSize);
    ASSERT_ALLOC_OK(bottom);

    zoffset next = _va->alloc_low_address(ZGranuleSize);
    ASSERT_ALLOC_OK(next);

    _va->free(bottom, ZGranuleSize);
    _va->free(next, ZGranuleSize);
  }

  void test_alloc_high_address() {
    // Verify that we get a placeholder for the last granule
    zoffset high = _va->alloc_high_address(ZGranuleSize);
    ASSERT_ALLOC_OK(high);

    zoffset prev = _va->alloc_high_address(ZGranuleSize);
    ASSERT_ALLOC_OK(prev);

    _va->free(high, ZGranuleSize);
    _va->free(prev, ZGranuleSize);

    // Alloc something larger than a granule and return it
    high = _va->alloc_high_address(ZGranuleSize * 2);
    ASSERT_ALLOC_OK(high);

    _va->free(high, ZGranuleSize * 2);
  }

  void test_alloc_whole_area() {
    // Alloc the whole reservation
    zoffset bottom = _va->alloc_low_address(ReservationSize);
    ASSERT_ALLOC_OK(bottom);

    // Free two chunks and then allocate them again
    _va->free(bottom, ZGranuleSize * 4);
    _va->free(bottom + ZGranuleSize * 6, ZGranuleSize * 6);

    zoffset offset = _va->alloc_low_address(ZGranuleSize * 4);
    ASSERT_ALLOC_OK(offset);

    offset = _va->alloc_low_address(ZGranuleSize * 6);
    ASSERT_ALLOC_OK(offset);

    // Now free it all, and verify it can be re-allocated
    _va->free(bottom, ReservationSize);

    bottom = _va->alloc_low_address(ReservationSize);
    ASSERT_ALLOC_OK(bottom);

    _va->free(bottom, ReservationSize);
  }
};

TEST_VM_F(ZVirtualMemoryManagerTest, test_reserve_discontiguous_and_coalesce) {
  test_reserve_discontiguous_and_coalesce();
}

TEST_VM_F(ZVirtualMemoryManagerTest, test_alloc_low_address) {
  test_alloc_low_address();
}

TEST_VM_F(ZVirtualMemoryManagerTest, test_alloc_high_address) {
  test_alloc_high_address();
}

TEST_VM_F(ZVirtualMemoryManagerTest, test_alloc_whole_area) {
  test_alloc_whole_area();
}
test/hotspot/gtest/gc/z/zunittest.hpp (new file, 82 lines)

@@ -0,0 +1,82 @@
/*
 * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#ifndef ZUNITTEST_HPP
#define ZUNITTEST_HPP

#include "gc/z/zAddress.hpp"
#include "gc/z/zArguments.hpp"
#include "gc/z/zInitialize.hpp"
#include "unittest.hpp"

class ZAddressOffsetMaxSetter {
  friend class ZTest;

private:
  size_t _old_max;
  size_t _old_mask;

public:
  ZAddressOffsetMaxSetter(size_t zaddress_offset_max)
    : _old_max(ZAddressOffsetMax),
      _old_mask(ZAddressOffsetMask) {
    ZAddressOffsetMax = zaddress_offset_max;
    ZAddressOffsetMask = ZAddressOffsetMax - 1;
  }
  ~ZAddressOffsetMaxSetter() {
    ZAddressOffsetMax = _old_max;
    ZAddressOffsetMask = _old_mask;
  }
};

class ZTest : public testing::Test {
private:
  ZAddressOffsetMaxSetter _zaddress_offset_max_setter;

protected:
  ZTest()
    : _zaddress_offset_max_setter(ZAddressOffsetMax) {
    if (!is_os_supported()) {
      // If the OS does not support ZGC, do not run initialization, as it may crash the VM.
      return;
    }

    // Initialize ZGC subsystems for gtests; this may only be done once per process.
    static bool runs_once = [&]() {
      ZInitialize::pd_initialize();
      ZGlobalsPointers::initialize();

      // ZGlobalsPointers::initialize() sets ZAddressOffsetMax, make sure the
      // first test fixture invocation has a correct ZAddressOffsetMaxSetter.
      _zaddress_offset_max_setter._old_max = ZAddressOffsetMax;
      _zaddress_offset_max_setter._old_mask = ZAddressOffsetMask;
      return true;
    }();
  }

  bool is_os_supported() {
    return ZArguments::is_os_supported();
  }
};

#endif // ZUNITTEST_HPP
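ZTest's once-per-process initialization relies on the C++11 guarantee that a function-local static is initialized exactly once, even when multiple fixtures are constructed. A minimal standalone sketch of that idiom, independent of HotSpot:

#include <cstdio>

struct Fixture {
  Fixture() {
    // C++11 guarantees thread-safe, exactly-once initialization of a
    // function-local static; the lambda body runs only for the first
    // Fixture ever constructed in this process.
    static bool runs_once = []() {
      printf("process-wide init\n");
      return true;
    }();
    (void)runs_once;  // silence unused-variable warnings
  }
};

int main() {
  Fixture a;  // prints "process-wide init"
  Fixture b;  // prints nothing
  return 0;
}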