8220569: ZGC: Rename and rework ZUnmapBadViews to ZVerifyViews

Reviewed-by: stefank, eosterlund
This commit is contained in:
Per Lidén 2019-03-18 11:50:38 +01:00
parent 8de73b3995
commit fbf62f0fb8
10 changed files with 83 additions and 68 deletions

View File

@@ -237,8 +237,8 @@ uintptr_t ZPhysicalMemoryBacking::nmt_address(uintptr_t offset) const {
}
void ZPhysicalMemoryBacking::map(ZPhysicalMemory pmem, uintptr_t offset) const {
if (ZUnmapBadViews) {
// Only map the good view, for debugging only
if (ZVerifyViews) {
// Map good view
map_view(pmem, ZAddress::good(offset), AlwaysPreTouch);
} else {
// Map all views
@@ -249,8 +249,8 @@ void ZPhysicalMemoryBacking::map(ZPhysicalMemory pmem, uintptr_t offset) const {
}
void ZPhysicalMemoryBacking::unmap(ZPhysicalMemory pmem, uintptr_t offset) const {
if (ZUnmapBadViews) {
// Only map the good view, for debugging only
if (ZVerifyViews) {
// Unmap good view
unmap_view(pmem, ZAddress::good(offset));
} else {
// Unmap all views
@@ -260,11 +260,14 @@ void ZPhysicalMemoryBacking::unmap(ZPhysicalMemory pmem, uintptr_t offset) const
}
}
void ZPhysicalMemoryBacking::flip(ZPhysicalMemory pmem, uintptr_t offset) const {
assert(ZUnmapBadViews, "Should be enabled");
const uintptr_t addr_good = ZAddress::good(offset);
const uintptr_t addr_bad = ZAddress::is_marked(ZAddressGoodMask) ? ZAddress::remapped(offset) : ZAddress::marked(offset);
// Map/Unmap views
map_view(pmem, addr_good, false /* pretouch */);
unmap_view(pmem, addr_bad);
// Debug-only mapping: maps only the "good" view of the given physical
// memory at the given offset (no pretouch). Valid only when the
// ZVerifyViews diagnostic flag is enabled.
void ZPhysicalMemoryBacking::debug_map(ZPhysicalMemory pmem, uintptr_t offset) const {
// Map good view
assert(ZVerifyViews, "Should be enabled");
map_view(pmem, ZAddress::good(offset), false /* pretouch */);
}
// Debug-only unmapping: unmaps only the "good" view of the given physical
// memory at the given offset. Valid only when the ZVerifyViews diagnostic
// flag is enabled.
void ZPhysicalMemoryBacking::debug_unmap(ZPhysicalMemory pmem, uintptr_t offset) const {
// Unmap good view
assert(ZVerifyViews, "Should be enabled");
unmap_view(pmem, ZAddress::good(offset));
}

View File

@@ -58,7 +58,9 @@ public:
void map(ZPhysicalMemory pmem, uintptr_t offset) const;
void unmap(ZPhysicalMemory pmem, uintptr_t offset) const;
void flip(ZPhysicalMemory pmem, uintptr_t offset) const;
void debug_map(ZPhysicalMemory pmem, uintptr_t offset) const;
void debug_unmap(ZPhysicalMemory pmem, uintptr_t offset) const;
};
#endif // OS_CPU_LINUX_X86_GC_Z_ZPHYSICALMEMORYBACKING_LINUX_X86_HPP

View File

@@ -247,22 +247,37 @@ void ZHeap::release_page(ZPage* page, bool reclaimed) {
}
}
void ZHeap::flip_views() {
// For debugging only
if (ZUnmapBadViews) {
// Flip pages
// Pre-flip hook: when ZVerifyViews is enabled, unmap all pages before the
// global address view is flipped (pages are re-mapped in after_flip).
void ZHeap::before_flip() {
if (ZVerifyViews) {
// Unmap all pages
_page_allocator.unmap_all_pages();
}
}
void ZHeap::after_flip() {
if (ZVerifyViews) {
// Map all pages
ZPageTableIterator iter(&_pagetable);
for (ZPage* page; iter.next(&page);) {
if (!page->is_detached()) {
_page_allocator.flip_page(page);
_page_allocator.map_page(page);
}
}
// Flip pre-mapped memory
_page_allocator.flip_pre_mapped();
}
}
// Flips the global address masks to the "marked" view, bracketed by the
// before/after flip hooks (which unmap/re-map pages when ZVerifyViews is
// enabled so accesses through stale views can be caught).
void ZHeap::flip_to_marked() {
before_flip();
ZAddressMasks::flip_to_marked();
after_flip();
}
// Flips the global address masks to the "remapped" view, bracketed by the
// before/after flip hooks (which unmap/re-map pages when ZVerifyViews is
// enabled so accesses through stale views can be caught).
void ZHeap::flip_to_remapped() {
before_flip();
ZAddressMasks::flip_to_remapped();
after_flip();
}
void ZHeap::mark_start() {
assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
@@ -270,8 +285,7 @@ void ZHeap::mark_start() {
ZStatSample(ZSamplerHeapUsedBeforeMark, used());
// Flip address view
ZAddressMasks::flip_to_marked();
flip_views();
flip_to_marked();
// Retire allocating pages
_object_allocator.retire_pages();
@@ -466,8 +480,7 @@ void ZHeap::relocate_start() {
_unload.finish();
// Flip address view
ZAddressMasks::flip_to_remapped();
flip_views();
flip_to_remapped();
// Enter relocate phase
ZGlobalPhase = ZPhaseRelocate;

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -67,8 +67,13 @@ private:
size_t heap_max_size() const;
size_t heap_max_reserve_size() const;
void before_flip();
void after_flip();
void flip_to_marked();
void flip_to_remapped();
void out_of_memory();
void flip_views();
void fixup_partial_loads();
public:

View File

@@ -242,11 +242,6 @@ void ZPageAllocator::flush_pre_mapped() {
_pre_mapped.clear();
}
void ZPageAllocator::map_page(ZPage* page) {
// Map physical memory
_physical.map(page->physical_memory(), page->start());
}
void ZPageAllocator::detach_page(ZPage* page) {
// Detach the memory mapping.
detach_memory(page->virtual_memory(), page->physical_memory());
@@ -267,6 +262,21 @@ void ZPageAllocator::destroy_page(ZPage* page) {
delete page;
}
// Maps a page's physical memory into the address space. A page that is not
// yet mapped gets a regular mapping; a page that is already mapped is only
// re-mapped through the debug (good-view) path when ZVerifyViews is enabled.
void ZPageAllocator::map_page(ZPage* page) {
// Map physical memory
if (!page->is_mapped()) {
_physical.map(page->physical_memory(), page->start());
} else if (ZVerifyViews) {
// Re-establish the good view for an already-mapped page (debug only)
_physical.debug_map(page->physical_memory(), page->start());
}
}
// Debug-only: unmaps the good view of the whole heap by constructing a
// single physical segment covering [0, ZAddressOffsetMax) and debug-unmapping
// it at offset 0. Used by ZHeap::before_flip when ZVerifyViews is enabled.
void ZPageAllocator::unmap_all_pages() {
// Segment spanning the entire address offset range; local only, cleared below
ZPhysicalMemory pmem(ZPhysicalMemorySegment(0 /* start */, ZAddressOffsetMax));
_physical.debug_unmap(pmem, 0 /* offset */);
pmem.clear();
}
void ZPageAllocator::flush_detached_pages(ZList<ZPage>* list) {
ZLocker<ZLock> locker(&_lock);
list->transfer(&_detached);
@@ -398,9 +408,7 @@ ZPage* ZPageAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags fl
}
// Map page if needed
if (!page->is_mapped()) {
map_page(page);
}
map_page(page);
// Reset page. This updates the page's sequence number and must
// be done after page allocation, which potentially blocked in
@@ -455,27 +463,6 @@ void ZPageAllocator::detach_memory(const ZVirtualMemory& vmem, ZPhysicalMemory&
pmem.clear();
}
void ZPageAllocator::flip_page(ZPage* page) {
const ZPhysicalMemory& pmem = page->physical_memory();
const uintptr_t addr = page->start();
// Flip physical mapping
_physical.flip(pmem, addr);
}
void ZPageAllocator::flip_pre_mapped() {
if (_pre_mapped.available() == 0) {
// Nothing to flip
return;
}
const ZPhysicalMemory& pmem = _pre_mapped.physical_memory();
const ZVirtualMemory& vmem = _pre_mapped.virtual_memory();
// Flip physical mapping
_physical.flip(pmem, vmem.start());
}
void ZPageAllocator::free_page(ZPage* page, bool reclaimed) {
ZLocker<ZLock> locker(&_lock);

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -63,7 +63,6 @@ private:
size_t try_ensure_unused_for_pre_mapped(size_t size);
ZPage* create_page(uint8_t type, size_t size);
void map_page(ZPage* page);
void detach_page(ZPage* page);
void flush_pre_mapped();
void flush_cache(size_t size);
@@ -97,13 +96,13 @@ public:
void reset_statistics();
ZPage* alloc_page(uint8_t type, size_t size, ZAllocationFlags flags);
void flip_page(ZPage* page);
void free_page(ZPage* page, bool reclaimed);
void destroy_page(ZPage* page);
void flush_detached_pages(ZList<ZPage>* list);
void map_page(ZPage* page);
void unmap_all_pages();
void flip_pre_mapped();
void flush_detached_pages(ZList<ZPage>* list);
bool is_alloc_stalled() const;
void check_out_of_memory();

View File

@@ -179,6 +179,10 @@ void ZPhysicalMemoryManager::unmap(ZPhysicalMemory pmem, uintptr_t offset) {
_backing.unmap(pmem, offset);
}
void ZPhysicalMemoryManager::flip(ZPhysicalMemory pmem, uintptr_t offset) {
_backing.flip(pmem, offset);
// Debug-only good-view map; forwards to the platform backing.
void ZPhysicalMemoryManager::debug_map(ZPhysicalMemory pmem, uintptr_t offset) {
_backing.debug_map(pmem, offset);
}
// Debug-only good-view unmap; forwards to the platform backing.
void ZPhysicalMemoryManager::debug_unmap(ZPhysicalMemory pmem, uintptr_t offset) {
_backing.debug_unmap(pmem, offset);
}

View File

@@ -94,7 +94,9 @@ public:
void map(ZPhysicalMemory pmem, uintptr_t offset);
void unmap(ZPhysicalMemory pmem, uintptr_t offset);
void flip(ZPhysicalMemory pmem, uintptr_t offset);
void debug_map(ZPhysicalMemory pmem, uintptr_t offset);
void debug_unmap(ZPhysicalMemory pmem, uintptr_t offset);
};
#endif // SHARE_GC_Z_ZPHYSICALMEMORY_HPP

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -70,8 +70,8 @@
diagnostic(bool, ZProactive, true, \
"Enable proactive GC cycles") \
\
diagnostic(bool, ZUnmapBadViews, false, \
"Unmap bad (inactive) heap views") \
diagnostic(bool, ZVerifyViews, false, \
"Verify heap view accesses") \
\
diagnostic(bool, ZVerifyMarking, false, \
"Verify marking stacks") \

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
* @modules java.base/jdk.internal.misc:+open
* @summary Validate barriers after Unsafe getReference, CAS and swap (GetAndSet)
* @requires vm.gc.Z & !vm.graal.enabled
* @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseZGC -XX:+UnlockDiagnosticVMOptions -XX:+ZUnmapBadViews -XX:ZCollectionInterval=1 -XX:-CreateCoredumpOnCrash -XX:CompileCommand=dontinline,*::mergeImpl* compiler.gcbarriers.UnsafeIntrinsicsTest
* @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseZGC -XX:+UnlockDiagnosticVMOptions -XX:+ZVerifyViews -XX:ZCollectionInterval=1 -XX:-CreateCoredumpOnCrash -XX:CompileCommand=dontinline,*::mergeImpl* compiler.gcbarriers.UnsafeIntrinsicsTest
*/
package compiler.gcbarriers;