Mirror of https://github.com/openjdk/jdk.git

8230566: ZGC: Don't substitute klass pointer during array clearing

Reviewed-by: stefank, eosterlund

commit 24ed3d4eef (parent 7ecde2ec48)
@@ -72,7 +72,7 @@ bool ZBarrier::should_mark_through(uintptr_t addr) {
   return true;
 }
 
-template <bool finalizable, bool publish>
+template <bool follow, bool finalizable, bool publish>
 uintptr_t ZBarrier::mark(uintptr_t addr) {
   uintptr_t good_addr;
 
@@ -89,7 +89,7 @@ uintptr_t ZBarrier::mark(uintptr_t addr) {
 
   // Mark
   if (should_mark_through<finalizable>(addr)) {
-    ZHeap::heap()->mark_object<finalizable, publish>(good_addr);
+    ZHeap::heap()->mark_object<follow, finalizable, publish>(good_addr);
   }
 
   return good_addr;
@@ -108,7 +108,7 @@ uintptr_t ZBarrier::relocate(uintptr_t addr) {
 }
 
 uintptr_t ZBarrier::relocate_or_mark(uintptr_t addr) {
-  return during_relocate() ? relocate(addr) : mark<Strong, Publish>(addr);
+  return during_relocate() ? relocate(addr) : mark<Follow, Strong, Publish>(addr);
 }
 
 uintptr_t ZBarrier::relocate_or_remap(uintptr_t addr) {
@@ -174,11 +174,11 @@ uintptr_t ZBarrier::keep_alive_barrier_on_phantom_oop_slow_path(uintptr_t addr)
 // Mark barrier
 //
 uintptr_t ZBarrier::mark_barrier_on_oop_slow_path(uintptr_t addr) {
-  return mark<Strong, Overflow>(addr);
+  return mark<Follow, Strong, Overflow>(addr);
 }
 
 uintptr_t ZBarrier::mark_barrier_on_finalizable_oop_slow_path(uintptr_t addr) {
-  const uintptr_t good_addr = mark<Finalizable, Overflow>(addr);
+  const uintptr_t good_addr = mark<Follow, Finalizable, Overflow>(addr);
   if (ZAddress::is_good(addr)) {
     // If the oop was already strongly marked/good, then we do
     // not want to downgrade it to finalizable marked/good.
@@ -200,7 +200,15 @@ uintptr_t ZBarrier::mark_barrier_on_root_oop_slow_path(uintptr_t addr) {
   assert(during_mark(), "Invalid phase");
 
   // Mark
-  return mark<Strong, Publish>(addr);
+  return mark<Follow, Strong, Publish>(addr);
+}
+
+uintptr_t ZBarrier::mark_barrier_on_invisible_root_oop_slow_path(uintptr_t addr) {
+  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
+  assert(during_mark(), "Invalid phase");
+
+  // Mark
+  return mark<DontFollow, Strong, Publish>(addr);
 }
 
 //
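Not part of the patch itself: a minimal standalone sketch of the idea behind the new follow template parameter. The types and names below (Object, mark, mark_root, mark_invisible_root) are invented for illustration and are not the HotSpot/ZGC API; the point is that the follow/don't-follow decision is a compile-time constant threaded through the mark call chain, so an object can be kept live without its contents ever being traced.

```cpp
#include <cstdio>

// Compile-time knobs, mirroring the Follow/DontFollow constants added to ZBarrier.
static const bool Follow     = true;
static const bool DontFollow = false;

struct Object {
  bool    marked;
  Object* field;   // a single outgoing "reference" is enough for the sketch
};

template <bool follow>
void mark(Object* obj) {
  if (obj == nullptr || obj->marked) {
    return;
  }
  obj->marked = true;          // the object itself is always marked live
  if (follow) {                // but its references are only traced on request
    mark<Follow>(obj->field);
  }
}

// Regular (visible) roots are marked and followed.
void mark_root(Object* root)           { mark<Follow>(root); }

// Invisible roots, e.g. a not-yet-initialized array, are marked but not followed.
void mark_invisible_root(Object* root) { mark<DontFollow>(root); }

int main() {
  Object child1 = {false, nullptr};
  Object child2 = {false, nullptr};
  Object arr    = {false, &child1};   // stands in for the array under construction
  Object obj    = {false, &child2};   // an ordinary strongly reachable object

  mark_invisible_root(&arr);   // arr becomes live, child1 is not traced
  mark_root(&obj);             // obj becomes live, child2 is traced as well

  std::printf("arr=%d child1=%d obj=%d child2=%d\n",
              arr.marked, child1.marked, obj.marked, child2.marked);  // 1 0 1 1
  return 0;
}
```

In the actual change, ZBarrier::mark_barrier_on_invisible_root_oop_slow_path is the only mark path that passes DontFollow; every other path above passes Follow.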
@@ -32,6 +32,9 @@ typedef uintptr_t (*ZBarrierSlowPath)(uintptr_t);
 
 class ZBarrier : public AllStatic {
 private:
+  static const bool Follow = true;
+  static const bool DontFollow = false;
+
   static const bool Strong = false;
   static const bool Finalizable = true;
 
@@ -51,7 +54,7 @@ private:
   static bool during_mark();
   static bool during_relocate();
   template <bool finalizable> static bool should_mark_through(uintptr_t addr);
-  template <bool finalizable, bool publish> static uintptr_t mark(uintptr_t addr);
+  template <bool follow, bool finalizable, bool publish> static uintptr_t mark(uintptr_t addr);
   static uintptr_t remap(uintptr_t addr);
   static uintptr_t relocate(uintptr_t addr);
   static uintptr_t relocate_or_mark(uintptr_t addr);
@@ -69,6 +72,7 @@ private:
   static uintptr_t mark_barrier_on_oop_slow_path(uintptr_t addr);
   static uintptr_t mark_barrier_on_finalizable_oop_slow_path(uintptr_t addr);
   static uintptr_t mark_barrier_on_root_oop_slow_path(uintptr_t addr);
+  static uintptr_t mark_barrier_on_invisible_root_oop_slow_path(uintptr_t addr);
 
   static uintptr_t relocate_barrier_on_root_oop_slow_path(uintptr_t addr);
 
@@ -106,6 +110,7 @@ public:
   static void mark_barrier_on_oop_field(volatile oop* p, bool finalizable);
   static void mark_barrier_on_oop_array(volatile oop* p, size_t length, bool finalizable);
   static void mark_barrier_on_root_oop_field(oop* p);
+  static void mark_barrier_on_invisible_root_oop_field(oop* p);
 
   // Relocate barrier
   static void relocate_barrier_on_root_oop_field(oop* p);
 
@@ -326,6 +326,11 @@ inline void ZBarrier::mark_barrier_on_root_oop_field(oop* p) {
   root_barrier<is_good_or_null_fast_path, mark_barrier_on_root_oop_slow_path>(p, o);
 }
 
+inline void ZBarrier::mark_barrier_on_invisible_root_oop_field(oop* p) {
+  const oop o = *p;
+  root_barrier<is_good_or_null_fast_path, mark_barrier_on_invisible_root_oop_slow_path>(p, o);
+}
+
 //
 // Relocate barrier
 //
@@ -140,7 +140,7 @@ public:
   // Marking
   bool is_object_live(uintptr_t addr) const;
   bool is_object_strongly_live(uintptr_t addr) const;
-  template <bool finalizable, bool publish> void mark_object(uintptr_t addr);
+  template <bool follow, bool finalizable, bool publish> void mark_object(uintptr_t addr);
   void mark_start();
   void mark(bool initial);
   void mark_flush_and_free(Thread* thread);
 
@@ -60,10 +60,10 @@ inline bool ZHeap::is_object_strongly_live(uintptr_t addr) const {
   return page->is_object_strongly_live(addr);
 }
 
-template <bool finalizable, bool publish>
+template <bool follow, bool finalizable, bool publish>
 inline void ZHeap::mark_object(uintptr_t addr) {
   assert(ZGlobalPhase == ZPhaseMark, "Mark not allowed");
-  _mark.mark_object<finalizable, publish>(addr);
+  _mark.mark_object<follow, finalizable, publish>(addr);
 }
 
 inline uintptr_t ZHeap::alloc_tlab(size_t size) {
@@ -193,7 +193,7 @@ void ZHeapIterator::objects_do(ObjectClosure* cl) {
   ZStatTimerDisable disable;
 
   // Push roots to visit
-  push_roots<ZRootsIteratorNoInvisible, false /* Concurrent */, false /* Weak */>();
+  push_roots<ZRootsIterator, false /* Concurrent */, false /* Weak */>();
   push_roots<ZConcurrentRootsIteratorClaimOther, true /* Concurrent */, false /* Weak */>();
   if (VisitWeaks) {
     push_roots<ZWeakRootsIterator, false /* Concurrent */, true /* Weak */>();
@@ -133,6 +133,9 @@ public:
     // Update thread local address bad mask
     ZThreadLocalData::set_address_bad_mask(thread, ZAddressBadMask);
 
+    // Mark invisible root
+    ZThreadLocalData::do_invisible_root(thread, ZBarrier::mark_barrier_on_invisible_root_oop_field);
+
     // Retire TLAB
     ZThreadLocalAllocBuffer::retire(thread);
   }
@@ -156,7 +159,7 @@ public:
   ZMarkRootsTask(ZMark* mark) :
       ZTask("ZMarkRootsTask"),
       _mark(mark),
-      _roots(true /* visit_invisible */, false /* visit_jvmti_weak_export */) {}
+      _roots(false /* visit_jvmti_weak_export */) {}
 
   virtual void work() {
     _roots.oops_do(&_cl);
@@ -339,7 +342,7 @@ void ZMark::mark_and_follow(ZMarkCache* cache, ZMarkStackEntry entry) {
     return;
   }
 
-  // Decode object address
+  // Decode object address and follow flag
   const uintptr_t addr = entry.object_address();
 
   if (!try_mark_object(cache, addr, finalizable)) {
@@ -348,7 +351,13 @@ void ZMark::mark_and_follow(ZMarkCache* cache, ZMarkStackEntry entry) {
   }
 
   if (is_array(addr)) {
-    follow_array_object(objArrayOop(ZOop::from_address(addr)), finalizable);
+    // Decode follow flag
+    const bool follow = entry.follow();
+
+    // The follow flag is currently only relevant for object arrays
+    if (follow) {
+      follow_array_object(objArrayOop(ZOop::from_address(addr)), finalizable);
+    }
   } else {
     follow_object(ZOop::from_address(addr), finalizable);
   }
@@ -105,7 +105,7 @@ public:
 
   bool is_initialized() const;
 
-  template <bool finalizable, bool publish> void mark_object(uintptr_t addr);
+  template <bool follow, bool finalizable, bool publish> void mark_object(uintptr_t addr);
 
   void start();
   void mark(bool initial);
 
@@ -31,12 +31,12 @@
 #include "runtime/thread.hpp"
 #include "utilities/debug.hpp"
 
-template <bool finalizable, bool publish>
+template <bool follow, bool finalizable, bool publish>
 inline void ZMark::mark_object(uintptr_t addr) {
   assert(ZAddress::is_marked(addr), "Should be marked");
   ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current());
   ZMarkStripe* const stripe = _stripes.stripe_for_addr(addr);
-  ZMarkStackEntry entry(addr, finalizable);
+  ZMarkStackEntry entry(addr, follow, finalizable);
 
   stacks->push(&_allocator, &_stripes, stripe, entry, publish);
 }
@@ -35,16 +35,18 @@
 // ------------
 //
 //  6
-//  3                                                                    2 1 0
-// +---------------------------------------------------------------------+-+-+
-// |11111111 11111111 11111111 11111111 11111111 11111111 11111111 111111|1|1|
-// +---------------------------------------------------------------------+-+-+
-// |                                                                      | |
-// |                                       1-1 Partial Array Flag (1-bit) * |
-// |                                                                         |
-// |                                             0-0 Final Flag (1-bit) *
+//  3                                                                  3 2 1 0
+// +-------------------------------------------------------------------+-+-+-+
+// |11111111 11111111 11111111 11111111 11111111 11111111 11111111 11111|1|1|1|
+// +-------------------------------------------------------------------+-+-+-+
+// |                                                                    | | |
+// |                                          2-2 Follow Flag (1-bit) * | |
+// |                                                                      | |
+// |                                       1-1 Partial Array Flag (1-bit) * |
+// |                                                                         |
+// |                                             0-0 Final Flag (1-bit) *
 // |
-// * 63-2 Object Address (62-bits)
+// * 63-3 Object Address (61-bits)
 //
 //
 // Partial array entry
@@ -69,7 +71,8 @@ class ZMarkStackEntry {
 private:
   typedef ZBitField<uint64_t, bool, 0, 1> field_finalizable;
   typedef ZBitField<uint64_t, bool, 1, 1> field_partial_array;
-  typedef ZBitField<uint64_t, uintptr_t, 2, 62> field_object_address;
+  typedef ZBitField<uint64_t, bool, 2, 1> field_follow;
+  typedef ZBitField<uint64_t, uintptr_t, 3, 61> field_object_address;
   typedef ZBitField<uint64_t, size_t, 2, 30> field_partial_array_length;
   typedef ZBitField<uint64_t, size_t, 32, 32> field_partial_array_offset;
 
@@ -83,8 +86,9 @@ public:
     // what _entry is initialized to.
   }
 
-  ZMarkStackEntry(uintptr_t object_address, bool finalizable) :
+  ZMarkStackEntry(uintptr_t object_address, bool follow, bool finalizable) :
       _entry(field_object_address::encode(object_address) |
+             field_follow::encode(follow) |
              field_partial_array::encode(false) |
              field_finalizable::encode(finalizable)) {}
 
@@ -110,6 +114,10 @@ public:
     return field_partial_array_length::decode(_entry);
   }
 
+  bool follow() const {
+    return field_follow::decode(_entry);
+  }
+
   uintptr_t object_address() const {
     return field_object_address::decode(_entry);
   }
 
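For readers unfamiliar with the encoding, here is a standalone sketch of packing the three flag bits plus an object address into a single 64-bit entry, mirroring the layout documented above. It uses plain shifts and masks and stuffs the flags into the low alignment bits of the address, rather than shifting the address into bits 63-3 as the real ZBitField-based encoding does, so it illustrates the layout but is not the actual implementation.

```cpp
#include <cassert>
#include <cstdint>

// Standalone illustration of the object-entry layout documented above:
// bit 0 = finalizable flag, bit 1 = partial-array flag, bit 2 = follow flag,
// remaining high bits = object address. The address is assumed to be
// 8-byte aligned so its low three bits are free for the flags.
class MarkStackEntry {
private:
  uint64_t _entry;

public:
  MarkStackEntry(uintptr_t object_address, bool follow, bool finalizable)
    : _entry(static_cast<uint64_t>(object_address) |
             (static_cast<uint64_t>(follow)      << 2) |
             (static_cast<uint64_t>(false)       << 1) |   // partial-array flag
             (static_cast<uint64_t>(finalizable) << 0)) {
    assert((object_address & 0x7) == 0 && "address must be 8-byte aligned");
  }

  bool      finalizable()    const { return (_entry >> 0) & 1; }
  bool      partial_array()  const { return (_entry >> 1) & 1; }
  bool      follow()         const { return (_entry >> 2) & 1; }
  uintptr_t object_address() const { return static_cast<uintptr_t>(_entry & ~uint64_t(7)); }
};

int main() {
  // An entry for an aligned (fake) address, strongly marked, not to be followed.
  MarkStackEntry entry(0x100020, /* follow */ false, /* finalizable */ false);
  assert(entry.object_address() == 0x100020);
  assert(!entry.follow() && !entry.partial_array() && !entry.finalizable());
  return 0;
}
```

The follow() accessor added above plays the same role as the getter in this sketch: the drain loop reads the flag back when the entry is popped from the mark stack.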
@@ -25,46 +25,24 @@
 #include "gc/z/zThreadLocalData.hpp"
 #include "gc/z/zObjArrayAllocator.hpp"
 #include "gc/z/zUtils.inline.hpp"
-#include "memory/universe.hpp"
 #include "oops/arrayKlass.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
-#include "runtime/handles.hpp"
-#include "runtime/os.hpp"
 #include "utilities/debug.hpp"
-#include "utilities/globalDefinitions.hpp"
 
-// To avoid delaying safepoints, clearing of arrays is split up in segments
-// with safepoint polling inbetween. However, we can't have a not-yet-cleared
-// array of oops on the heap when we safepoint since the GC will then stumble
-// across uninitialized oops. To avoid this we let an array of oops be an
-// array of a primitive type of the same size until the clearing has completed.
-// A max segment size of 64K was chosen because benchmarking suggests that it
-// offers a good trade-off between allocation time and time-to-safepoint.
-
-static Klass* substitute_object_array_klass(Klass* klass) {
-  if (!klass->is_objArray_klass()) {
-    return klass;
-  }
-
-  Klass* const substitute_klass = Universe::longArrayKlassObj();
-  const BasicType type = ArrayKlass::cast(klass)->element_type();
-  const BasicType substitute_type = ArrayKlass::cast(substitute_klass)->element_type();
-  assert(type2aelembytes(type) == type2aelembytes(substitute_type), "Element size mismatch");
-  return substitute_klass;
-}
-
 ZObjArrayAllocator::ZObjArrayAllocator(Klass* klass, size_t word_size, int length, Thread* thread) :
-    ObjArrayAllocator(substitute_object_array_klass(klass), word_size, length, false /* do_zero */, thread),
-    _final_klass(klass) {}
+    ObjArrayAllocator(klass, word_size, length, false /* do_zero */, thread) {}
 
 oop ZObjArrayAllocator::finish(HeapWord* mem) const {
-  // Set mark word and initial klass pointer
+  // Initialize object header and length field
   ObjArrayAllocator::finish(mem);
 
-  // Keep the array alive across safepoints, but make it invisible
-  // to the heap iterator until the final klass pointer has been set
+  // Keep the array alive across safepoints through an invisible
+  // root. Invisible roots are not visited by the heap iterator
+  // and the marking logic will not attempt to follow its elements.
   ZThreadLocalData::set_invisible_root(_thread, (oop*)&mem);
 
+  // A max segment size of 64K was chosen because microbenchmarking
+  // suggested that it offered a good trade-off between allocation
+  // time and time-to-safepoint
   const size_t segment_max = ZUtils::bytes_to_words(64 * K);
   const size_t skip = arrayOopDesc::header_size(ArrayKlass::cast(_klass)->element_type());
   size_t remaining = _word_size - skip;
@@ -81,12 +59,6 @@ oop ZObjArrayAllocator::finish(HeapWord* mem) const {
     }
   }
 
-  if (_klass != _final_klass) {
-    // Set final klass pointer
-    oopDesc::release_set_klass(mem, _final_klass);
-  }
-
-  // Make the array visible to the heap iterator
   ZThreadLocalData::clear_invisible_root(_thread);
 
   return oop(mem);
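A simplified, self-contained sketch of the allocation-side protocol the new code follows: register the in-flight array as an invisible root, clear it in bounded segments with a safepoint poll between segments, then drop the root. The helpers (register_invisible_root, unregister_invisible_root, safepoint_poll) are placeholders invented for this illustration, not the real VM machinery, and object-header handling and pointer fix-up after relocation are omitted.

```cpp
#include <cstddef>
#include <cstring>

// Placeholders for the real VM machinery; invented for this illustration.
static void register_invisible_root(void** slot)   { (void)slot; /* GC may now update *slot */ }
static void unregister_invisible_root(void** slot) { (void)slot; /* array becomes visible   */ }
static void safepoint_poll()                       { /* a real VM would block here if a safepoint is pending */ }

// Clear an array in bounded segments so that a pending safepoint is never
// delayed by more than one segment's worth of work. While clearing is in
// progress the array is reachable only through the invisible root, which the
// marking code keeps alive but does not follow, so the GC never scans the
// still-uninitialized tail.
void* clear_in_segments(void* mem, size_t size_in_bytes) {
  const size_t segment_max = 64 * 1024;   // 64K, as in the comment above

  register_invisible_root(&mem);

  size_t offset = 0;
  while (offset < size_in_bytes) {
    const size_t remaining = size_in_bytes - offset;
    const size_t segment = remaining < segment_max ? remaining : segment_max;

    std::memset(static_cast<char*>(mem) + offset, 0, segment);
    offset += segment;

    if (offset < size_in_bytes) {
      safepoint_poll();   // give the GC a chance to run between segments
    }
  }

  unregister_invisible_root(&mem);
  return mem;
}

int main() {
  static char buffer[256 * 1024];            // stands in for the newly allocated array
  clear_in_segments(buffer, sizeof(buffer));
  return 0;
}
```

Because marking treats the invisible root as DontFollow, a safepoint in the middle of the loop never causes the GC to scan the uninitialized tail of the array, which is what previously required the temporary klass pointer substitution removed above.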
@@ -27,9 +27,6 @@
 #include "gc/shared/memAllocator.hpp"
 
 class ZObjArrayAllocator : public ObjArrayAllocator {
-private:
-  Klass* const _final_klass;
-
 public:
   ZObjArrayAllocator(Klass* klass, size_t word_size, int length, Thread* thread);
 
@@ -48,6 +48,9 @@ public:
     // Update thread local address bad mask
     ZThreadLocalData::set_address_bad_mask(thread, ZAddressBadMask);
 
+    // Relocate invisible root
+    ZThreadLocalData::do_invisible_root(thread, ZBarrier::relocate_barrier_on_root_oop_field);
+
     // Remap TLAB
     ZThreadLocalAllocBuffer::remap(thread);
   }
@@ -69,7 +72,7 @@ private:
 public:
   ZRelocateRootsTask() :
       ZTask("ZRelocateRootsTask"),
-      _roots(true /* visit_invisible */, true /* visit_jvmti_weak_export */) {}
+      _roots(true /* visit_jvmti_weak_export */) {}
 
   virtual void work() {
     // During relocation we need to visit the JVMTI
@@ -159,25 +159,19 @@ public:
 class ZRootsIteratorThreadClosure : public ThreadClosure {
 private:
   ZRootsIteratorClosure* _cl;
-  const bool _visit_invisible;
 
 public:
-  ZRootsIteratorThreadClosure(ZRootsIteratorClosure* cl, bool visit_invisible) :
-      _cl(cl),
-      _visit_invisible(visit_invisible) {}
+  ZRootsIteratorThreadClosure(ZRootsIteratorClosure* cl) :
+      _cl(cl) {}
 
   virtual void do_thread(Thread* thread) {
     ZRootsIteratorCodeBlobClosure code_cl(_cl);
     thread->oops_do(_cl, ClassUnloading ? &code_cl : NULL);
     _cl->do_thread(thread);
-    if (_visit_invisible && ZThreadLocalData::has_invisible_root(thread)) {
-      _cl->do_oop(ZThreadLocalData::invisible_root(thread));
-    }
   }
 };
 
-ZRootsIterator::ZRootsIterator(bool visit_invisible, bool visit_jvmti_weak_export) :
-    _visit_invisible(visit_invisible),
+ZRootsIterator::ZRootsIterator(bool visit_jvmti_weak_export) :
     _visit_jvmti_weak_export(visit_jvmti_weak_export),
     _universe(this),
     _object_synchronizer(this),
@@ -246,7 +240,7 @@ void ZRootsIterator::do_system_dictionary(ZRootsIteratorClosure* cl) {
 void ZRootsIterator::do_threads(ZRootsIteratorClosure* cl) {
   ZStatTimer timer(ZSubPhasePauseRootsThreads);
   ResourceMark rm;
-  ZRootsIteratorThreadClosure thread_cl(cl, _visit_invisible);
+  ZRootsIteratorThreadClosure thread_cl(cl);
   Threads::possibly_parallel_threads_do(true, &thread_cl);
 }
 
@@ -84,7 +84,6 @@ public:
 
 class ZRootsIterator {
 private:
-  bool _visit_invisible;
   bool _visit_jvmti_weak_export;
 
   void do_universe(ZRootsIteratorClosure* cl);
@@ -106,18 +105,12 @@ private:
   ZParallelOopsDo<ZRootsIterator, &ZRootsIterator::do_code_cache> _code_cache;
 
 public:
-  ZRootsIterator(bool visit_invisible = true, bool visit_jvmti_weak_export = false);
+  ZRootsIterator(bool visit_jvmti_weak_export = false);
   ~ZRootsIterator();
 
   void oops_do(ZRootsIteratorClosure* cl);
 };
 
-class ZRootsIteratorNoInvisible : public ZRootsIterator {
-public:
-  ZRootsIteratorNoInvisible() :
-      ZRootsIterator(false /* visit_invisible */, false /* visit_jvmti_weak_export */) {}
-};
-
 class ZConcurrentRootsIterator {
 private:
   ZOopStorageIterator _jni_handles_iter;
 
@@ -63,22 +63,20 @@ public:
   }
 
   static void set_invisible_root(Thread* thread, oop* root) {
-    assert(!has_invisible_root(thread), "Already set");
+    assert(data(thread)->_invisible_root == NULL, "Already set");
     data(thread)->_invisible_root = root;
   }
 
   static void clear_invisible_root(Thread* thread) {
-    assert(has_invisible_root(thread), "Should be set");
+    assert(data(thread)->_invisible_root != NULL, "Should be set");
     data(thread)->_invisible_root = NULL;
   }
 
-  static bool has_invisible_root(Thread* thread) {
-    return data(thread)->_invisible_root != NULL;
-  }
-
-  static oop* invisible_root(Thread* thread) {
-    assert(has_invisible_root(thread), "Should be set");
-    return data(thread)->_invisible_root;
+  template <typename T>
+  static void do_invisible_root(Thread* thread, T f) {
+    if (data(thread)->_invisible_root != NULL) {
+      f(data(thread)->_invisible_root);
+    }
   }
 
   static ByteSize address_bad_mask_offset() {
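The hunk above replaces the has_invisible_root()/invisible_root() query pair with a single callback-style visitor. A standalone sketch of that shape, with invented names (ThreadLocalGCData is not the real ZThreadLocalData):

```cpp
#include <cstdio>

// Invented stand-in for per-thread GC data holding at most one invisible root;
// not the real ZThreadLocalData.
struct ThreadLocalGCData {
  void** _invisible_root = nullptr;

  void set_invisible_root(void** root) { _invisible_root = root; }
  void clear_invisible_root()          { _invisible_root = nullptr; }

  // Callback-style accessor: callers no longer test-then-get, they hand over
  // the operation to apply if, and only if, a root is currently registered.
  template <typename T>
  void do_invisible_root(T f) {
    if (_invisible_root != nullptr) {
      f(_invisible_root);
    }
  }
};

int main() {
  int   dummy_object = 0;
  void* root         = &dummy_object;

  ThreadLocalGCData data;
  auto visit = [](void** p) { std::printf("visiting root slot %p\n", static_cast<void*>(p)); };

  data.do_invisible_root(visit);     // no root registered, callback not invoked

  data.set_invisible_root(&root);
  data.do_invisible_root(visit);     // callback invoked with the slot address

  data.clear_invisible_root();
  return 0;
}
```

Both call sites in this change consume the root this way: mark start passes ZBarrier::mark_barrier_on_invisible_root_oop_field and relocation start passes ZBarrier::relocate_barrier_on_root_oop_field, so there is no longer a reason to expose the raw slot.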