8371200: ZGC: C2 allocation deopt race
Reviewed-by: aboldtch, stefank
parent f125c76f5b
commit b9ee9541cf

@@ -86,10 +86,6 @@ inline void ZBarrier::self_heal(ZBarrierFastPath fast_path, volatile zpointer* p
  assert(ZPointer::is_remapped(heal_ptr), "invariant");

  for (;;) {
-    if (ptr == zpointer::null) {
-      assert(!ZVerifyOops || !ZHeap::heap()->is_in(uintptr_t(p)) || !ZHeap::heap()->is_old(p), "No raw null in old");
-    }
-
    assert_transition_monotonicity(ptr, heal_ptr);

    // Heal

@@ -223,27 +223,7 @@ void ZBarrierSet::on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {
    // breaks that promise. Take a few steps in the interpreter instead, which has
    // no such assumptions about where an object resides.
    deoptimize_allocation(thread);
    return;
  }

  if (!ZGeneration::young()->is_phase_mark_complete()) {
    return;
  }

  if (!page->is_relocatable()) {
    return;
  }

  if (ZRelocate::compute_to_age(age) != ZPageAge::old) {
    return;
  }

  // If the object is young, we have to still be careful that it isn't racingly
  // about to get promoted to the old generation. That causes issues when null
  // pointers are supposed to be coloured, but the JIT is a bit sloppy and
  // reinitializes memory with raw nulls. We detect this situation and detune
  // rather than relying on the JIT to never be sloppy with redundant initialization.
  deoptimize_allocation(thread);
}

void ZBarrierSet::print_on(outputStream* st) const {

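Note on the "coloured" nulls mentioned in the comments above: the following is a standalone toy model, not part of this patch and not ZGC's actual pointer layout. It only illustrates why an all-zero ("raw") null written by redundant JIT reinitialization fails the store-good expectation that a coloured null satisfies. The kStoreGoodColour constant and the helper names are invented for the illustration.

// Toy model only - not ZGC's real colour-bit layout.
#include <cassert>
#include <cstdint>

using toy_zpointer = uint64_t;

// Hypothetical store-good colour bit; in real ZGC the good-bit masks are
// maintained by the collector and change as phases flip.
constexpr toy_zpointer kStoreGoodColour = 0x1;

// A null that still carries the current store-good colour.
constexpr toy_zpointer coloured_null() { return kStoreGoodColour; }

// The check a store barrier conceptually performs.
constexpr bool is_store_good(toy_zpointer p) { return (p & kStoreGoodColour) != 0; }

int main() {
  const toy_zpointer raw_null  = 0;               // what sloppy reinitialization writes
  const toy_zpointer good_null = coloured_null(); // what promoted/old fields must hold

  assert(!is_store_good(raw_null));   // a raw null is not store good
  assert(is_store_good(good_null));   // a coloured null is
  return 0;
}
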
@@ -111,6 +111,16 @@ static const ZStatSampler ZSamplerJavaThreads("System", "Java Threads", ZStatUni
ZGenerationYoung* ZGeneration::_young;
ZGenerationOld* ZGeneration::_old;

+class ZRendezvousHandshakeClosure : public HandshakeClosure {
+public:
+  ZRendezvousHandshakeClosure()
+    : HandshakeClosure("ZRendezvous") {}
+
+  void do_thread(Thread* thread) {
+    // Does nothing
+  }
+};
+
ZGeneration::ZGeneration(ZGenerationId id, ZPageTable* page_table, ZPageAllocator* page_allocator)
  : _id(id),
    _page_allocator(page_allocator),

@@ -168,11 +178,19 @@ void ZGeneration::free_empty_pages(ZRelocationSetSelector* selector, int bulk) {
}

void ZGeneration::flip_age_pages(const ZRelocationSetSelector* selector) {
-  if (is_young()) {
-    _relocate.flip_age_pages(selector->not_selected_small());
-    _relocate.flip_age_pages(selector->not_selected_medium());
-    _relocate.flip_age_pages(selector->not_selected_large());
-  }
+  _relocate.flip_age_pages(selector->not_selected_small());
+  _relocate.flip_age_pages(selector->not_selected_medium());
+  _relocate.flip_age_pages(selector->not_selected_large());
+
+  // Perform a handshake between flip promotion and running the promotion barrier. This ensures
+  // that ZBarrierSet::on_slowpath_allocation_exit() observing a young page that was then racingly
+  // flip promoted, will run any stores without barriers to completion before responding to the
+  // handshake at the subsequent safepoint poll. This ensures that the flip promotion barriers always
+  // run after compiled code missing barriers, but before relocate start.
+  ZRendezvousHandshakeClosure cl;
+  Handshake::execute(&cl);
+
+  _relocate.barrier_flip_promoted_pages(_relocation_set.flip_promoted_pages());
}

static double fragmentation_limit(ZGenerationId generation) {

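For context on the handshake added above: a minimal sketch, not part of this patch, of the rendezvous pattern it relies on. Executing an empty HandshakeClosure returns only after every Java thread has run the (no-op) closure at a safepoint poll, so any barrier-less stores already in flight in compiled code have completed by then. It assumes HotSpot's runtime/handshake.hpp; the closure and function names here are illustrative.

#include "runtime/handshake.hpp"

// Illustrative no-op closure; the rendezvous itself is the point.
class ExampleRendezvousClosure : public HandshakeClosure {
public:
  ExampleRendezvousClosure()
    : HandshakeClosure("ExampleRendezvous") {}

  void do_thread(Thread* thread) {
    // Intentionally empty
  }
};

// Returns only after every Java thread has passed a safepoint poll and executed
// the closure, establishing the ordering described in the comment above.
static void example_rendezvous_with_java_threads() {
  ExampleRendezvousClosure cl;
  Handshake::execute(&cl);
}
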
@@ -235,7 +253,9 @@ void ZGeneration::select_relocation_set(bool promote_all) {
  _relocation_set.install(&selector);

  // Flip age young pages that were not selected
-  flip_age_pages(&selector);
+  if (is_young()) {
+    flip_age_pages(&selector);
+  }

  // Setup forwarding table
  ZRelocationSetIterator rs_iter(&_relocation_set);

@@ -1280,16 +1300,6 @@ bool ZGenerationOld::uses_clear_all_soft_reference_policy() const {
  return _reference_processor.uses_clear_all_soft_reference_policy();
}

-class ZRendezvousHandshakeClosure : public HandshakeClosure {
-public:
-  ZRendezvousHandshakeClosure()
-    : HandshakeClosure("ZRendezvous") {}
-
-  void do_thread(Thread* thread) {
-    // Does nothing
-  }
-};
-
class ZRendezvousGCThreads: public VM_Operation {
public:
  VMOp_Type type() const { return VMOp_ZRendezvousGCThreads; }

@@ -1322,7 +1322,7 @@ private:

public:
  ZFlipAgePagesTask(const ZArray<ZPage*>* pages)
-    : ZTask("ZPromotePagesTask"),
+    : ZTask("ZFlipAgePagesTask"),
      _iter(pages) {}

  virtual void work() {

@@ -1337,16 +1337,6 @@ public:
      // Figure out if this is proper promotion
      const bool promotion = to_age == ZPageAge::old;

-      if (promotion) {
-        // Before promoting an object (and before relocate start), we must ensure that all
-        // contained zpointers are store good. The marking code ensures that for non-null
-        // pointers, but null pointers are ignored. This code ensures that even null pointers
-        // are made store good, for the promoted objects.
-        prev_page->object_iterate([&](oop obj) {
-          ZIterator::basic_oop_iterate_safe(obj, ZBarrier::promote_barrier_on_young_oop_field);
-        });
-      }
-
      // Logging
      prev_page->log_msg(promotion ? " (flip promoted)" : " (flip survived)");

@@ -1360,7 +1350,7 @@ public:

      if (promotion) {
        ZGeneration::young()->flip_promote(prev_page, new_page);
-        // Defer promoted page registration times the lock is taken
+        // Defer promoted page registration
        promoted_pages.push(prev_page);
      }

@@ -1371,11 +1361,42 @@ public:
  }
};

+class ZPromoteBarrierTask : public ZTask {
+private:
+  ZArrayParallelIterator<ZPage*> _iter;
+
+public:
+  ZPromoteBarrierTask(const ZArray<ZPage*>* pages)
+    : ZTask("ZPromoteBarrierTask"),
+      _iter(pages) {}
+
+  virtual void work() {
+    SuspendibleThreadSetJoiner sts_joiner;
+
+    for (ZPage* page; _iter.next(&page);) {
+      // When promoting an object (and before relocate start), we must ensure that all
+      // contained zpointers are store good. The marking code ensures that for non-null
+      // pointers, but null pointers are ignored. This code ensures that even null pointers
+      // are made store good, for the promoted objects.
+      page->object_iterate([&](oop obj) {
+        ZIterator::basic_oop_iterate_safe(obj, ZBarrier::promote_barrier_on_young_oop_field);
+      });
+
+      SuspendibleThreadSet::yield();
+    }
+  }
+};
+
void ZRelocate::flip_age_pages(const ZArray<ZPage*>* pages) {
  ZFlipAgePagesTask flip_age_task(pages);
  workers()->run(&flip_age_task);
}

+void ZRelocate::barrier_flip_promoted_pages(const ZArray<ZPage*>* pages) {
+  ZPromoteBarrierTask promote_barrier_task(pages);
+  workers()->run(&promote_barrier_task);
+}
+
void ZRelocate::synchronize() {
  _queue.synchronize();
}

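A side note on ZPromoteBarrierTask above: a minimal sketch, not part of this patch, of the worker pattern it uses. A GC worker joins the suspendible thread set for the duration of its work and yields between items so a pending safepoint is not delayed by a long-running concurrent task. It assumes HotSpot's gc/shared/suspendibleThreadSet.hpp; the template and parameter names are illustrative.

#include "gc/shared/suspendibleThreadSet.hpp"

// Illustrative helper: drain 'iter' one item at a time, yielding to pending
// safepoints between items. Iterator::next(Item*) and the Work callable are
// assumed shapes, matching how ZArrayParallelIterator is used above.
template <typename Item, typename Iterator, typename Work>
void example_concurrent_drain(Iterator& iter, Work work) {
  SuspendibleThreadSetJoiner sts_joiner;  // joined for the scope of this function

  for (Item item; iter.next(&item);) {
    work(item);                           // process one item (e.g. one page)

    SuspendibleThreadSet::yield();        // let a pending safepoint proceed
  }
}
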
@@ -119,6 +119,7 @@ public:
  void relocate(ZRelocationSet* relocation_set);

  void flip_age_pages(const ZArray<ZPage*>* pages);
+  void barrier_flip_promoted_pages(const ZArray<ZPage*>* pages);

  void synchronize();
  void desynchronize();