diff --git a/src/hotspot/share/gc/g1/g1AllocRegion.hpp b/src/hotspot/share/gc/g1/g1AllocRegion.hpp
index 4abf3530533..a609472a5c9 100644
--- a/src/hotspot/share/gc/g1/g1AllocRegion.hpp
+++ b/src/hotspot/share/gc/g1/g1AllocRegion.hpp
@@ -88,6 +88,10 @@ private:
   // to allocate a new region even if the max has been reached.
   HeapWord* new_alloc_region_and_allocate(size_t word_size, bool force);

+  // Perform an allocation out of a new allocation region, retiring the current one.
+  inline HeapWord* attempt_allocation_using_new_region(size_t min_word_size,
+                                                       size_t desired_word_size,
+                                                       size_t* actual_word_size);
 protected:
   // The memory node index this allocation region belongs to.
   uint _node_index;
@@ -172,11 +176,6 @@ public:
                                       size_t desired_word_size,
                                       size_t* actual_word_size);

-  // Perform an allocation out of a new allocation region, retiring the current one.
-  inline HeapWord* attempt_allocation_using_new_region(size_t min_word_size,
-                                                       size_t desired_word_size,
-                                                       size_t* actual_word_size);
-
   // Should be called to allocate a new region even if the max of this
   // type of regions has been reached. Should only be called if other
   // allocation attempts have failed and we are not holding a valid
diff --git a/src/hotspot/share/gc/g1/g1Allocator.cpp b/src/hotspot/share/gc/g1/g1Allocator.cpp
index 4de83eb6d4c..6eab853381b 100644
--- a/src/hotspot/share/gc/g1/g1Allocator.cpp
+++ b/src/hotspot/share/gc/g1/g1Allocator.cpp
@@ -254,8 +254,8 @@ HeapWord* G1Allocator::survivor_attempt_allocation(size_t min_word_size,
   // the memory may have been used up as the threads waited to acquire the lock.
   if (!survivor_is_full()) {
     result = survivor_gc_alloc_region(node_index)->attempt_allocation_locked(min_word_size,
-                                                                            desired_word_size,
-                                                                            actual_word_size);
+                                                                             desired_word_size,
+                                                                             actual_word_size);
     if (result == NULL) {
       set_survivor_full();
     }
diff --git a/src/hotspot/share/gc/g1/g1Allocator.hpp b/src/hotspot/share/gc/g1/g1Allocator.hpp
index bc97de7fc15..9dd5fcdebe6 100644
--- a/src/hotspot/share/gc/g1/g1Allocator.hpp
+++ b/src/hotspot/share/gc/g1/g1Allocator.hpp
@@ -117,10 +117,6 @@ public:
                                       size_t desired_word_size,
                                       size_t* actual_word_size);

-  // Attempt allocation, retiring the current region and allocating a new one. It is
-  // assumed that attempt_allocation() has been tried and failed already first.
-  inline HeapWord* attempt_allocation_using_new_region(size_t word_size);
-
   // This is to be called when holding an appropriate lock. It first tries in the
   // current allocation region, and then attempts an allocation using a new region.
   inline HeapWord* attempt_allocation_locked(size_t word_size);
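Note on the hunks above: attempt_allocation_using_new_region() stops being a public entry point on G1AllocRegion and G1Allocator and becomes a private helper, folded into attempt_allocation_locked(), which retries the current region under the lock and then retires it and installs a fresh one. The sketch below is a minimal, self-contained model of that two-level pattern; Region and AllocCache are illustrative stand-ins, not the HotSpot classes touched by this patch.

#include <atomic>
#include <cstddef>
#include <mutex>

// Toy bump-pointer region; "top" advances with CAS so the fast path stays lock-free.
struct Region {
  std::atomic<size_t> top{0};
  size_t end = 1024;  // capacity in words

  void* attempt(size_t words) {
    size_t cur = top.load(std::memory_order_relaxed);
    while (cur + words <= end) {
      if (top.compare_exchange_weak(cur, cur + words)) {
        return this;  // stand-in for the real address
      }
    }
    return nullptr;  // region cannot satisfy this request
  }
};

struct AllocCache {
  std::mutex lock;
  Region* current = new Region();

  // Locked slow path: retry the current region first (another thread may have
  // installed a fresh one while we waited), then retire it and allocate anew.
  void* attempt_locked(size_t words) {
    std::lock_guard<std::mutex> guard(lock);
    if (void* p = current->attempt(words)) {
      return p;
    }
    delete current;          // "retire" the full region
    current = new Region();  // the now-private replace-and-allocate step
    return current->attempt(words);
  }
};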
diff --git a/src/hotspot/share/gc/g1/g1Allocator.inline.hpp b/src/hotspot/share/gc/g1/g1Allocator.inline.hpp
index 0237e002140..aa10347ad4e 100644
--- a/src/hotspot/share/gc/g1/g1Allocator.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1Allocator.inline.hpp
@@ -62,16 +62,6 @@ inline HeapWord* G1Allocator::attempt_allocation(size_t min_word_size,
   return mutator_alloc_region(node_index)->attempt_allocation(min_word_size, desired_word_size, actual_word_size);
 }

-inline HeapWord* G1Allocator::attempt_allocation_using_new_region(size_t word_size) {
-  uint node_index = current_node_index();
-  size_t temp;
-  HeapWord* result = mutator_alloc_region(node_index)->attempt_allocation_using_new_region(word_size, word_size, &temp);
-  assert(result != NULL || mutator_alloc_region(node_index)->get() == NULL,
-         "Must not have a mutator alloc region if there is no memory, but is " PTR_FORMAT,
-         p2i(mutator_alloc_region(node_index)->get()));
-  return result;
-}
-
 inline HeapWord* G1Allocator::attempt_allocation_locked(size_t word_size) {
   uint node_index = current_node_index();
   HeapWord* result = mutator_alloc_region(node_index)->attempt_allocation_locked(word_size);
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
index 0d651f7340e..35e92173888 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -398,7 +398,6 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
   HeapWord* result = NULL;
   for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
     bool should_try_gc;
-    bool preventive_collection_required = false;
     uint gc_count_before;

     {
@@ -406,32 +405,21 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {

       // Now that we have the lock, we first retry the allocation in case another
       // thread changed the region while we were waiting to acquire the lock.
-      size_t actual_size;
-      result = _allocator->attempt_allocation(word_size, word_size, &actual_size);
+      result = _allocator->attempt_allocation_locked(word_size);
       if (result != NULL) {
         return result;
       }

-      preventive_collection_required = policy()->preventive_collection_required(1);
-      if (!preventive_collection_required) {
-        // We've already attempted a lock-free allocation above, so we don't want to
-        // do it again. Let's jump straight to replacing the active region.
-        result = _allocator->attempt_allocation_using_new_region(word_size);
+      // If the GCLocker is active and we are bound for a GC, try expanding young gen.
+      // This is different to when only GCLocker::needs_gc() is set: try to avoid
+      // waiting because the GCLocker is active to not wait too long.
+      if (GCLocker::is_active_and_needs_gc() && policy()->can_expand_young_list()) {
+        // No need for an ergo message here, can_expand_young_list() does this when
+        // it returns true.
+        result = _allocator->attempt_allocation_force(word_size);
         if (result != NULL) {
           return result;
         }
-
-        // If the GCLocker is active and we are bound for a GC, try expanding young gen.
-        // This is different to when only GCLocker::needs_gc() is set: try to avoid
-        // waiting because the GCLocker is active to not wait too long.
-        if (GCLocker::is_active_and_needs_gc() && policy()->can_expand_young_list()) {
-          // No need for an ergo message here, can_expand_young_list() does this when
-          // it returns true.
-          result = _allocator->attempt_allocation_force(word_size);
-          if (result != NULL) {
-            return result;
-          }
-        }
       }

       // Only try a GC if the GCLocker does not signal the need for a GC. Wait until
@@ -443,10 +431,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
     }

     if (should_try_gc) {
-      GCCause::Cause gc_cause = preventive_collection_required ? GCCause::_g1_preventive_collection
-                                                               : GCCause::_g1_inc_collection_pause;
       bool succeeded;
-      result = do_collection_pause(word_size, gc_count_before, &succeeded, gc_cause);
+      result = do_collection_pause(word_size, gc_count_before, &succeeded, GCCause::_g1_inc_collection_pause);
       if (result != NULL) {
         assert(succeeded, "only way to get back a non-NULL result");
         log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
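With the preventive-collection branch removed, the slow path above reduces to: retry the allocation under Heap_lock (attempt_allocation_locked() now covers the old replace-the-active-region step), optionally force-expand the young generation while the GCLocker is active, otherwise schedule an ordinary collection pause and loop. A compact, runnable model of that control flow follows; every helper here is a toy stand-in, not the HotSpot API, and the GCLocker stall and retry-count policing are elided.

#include <cstddef>
#include <cstdio>

// Toy heap state standing in for G1; numbers are arbitrary.
static size_t free_words = 16;

static void* allocate_locked(size_t words) {  // models attempt_allocation_locked()
  if (words <= free_words) { free_words -= words; return &free_words; }
  return nullptr;
}

static void* collect_and_allocate(size_t words) {  // models do_collection_pause()
  free_words = 64;  // pretend the pause reclaimed space
  return allocate_locked(words);
}

static bool gclocker_needs_gc() { return false; }  // models GCLocker::needs_gc()

// Shape of the simplified attempt_allocation_slow(): no preventive-GC check,
// just "retry under lock, then GC, then loop".
static void* allocate_slow(size_t words) {
  for (unsigned try_count = 1; /* we'll return */; try_count++) {
    bool should_try_gc;
    {
      // The real code holds Heap_lock in this scope.
      if (void* p = allocate_locked(words)) return p;
      should_try_gc = !gclocker_needs_gc();
    }
    if (should_try_gc) {
      if (void* p = collect_and_allocate(words)) return p;
    }
  }
}

int main() {
  std::printf("allocated: %p\n", allocate_slow(32));
  return 0;
}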
@@ -860,7 +846,6 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
   HeapWord* result = NULL;
   for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
     bool should_try_gc;
-    bool preventive_collection_required = false;
     uint gc_count_before;


@@ -868,17 +853,14 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
       MutexLocker x(Heap_lock);

       size_t size_in_regions = humongous_obj_size_in_regions(word_size);
-      preventive_collection_required = policy()->preventive_collection_required((uint)size_in_regions);
-      if (!preventive_collection_required) {
-        // Given that humongous objects are not allocated in young
-        // regions, we'll first try to do the allocation without doing a
-        // collection hoping that there's enough space in the heap.
-        result = humongous_obj_allocate(word_size);
-        if (result != NULL) {
-          policy()->old_gen_alloc_tracker()->
-            add_allocated_humongous_bytes_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
-          return result;
-        }
+      // Given that humongous objects are not allocated in young
+      // regions, we'll first try to do the allocation without doing a
+      // collection hoping that there's enough space in the heap.
+      result = humongous_obj_allocate(word_size);
+      if (result != NULL) {
+        policy()->old_gen_alloc_tracker()->
+          add_allocated_humongous_bytes_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
+        return result;
       }

       // Only try a GC if the GCLocker does not signal the need for a GC. Wait until
@@ -890,10 +872,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
     }

     if (should_try_gc) {
-      GCCause::Cause gc_cause = preventive_collection_required ? GCCause::_g1_preventive_collection
-                                                               : GCCause::_g1_humongous_allocation;
       bool succeeded;
-      result = do_collection_pause(word_size, gc_count_before, &succeeded, gc_cause);
+      result = do_collection_pause(word_size, gc_count_before, &succeeded, GCCause::_g1_humongous_allocation);
       if (result != NULL) {
         assert(succeeded, "only way to get back a non-NULL result");
         log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
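For orientation on the humongous path: humongous_obj_size_in_regions() is a ceiling division of the request by the region size, and G1 treats an object as humongous once it is at least half a region. A worked example of that arithmetic, with an assumed 1 MB region size (the real value is computed at startup):

#include <cassert>
#include <cstddef>

// Ceiling division: a humongous object of word_size words spans this many
// regions of region_words words each.
static size_t humongous_size_in_regions(size_t word_size, size_t region_words) {
  return (word_size + region_words - 1) / region_words;
}

int main() {
  const size_t region_words = 1u << 17;  // 1 MB regions with 8-byte words (assumed)
  assert(humongous_size_in_regions(region_words / 2, region_words) == 1);  // half a region -> 1
  assert(humongous_size_in_regions(region_words,     region_words) == 1);  // exactly one region
  assert(humongous_size_in_regions(region_words + 1, region_words) == 2);  // spills into a 2nd
  return 0;
}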
diff --git a/src/hotspot/share/gc/g1/g1CollectionSet.cpp b/src/hotspot/share/gc/g1/g1CollectionSet.cpp
index 9d1e0347776..a6bbca73709 100644
--- a/src/hotspot/share/gc/g1/g1CollectionSet.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectionSet.cpp
@@ -100,10 +100,6 @@ void G1CollectionSet::clear_candidates() {
   _candidates = NULL;
 }

-bool G1CollectionSet::has_candidates() {
-  return _candidates != NULL && !_candidates->is_empty();
-}
-
 // Add the heap region at the head of the non-incremental collection set
 void G1CollectionSet::add_old_region(HeapRegion* hr) {
   assert_at_safepoint_on_vm_thread();
diff --git a/src/hotspot/share/gc/g1/g1CollectionSet.hpp b/src/hotspot/share/gc/g1/g1CollectionSet.hpp
index 68c271f6687..90fe22a2021 100644
--- a/src/hotspot/share/gc/g1/g1CollectionSet.hpp
+++ b/src/hotspot/share/gc/g1/g1CollectionSet.hpp
@@ -223,7 +223,6 @@ public:
   void initialize(uint max_region_length);

   void clear_candidates();
-  bool has_candidates();

   void set_candidates(G1CollectionSetCandidates* candidates) {
     assert(_candidates == NULL, "Trying to replace collection set candidates.");
diff --git a/src/hotspot/share/gc/g1/g1Policy.cpp b/src/hotspot/share/gc/g1/g1Policy.cpp
index dbf7519a6c3..ffe8b1642b3 100644
--- a/src/hotspot/share/gc/g1/g1Policy.cpp
+++ b/src/hotspot/share/gc/g1/g1Policy.cpp
@@ -71,8 +71,6 @@ G1Policy::G1Policy(STWGCTimer* gc_timer) :
   _reserve_regions(0),
   _young_gen_sizer(),
   _free_regions_at_end_of_collection(0),
-  _predicted_surviving_bytes_from_survivor(0),
-  _predicted_surviving_bytes_from_old(0),
   _rs_length(0),
   _pending_cards_at_gc_start(0),
   _concurrent_start_to_mixed(),
@@ -583,7 +581,6 @@ void G1Policy::record_full_collection_end() {
   // also call this on any additional surv rate groups

   _free_regions_at_end_of_collection = _g1h->num_free_regions();
-  update_survival_estimates_for_next_collection();

   _survivor_surv_rate_group->reset();
   update_young_length_bounds();
@@ -898,8 +895,6 @@ void G1Policy::record_young_collection_end(bool concurrent_operation_is_full_mar

   _free_regions_at_end_of_collection = _g1h->num_free_regions();

-  update_survival_estimates_for_next_collection();
-
   // Do not update dynamic IHOP due to G1 periodic collection as it is highly likely
   // that in this case we are not running in a "normal" operating mode.
   if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
@@ -1578,88 +1573,6 @@ void G1Policy::calculate_optional_collection_set_regions(G1CollectionSetCandidat
                             num_optional_regions, max_optional_regions, total_prediction_ms);
 }

-// Number of regions required to store the given number of bytes, taking
-// into account the target amount of wasted space in PLABs.
-static size_t get_num_regions_adjust_for_plab_waste(size_t byte_count) {
-  size_t byte_count_adjusted = byte_count * (size_t)(100 + TargetPLABWastePct) / 100.0;
-
-  // Round up the region count
-  return (byte_count_adjusted + HeapRegion::GrainBytes - 1) / HeapRegion::GrainBytes;
-}
-
-bool G1Policy::preventive_collection_required(uint alloc_region_count) {
-  if (!G1UsePreventiveGC || !Universe::is_fully_initialized()) {
-    // Don't attempt any preventive GC's if the feature is disabled,
-    // or before initialization is complete.
-    return false;
-  }
-
-  if (_g1h->young_regions_count() == 0 && !_collection_set->has_candidates()) {
-    return false;
-  }
-
-  uint eden_count = _g1h->eden_regions_count();
-  size_t const eden_surv_bytes_pred = _eden_surv_rate_group->accum_surv_rate_pred(eden_count) * HeapRegion::GrainBytes;
-  size_t const total_young_predicted_surviving_bytes = eden_surv_bytes_pred + _predicted_surviving_bytes_from_survivor;
-
-  uint required_regions = (uint)(get_num_regions_adjust_for_plab_waste(total_young_predicted_surviving_bytes) +
-                                 get_num_regions_adjust_for_plab_waste(_predicted_surviving_bytes_from_old));
-
-  if (required_regions > _g1h->num_free_or_available_regions() - alloc_region_count) {
-    log_debug(gc, ergo, cset)("Preventive GC, insufficient free or available regions. "
-                              "Predicted need %u. Curr Eden %u (Pred %u). Curr Survivor %u (Pred %u). Curr Old %u (Pred %u) Free or Avail %u (Free %u) Alloc %u",
-                              required_regions,
-                              eden_count,
-                              (uint)get_num_regions_adjust_for_plab_waste(eden_surv_bytes_pred),
-                              _g1h->survivor_regions_count(),
-                              (uint)get_num_regions_adjust_for_plab_waste(_predicted_surviving_bytes_from_survivor),
-                              _g1h->old_regions_count(),
-                              (uint)get_num_regions_adjust_for_plab_waste(_predicted_surviving_bytes_from_old),
-                              _g1h->num_free_or_available_regions(),
-                              _g1h->num_free_regions(),
-                              alloc_region_count);
-
-    return true;
-  }
-
-  return false;
-}
-
-void G1Policy::update_survival_estimates_for_next_collection() {
-  // Predict the number of bytes of surviving objects from survivor and old
-  // regions and update the associated members.
-
-  // Survivor regions
-  size_t survivor_bytes = 0;
-  const GrowableArray<HeapRegion*>* survivor_regions = _g1h->survivor()->regions();
-  for (GrowableArrayIterator<HeapRegion*> it = survivor_regions->begin();
-       it != survivor_regions->end();
-       ++it) {
-    survivor_bytes += predict_bytes_to_copy(*it);
-  }
-
-  _predicted_surviving_bytes_from_survivor = survivor_bytes;
-
-  // Old regions
-  if (!_collection_set->has_candidates()) {
-    _predicted_surviving_bytes_from_old = 0;
-    return;
-  }
-
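The removed heuristic reads naturally as arithmetic: predicted surviving bytes are inflated by TargetPLABWastePct (default 10) to cover PLAB waste, then rounded up to whole regions, and a preventive pause was requested when that estimate exceeded the free or available regions minus those being allocated. A worked example of the sizing step, with an assumed region size (not queried from the VM):

#include <cstddef>
#include <cstdio>

int main() {
  const size_t region_bytes          = 1024 * 1024;       // assumed 1 MB regions
  const size_t target_plab_waste_pct = 10;                // TargetPLABWastePct default
  const size_t predicted_bytes       = 10 * 1024 * 1024;  // 10 MB predicted to survive

  // Same shape as the removed get_num_regions_adjust_for_plab_waste():
  size_t adjusted = predicted_bytes * (100 + target_plab_waste_pct) / 100;  // 11 MB
  size_t regions  = (adjusted + region_bytes - 1) / region_bytes;           // ceil -> 11

  std::printf("%zu predicted bytes -> %zu regions\n", predicted_bytes, regions);
  return 0;
}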
-  // Use the minimum old gen collection set as conservative estimate for the number
-  // of regions to take for this calculation.
-  G1CollectionSetCandidates *candidates = _collection_set->candidates();
-  uint iterate_count = MIN2(candidates->num_remaining(), calc_min_old_cset_length(candidates));
-  uint current_index = candidates->cur_idx();
-  size_t old_bytes = 0;
-  for (uint i = 0; i < iterate_count; i++) {
-    HeapRegion *region = candidates->at(current_index + i);
-    old_bytes += predict_bytes_to_copy(region);
-  }
-
-  _predicted_surviving_bytes_from_old = old_bytes;
-}
-
 void G1Policy::transfer_survivors_to_cset(const G1SurvivorRegions* survivors) {
   start_adding_survivor_regions();

diff --git a/src/hotspot/share/gc/g1/g1Policy.hpp b/src/hotspot/share/gc/g1/g1Policy.hpp
index efd91c3df9b..450b83340bb 100644
--- a/src/hotspot/share/gc/g1/g1Policy.hpp
+++ b/src/hotspot/share/gc/g1/g1Policy.hpp
@@ -101,11 +101,6 @@ class G1Policy: public CHeapObj<mtGC> {

   uint _free_regions_at_end_of_collection;

-  // These values are predictions of how much we think will survive in each
-  // section of the heap.
-  size_t _predicted_surviving_bytes_from_survivor;
-  size_t _predicted_surviving_bytes_from_old;
-
   size_t _rs_length;

   size_t _pending_cards_at_gc_start;
@@ -360,11 +355,6 @@ public:
                                                  double time_remaining_ms,
                                                  uint& num_optional_regions);

-  // Returns whether a collection should be done proactively, taking into
-  // account the current number of free regions and the expected survival
-  // rates in each section of the heap.
-  bool preventive_collection_required(uint region_count);
-
 private:

   // Predict the number of bytes of surviving objects from survivor and old
diff --git a/src/hotspot/share/gc/g1/g1VMOperations.cpp b/src/hotspot/share/gc/g1/g1VMOperations.cpp
index 4d3d81424d7..8ccd7f2c9fb 100644
--- a/src/hotspot/share/gc/g1/g1VMOperations.cpp
+++ b/src/hotspot/share/gc/g1/g1VMOperations.cpp
@@ -123,15 +123,10 @@ VM_G1CollectForAllocation::VM_G1CollectForAllocation(size_t word_size,
   VM_CollectForAllocation(word_size, gc_count_before, gc_cause),
   _gc_succeeded(false) {}

-bool VM_G1CollectForAllocation::should_try_allocation_before_gc() {
-  // Don't allocate before a preventive GC.
-  return _gc_cause != GCCause::_g1_preventive_collection;
-}
-
 void VM_G1CollectForAllocation::doit() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();

-  if (should_try_allocation_before_gc() && _word_size > 0) {
+  if (_word_size > 0) {
     // An allocation has been requested. So, try to do that first.
     _result = g1h->attempt_allocation_at_safepoint(_word_size,
                                                    false /* expect_null_cur_alloc_region */);
diff --git a/src/hotspot/share/gc/g1/g1VMOperations.hpp b/src/hotspot/share/gc/g1/g1VMOperations.hpp
index edf53073789..cfca9e21d07 100644
--- a/src/hotspot/share/gc/g1/g1VMOperations.hpp
+++ b/src/hotspot/share/gc/g1/g1VMOperations.hpp
@@ -77,9 +77,6 @@ public:
   virtual VMOp_Type type() const { return VMOp_G1CollectForAllocation; }
   virtual void doit();
   bool gc_succeeded() const { return _gc_succeeded; }
-
-private:
-  bool should_try_allocation_before_gc();
 };

 // Concurrent G1 stop-the-world operations such as remark and cleanup.
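On the VM_G1CollectForAllocation change: with should_try_allocation_before_gc() gone, every allocation-triggered pause first tries to satisfy the request at the safepoint and only collects when that attempt fails; previously, preventive pauses skipped the attempt. A toy model of that allocate-first shape (names are illustrative, not the HotSpot API):

#include <cstddef>

struct ToyHeap {
  size_t free_words = 0;
  void* allocate(size_t words) {
    if (words <= free_words) { free_words -= words; return &free_words; }
    return nullptr;
  }
  void collect() { free_words = 1024; }  // pretend the pause reclaimed space
};

// Models the doit() body: allocation first, pause only on failure.
static void* collect_for_allocation(ToyHeap& heap, size_t word_size) {
  if (word_size > 0) {
    if (void* p = heap.allocate(word_size)) {
      return p;  // another thread's GC already made room; skip the pause
    }
  }
  heap.collect();  // the actual collection pause
  return word_size > 0 ? heap.allocate(word_size) : nullptr;
}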
diff --git a/src/hotspot/share/gc/shared/gcCause.cpp b/src/hotspot/share/gc/shared/gcCause.cpp
index 37cd32b0ff2..ec0281319f7 100644
--- a/src/hotspot/share/gc/shared/gcCause.cpp
+++ b/src/hotspot/share/gc/shared/gcCause.cpp
@@ -99,9 +99,6 @@ const char* GCCause::to_string(GCCause::Cause cause) {
     case _g1_periodic_collection:
       return "G1 Periodic Collection";

-    case _g1_preventive_collection:
-      return "G1 Preventive Collection";
-
     case _dcmd_gc_run:
       return "Diagnostic Command";

diff --git a/src/hotspot/share/gc/shared/gcCause.hpp b/src/hotspot/share/gc/shared/gcCause.hpp
index 1def51523d6..f949cf8889a 100644
--- a/src/hotspot/share/gc/shared/gcCause.hpp
+++ b/src/hotspot/share/gc/shared/gcCause.hpp
@@ -74,7 +74,6 @@ class GCCause : public AllStatic {
     _g1_compaction_pause,
     _g1_humongous_allocation,
     _g1_periodic_collection,
-    _g1_preventive_collection,

     _dcmd_gc_run,
diff --git a/test/hotspot/jtreg/gc/g1/TestEvacuationFailure.java b/test/hotspot/jtreg/gc/g1/TestEvacuationFailure.java
index d04a1a4374b..616d740cff9 100644
--- a/test/hotspot/jtreg/gc/g1/TestEvacuationFailure.java
+++ b/test/hotspot/jtreg/gc/g1/TestEvacuationFailure.java
@@ -50,7 +50,6 @@ public class TestEvacuationFailure {
                                               "-XX:G1EvacuationFailureALotCount=100",
                                               "-XX:G1EvacuationFailureALotInterval=1",
                                               "-XX:+UnlockDiagnosticVMOptions",
-                                              "-XX:-G1UsePreventiveGC",
                                               "-Xlog:gc",
                                               GCTestWithEvacuationFailure.class.getName());

diff --git a/test/hotspot/jtreg/gc/g1/TestGCLogMessages.java b/test/hotspot/jtreg/gc/g1/TestGCLogMessages.java
index d0fe28f0bbc..6bcb2c2b503 100644
--- a/test/hotspot/jtreg/gc/g1/TestGCLogMessages.java
+++ b/test/hotspot/jtreg/gc/g1/TestGCLogMessages.java
@@ -278,7 +278,6 @@ public class TestGCLogMessages {
                                               "-XX:G1EvacuationFailureALotCount=100",
                                               "-XX:G1EvacuationFailureALotInterval=1",
                                               "-XX:+UnlockDiagnosticVMOptions",
-                                              "-XX:-G1UsePreventiveGC",
                                               "-Xlog:gc+phases=debug",
                                               GCTestWithEvacuationFailure.class.getName());

@@ -291,7 +290,6 @@ public class TestGCLogMessages {
                                               "-Xmn16M",
                                               "-Xms32M",
                                               "-XX:+UnlockDiagnosticVMOptions",
-                                              "-XX:-G1UsePreventiveGC",
                                               "-Xlog:gc+phases=trace",
                                               GCTestWithEvacuationFailure.class.getName());

diff --git a/test/hotspot/jtreg/gc/g1/TestVerifyGCType.java b/test/hotspot/jtreg/gc/g1/TestVerifyGCType.java
index 7030b68714e..12a6054a74a 100644
--- a/test/hotspot/jtreg/gc/g1/TestVerifyGCType.java
+++ b/test/hotspot/jtreg/gc/g1/TestVerifyGCType.java
@@ -125,8 +125,7 @@ public class TestVerifyGCType {
                                  new String[] {"-XX:+G1EvacuationFailureALot",
                                                "-XX:G1EvacuationFailureALotCount=100",
                                                "-XX:G1EvacuationFailureALotInterval=1",
-                                               "-XX:+UnlockDiagnosticVMOptions",
-                                               "-XX:-G1UsePreventiveGC"});
+                                               "-XX:+UnlockDiagnosticVMOptions"});

     output.shouldHaveExitValue(0);
     verifyCollection("Pause Young (Normal)", false, false, true, output.getStdout());