Mirror of https://github.com/openjdk/jdk.git, synced 2026-04-10 23:18:45 +00:00.
8276932: G1: Annotate methods with override explicitly in g1CollectedHeap.hpp
Reviewed-by: tschatzl, sjohanss
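The change is mechanical: methods in g1CollectedHeap.hpp that override a base-class virtual drop the redundant leading "virtual" and instead carry the C++11 "override" specifier. As a minimal standalone sketch (hypothetical Base/Derived names, not from the patch), this is what explicit "override" buys at compile time:

  class Base {
  public:
    virtual ~Base() = default;
    virtual bool do_object_b(void* p) const { return false; }
  };

  class Derived : public Base {
  public:
    // OK: the compiler verifies this really overrides Base::do_object_b.
    bool do_object_b(void* p) const override { return true; }

    // A signature slip would no longer compile:
    //   bool do_object_b(void* p) override;  // error: drops 'const', so it
    //                                        // overrides nothing in Base
  };

With a plain re-declaration, the mismatched method would silently become a new, unrelated virtual; with "override" it is a hard error.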
commit 02f7900882 (parent fdcd16a38f)

src/hotspot/share/gc/g1/g1CollectedHeap.hpp
@@ -100,21 +100,21 @@ class G1STWIsAliveClosure : public BoolObjectClosure {
   G1CollectedHeap* _g1h;
 public:
   G1STWIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
-  bool do_object_b(oop p);
+  bool do_object_b(oop p) override;
 };
 
 class G1STWSubjectToDiscoveryClosure : public BoolObjectClosure {
   G1CollectedHeap* _g1h;
 public:
   G1STWSubjectToDiscoveryClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
-  bool do_object_b(oop p);
+  bool do_object_b(oop p) override;
 };
 
 class G1RegionMappingChangedListener : public G1MappingChangedListener {
 private:
   void reset_from_card_cache(uint start_idx, size_t num_regions);
 public:
-  virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
+  void on_commit(uint start_idx, size_t num_regions, bool zero_filled) override;
 };
 
 class G1CollectedHeap : public CollectedHeap {
@@ -235,7 +235,7 @@ private:
   // roots or the young generation.
   class HumongousReclaimCandidates : public G1BiasedMappedArray<bool> {
   protected:
-    bool default_value() const { return false; }
+    bool default_value() const override { return false; }
   public:
     void clear() { G1BiasedMappedArray<bool>::clear(); }
     void set_candidate(uint region, bool value) {
@@ -306,7 +306,7 @@ private:
                                                  size_t size,
                                                  size_t translation_factor);
 
-  void trace_heap(GCWhen::Type when, const GCTracer* tracer);
+  void trace_heap(GCWhen::Type when, const GCTracer* tracer) override;
 
   // These are macros so that, if the assert fires, we get the correct
   // line number, file, etc.
@@ -439,12 +439,12 @@ private:
   // humongous allocation requests should go to mem_allocate() which
   // will satisfy them with a special path.
 
-  virtual HeapWord* allocate_new_tlab(size_t min_size,
-                                      size_t requested_size,
-                                      size_t* actual_size);
+  HeapWord* allocate_new_tlab(size_t min_size,
+                              size_t requested_size,
+                              size_t* actual_size) override;
 
-  virtual HeapWord* mem_allocate(size_t word_size,
-                                 bool* gc_overhead_limit_was_exceeded);
+  HeapWord* mem_allocate(size_t word_size,
+                         bool* gc_overhead_limit_was_exceeded) override;
 
   // First-level mutator allocation attempt: try to allocate out of
   // the mutator alloc region without taking the Heap_lock. This
@@ -495,7 +495,7 @@ private:
                           bool do_maximum_compaction);
 
   // Callback from VM_G1CollectFull operation, or collect_as_vm_thread.
-  virtual void do_full_collection(bool clear_all_soft_refs);
+  void do_full_collection(bool clear_all_soft_refs) override;
 
   // Helper to do a full collection that clears soft references.
   bool upgrade_to_full_collection();
@@ -891,26 +891,26 @@ public:
   // Initialize the G1CollectedHeap to have the initial and
   // maximum sizes and remembered and barrier sets
   // specified by the policy object.
-  jint initialize();
+  jint initialize() override;
 
   // Returns whether concurrent mark threads (and the VM) are about to terminate.
   bool concurrent_mark_is_terminating() const;
 
-  virtual void stop();
-  virtual void safepoint_synchronize_begin();
-  virtual void safepoint_synchronize_end();
+  void stop() override;
+  void safepoint_synchronize_begin() override;
+  void safepoint_synchronize_end() override;
 
   // Does operations required after initialization has been done.
-  void post_initialize();
+  void post_initialize() override;
 
   // Initialize weak reference processing.
   void ref_processing_init();
 
-  virtual Name kind() const {
+  Name kind() const override {
     return CollectedHeap::G1;
   }
 
-  virtual const char* name() const {
+  const char* name() const override {
     return "G1";
   }
 
@@ -927,14 +927,14 @@ public:
   const G1CollectionSet* collection_set() const { return &_collection_set; }
   G1CollectionSet* collection_set() { return &_collection_set; }
 
-  virtual SoftRefPolicy* soft_ref_policy();
+  SoftRefPolicy* soft_ref_policy() override;
 
-  virtual void initialize_serviceability();
-  virtual MemoryUsage memory_usage();
-  virtual GrowableArray<GCMemoryManager*> memory_managers();
-  virtual GrowableArray<MemoryPool*> memory_pools();
+  void initialize_serviceability() override;
+  MemoryUsage memory_usage() override;
+  GrowableArray<GCMemoryManager*> memory_managers() override;
+  GrowableArray<MemoryPool*> memory_pools() override;
 
-  virtual void fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap);
+  void fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap) override;
 
   // Try to minimize the remembered set.
   void scrub_rem_set();
@@ -958,8 +958,8 @@ public:
 
   size_t unused_committed_regions_in_bytes() const;
 
-  virtual size_t capacity() const;
-  virtual size_t used() const;
+  size_t capacity() const override;
+  size_t used() const override;
   // This should be called when we're not holding the heap lock. The
   // result might be a bit inaccurate.
   size_t used_unlocked() const;
@@ -971,7 +971,7 @@ public:
   // end fields defining the extent of the contiguous allocation region.)
   // But G1CollectedHeap doesn't yet support this.
 
-  virtual bool is_maximal_no_gc() const {
+  bool is_maximal_no_gc() const override {
     return _hrm.available() == 0;
   }
 
@@ -1026,7 +1026,7 @@ public:
   // Perform a collection of the heap; intended for use in implementing
   // "System.gc". This probably implies as full a collection as the
   // "CollectedHeap" supports.
-  virtual void collect(GCCause::Cause cause);
+  void collect(GCCause::Cause cause) override;
 
   // Perform a collection of the heap with the given cause.
   // Returns whether this collection actually executed.
@@ -1040,7 +1040,7 @@ public:
   void prepend_to_freelist(FreeRegionList* list);
   void decrement_summary_bytes(size_t bytes);
 
-  virtual bool is_in(const void* p) const;
+  bool is_in(const void* p) const override;
 
   // Return "TRUE" iff the given object address is within the collection
   // set. Assumes that the reference points into the heap.
@@ -1080,12 +1080,12 @@ public:
   void object_iterate_parallel(ObjectClosure* cl, uint worker_id, HeapRegionClaimer* claimer);
 
   // Iterate over all objects, calling "cl.do_object" on each.
-  virtual void object_iterate(ObjectClosure* cl);
+  void object_iterate(ObjectClosure* cl) override;
 
-  virtual ParallelObjectIterator* parallel_object_iterator(uint thread_num);
+  ParallelObjectIterator* parallel_object_iterator(uint thread_num) override;
 
   // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
-  virtual void keep_alive(oop obj);
+  void keep_alive(oop obj) override;
 
   // Iterate over heap regions, in address order, terminating the
   // iteration early if the "do_heap_region" method returns "true".
@@ -1176,10 +1176,10 @@ public:
   // Section on thread-local allocation buffers (TLABs)
   // See CollectedHeap for semantics.
 
-  size_t tlab_capacity(Thread* ignored) const;
-  size_t tlab_used(Thread* ignored) const;
-  size_t max_tlab_size() const;
-  size_t unsafe_max_tlab_alloc(Thread* ignored) const;
+  size_t tlab_capacity(Thread* ignored) const override;
+  size_t tlab_used(Thread* ignored) const override;
+  size_t max_tlab_size() const override;
+  size_t unsafe_max_tlab_alloc(Thread* ignored) const override;
 
   inline bool is_in_young(const oop obj);
 
@@ -1203,7 +1203,7 @@ public:
   static size_t humongous_obj_size_in_regions(size_t word_size);
 
   // Print the maximum heap capacity.
-  virtual size_t max_capacity() const;
+  size_t max_capacity() const override;
 
   Tickspan time_since_last_collection() const { return Ticks::now() - _collection_pause_end; }
 
@@ -1266,16 +1266,16 @@ public:
   // Optimized nmethod scanning support routines
 
   // Register the given nmethod with the G1 heap.
-  virtual void register_nmethod(nmethod* nm);
+  void register_nmethod(nmethod* nm) override;
 
   // Unregister the given nmethod from the G1 heap.
-  virtual void unregister_nmethod(nmethod* nm);
+  void unregister_nmethod(nmethod* nm) override;
 
   // No nmethod flushing needed.
-  virtual void flush_nmethod(nmethod* nm) {}
+  void flush_nmethod(nmethod* nm) override {}
 
   // No nmethod verification implemented.
-  virtual void verify_nmethod(nmethod* nm) {}
+  void verify_nmethod(nmethod* nm) override {}
 
   // Recalculate amount of used memory after GC. Must be called after all allocation
   // has finished.
@@ -1297,7 +1297,7 @@ public:
   // Verification
 
   // Perform any cleanup actions necessary before allowing a verification.
-  virtual void prepare_for_verify();
+  void prepare_for_verify() override;
 
   // Perform verification.
 
@@ -1314,14 +1314,14 @@ public:
   // Currently there is only one place where this is called with
   // vo == UseFullMarking, which is to verify the marking during a
   // full GC.
-  void verify(VerifyOption vo);
+  void verify(VerifyOption vo) override;
 
   // WhiteBox testing support.
-  virtual bool supports_concurrent_gc_breakpoints() const;
+  bool supports_concurrent_gc_breakpoints() const override;
 
-  virtual WorkerThreads* safepoint_workers() { return _workers; }
+  WorkerThreads* safepoint_workers() override { return _workers; }
 
-  virtual bool is_archived_object(oop object) const;
+  bool is_archived_object(oop object) const override;
 
   // The methods below are here for convenience and dispatch the
   // appropriate method depending on value of the given VerifyOption
@@ -1344,21 +1344,21 @@ private:
   void print_regions_on(outputStream* st) const;
 
 public:
-  virtual void print_on(outputStream* st) const;
-  virtual void print_extended_on(outputStream* st) const;
-  virtual void print_on_error(outputStream* st) const;
+  void print_on(outputStream* st) const override;
+  void print_extended_on(outputStream* st) const override;
+  void print_on_error(outputStream* st) const override;
 
-  virtual void gc_threads_do(ThreadClosure* tc) const;
+  void gc_threads_do(ThreadClosure* tc) const override;
 
   // Override
-  void print_tracing_info() const;
+  void print_tracing_info() const override;
 
   // The following two methods are helpful for debugging RSet issues.
   void print_cset_rsets() PRODUCT_RETURN;
   void print_all_rsets() PRODUCT_RETURN;
 
   // Used to print information about locations in the hs_err file.
-  virtual bool print_location(outputStream* st, void* addr) const;
+  bool print_location(outputStream* st, void* addr) const override;
 };
 
 // Scoped object that performs common pre- and post-gc heap printing operations.
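A side note on tooling, not part of this change: once a header uses override consistently, Clang's -Winconsistent-missing-override and GCC's -Wsuggest-override can flag any overriding declaration that was missed, which helps keep a file of this size consistent as the CollectedHeap interface evolves.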