From cd598622cc942de388b0cc494dac367d6fbe8935 Mon Sep 17 00:00:00 2001
From: Coleen Phillimore <coleenp@openjdk.org>
Date: Mon, 24 Jun 2019 16:51:23 -0400
Subject: [PATCH] 8214822: Move ConcurrentHashTable VALUE parameter to CONFIG

Make VALUE parameter be included in CONFIG configuration, also remove
BaseConfig

Reviewed-by: dholmes, kbarrett
---
 src/hotspot/share/classfile/stringTable.cpp   |  20 +-
 src/hotspot/share/classfile/symbolTable.cpp   |  17 +-
 .../share/prims/resolvedMethodTable.cpp       |  18 +-
 .../share/utilities/concurrentHashTable.hpp   |  25 +-
 .../utilities/concurrentHashTable.inline.hpp  | 299 +++++++++---------
 .../concurrentHashTableTasks.inline.hpp       |  20 +-
 .../utilities/test_concurrentHashtable.cpp    |  37 ++-
 7 files changed, 205 insertions(+), 231 deletions(-)

diff --git a/src/hotspot/share/classfile/stringTable.cpp b/src/hotspot/share/classfile/stringTable.cpp
index bfe6d07df8a..f2f3014972f 100644
--- a/src/hotspot/share/classfile/stringTable.cpp
+++ b/src/hotspot/share/classfile/stringTable.cpp
@@ -79,8 +79,7 @@ static CompactHashtable<
 
 // --------------------------------------------------------------------------
 
-typedef ConcurrentHashTable<WeakHandle<vm_string_table_data>,
-                            StringTableConfig, mtSymbol> StringTableHash;
+typedef ConcurrentHashTable<StringTableConfig, mtSymbol> StringTableHash;
 static StringTableHash* _local_table = NULL;
 
 volatile bool StringTable::_has_work = false;
@@ -101,11 +100,12 @@ uintx hash_string(const jchar* s, int len, bool useAlt) {
     java_lang_String::hash_code(s, len);
 }
 
-class StringTableConfig : public StringTableHash::BaseConfig {
+class StringTableConfig : public StackObj {
  private:
  public:
-  static uintx get_hash(WeakHandle<vm_string_table_data> const& value,
-                        bool* is_dead) {
+  typedef WeakHandle<vm_string_table_data> Value;
+
+  static uintx get_hash(Value const& value, bool* is_dead) {
     EXCEPTION_MARK;
     oop val_oop = value.peek();
     if (val_oop == NULL) {
@@ -124,15 +124,13 @@ class StringTableConfig : public StringTableHash::BaseConfig {
     return 0;
   }
   // We use default allocation/deallocation but counted
-  static void* allocate_node(size_t size,
-                             WeakHandle<vm_string_table_data> const& value) {
+  static void* allocate_node(size_t size, Value const& value) {
     StringTable::item_added();
-    return StringTableHash::BaseConfig::allocate_node(size, value);
+    return AllocateHeap(size, mtSymbol);
   }
-  static void free_node(void* memory,
-                        WeakHandle<vm_string_table_data> const& value) {
+  static void free_node(void* memory, Value const& value) {
     value.release();
-    StringTableHash::BaseConfig::free_node(memory, value);
+    FreeHeap(memory);
     StringTable::item_removed();
   }
 };
diff --git a/src/hotspot/share/classfile/symbolTable.cpp b/src/hotspot/share/classfile/symbolTable.cpp
index 4023e722eee..99e31b39744 100644
--- a/src/hotspot/share/classfile/symbolTable.cpp
+++ b/src/hotspot/share/classfile/symbolTable.cpp
@@ -77,8 +77,7 @@ static OffsetCompactHashtable<
 
 // --------------------------------------------------------------------------
 
-typedef ConcurrentHashTable<Symbol*,
-                            SymbolTableConfig, mtSymbol> SymbolTableHash;
+typedef ConcurrentHashTable<SymbolTableConfig, mtSymbol> SymbolTableHash;
 static SymbolTableHash* _local_table = NULL;
 
 volatile bool SymbolTable::_has_work = 0;
@@ -121,10 +120,12 @@ static uintx hash_shared_symbol(const char* s, int len) {
 }
 #endif
 
-class SymbolTableConfig : public SymbolTableHash::BaseConfig {
+class SymbolTableConfig : public AllStatic {
 private:
 public:
-  static uintx get_hash(Symbol* const& value, bool* is_dead) {
+  typedef Symbol* Value;  // value of the Node in the hashtable
+
+  static uintx get_hash(Value const& value, bool* is_dead) {
     *is_dead = (value->refcount() == 0);
     if (*is_dead) {
       return 0;
@@ -133,11 +134,11 @@ public:
     }
   }
   // We use default allocation/deallocation but counted
-  static void* allocate_node(size_t size, Symbol* const& value) {
+  static void* allocate_node(size_t size, Value const& value) {
     SymbolTable::item_added();
-    return SymbolTableHash::BaseConfig::allocate_node(size, value);
+    return AllocateHeap(size, mtSymbol);
   }
-  static void free_node(void* memory, Symbol* const& value) {
+  static void free_node(void* memory, Value const& value) {
     // We get here because #1 some threads lost a race to insert a newly created Symbol
     // or #2 we're cleaning up unused symbol.
     // If #1, then the symbol can be either permanent (refcount==PERM_REFCOUNT),
@@ -150,7 +151,7 @@
       assert(value->refcount() == 0, "expected dead symbol");
     }
     SymbolTable::delete_symbol(value);
-    SymbolTableHash::BaseConfig::free_node(memory, value);
+    FreeHeap(memory);
     SymbolTable::item_removed();
   }
 };
diff --git a/src/hotspot/share/prims/resolvedMethodTable.cpp b/src/hotspot/share/prims/resolvedMethodTable.cpp
index 106f3b3b290..9e84b7d6dbe 100644
--- a/src/hotspot/share/prims/resolvedMethodTable.cpp
+++ b/src/hotspot/share/prims/resolvedMethodTable.cpp
@@ -56,15 +56,15 @@ unsigned int method_hash(const Method* method) {
   return name_hash ^ signature_hash;
 }
 
-typedef ConcurrentHashTable<WeakHandle<vm_resolved_method_table_data>,
-                            ResolvedMethodTableConfig,
+typedef ConcurrentHashTable<ResolvedMethodTableConfig,
                             mtClass> ResolvedMethodTableHash;
 
-class ResolvedMethodTableConfig : public ResolvedMethodTableHash::BaseConfig {
+class ResolvedMethodTableConfig : public AllStatic {
  private:
  public:
-  static uintx get_hash(WeakHandle<vm_resolved_method_table_data> const& value,
-                        bool* is_dead) {
+  typedef WeakHandle<vm_resolved_method_table_data> Value;
+
+  static uintx get_hash(Value const& value, bool* is_dead) {
     oop val_oop = value.peek();
     if (val_oop == NULL) {
       *is_dead = true;
@@ -76,13 +76,13 @@
   }
 
   // We use default allocation/deallocation but counted
-  static void* allocate_node(size_t size, WeakHandle<vm_resolved_method_table_data> const& value) {
+  static void* allocate_node(size_t size, Value const& value) {
     ResolvedMethodTable::item_added();
-    return ResolvedMethodTableHash::BaseConfig::allocate_node(size, value);
+    return AllocateHeap(size, mtClass);
   }
-  static void free_node(void* memory, WeakHandle<vm_resolved_method_table_data> const& value) {
+  static void free_node(void* memory, Value const& value) {
     value.release();
-    ResolvedMethodTableHash::BaseConfig::free_node(memory, value);
+    FreeHeap(memory);
     ResolvedMethodTable::item_removed();
   }
 };
diff --git a/src/hotspot/share/utilities/concurrentHashTable.hpp b/src/hotspot/share/utilities/concurrentHashTable.hpp
index dc553affe19..bec1661d4ce 100644
--- a/src/hotspot/share/utilities/concurrentHashTable.hpp
+++ b/src/hotspot/share/utilities/concurrentHashTable.hpp
@@ -38,8 +38,9 @@
 class Thread;
 class Mutex;
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MEMFLAGS F>
 class ConcurrentHashTable : public CHeapObj<F> {
+  typedef typename CONFIG::Value VALUE;
  private:
   // This is the internal node structure.
   // Only constructed with placement new from memory allocated with MEMFLAGS of
@@ -252,10 +253,10 @@ class ConcurrentHashTable : public CHeapObj<F> {
   class ScopedCS: public StackObj {
    protected:
     Thread* _thread;
-    ConcurrentHashTable<VALUE, CONFIG, F>* _cht;
+    ConcurrentHashTable<CONFIG, F>* _cht;
     GlobalCounter::CSContext _cs_context;
    public:
-    ScopedCS(Thread* thread, ConcurrentHashTable<VALUE, CONFIG, F>* cht);
+    ScopedCS(Thread* thread, ConcurrentHashTable<CONFIG, F>* cht);
     ~ScopedCS();
   };
 
@@ -473,26 +474,12 @@ class ConcurrentHashTable : public CHeapObj<F> {
                          const char* table_name);
 
   // Moves all nodes from this table to to_cht
-  bool try_move_nodes_to(Thread* thread, ConcurrentHashTable<VALUE, CONFIG, F>* to_cht);
-
-  // This is a Curiously Recurring Template Pattern (CRPT) interface for the
-  // specialization.
-  struct BaseConfig {
-   public:
-    // Called when the hash table needs the hash for a VALUE.
-    static uintx get_hash(const VALUE& value, bool* dead) {
-      return CONFIG::get_hash(value, dead);
-    }
-    // Default node allocation.
-    static void* allocate_node(size_t size, const VALUE& value);
-    // Default node reclamation.
-    static void free_node(void* memory, const VALUE& value);
-  };
+  bool try_move_nodes_to(Thread* thread, ConcurrentHashTable<CONFIG, F>* to_cht);
 
   // Scoped multi getter.
   class MultiGetHandle : private ScopedCS {
    public:
-    MultiGetHandle(Thread* thread, ConcurrentHashTable<VALUE, CONFIG, F>* cht)
+    MultiGetHandle(Thread* thread, ConcurrentHashTable<CONFIG, F>* cht)
       : ScopedCS(thread, cht) {}
     // In the MultiGetHandle scope you can lookup items matching LOOKUP_FUNC.
     // The VALUEs are safe as long as you never save the VALUEs outside the
diff --git a/src/hotspot/share/utilities/concurrentHashTable.inline.hpp b/src/hotspot/share/utilities/concurrentHashTable.inline.hpp
index 72248275132..7681d520468 100644
--- a/src/hotspot/share/utilities/concurrentHashTable.inline.hpp
+++ b/src/hotspot/share/utilities/concurrentHashTable.inline.hpp
@@ -53,28 +53,28 @@ static const void* POISON_PTR = (void*)0xffbadbac;
 #endif
 
 // Node
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
-ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline typename ConcurrentHashTable<CONFIG, F>::Node*
+ConcurrentHashTable<CONFIG, F>::
   Node::next() const
 {
   return OrderAccess::load_acquire(&_next);
 }
 
 // Bucket
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
-ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline typename ConcurrentHashTable<CONFIG, F>::Node*
+ConcurrentHashTable<CONFIG, F>::
   Bucket::first_raw() const
 {
   return OrderAccess::load_acquire(&_first);
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline void ConcurrentHashTable<CONFIG, F>::
   Bucket::release_assign_node_ptr(
-    typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* const volatile * dst,
-    typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* node) const
+    typename ConcurrentHashTable<CONFIG, F>::Node* const volatile * dst,
+    typename ConcurrentHashTable<CONFIG, F>::Node* node) const
 {
   // Due to this assert this methods is not static.
   assert(is_locked(), "Must be locked.");
@@ -82,31 +82,31 @@ inline void ConcurrentHashTable<VALUE, CONFIG, F>::
   OrderAccess::release_store(tmp, clear_set_state(node, *dst));
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
-ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline typename ConcurrentHashTable<CONFIG, F>::Node*
+ConcurrentHashTable<CONFIG, F>::
   Bucket::first() const
 {
   // We strip the states bit before returning the ptr.
   return clear_state(OrderAccess::load_acquire(&_first));
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline bool ConcurrentHashTable<CONFIG, F>::
   Bucket::have_redirect() const
 {
   return is_state(first_raw(), STATE_REDIRECT_BIT);
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline bool ConcurrentHashTable<CONFIG, F>::
   Bucket::is_locked() const
 {
   return is_state(first_raw(), STATE_LOCK_BIT);
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline void ConcurrentHashTable<CONFIG, F>::
   Bucket::lock()
 {
   int i = 0;
@@ -123,10 +123,10 @@ inline void ConcurrentHashTable<VALUE, CONFIG, F>::
   }
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline void ConcurrentHashTable<CONFIG, F>::
   Bucket::release_assign_last_node_next(
-     typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* node)
+     typename ConcurrentHashTable<CONFIG, F>::Node* node)
 {
   assert(is_locked(), "Must be locked.");
   Node* const volatile * ret = first_ptr();
@@ -136,10 +136,10 @@ inline void ConcurrentHashTable<VALUE, CONFIG, F>::
   release_assign_node_ptr(ret, node);
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
-  Bucket::cas_first(typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* node,
-                    typename ConcurrentHashTable<VALUE, CONFIG, F>::Node* expect
+template <typename CONFIG, MEMFLAGS F>
+inline bool ConcurrentHashTable<CONFIG, F>::
+  Bucket::cas_first(typename ConcurrentHashTable<CONFIG, F>::Node* node,
+                    typename ConcurrentHashTable<CONFIG, F>::Node* expect
                     )
 {
   if (is_locked()) {
@@ -151,8 +151,8 @@ inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
   return false;
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline bool ConcurrentHashTable<CONFIG, F>::
   Bucket::trylock()
 {
   if (is_locked()) {
@@ -166,8 +166,8 @@ inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
   return false;
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline void ConcurrentHashTable<CONFIG, F>::
   Bucket::unlock()
 {
   assert(is_locked(), "Must be locked.");
@@ -176,8 +176,8 @@ inline void ConcurrentHashTable<VALUE, CONFIG, F>::
   OrderAccess::release_store(&_first, clear_state(first()));
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline void ConcurrentHashTable<CONFIG, F>::
   Bucket::redirect()
 {
   assert(is_locked(), "Must be locked.");
@@ -185,8 +185,8 @@ inline void ConcurrentHashTable<VALUE, CONFIG, F>::
 }
 
 // InternalTable
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline ConcurrentHashTable<CONFIG, F>::
   InternalTable::InternalTable(size_t log2_size) : _log2_size(log2_size),
     _size(((size_t)1ul) << _log2_size),
     _hash_mask(~(~((size_t)0) << _log2_size))
@@ -201,17 +201,17 @@ inline ConcurrentHashTable<VALUE, CONFIG, F>::
   }
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline ConcurrentHashTable<CONFIG, F>::
   InternalTable::~InternalTable()
 {
   FREE_C_HEAP_ARRAY(Bucket, _buckets);
 }
 
 // ScopedCS
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline ConcurrentHashTable<VALUE, CONFIG, F>::
-  ScopedCS::ScopedCS(Thread* thread, ConcurrentHashTable<VALUE, CONFIG, F>* cht)
+template <typename CONFIG, MEMFLAGS F>
+inline ConcurrentHashTable<CONFIG, F>::
+  ScopedCS::ScopedCS(Thread* thread, ConcurrentHashTable<CONFIG, F>* cht)
   : _thread(thread),
     _cht(cht),
     _cs_context(GlobalCounter::critical_section_begin(_thread))
@@ -222,40 +222,25 @@ inline ConcurrentHashTable<VALUE, CONFIG, F>::
   }
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline ConcurrentHashTable<CONFIG, F>::
   ScopedCS::~ScopedCS()
 {
   GlobalCounter::critical_section_end(_thread, _cs_context);
 }
 
-// BaseConfig
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline void* ConcurrentHashTable<VALUE, CONFIG, F>::
-  BaseConfig::allocate_node(size_t size, const VALUE& value)
-{
-  return AllocateHeap(size, F);
-}
-
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline void ConcurrentHashTable<VALUE, CONFIG, F>::
-  BaseConfig::free_node(void* memory, const VALUE& value)
-{
-  FreeHeap(memory);
-}
-
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MEMFLAGS F>
 template <typename LOOKUP_FUNC>
-inline VALUE* ConcurrentHashTable<VALUE, CONFIG, F>::
+inline typename CONFIG::Value* ConcurrentHashTable<CONFIG, F>::
   MultiGetHandle::get(LOOKUP_FUNC& lookup_f, bool* grow_hint)
 {
   return ScopedCS::_cht->internal_get(ScopedCS::_thread, lookup_f, grow_hint);
 }
 
 // HaveDeletables
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MEMFLAGS F>
 template <typename EVALUATE_FUNC>
-inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+inline bool ConcurrentHashTable<CONFIG, F>::
   HaveDeletables<true, EVALUATE_FUNC>::have_deletable(Bucket* bucket,
                                                       EVALUATE_FUNC& eval_f,
                                                       Bucket* prefetch_bucket)
@@ -281,9 +266,9 @@ inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
   return false;
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MEMFLAGS F>
 template <bool b, typename EVALUATE_FUNC>
-inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+inline bool ConcurrentHashTable<CONFIG, F>::
   HaveDeletables<b, EVALUATE_FUNC>::have_deletable(Bucket* bucket,
                                                    EVALUATE_FUNC& eval_f,
                                                    Bucket* preb)
@@ -297,8 +282,8 @@ inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
 }
 
 // ConcurrentHashTable
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline void ConcurrentHashTable<CONFIG, F>::
   write_synchonize_on_visible_epoch(Thread* thread)
 {
   assert(_resize_lock_owner == thread, "Re-size lock not held");
@@ -314,8 +299,8 @@ inline void ConcurrentHashTable<VALUE, CONFIG, F>::
   GlobalCounter::write_synchronize();
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline bool ConcurrentHashTable<CONFIG, F>::
   try_resize_lock(Thread* locker)
 {
   if (_resize_lock->try_lock()) {
@@ -333,8 +318,8 @@ inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
   return true;
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline void ConcurrentHashTable<CONFIG, F>::
   lock_resize_lock(Thread* locker)
 {
   size_t i = 0;
@@ -358,8 +343,8 @@ inline void ConcurrentHashTable<VALUE, CONFIG, F>::
   _invisible_epoch = 0;
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline void ConcurrentHashTable<CONFIG, F>::
   unlock_resize_lock(Thread* locker)
 {
   _invisible_epoch = 0;
@@ -368,8 +353,8 @@ inline void ConcurrentHashTable<VALUE, CONFIG, F>::
   _resize_lock->unlock();
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline void ConcurrentHashTable<CONFIG, F>::
   free_nodes()
 {
   // We assume we are not MT during freeing.
@@ -384,25 +369,25 @@ inline void ConcurrentHashTable<VALUE, CONFIG, F>::
   }
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline typename ConcurrentHashTable<VALUE, CONFIG, F>::InternalTable*
-ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
+ConcurrentHashTable<CONFIG, F>::
   get_table() const
 {
   return OrderAccess::load_acquire(&_table);
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline typename ConcurrentHashTable<VALUE, CONFIG, F>::InternalTable*
-ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
+ConcurrentHashTable<CONFIG, F>::
   get_new_table() const
 {
   return OrderAccess::load_acquire(&_new_table);
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline typename ConcurrentHashTable<VALUE, CONFIG, F>::InternalTable*
-ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
+ConcurrentHashTable<CONFIG, F>::
   set_table_from_new()
 {
   InternalTable* old_table = _table;
@@ -416,8 +401,8 @@ ConcurrentHashTable<VALUE, CONFIG, F>::
   return old_table;
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline void ConcurrentHashTable<CONFIG, F>::
   internal_grow_range(Thread* thread, size_t start, size_t stop)
 {
   assert(stop <= _table->_size, "Outside backing array");
@@ -456,9 +441,9 @@ inline void ConcurrentHashTable<VALUE, CONFIG, F>::
   }
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MEMFLAGS F>
 template <typename LOOKUP_FUNC, typename DELETE_FUNC>
-inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+inline bool ConcurrentHashTable<CONFIG, F>::
   internal_remove(Thread* thread, LOOKUP_FUNC& lookup_f, DELETE_FUNC& delete_f)
 {
   Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash());
@@ -489,9 +474,9 @@ inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
   return true;
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MEMFLAGS F>
 template <typename EVALUATE_FUNC, typename DELETE_FUNC>
-inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+inline void ConcurrentHashTable<CONFIG, F>::
   do_bulk_delete_locked_for(Thread* thread, size_t start_idx, size_t stop_idx,
                             EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f, bool is_mt)
 {
@@ -542,9 +527,9 @@ inline void ConcurrentHashTable<VALUE, CONFIG, F>::
   GlobalCounter::critical_section_end(thread, cs_context);
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MEMFLAGS F>
 template <typename LOOKUP_FUNC>
-inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+inline void ConcurrentHashTable<CONFIG, F>::
   delete_in_bucket(Thread* thread, Bucket* bucket, LOOKUP_FUNC& lookup_f)
 {
   assert(bucket->is_locked(), "Must be locked.");
@@ -579,9 +564,9 @@ inline void ConcurrentHashTable<VALUE, CONFIG, F>::
   }
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Bucket*
-ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline typename ConcurrentHashTable<CONFIG, F>::Bucket*
+ConcurrentHashTable<CONFIG, F>::
  get_bucket(uintx hash) const
 {
   InternalTable* table = get_table();
@@ -593,9 +578,9 @@ ConcurrentHashTable<VALUE, CONFIG, F>::
   return bucket;
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline typename ConcurrentHashTable<VALUE, CONFIG, F>::Bucket*
-ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline typename ConcurrentHashTable<CONFIG, F>::Bucket*
+ConcurrentHashTable<CONFIG, F>::
   get_bucket_locked(Thread* thread, const uintx hash)
 {
   Bucket* bucket;
@@ -624,10 +609,10 @@ ConcurrentHashTable<VALUE, CONFIG, F>::
 }
 
 // Always called within critical section
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MEMFLAGS F>
 template <typename LOOKUP_FUNC>
-typename ConcurrentHashTable<VALUE, CONFIG, F>::Node*
-ConcurrentHashTable<VALUE, CONFIG, F>::
+typename ConcurrentHashTable<CONFIG, F>::Node*
+ConcurrentHashTable<CONFIG, F>::
   get_node(const Bucket* const bucket, LOOKUP_FUNC& lookup_f,
            bool* have_dead, size_t* loops) const
 {
@@ -650,8 +635,8 @@ ConcurrentHashTable<VALUE, CONFIG, F>::
   return node;
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline bool ConcurrentHashTable<CONFIG, F>::
   unzip_bucket(Thread* thread, InternalTable* old_table,
                InternalTable* new_table, size_t even_index, size_t odd_index)
 {
@@ -708,8 +693,8 @@ inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
   return true;
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline bool ConcurrentHashTable<CONFIG, F>::
   internal_shrink_prolog(Thread* thread, size_t log2_size)
 {
   if (!try_resize_lock(thread)) {
@@ -725,8 +710,8 @@ inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
   return true;
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline void ConcurrentHashTable<CONFIG, F>::
   internal_shrink_epilog(Thread* thread)
 {
   assert(_resize_lock_owner == thread, "Re-size lock not held");
@@ -744,8 +729,8 @@ inline void ConcurrentHashTable<VALUE, CONFIG, F>::
   delete old_table;
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline void ConcurrentHashTable<CONFIG, F>::
   internal_shrink_range(Thread* thread, size_t start, size_t stop)
 {
   // The state is also copied here.
@@ -781,8 +766,8 @@ inline void ConcurrentHashTable<VALUE, CONFIG, F>::
   }
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline bool ConcurrentHashTable<CONFIG, F>::
   internal_shrink(Thread* thread, size_t log2_size)
 {
   if (!internal_shrink_prolog(thread, log2_size)) {
@@ -796,8 +781,8 @@ inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
   return true;
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline bool ConcurrentHashTable<CONFIG, F>::
   internal_grow_prolog(Thread* thread, size_t log2_size)
 {
   // This double checking of _size_limit_reached/is_max_size_reached()
@@ -825,8 +810,8 @@ inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
   return true;
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline void ConcurrentHashTable<CONFIG, F>::
   internal_grow_epilog(Thread* thread)
 {
   assert(_resize_lock_owner == thread, "Should be locked");
@@ -843,8 +828,8 @@ inline void ConcurrentHashTable<VALUE, CONFIG, F>::
   delete old_table;
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline bool ConcurrentHashTable<CONFIG, F>::
   internal_grow(Thread* thread, size_t log2_size)
 {
   if (!internal_grow_prolog(thread, log2_size)) {
@@ -859,9 +844,9 @@ inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
 }
 
 // Always called within critical section
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MEMFLAGS F>
 template <typename LOOKUP_FUNC>
-inline VALUE* ConcurrentHashTable<VALUE, CONFIG, F>::
+inline typename CONFIG::Value* ConcurrentHashTable<CONFIG, F>::
   internal_get(Thread* thread, LOOKUP_FUNC& lookup_f, bool* grow_hint)
 {
   bool clean = false;
@@ -880,9 +865,9 @@ inline VALUE* ConcurrentHashTable<VALUE, CONFIG, F>::
   return ret;
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MEMFLAGS F>
 template <typename LOOKUP_FUNC>
-inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+inline bool ConcurrentHashTable<CONFIG, F>::
   internal_insert(Thread* thread, LOOKUP_FUNC& lookup_f, const VALUE& value,
                   bool* grow_hint, bool* clean_hint)
 {
@@ -945,9 +930,9 @@ inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
   return ret;
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MEMFLAGS F>
 template <typename FUNC>
-inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+inline bool ConcurrentHashTable<CONFIG, F>::
   visit_nodes(Bucket* bucket, FUNC& visitor_f)
 {
   Node* current_node = bucket->first();
@@ -960,9 +945,9 @@ inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
   return true;
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MEMFLAGS F>
 template <typename FUNC>
-inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+inline void ConcurrentHashTable<CONFIG, F>::
   do_scan_locked(Thread* thread, FUNC& scan_f)
 {
   assert(_resize_lock_owner == thread, "Re-size lock not held");
@@ -977,9 +962,9 @@ inline void ConcurrentHashTable<VALUE, CONFIG, F>::
   } /* ends critical section */
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MEMFLAGS F>
 template <typename EVALUATE_FUNC>
-inline size_t ConcurrentHashTable<VALUE, CONFIG, F>::
+inline size_t ConcurrentHashTable<CONFIG, F>::
   delete_check_nodes(Bucket* bucket, EVALUATE_FUNC& eval_f,
                      size_t num_del, Node** ndel)
 {
@@ -1004,8 +989,8 @@ inline size_t ConcurrentHashTable<VALUE, CONFIG, F>::
 }
 
 // Constructor
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline ConcurrentHashTable<CONFIG, F>::
   ConcurrentHashTable(size_t log2size, size_t log2size_limit, size_t grow_hint)
     : _new_table(NULL), _log2_size_limit(log2size_limit),
       _log2_start_size(log2size), _grow_hint(grow_hint),
@@ -1021,8 +1006,8 @@ inline ConcurrentHashTable<VALUE, CONFIG, F>::
   _size_limit_reached = _table->_log2_size == _log2_size_limit;
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline ConcurrentHashTable<CONFIG, F>::
   ~ConcurrentHashTable()
 {
   delete _resize_lock;
@@ -1030,16 +1015,16 @@ inline ConcurrentHashTable<VALUE, CONFIG, F>::
   delete _table;
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline size_t ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline size_t ConcurrentHashTable<CONFIG, F>::
   get_size_log2(Thread* thread)
 {
   ScopedCS cs(thread, this);
   return _table->_log2_size;
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline bool ConcurrentHashTable<CONFIG, F>::
   shrink(Thread* thread, size_t size_limit_log2)
 {
   size_t tmp = size_limit_log2 == 0 ? _log2_start_size : size_limit_log2;
@@ -1047,17 +1032,17 @@ inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
   return ret;
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline bool ConcurrentHashTable<CONFIG, F>::
   grow(Thread* thread, size_t size_limit_log2)
 {
   size_t tmp = size_limit_log2 == 0 ? _log2_size_limit : size_limit_log2;
   return internal_grow(thread, tmp);
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MEMFLAGS F>
 template <typename LOOKUP_FUNC, typename FOUND_FUNC>
-inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+inline bool ConcurrentHashTable<CONFIG, F>::
   get(Thread* thread, LOOKUP_FUNC& lookup_f, FOUND_FUNC& found_f, bool* grow_hint)
 {
   bool ret = false;
@@ -1070,8 +1055,8 @@ inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
   return ret;
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+template <typename CONFIG, MEMFLAGS F>
+inline bool ConcurrentHashTable<CONFIG, F>::
   unsafe_insert(const VALUE& value) {
   bool dead_hash = false;
   size_t hash = CONFIG::get_hash(value, &dead_hash);
@@ -1090,9 +1075,9 @@ inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
   return true;
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MEMFLAGS F>
 template <typename SCAN_FUNC>
-inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+inline bool ConcurrentHashTable<CONFIG, F>::
   try_scan(Thread* thread, SCAN_FUNC& scan_f)
 {
   if (!try_resize_lock(thread)) {
@@ -1103,9 +1088,9 @@ inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
   return true;
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MEMFLAGS F>
 template <typename SCAN_FUNC>
-inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+inline void ConcurrentHashTable<CONFIG, F>::
   do_scan(Thread* thread, SCAN_FUNC& scan_f)
 {
   assert(!SafepointSynchronize::is_at_safepoint(),
@@ -1117,9 +1102,9 @@ inline void ConcurrentHashTable<VALUE, CONFIG, F>::
   assert(_resize_lock_owner != thread, "Re-size lock held");
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MEMFLAGS F>
 template <typename SCAN_FUNC>
-inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+inline void ConcurrentHashTable<CONFIG, F>::
   do_safepoint_scan(SCAN_FUNC& scan_f)
 {
   // We only allow this method to be used during a safepoint.
@@ -1160,9 +1145,9 @@ inline void ConcurrentHashTable<VALUE, CONFIG, F>::
   }
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MEMFLAGS F>
 template <typename EVALUATE_FUNC, typename DELETE_FUNC>
-inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
+inline bool ConcurrentHashTable<CONFIG, F>::
   try_bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
 {
   if (!try_resize_lock(thread)) {
@@ -1174,9 +1159,9 @@ inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
   return true;
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MEMFLAGS F>
 template <typename EVALUATE_FUNC, typename DELETE_FUNC>
-inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+inline void ConcurrentHashTable<CONFIG, F>::
   bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
 {
   assert(!SafepointSynchronize::is_at_safepoint(),
@@ -1186,9 +1171,9 @@ inline void ConcurrentHashTable<VALUE, CONFIG, F>::
   unlock_resize_lock(thread);
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MEMFLAGS F>
 template <typename VALUE_SIZE_FUNC>
-inline TableStatistics ConcurrentHashTable<VALUE, CONFIG, F>::
+inline TableStatistics ConcurrentHashTable<CONFIG, F>::
   statistics_calculate(Thread* thread, VALUE_SIZE_FUNC& vs_f)
 {
   NumberSeq summary;
@@ -1213,9 +1198,9 @@ inline TableStatistics ConcurrentHashTable<VALUE, CONFIG, F>::
   return TableStatistics(_stats_rate, summary, literal_bytes, sizeof(Bucket), sizeof(Node));
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MEMFLAGS F>
 template <typename VALUE_SIZE_FUNC>
-inline TableStatistics ConcurrentHashTable<VALUE, CONFIG, F>::
+inline TableStatistics ConcurrentHashTable<CONFIG, F>::
   statistics_get(Thread* thread, VALUE_SIZE_FUNC& vs_f, TableStatistics old)
 {
   if (!try_resize_lock(thread)) {
@@ -1228,9 +1213,9 @@ inline TableStatistics ConcurrentHashTable<VALUE, CONFIG, F>::
   return ts;
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MEMFLAGS F>
 template <typename VALUE_SIZE_FUNC>
-inline void ConcurrentHashTable<VALUE, CONFIG, F>::
+inline void ConcurrentHashTable<CONFIG, F>::
   statistics_to(Thread* thread, VALUE_SIZE_FUNC& vs_f, outputStream* st,
                 const char* table_name)
 {
@@ -1245,9 +1230,9 @@ inline void ConcurrentHashTable<VALUE, CONFIG, F>::
   ts.print(st, table_name);
 }
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-inline bool ConcurrentHashTable<VALUE, CONFIG, F>::
-  try_move_nodes_to(Thread* thread, ConcurrentHashTable<VALUE, CONFIG, F>* to_cht)
+template <typename CONFIG, MEMFLAGS F>
+inline bool ConcurrentHashTable<CONFIG, F>::
+  try_move_nodes_to(Thread* thread, ConcurrentHashTable<CONFIG, F>* to_cht)
 {
   if (!try_resize_lock(thread)) {
     return false;
diff --git a/src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp b/src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp
index a5d4b539980..b863a515278 100644
--- a/src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp
+++ b/src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp
@@ -32,10 +32,10 @@
 // operations, which they are serialized with each other.
 
 // Base class for pause and/or parallel bulk operations.
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-class ConcurrentHashTable<VALUE, CONFIG, F>::BucketsOperation {
+template <typename CONFIG, MEMFLAGS F>
+class ConcurrentHashTable<CONFIG, F>::BucketsOperation {
  protected:
-  ConcurrentHashTable<VALUE, CONFIG, F>* _cht;
+  ConcurrentHashTable<CONFIG, F>* _cht;
 
   // Default size of _task_size_log2
   static const size_t DEFAULT_TASK_SIZE_LOG2 = 12;
@@ -47,7 +47,7 @@ class ConcurrentHashTable<VALUE, CONFIG, F>::BucketsOperation {
   size_t _size_log2;      // Table size.
   bool   _is_mt;
 
-  BucketsOperation(ConcurrentHashTable<VALUE, CONFIG, F>* cht, bool is_mt = false)
+  BucketsOperation(ConcurrentHashTable<CONFIG, F>* cht, bool is_mt = false)
     : _cht(cht), _next_to_claim(0), _task_size_log2(DEFAULT_TASK_SIZE_LOG2),
       _stop_task(0), _size_log2(0), _is_mt(is_mt) {}
 
@@ -116,12 +116,12 @@ public:
 };
 
 // For doing pausable/parallel bulk delete.
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-class ConcurrentHashTable<VALUE, CONFIG, F>::BulkDeleteTask :
+template <typename CONFIG, MEMFLAGS F>
+class ConcurrentHashTable<CONFIG, F>::BulkDeleteTask :
   public BucketsOperation
 {
  public:
-  BulkDeleteTask(ConcurrentHashTable<VALUE, CONFIG, F>* cht, bool is_mt = false)
+  BulkDeleteTask(ConcurrentHashTable<CONFIG, F>* cht, bool is_mt = false)
     : BucketsOperation(cht, is_mt) {
   }
   // Before start prepare must be called.
@@ -160,12 +160,12 @@ class ConcurrentHashTable<VALUE, CONFIG, F>::BulkDeleteTask :
   }
 };
 
-template <typename VALUE, typename CONFIG, MEMFLAGS F>
-class ConcurrentHashTable<VALUE, CONFIG, F>::GrowTask :
+template <typename CONFIG, MEMFLAGS F>
+class ConcurrentHashTable<CONFIG, F>::GrowTask :
   public BucketsOperation
 {
  public:
-  GrowTask(ConcurrentHashTable<VALUE, CONFIG, F>* cht) : BucketsOperation(cht) {
+  GrowTask(ConcurrentHashTable<CONFIG, F>* cht) : BucketsOperation(cht) {
   }
   // Before start prepare must be called.
   bool prepare(Thread* thread) {
diff --git a/test/hotspot/gtest/utilities/test_concurrentHashtable.cpp b/test/hotspot/gtest/utilities/test_concurrentHashtable.cpp
index 9e23812ae06..04d716fc2b3 100644
--- a/test/hotspot/gtest/utilities/test_concurrentHashtable.cpp
+++ b/test/hotspot/gtest/utilities/test_concurrentHashtable.cpp
@@ -36,24 +36,22 @@
 // Amusingly as long as they do not assert they are mt-safe.
 #define SIZE_32 5
 
-struct Pointer;
-
-typedef ConcurrentHashTable<uintptr_t, Pointer, mtInternal> SimpleTestTable;
-typedef ConcurrentHashTable<uintptr_t, Pointer, mtInternal>::MultiGetHandle SimpleTestGetHandle;
-
-// Simplest working CRPT implementation for the hash-table.
-struct Pointer : public SimpleTestTable::BaseConfig {
-  static uintx get_hash(const uintptr_t& value, bool* dead_hash) {
+struct Pointer : public AllStatic {
+  typedef uintptr_t Value;
+  static uintx get_hash(const Value& value, bool* dead_hash) {
     return (uintx)value;
   }
-  static void* allocate_node(size_t size, const uintptr_t& value) {
+  static void* allocate_node(size_t size, const Value& value) {
     return ::malloc(size);
   }
-  static void free_node(void* memory, const uintptr_t& value) {
+  static void free_node(void* memory, const Value& value) {
     ::free(memory);
   }
 };
 
+typedef ConcurrentHashTable<Pointer, mtInternal> SimpleTestTable;
+typedef ConcurrentHashTable<Pointer, mtInternal>::MultiGetHandle SimpleTestGetHandle;
+
 struct SimpleTestLookup {
   uintptr_t _val;
   SimpleTestLookup(uintptr_t val) : _val(val) {}
@@ -414,18 +412,23 @@ TEST_VM(ConcurrentHashTable, task_grow) {
 
 //#############################################################################################
 
-class TestInterface;
-
-typedef ConcurrentHashTable<uintptr_t, TestInterface, mtInternal> TestTable;
-typedef ConcurrentHashTable<uintptr_t, TestInterface, mtInternal>::MultiGetHandle TestGetHandle;
-
-class TestInterface : public TestTable::BaseConfig {
+class TestInterface : public AllStatic {
  public:
-  static uintx get_hash(const uintptr_t& value, bool* dead_hash) {
+  typedef uintptr_t Value;
+  static uintx get_hash(const Value& value, bool* dead_hash) {
     return (uintx)(value + 18446744073709551557ul) * 18446744073709551557ul;
   }
+  static void* allocate_node(size_t size, const Value& value) {
+    return AllocateHeap(size, mtInternal);
+  }
+  static void free_node(void* memory, const Value& value) {
+    FreeHeap(memory);
+  }
 };
 
+typedef ConcurrentHashTable<TestInterface, mtInternal> TestTable;
+typedef ConcurrentHashTable<TestInterface, mtInternal>::MultiGetHandle TestGetHandle;
+
 struct TestLookup {
   uintptr_t _val;
   TestLookup(uintptr_t val) : _val(val) {}
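[Editor's note: the patch is truncated above, mid-way through the test file's TestLookup hunk.]

For readers skimming the change: after this patch a table is instantiated as ConcurrentHashTable<CONFIG, F>, and the CONFIG class itself carries the value type plus its own node allocation, since BaseConfig no longer exists to supply defaults. Below is a minimal sketch of a conforming configuration; it is not part of the patch, and the name MyTableConfig with plain uintptr_t values and the mtInternal memory flag are illustrative assumptions only. The required members are the Value typedef, get_hash(), allocate_node() and free_node():

  // Hypothetical CONFIG for the post-8214822 API; names are illustrative.
  struct MyTableConfig : public AllStatic {
    typedef uintptr_t Value;  // was the VALUE template parameter before this patch
    static uintx get_hash(const Value& value, bool* is_dead) {
      *is_dead = false;       // plain integers never become "dead" entries
      return (uintx)value;
    }
    // Each CONFIG now supplies allocation itself; BaseConfig's defaults are gone.
    static void* allocate_node(size_t size, const Value& value) {
      return AllocateHeap(size, mtInternal);
    }
    static void free_node(void* memory, const Value& value) {
      FreeHeap(memory);
    }
  };
  typedef ConcurrentHashTable<MyTableConfig, mtInternal> MyTable;

This mirrors what the patch does to StringTableConfig, SymbolTableConfig, ResolvedMethodTableConfig and the two gtest configs above: each grew a Value typedef and concrete allocate_node/free_node bodies in exchange for dropping the BaseConfig base class.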