diff --git a/src/hotspot/share/utilities/concurrentHashTable.hpp b/src/hotspot/share/utilities/concurrentHashTable.hpp index a180c69d841..2a6af1fe4b4 100644 --- a/src/hotspot/share/utilities/concurrentHashTable.hpp +++ b/src/hotspot/share/utilities/concurrentHashTable.hpp @@ -28,6 +28,7 @@ #include "memory/allocation.hpp" #include "utilities/globalCounter.hpp" #include "utilities/globalDefinitions.hpp" +#include "utilities/growableArray.hpp" #include "utilities/tableStatistics.hpp" // A mostly concurrent-hash-table where the read-side is wait-free, inserts are @@ -271,8 +272,17 @@ class ConcurrentHashTable : public CHeapObj<F> { }; - // Max number of deletes in one bucket chain during bulk delete. - static const size_t BULK_DELETE_LIMIT = 256; + // When doing deletes, we need to store the pointers until the next + // visible epoch. In the normal case (good hash function and + // reasonable sizing), we can save these pointers on the stack + // (there should not be more than a few entries per bucket). But if + // the hash function is bad and/or the sizing of the table is bad, + // we cannot use a fixed-size stack buffer alone. We will use a + // heap buffer as fall-back when the stack is not enough, and then + // we have to pay for a dynamic allocation. `StackBufferSize` tells + // the size of the optimistic stack buffer that will almost always be + // used. + static const size_t StackBufferSize = 256; // Simple getters and setters for the internal table. InternalTable* get_table() const; @@ -349,7 +359,7 @@ class ConcurrentHashTable : public CHeapObj<F> { // Check for dead items in a bucket. template <typename EVALUATE_FUNC> size_t delete_check_nodes(Bucket* bucket, EVALUATE_FUNC& eval_f, - size_t num_del, Node** ndel); + size_t num_del, Node** ndel, GrowableArrayCHeap<Node*, F>& extra); // Check for dead items in this table. During shrink/grow we cannot guarantee // that we only visit nodes once. 
To keep it simple caller will have locked diff --git a/src/hotspot/share/utilities/concurrentHashTable.inline.hpp b/src/hotspot/share/utilities/concurrentHashTable.inline.hpp index 7443ad6b48b..285b1c997e2 100644 --- a/src/hotspot/share/utilities/concurrentHashTable.inline.hpp +++ b/src/hotspot/share/utilities/concurrentHashTable.inline.hpp @@ -33,6 +33,7 @@ #include "runtime/prefetch.inline.hpp" #include "runtime/safepoint.hpp" #include "utilities/globalCounter.inline.hpp" +#include "utilities/growableArray.hpp" #include "utilities/numberSeq.hpp" #include "utilities/spinYield.hpp" @@ -47,10 +48,10 @@ #ifdef ASSERT #ifdef _LP64 // Two low bits are not usable. -static const void* POISON_PTR = (void*)UCONST64(0xfbadbadbadbadbac); +static void* const POISON_PTR = (void*)UCONST64(0xfbadbadbadbadbac); #else // Two low bits are not usable. -static const void* POISON_PTR = (void*)0xffbadbac; +static void* const POISON_PTR = (void*)0xffbadbac; #endif #endif @@ -486,7 +487,7 @@ inline void ConcurrentHashTable<CONFIG, F>:: // table. Can do this in parallel if we want. assert((is_mt && _resize_lock_owner != NULL) || (!is_mt && _resize_lock_owner == thread), "Re-size lock not held"); - Node* ndel[BULK_DELETE_LIMIT]; + Node* ndel_stack[StackBufferSize]; InternalTable* table = get_table(); assert(start_idx < stop_idx, "Must be"); assert(stop_idx <= _table->_size, "Must be"); @@ -511,7 +512,8 @@ inline void ConcurrentHashTable<CONFIG, F>:: // We left critical section but the bucket cannot be removed while we hold // the _resize_lock. 
bucket->lock(); - size_t nd = delete_check_nodes(bucket, eval_f, BULK_DELETE_LIMIT, ndel); + GrowableArrayCHeap<Node*, F> extra(0); // use this buffer if StackBufferSize is not enough + size_t nd = delete_check_nodes(bucket, eval_f, StackBufferSize, ndel_stack, extra); bucket->unlock(); if (is_mt) { GlobalCounter::write_synchronize(); @@ -519,10 +521,11 @@ inline void ConcurrentHashTable<CONFIG, F>:: write_synchonize_on_visible_epoch(thread); } for (size_t node_it = 0; node_it < nd; node_it++) { - del_f(ndel[node_it]->value()); - Node::destroy_node(_context, ndel[node_it]); + Node*& ndel = node_it < StackBufferSize ? ndel_stack[node_it] : extra.at(static_cast<int>(node_it - StackBufferSize)); + del_f(ndel->value()); + Node::destroy_node(_context, ndel); JFR_ONLY(safe_stats_remove();) - DEBUG_ONLY(ndel[node_it] = (Node*)POISON_PTR;) + DEBUG_ONLY(ndel = static_cast<Node*>(POISON_PTR);) } cs_context = GlobalCounter::critical_section_begin(thread); } @@ -537,7 +540,7 @@ inline void ConcurrentHashTable<CONFIG, F>:: assert(bucket->is_locked(), "Must be locked."); size_t dels = 0; - Node* ndel[BULK_DELETE_LIMIT]; + Node* ndel[StackBufferSize]; Node* const volatile * rem_n_prev = bucket->first_ptr(); Node* rem_n = bucket->first(); while (rem_n != NULL) { @@ -548,7 +551,7 @@ Node* next_node = rem_n->next(); bucket->release_assign_node_ptr(rem_n_prev, next_node); rem_n = next_node; - if (dels == BULK_DELETE_LIMIT) { + if (dels == StackBufferSize) { break; } } else { @@ -982,20 +985,24 @@ template <typename CONFIG, MEMFLAGS F> template <typename EVALUATE_FUNC> inline size_t ConcurrentHashTable<CONFIG, F>:: delete_check_nodes(Bucket* bucket, EVALUATE_FUNC& eval_f, - size_t num_del, Node** ndel) + size_t num_del, Node** ndel, GrowableArrayCHeap<Node*, F>& extra) { size_t dels = 0; Node* const volatile * rem_n_prev = bucket->first_ptr(); Node* rem_n = bucket->first(); - while (rem_n != NULL) { + while (rem_n != nullptr) { if (eval_f(rem_n->value())) { - ndel[dels++] = rem_n; + if (dels < num_del) { + ndel[dels] = rem_n; + } else { + guarantee(dels < 
std::numeric_limits<int>::max(), + "Growable array size is limited by a (signed) int, something is seriously bad if we reach this point, better exit"); + extra.append(rem_n); + } + ++dels; Node* next_node = rem_n->next(); bucket->release_assign_node_ptr(rem_n_prev, next_node); rem_n = next_node; - if (dels == num_del) { - break; - } } else { rem_n_prev = rem_n->next_ptr(); rem_n = rem_n->next(); diff --git a/test/hotspot/gtest/utilities/test_concurrentHashtable.cpp b/test/hotspot/gtest/utilities/test_concurrentHashtable.cpp index 4023b9b6f69..2094565dea3 100644 --- a/test/hotspot/gtest/utilities/test_concurrentHashtable.cpp +++ b/test/hotspot/gtest/utilities/test_concurrentHashtable.cpp @@ -213,36 +213,62 @@ static void cht_getinsert_bulkdelete_insert_verified(Thread* thr, SimpleTestTabl } static void cht_getinsert_bulkdelete(Thread* thr) { - uintptr_t val1 = 1; - uintptr_t val2 = 2; - uintptr_t val3 = 3; - SimpleTestLookup stl1(val1), stl2(val2), stl3(val3); + SimpleTestTable a[] = {SimpleTestTable(), SimpleTestTable(2, 2, 14) /* force long lists in the buckets */ }; + const unsigned iter = 1000; + for (auto& table: a) { + for (unsigned i = 0; i < iter; ++i) { + uintptr_t val1 = i * 10 + 1; + uintptr_t val2 = i * 10 + 2; + uintptr_t val3 = i * 10 + 3; + SimpleTestLookup stl1(val1), stl2(val2), stl3(val3); + cht_getinsert_bulkdelete_insert_verified(thr, &table, val1, false, true); + cht_getinsert_bulkdelete_insert_verified(thr, &table, val2, false, true); + cht_getinsert_bulkdelete_insert_verified(thr, &table, val3, false, true); - SimpleTestTable* cht = new SimpleTestTable(); - cht_getinsert_bulkdelete_insert_verified(thr, cht, val1, false, true); - cht_getinsert_bulkdelete_insert_verified(thr, cht, val2, false, true); - cht_getinsert_bulkdelete_insert_verified(thr, cht, val3, false, true); + EXPECT_TRUE(table.remove(thr, stl2)) << "Remove did not find value."; - EXPECT_TRUE(cht->remove(thr, stl2)) << "Remove did not find value."; + 
cht_getinsert_bulkdelete_insert_verified(thr, &table, val1, true, false); // val1 should be present + cht_getinsert_bulkdelete_insert_verified(thr, &table, val2, false, true); // val2 should be inserted + cht_getinsert_bulkdelete_insert_verified(thr, &table, val3, true, false); // val3 should be present - cht_getinsert_bulkdelete_insert_verified(thr, cht, val1, true, false); // val1 should be present - cht_getinsert_bulkdelete_insert_verified(thr, cht, val2, false, true); // val2 should be inserted - cht_getinsert_bulkdelete_insert_verified(thr, cht, val3, true, false); // val3 should be present + EXPECT_EQ(cht_get_copy(&table, thr, stl1), val1) << "Get did not find value."; + EXPECT_EQ(cht_get_copy(&table, thr, stl2), val2) << "Get did not find value."; + EXPECT_EQ(cht_get_copy(&table, thr, stl3), val3) << "Get did not find value."; + } - EXPECT_EQ(cht_get_copy(cht, thr, stl1), val1) << "Get did not find value."; - EXPECT_EQ(cht_get_copy(cht, thr, stl2), val2) << "Get did not find value."; - EXPECT_EQ(cht_get_copy(cht, thr, stl3), val3) << "Get did not find value."; + unsigned delete_count = 0; + unsigned scan_count = 0; + auto eval_odd_f = [](uintptr_t* val) { return *val & 0x1; }; + auto eval_true_f = [](uintptr_t* val) { return true; }; + auto scan_count_f = [&scan_count](uintptr_t* val) { scan_count++; return true; }; + auto delete_count_f = [&delete_count](uintptr_t* val) { delete_count++; }; + table.bulk_delete(thr, eval_odd_f, delete_count_f); + EXPECT_EQ(iter*2, delete_count) << "All odd values should have been deleted"; + table.do_scan(thr, scan_count_f); + EXPECT_EQ(iter, scan_count) << "All odd values should have been deleted"; - // Removes all odd values. 
- cht->bulk_delete(thr, getinsert_bulkdelete_eval, getinsert_bulkdelete_del); + for (unsigned i = 0; i < iter; ++i) { + uintptr_t val1 = i * 10 + 1; + uintptr_t val2 = i * 10 + 2; + uintptr_t val3 = i * 10 + 3; + SimpleTestLookup stl1(val1), stl2(val2), stl3(val3); + EXPECT_EQ(cht_get_copy(&table, thr, stl1), (uintptr_t)0) << "Odd value should not exist."; + EXPECT_FALSE(table.remove(thr, stl1)) << "Odd value should not exist."; + EXPECT_EQ(cht_get_copy(&table, thr, stl2), val2) << "Even value should not have been removed."; + EXPECT_EQ(cht_get_copy(&table, thr, stl3), (uintptr_t)0) << "Odd value should not exist."; + EXPECT_FALSE(table.remove(thr, stl3)) << "Odd value should not exist."; + } - EXPECT_EQ(cht_get_copy(cht, thr, stl1), (uintptr_t)0) << "Odd value should not exist."; - EXPECT_FALSE(cht->remove(thr, stl1)) << "Odd value should not exist."; - EXPECT_EQ(cht_get_copy(cht, thr, stl2), val2) << "Even value should not have been removed."; - EXPECT_EQ(cht_get_copy(cht, thr, stl3), (uintptr_t)0) << "Add value should not exists."; - EXPECT_FALSE(cht->remove(thr, stl3)) << "Odd value should not exists."; - - delete cht; + scan_count = 0; + table.do_scan(thr, scan_count_f); + EXPECT_EQ(iter, scan_count) << "Only the even values should remain"; + delete_count = 0; + table.bulk_delete(thr, eval_true_f, delete_count_f); + EXPECT_EQ(iter, delete_count) << "All remaining values should have been deleted"; + scan_count = 0; + table.do_scan(thr, scan_count_f); + EXPECT_EQ(0u, scan_count) << "All values should have been deleted"; + } } static void cht_getinsert_bulkdelete_task(Thread* thr) {