diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
index 52591f7ce5f..2bbfb5032b3 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
@@ -291,9 +291,9 @@ void G1CMMarkStack::expand() {
   _chunk_allocator.try_expand();
 }
 
-void G1CMMarkStack::add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem) {
-  elem->next = *list;
-  *list = elem;
+void G1CMMarkStack::add_chunk_to_list(Atomic<TaskQueueEntryChunk*>* list, TaskQueueEntryChunk* elem) {
+  elem->next = list->load_relaxed();
+  list->store_relaxed(elem);
 }
 
 void G1CMMarkStack::add_chunk_to_chunk_list(TaskQueueEntryChunk* elem) {
@@ -307,10 +307,10 @@ void G1CMMarkStack::add_chunk_to_free_list(TaskQueueEntryChunk* elem) {
   add_chunk_to_list(&_free_list, elem);
 }
 
-G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(TaskQueueEntryChunk* volatile* list) {
-  TaskQueueEntryChunk* result = *list;
+G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::remove_chunk_from_list(Atomic<TaskQueueEntryChunk*>* list) {
+  TaskQueueEntryChunk* result = list->load_relaxed();
   if (result != nullptr) {
-    *list = (*list)->next;
+    list->store_relaxed(list->load_relaxed()->next);
   }
   return result;
 }
@@ -364,8 +364,8 @@ bool G1CMMarkStack::par_pop_chunk(G1TaskQueueEntry* ptr_arr) {
 
 void G1CMMarkStack::set_empty() {
   _chunks_in_chunk_list = 0;
-  _chunk_list = nullptr;
-  _free_list = nullptr;
+  _chunk_list.store_relaxed(nullptr);
+  _free_list.store_relaxed(nullptr);
   _chunk_allocator.reset();
 }
 
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
index 52a1b133439..836d7793f81 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
@@ -210,17 +210,17 @@ private:
   ChunkAllocator _chunk_allocator;
 
   char _pad0[DEFAULT_PADDING_SIZE];
-  TaskQueueEntryChunk* volatile _free_list;  // Linked list of free chunks that can be allocated by users.
+  Atomic<TaskQueueEntryChunk*> _free_list;   // Linked list of free chunks that can be allocated by users.
   char _pad1[DEFAULT_PADDING_SIZE - sizeof(TaskQueueEntryChunk*)];
-  TaskQueueEntryChunk* volatile _chunk_list; // List of chunks currently containing data.
+  Atomic<TaskQueueEntryChunk*> _chunk_list;  // List of chunks currently containing data.
   volatile size_t _chunks_in_chunk_list;
   char _pad2[DEFAULT_PADDING_SIZE - sizeof(TaskQueueEntryChunk*) - sizeof(size_t)];
 
   // Atomically add the given chunk to the list.
-  void add_chunk_to_list(TaskQueueEntryChunk* volatile* list, TaskQueueEntryChunk* elem);
+  void add_chunk_to_list(Atomic<TaskQueueEntryChunk*>* list, TaskQueueEntryChunk* elem);
   // Atomically remove and return a chunk from the given list. Returns null if the
   // list is empty.
-  TaskQueueEntryChunk* remove_chunk_from_list(TaskQueueEntryChunk* volatile* list);
+  TaskQueueEntryChunk* remove_chunk_from_list(Atomic<TaskQueueEntryChunk*>* list);
 
   void add_chunk_to_chunk_list(TaskQueueEntryChunk* elem);
   void add_chunk_to_free_list(TaskQueueEntryChunk* elem);
@@ -252,7 +252,7 @@ private:
 
   // Return whether the chunk list is empty. Racy due to unsynchronized access to
   // _chunk_list.
-  bool is_empty() const { return _chunk_list == nullptr; }
+  bool is_empty() const { return _chunk_list.load_relaxed() == nullptr; }
 
   size_t capacity() const { return _chunk_allocator.capacity(); }
 
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp
index fe72c68d4eb..2f4824e4cae 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp
@@ -90,7 +90,7 @@ inline void G1CMMarkStack::iterate(Fn fn) const {
 
   size_t num_chunks = 0;
 
-  TaskQueueEntryChunk* cur = _chunk_list;
+  TaskQueueEntryChunk* cur = _chunk_list.load_relaxed();
   while (cur != nullptr) {
     guarantee(num_chunks <= _chunks_in_chunk_list, "Found %zu oop chunks which is more than there should be", num_chunks);
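Note for reviewers: the patch replaces raw `volatile` accesses to the list heads with explicit relaxed atomic loads and stores, without changing the push/pop logic itself. Below is a minimal standalone sketch of that pattern, not HotSpot code: `std::atomic` with `memory_order_relaxed` stands in for HotSpot's `Atomic<TaskQueueEntryChunk*>` with `load_relaxed()`/`store_relaxed()` as used in the diff, and the `Node`/`ChunkList`/`push`/`pop` names are purely illustrative.

```cpp
// Standalone illustration (not HotSpot code): a singly-linked list whose head
// is accessed with relaxed atomics, mirroring add_chunk_to_list() /
// remove_chunk_from_list() in the patch. As in the patch, push/pop are not
// atomic RMW operations; my reading is that the relaxed accesses only make the
// unsynchronized readers (e.g. is_empty()) well-defined, while callers provide
// any mutual exclusion needed for concurrent modification.
#include <atomic>
#include <cassert>

struct Node {
  Node* next = nullptr;
};

class ChunkList {
  std::atomic<Node*> _head{nullptr};

public:
  // Link the element in front of the current head, then publish it.
  void push(Node* elem) {
    elem->next = _head.load(std::memory_order_relaxed);
    _head.store(elem, std::memory_order_relaxed);
  }

  // Take the current head, if any, and make its successor the new head.
  Node* pop() {
    Node* result = _head.load(std::memory_order_relaxed);
    if (result != nullptr) {
      _head.store(result->next, std::memory_order_relaxed);
    }
    return result;
  }

  // Racy emptiness check, analogous to is_empty() in the patch.
  bool is_empty() const {
    return _head.load(std::memory_order_relaxed) == nullptr;
  }
};

int main() {
  ChunkList list;
  Node a, b;
  list.push(&a);
  list.push(&b);
  assert(list.pop() == &b);
  assert(list.pop() == &a);
  assert(list.is_empty());
  return 0;
}
```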