8151436: Leaner ArrayAllocator and BitMaps
Reviewed-by: tschatzl, pliden, kbarrett
commit 7419b91e7e
parent d15936bdc0
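
In short: ArrayAllocator loses its per-instance state (_addr, _use_malloc, _size, _free_in_destructor) and becomes an AllStatic class whose allocate(), reallocate() and free() take the element count explicitly, so the malloc-vs-mmap decision is re-derived from the length on every call. Callers such as G1HotCardCache, GenericTaskQueue and BitMap drop their allocator members and call the static functions instead, which makes those objects leaner. The sketch below is not HotSpot code; it is a minimal standalone illustration of the same stateless pattern, with plain malloc/free and POSIX mmap/munmap standing in for AllocateHeap/os::reserve_memory, and a made-up kMallocLimit standing in for ArrayAllocatorMallocLimit.

// Standalone sketch only (not HotSpot code): a stateless, size-threshold array
// allocator. Small requests go through malloc, large ones through mmap, and the
// caller passes the length back to free() so the choice can be re-derived
// without any stored state. kMallocLimit is a made-up stand-in for
// ArrayAllocatorMallocLimit; assumes a POSIX/Linux system for mmap/munmap.
#include <cstddef>
#include <cstdlib>
#include <sys/mman.h>
#include <unistd.h>

template <class E>
class ArrayAllocatorSketch {
  static constexpr size_t kMallocLimit = 64 * 1024;  // bytes; illustrative only

  static size_t size_for_malloc(size_t length) { return length * sizeof(E); }

  static size_t size_for_mmap(size_t length) {
    size_t page = (size_t)sysconf(_SC_PAGESIZE);
    size_t size = length * sizeof(E);
    return (size + page - 1) & ~(page - 1);          // round up to page size
  }

  static bool should_use_malloc(size_t length) {
    return size_for_malloc(length) < kMallocLimit;
  }

 public:
  static E* allocate(size_t length) {
    if (should_use_malloc(length)) {
      return (E*)::malloc(size_for_malloc(length));
    }
    void* addr = ::mmap(NULL, size_for_mmap(length), PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    return (addr == MAP_FAILED) ? NULL : (E*)addr;
  }

  // Must be called with the same length that was passed to allocate(), so the
  // malloc-vs-mmap decision comes out the same way.
  static void free(E* addr, size_t length) {
    if (addr == NULL) return;
    if (should_use_malloc(length)) {
      ::free(addr);
    } else {
      ::munmap(addr, size_for_mmap(length));
    }
  }
};

int main() {
  int* small_arr = ArrayAllocatorSketch<int>::allocate(100);      // malloc path
  int* large_arr = ArrayAllocatorSketch<int>::allocate(1 << 20);  // mmap path
  ArrayAllocatorSketch<int>::free(small_arr, 100);
  ArrayAllocatorSketch<int>::free(large_arr, 1 << 20);
  return 0;
}
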
g1HotCardCache.cpp
@@ -36,7 +36,7 @@ void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
     _use_cache = true;
 
     _hot_cache_size = (size_t)1 << G1ConcRSLogCacheSize;
-    _hot_cache = _hot_cache_memory.allocate(_hot_cache_size);
+    _hot_cache = ArrayAllocator<jbyte*, mtGC>::allocate(_hot_cache_size);
 
     reset_hot_cache_internal();
@@ -51,7 +51,7 @@ void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
 G1HotCardCache::~G1HotCardCache() {
   if (default_use_cache()) {
     assert(_hot_cache != NULL, "Logic");
-    _hot_cache_memory.free();
+    ArrayAllocator<jbyte*, mtGC>::free(_hot_cache, _hot_cache_size);
     _hot_cache = NULL;
   }
 }
g1HotCardCache.hpp
@@ -61,7 +61,6 @@ class G1HotCardCache: public CHeapObj<mtGC> {
 
   G1CardCounts _card_counts;
 
-  ArrayAllocator<jbyte*, mtGC> _hot_cache_memory;
   // The card cache table
   jbyte** _hot_cache;
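
The three hunks above show the caller side of the change: G1HotCardCache no longer embeds an ArrayAllocator<jbyte*, mtGC> member, it just keeps the pointer plus the size it allocated and hands that size back to the static free(). A hypothetical, stripped-down version of the same shape (made-up names; a trivial new[]/delete[] stub stands in for the real allocator):

// Hypothetical illustration of the caller pattern above (not the real
// G1HotCardCache). StaticArrayAlloc is a trivial stub that only mimics the
// shape of the new static ArrayAllocator API.
#include <cstddef>

template <class E>
struct StaticArrayAlloc {
  static E* allocate(size_t length)            { return new E[length]; }
  static void free(E* addr, size_t /*length*/) { delete[] addr; }
};

class HotCacheLike {
  size_t _cache_size;   // remembered so it can be passed back to free()
  int**  _cache;
 public:
  explicit HotCacheLike(size_t size)
    : _cache_size(size),
      _cache(StaticArrayAlloc<int*>::allocate(size)) {}

  ~HotCacheLike() {
    // Same size as at allocation time; no allocator member is needed.
    StaticArrayAlloc<int*>::free(_cache, _cache_size);
    _cache = NULL;
  }
};

int main() {
  HotCacheLike cache(1u << 10);
  return 0;
}
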
taskqueue.hpp
@@ -248,7 +248,6 @@ public:
 
 template <class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
 class GenericTaskQueue: public TaskQueueSuper<N, F> {
-  ArrayAllocator<E, F> _array_allocator;
 protected:
   typedef typename TaskQueueSuper<N, F>::Age Age;
   typedef typename TaskQueueSuper<N, F>::idx_t idx_t;
taskqueue.inline.hpp
@@ -44,12 +44,13 @@ inline GenericTaskQueueSet<T, F>::GenericTaskQueueSet(int n) : _n(n) {
 
 template<class E, MEMFLAGS F, unsigned int N>
 inline void GenericTaskQueue<E, F, N>::initialize() {
-  _elems = _array_allocator.allocate(N);
+  _elems = ArrayAllocator<E, F>::allocate(N);
 }
 
 template<class E, MEMFLAGS F, unsigned int N>
 inline GenericTaskQueue<E, F, N>::~GenericTaskQueue() {
-  FREE_C_HEAP_ARRAY(E, _elems);
+  assert(false, "This code is currently never called");
+  ArrayAllocator<E, F>::free(const_cast<E*>(_elems), N);
 }
 
 template<class E, MEMFLAGS F, unsigned int N>
allocation.hpp
@@ -724,30 +724,23 @@ public:
 // is set so that we always use malloc except for Solaris where we set the
 // limit to get mapped memory.
 template <class E, MEMFLAGS F>
-class ArrayAllocator VALUE_OBJ_CLASS_SPEC {
-  char* _addr;
-  bool _use_malloc;
-  size_t _size;
-  bool _free_in_destructor;
+class ArrayAllocator : public AllStatic {
+ private:
+  static bool should_use_malloc(size_t length);
 
-  static bool should_use_malloc(size_t size) {
-    return size < ArrayAllocatorMallocLimit;
-  }
+  static size_t size_for_malloc(size_t length);
+  static size_t size_for_mmap(size_t length);
 
-  static char* allocate_inner(size_t& size, bool& use_malloc);
- public:
-  ArrayAllocator(bool free_in_destructor = true) :
-    _addr(NULL), _use_malloc(false), _size(0), _free_in_destructor(free_in_destructor) { }
+  static E* allocate_malloc(size_t length);
+  static E* allocate_mmap(size_t length);
 
-  ~ArrayAllocator() {
-    if (_free_in_destructor) {
-      free();
-    }
-  }
+  static void free_malloc(E* addr, size_t length);
+  static void free_mmap(E* addr, size_t length);
 
-  E* allocate(size_t length);
-  E* reallocate(size_t new_length);
-  void free();
+ public:
+  static E* allocate(size_t length);
+  static E* reallocate(E* old_addr, size_t old_length, size_t new_length);
+  static void free(E* addr, size_t length);
 };
 
 #endif // SHARE_VM_MEMORY_ALLOCATION_HPP
allocation.inline.hpp
@@ -151,66 +151,87 @@ template <MEMFLAGS F> void CHeapObj<F>::operator delete [](void* p){
 }
 
 template <class E, MEMFLAGS F>
-char* ArrayAllocator<E, F>::allocate_inner(size_t &size, bool &use_malloc) {
-  char* addr = NULL;
-
-  if (use_malloc) {
-    addr = AllocateHeap(size, F);
-    if (addr == NULL && size >= (size_t)os::vm_allocation_granularity()) {
-      // malloc failed let's try with mmap instead
-      use_malloc = false;
-    } else {
-      return addr;
-    }
-  }
-
-  int alignment = os::vm_allocation_granularity();
-  size = align_size_up(size, alignment);
+size_t ArrayAllocator<E, F>::size_for_malloc(size_t length) {
+  return length * sizeof(E);
+}
+
+template <class E, MEMFLAGS F>
+size_t ArrayAllocator<E, F>::size_for_mmap(size_t length) {
+  size_t size = length * sizeof(E);
+  int alignment = os::vm_allocation_granularity();
+  return align_size_up(size, alignment);
+}
+
+template <class E, MEMFLAGS F>
+bool ArrayAllocator<E, F>::should_use_malloc(size_t length) {
+  return size_for_malloc(length) < ArrayAllocatorMallocLimit;
+}
+
+template <class E, MEMFLAGS F>
+E* ArrayAllocator<E, F>::allocate_malloc(size_t length) {
+  return (E*)AllocateHeap(size_for_malloc(length), F);
+}
+
+template <class E, MEMFLAGS F>
+E* ArrayAllocator<E, F>::allocate_mmap(size_t length) {
+  size_t size = size_for_mmap(length);
+  int alignment = os::vm_allocation_granularity();
 
-  addr = os::reserve_memory(size, NULL, alignment, F);
+  char* addr = os::reserve_memory(size, NULL, alignment, F);
   if (addr == NULL) {
     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "Allocator (reserve)");
   }
 
   os::commit_memory_or_exit(addr, size, !ExecMem, "Allocator (commit)");
-  return addr;
+
+  return (E*)addr;
 }
 
 template <class E, MEMFLAGS F>
 E* ArrayAllocator<E, F>::allocate(size_t length) {
-  assert(_addr == NULL, "Already in use");
+  if (should_use_malloc(length)) {
+    return allocate_malloc(length);
+  }
 
-  _size = sizeof(E) * length;
-  _use_malloc = should_use_malloc(_size);
-  _addr = allocate_inner(_size, _use_malloc);
-
-  return (E*)_addr;
+  return allocate_mmap(length);
 }
 
 template <class E, MEMFLAGS F>
-E* ArrayAllocator<E, F>::reallocate(size_t new_length) {
-  size_t new_size = sizeof(E) * new_length;
-  bool use_malloc = should_use_malloc(new_size);
-  char* new_addr = allocate_inner(new_size, use_malloc);
+E* ArrayAllocator<E, F>::reallocate(E* old_addr, size_t old_length, size_t new_length) {
+  E* new_addr = (new_length > 0)
+      ? allocate(new_length)
+      : NULL;
 
-  memcpy(new_addr, _addr, MIN2(new_size, _size));
+  if (new_addr != NULL && old_addr != NULL) {
+    memcpy(new_addr, old_addr, MIN2(old_length, new_length) * sizeof(E));
+  }
 
-  free();
-  _size = new_size;
-  _use_malloc = use_malloc;
-  _addr = new_addr;
-  return (E*)new_addr;
+  if (old_addr != NULL) {
+    free(old_addr, old_length);
+  }
+
+  return new_addr;
 }
 
 template<class E, MEMFLAGS F>
-void ArrayAllocator<E, F>::free() {
-  if (_addr != NULL) {
-    if (_use_malloc) {
-      FreeHeap(_addr);
+void ArrayAllocator<E, F>::free_malloc(E* addr, size_t /*length*/) {
+  FreeHeap(addr);
+}
+
+template<class E, MEMFLAGS F>
+void ArrayAllocator<E, F>::free_mmap(E* addr, size_t length) {
+  bool result = os::release_memory((char*)addr, size_for_mmap(length));
+  assert(result, "Failed to release memory");
+}
+
+template<class E, MEMFLAGS F>
+void ArrayAllocator<E, F>::free(E* addr, size_t length) {
+  if (addr != NULL) {
+    if (should_use_malloc(length)) {
+      free_malloc(addr, length);
     } else {
-      os::release_memory(_addr, _size);
+      free_mmap(addr, length);
     }
-    _addr = NULL;
   }
 }
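
The new reallocate() above is allocate-copy-free: allocate the new block, copy MIN2(old_length, new_length) elements, handle NULL and zero length explicitly, then release the old block. A standalone sketch of that copy-the-minimum pattern, with plain malloc/free standing in for the VM allocation calls (so, unlike the real code, the length is not needed to choose a free path):

// Standalone sketch of the reallocate pattern in the hunk above (not HotSpot
// code): allocate the new block, copy MIN(old_length, new_length) elements,
// then release the old block.
#include <cstddef>
#include <cstdlib>
#include <cstring>

template <class E>
E* reallocate_sketch(E* old_addr, size_t old_length, size_t new_length) {
  E* new_addr = (new_length > 0)
      ? (E*)::malloc(new_length * sizeof(E))
      : NULL;

  if (new_addr != NULL && old_addr != NULL) {
    size_t n = (old_length < new_length) ? old_length : new_length;  // MIN2
    ::memcpy(new_addr, old_addr, n * sizeof(E));
  }

  if (old_addr != NULL) {
    ::free(old_addr);
  }

  return new_addr;
}

int main() {
  int* a = reallocate_sketch<int>(NULL, 0, 8);   // grow from nothing
  if (a != NULL) {
    for (int i = 0; i < 8; i++) a[i] = i;
  }
  a = reallocate_sketch<int>(a, 8, 4);           // shrink: keeps the first 4 elements
  a = reallocate_sketch<int>(a, 4, 0);           // shrink to zero: frees, returns NULL
  return 0;
}
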
bitMap.cpp
@@ -30,14 +30,14 @@
 #include "utilities/copy.hpp"
 
 BitMap::BitMap(bm_word_t* map, idx_t size_in_bits) :
-  _map(map), _size(size_in_bits), _map_allocator(false)
+  _map(map), _size(size_in_bits)
 {
   assert(sizeof(bm_word_t) == BytesPerWord, "Implementation assumption.");
 }
 
 
 BitMap::BitMap(idx_t size_in_bits, bool in_resource_area) :
-  _map(NULL), _size(0), _map_allocator(false)
+  _map(NULL), _size(0)
 {
   assert(sizeof(bm_word_t) == BytesPerWord, "Implementation assumption.");
   resize(size_in_bits, in_resource_area);
@@ -54,7 +54,7 @@ void BitMap::resize(idx_t size_in_bits, bool in_resource_area) {
     Copy::disjoint_words((HeapWord*)old_map, (HeapWord*) _map,
                          MIN2(old_size_in_words, new_size_in_words));
   } else {
-    _map = _map_allocator.reallocate(new_size_in_words);
+    _map = ArrayAllocator<bm_word_t, mtInternal>::reallocate(old_map, old_size_in_words, new_size_in_words);
   }
 
   if (new_size_in_words > old_size_in_words) {
bitMap.hpp
@@ -48,7 +48,6 @@ class BitMap VALUE_OBJ_CLASS_SPEC {
   } RangeSizeHint;
 
  private:
-  ArrayAllocator<bm_word_t, mtInternal> _map_allocator;
   bm_word_t* _map;      // First word in bitmap
   idx_t      _size;     // Size of bitmap (in bits)
@@ -114,7 +113,7 @@ class BitMap VALUE_OBJ_CLASS_SPEC {
  public:
 
   // Constructs a bitmap with no map, and size 0.
-  BitMap() : _map(NULL), _size(0), _map_allocator(false) {}
+  BitMap() : _map(NULL), _size(0) {}
 
   // Constructs a bitmap with the given map and size.
   BitMap(bm_word_t* map, idx_t size_in_bits);
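
With _map_allocator removed, a BitMap is back to a bare word pointer plus a bit count, and resize() recomputes the word counts and passes them to the static reallocate. A minimal sketch of that shape (the ResizableBits type and its helpers are made up, not the HotSpot BitMap; storage is plain calloc/free):

// Minimal sketch of a bitmap that keeps only a word pointer and a bit count and
// resizes through a stateless, explicit-length reallocate, in the spirit of
// _map = ArrayAllocator<bm_word_t, mtInternal>::reallocate(old_map, old_words, new_words).
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>

class ResizableBits {
  static const size_t kBitsPerWord = 64;
  uint64_t* _map;    // first word in the bitmap
  size_t    _size;   // size in bits

  static size_t words_for(size_t bits) {
    return (bits + kBitsPerWord - 1) / kBitsPerWord;
  }

  // Stateless reallocate over explicit word counts: new block, copy the
  // minimum, release the old block.
  static uint64_t* reallocate_words(uint64_t* old_map, size_t old_words, size_t new_words) {
    uint64_t* new_map = (new_words > 0)
        ? (uint64_t*)::calloc(new_words, sizeof(uint64_t))
        : NULL;
    if (new_map != NULL && old_map != NULL) {
      size_t n = (old_words < new_words) ? old_words : new_words;
      ::memcpy(new_map, old_map, n * sizeof(uint64_t));
    }
    ::free(old_map);
    return new_map;
  }

 public:
  ResizableBits() : _map(NULL), _size(0) {}   // no allocator member to carry around
  ~ResizableBits() { ::free(_map); }

  void resize(size_t new_size_in_bits) {
    size_t old_words = words_for(_size);
    size_t new_words = words_for(new_size_in_bits);
    _map  = reallocate_words(_map, old_words, new_words);
    _size = new_size_in_bits;
  }
};

int main() {
  ResizableBits bits;
  bits.resize(1000);   // grows the backing word array
  bits.resize(10);     // shrinks it, keeping the low words
  return 0;
}
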