8345656: Move os alignment functions out of ReservedSpace

Reviewed-by: dholmes
This commit is contained in:
Stefan Karlsson 2024-12-11 11:53:25 +00:00
parent 2826838389
commit 076bfa688c
14 changed files with 46 additions and 50 deletions

View File

@@ -27,9 +27,14 @@
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"
#include "logging/log.hpp"
#include "nmt/memTracker.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
size_t G1BlockOffsetTable::compute_size(size_t mem_region_words) {
size_t number_of_slots = (mem_region_words / CardTable::card_size_in_words());
return os::align_up_vm_allocation_granularity(number_of_slots);
}
G1BlockOffsetTable::G1BlockOffsetTable(MemRegion heap, G1RegionToSpaceMapper* storage) :
_reserved(heap), _offset_base(nullptr) {

View File

@@ -29,7 +29,6 @@
#include "gc/shared/blockOffsetTable.hpp"
#include "gc/shared/cardTable.hpp"
#include "memory/memRegion.hpp"
#include "memory/virtualspace.hpp"
#include "utilities/globalDefinitions.hpp"
// This implementation of "G1BlockOffsetTable" divides the covered region
@@ -90,10 +89,7 @@ public:
// Return the number of slots needed for an offset array
// that covers mem_region_words words.
static size_t compute_size(size_t mem_region_words) {
size_t number_of_slots = (mem_region_words / CardTable::card_size_in_words());
return ReservedSpace::allocation_align_size_up(number_of_slots);
}
static size_t compute_size(size_t mem_region_words);
// Returns how many bytes of the heap a single byte of the BOT corresponds to.
static size_t heap_map_factor() {

View File

@@ -27,6 +27,7 @@
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/shared/memset_with_concurrent_readers.hpp"
#include "logging/log.hpp"
#include "runtime/os.hpp"
void G1CardTable::g1_mark_as_young(const MemRegion& mr) {
CardValue *const first = byte_for(mr.start());
@@ -47,6 +48,11 @@ void G1CardTableChangedListener::on_commit(uint start_idx, size_t num_regions, b
_card_table->clear_MemRegion(mr);
}
size_t G1CardTable::compute_size(size_t mem_region_size_in_words) {
size_t number_of_slots = (mem_region_size_in_words / _card_size_in_words);
return os::align_up_vm_allocation_granularity(number_of_slots);
}
void G1CardTable::initialize(G1RegionToSpaceMapper* mapper) {
mapper->set_mapping_changed_listener(&_listener);

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -107,10 +107,7 @@ public:
inline uint region_idx_for(CardValue* p);
static size_t compute_size(size_t mem_region_size_in_words) {
size_t number_of_slots = (mem_region_size_in_words / _card_size_in_words);
return ReservedSpace::allocation_align_size_up(number_of_slots);
}
static size_t compute_size(size_t mem_region_size_in_words);
// Returns how many bytes of the heap a single byte of the Card Table corresponds to.
static size_t heap_map_factor() { return _card_size; }

View File

@@ -991,7 +991,7 @@ HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
}
bool G1CollectedHeap::expand(size_t expand_bytes, WorkerThreads* pretouch_workers, double* expand_time_ms) {
size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
size_t aligned_expand_bytes = os::align_up_vm_page_size(expand_bytes);
aligned_expand_bytes = align_up(aligned_expand_bytes, G1HeapRegion::GrainBytes);
log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: " SIZE_FORMAT "B expansion amount: " SIZE_FORMAT "B",
@@ -1034,8 +1034,7 @@ bool G1CollectedHeap::expand_single_region(uint node_index) {
}
void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
size_t aligned_shrink_bytes =
ReservedSpace::page_align_size_down(shrink_bytes);
size_t aligned_shrink_bytes = os::align_down_vm_page_size(shrink_bytes);
aligned_shrink_bytes = align_down(aligned_shrink_bytes, G1HeapRegion::GrainBytes);
uint num_regions_to_remove = (uint)(shrink_bytes / G1HeapRegion::GrainBytes);

View File

@@ -32,6 +32,16 @@
#include "nmt/memTracker.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
// Return the number of slots needed for an offset array
// that covers mem_region_words words.
size_t SerialBlockOffsetTable::compute_size(size_t mem_region_words) {
assert(mem_region_words % CardTable::card_size_in_words() == 0, "precondition");
size_t number_of_slots = mem_region_words / CardTable::card_size_in_words();
return os::align_up_vm_allocation_granularity(number_of_slots);
}
SerialBlockOffsetTable::SerialBlockOffsetTable(MemRegion reserved,
size_t init_word_size):
@@ -61,14 +71,14 @@ void SerialBlockOffsetTable::resize(size_t new_word_size) {
size_t delta;
char* high = _vs.high();
if (new_size > old_size) {
delta = ReservedSpace::page_align_size_up(new_size - old_size);
delta = os::align_up_vm_page_size(new_size - old_size);
assert(delta > 0, "just checking");
if (!_vs.expand_by(delta)) {
vm_exit_out_of_memory(delta, OOM_MMAP_ERROR, "offset table expansion");
}
assert(_vs.high() == high + delta, "invalid expansion");
} else {
delta = ReservedSpace::page_align_size_down(old_size - new_size);
delta = os::align_down_vm_page_size(old_size - new_size);
if (delta == 0) return;
_vs.shrink_by(delta);
assert(_vs.high() == high - delta, "invalid expansion");

View File

@@ -55,12 +55,7 @@ class SerialBlockOffsetTable: public CHeapObj<mtGC> {
// Return the number of slots needed for an offset array
// that covers mem_region_words words.
static size_t compute_size(size_t mem_region_words) {
assert(mem_region_words % CardTable::card_size_in_words() == 0, "precondition");
size_t number_of_slots = mem_region_words / CardTable::card_size_in_words();
return ReservedSpace::allocation_align_size_up(number_of_slots);
}
static size_t compute_size(size_t mem_region_words);
// Mapping from address to object start array entry.
uint8_t* entry_for_addr(const void* const p) const;

View File

@@ -80,7 +80,7 @@ bool TenuredGeneration::expand(size_t bytes, size_t expand_bytes) {
if (bytes == 0) {
return true; // That's what grow_by(0) would return
}
size_t aligned_bytes = ReservedSpace::page_align_size_up(bytes);
size_t aligned_bytes = os::align_up_vm_page_size(bytes);
if (aligned_bytes == 0){
// The alignment caused the number of bytes to wrap. An expand_by(0) will
// return true with the implication that an expansion was done when it
@@ -88,9 +88,9 @@ bool TenuredGeneration::expand(size_t bytes, size_t expand_bytes) {
// but not a guarantee. Align down to give a best effort. This is likely
// the most that the generation can expand since it has some capacity to
// start with.
aligned_bytes = ReservedSpace::page_align_size_down(bytes);
aligned_bytes = os::align_down_vm_page_size(bytes);
}
size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
size_t aligned_expand_bytes = os::align_up_vm_page_size(expand_bytes);
bool success = false;
if (aligned_expand_bytes > aligned_bytes) {
success = grow_by(aligned_expand_bytes);
@@ -122,7 +122,7 @@ bool TenuredGeneration::grow_to_reserved() {
void TenuredGeneration::shrink(size_t bytes) {
assert_correct_size_change_locking();
size_t size = ReservedSpace::page_align_size_down(bytes);
size_t size = os::align_down_vm_page_size(bytes);
if (size == 0) {
return;
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,14 +25,13 @@
#include "precompiled.hpp"
#include "gc/shared/markBitMap.inline.hpp"
#include "memory/universe.hpp"
#include "memory/virtualspace.hpp"
void MarkBitMap::print_on_error(outputStream* st, const char* prefix) const {
_bm.print_on_error(st, prefix);
}
size_t MarkBitMap::compute_size(size_t heap_size) {
return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
return os::align_up_vm_allocation_granularity(heap_size / mark_distance());
}
size_t MarkBitMap::mark_distance() {

View File

@@ -27,6 +27,7 @@
#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahMarkBitMap.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "runtime/os.hpp"
#include "utilities/globalDefinitions.hpp"
ShenandoahMarkBitMap::ShenandoahMarkBitMap(MemRegion heap, MemRegion storage) :
@@ -37,7 +38,7 @@ ShenandoahMarkBitMap::ShenandoahMarkBitMap(MemRegion heap, MemRegion storage) :
}
size_t ShenandoahMarkBitMap::compute_size(size_t heap_size) {
return ReservedSpace::allocation_align_size_up(heap_size / mark_distance());
return os::align_up_vm_allocation_granularity(heap_size / mark_distance());
}
size_t ShenandoahMarkBitMap::mark_distance() {

View File

@@ -420,11 +420,11 @@ void* JfrVirtualMemory::initialize(size_t reservation_size_request_bytes,
_aligned_datum_size_bytes = align_up(datum_size_bytes, BytesPerWord);
assert(is_aligned(_aligned_datum_size_bytes, BytesPerWord), "invariant");
reservation_size_request_bytes = ReservedSpace::allocation_align_size_up(reservation_size_request_bytes);
reservation_size_request_bytes = os::align_up_vm_allocation_granularity(reservation_size_request_bytes);
assert(is_aligned(reservation_size_request_bytes, os::vm_allocation_granularity()), "invariant");
assert(is_aligned(reservation_size_request_bytes, _aligned_datum_size_bytes), "invariant");
block_size_request_bytes = MAX2(block_size_request_bytes, (size_t)os::vm_allocation_granularity());
block_size_request_bytes = ReservedSpace::allocation_align_size_up(block_size_request_bytes);
block_size_request_bytes = os::align_up_vm_allocation_granularity(block_size_request_bytes);
assert(is_aligned(block_size_request_bytes, os::vm_allocation_granularity()), "invariant");
assert(is_aligned(block_size_request_bytes, _aligned_datum_size_bytes), "invariant");
// adjustment to valid ratio in units of vm_allocation_granularity

View File

@@ -329,20 +329,6 @@ ReservedSpace ReservedSpace::partition(size_t offset, size_t partition_size, siz
return result;
}
size_t ReservedSpace::page_align_size_up(size_t size) {
return align_up(size, os::vm_page_size());
}
size_t ReservedSpace::page_align_size_down(size_t size) {
return align_down(size, os::vm_page_size());
}
size_t ReservedSpace::allocation_align_size_up(size_t size) {
return align_up(size, os::vm_allocation_granularity());
}
void ReservedSpace::release() {
if (is_reserved()) {
char *real_base = _base - _noaccess_prefix;

View File

@@ -103,10 +103,6 @@ class ReservedSpace {
inline ReservedSpace last_part (size_t partition_size);
inline ReservedSpace partition (size_t offset, size_t partition_size);
// Alignment
static size_t page_align_size_up(size_t size);
static size_t page_align_size_down(size_t size);
static size_t allocation_align_size_up(size_t size);
bool contains(const void* p) const {
return (base() <= ((char*)p)) && (((char*)p) < (base() + size()));
}

View File

@@ -27,6 +27,7 @@
#include "jvm_md.h"
#include "runtime/osInfo.hpp"
#include "utilities/align.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
@@ -405,6 +406,9 @@ class os: AllStatic {
// Return the default page size.
static size_t vm_page_size() { return OSInfo::vm_page_size(); }
static size_t align_up_vm_page_size(size_t size) { return align_up (size, os::vm_page_size()); }
static size_t align_down_vm_page_size(size_t size) { return align_down(size, os::vm_page_size()); }
// The set of page sizes which the VM is allowed to use (may be a subset of
// the page sizes actually available on the platform).
static const PageSizes& page_sizes() { return _page_sizes; }
@@ -445,6 +449,8 @@
static size_t vm_allocation_granularity() { return OSInfo::vm_allocation_granularity(); }
static size_t align_up_vm_allocation_granularity(size_t size) { return align_up(size, os::vm_allocation_granularity()); }
// Returns the lowest address the process is allowed to map against.
static size_t vm_min_address();