mirror of
https://github.com/openjdk/jdk.git
synced 2026-02-26 10:10:19 +00:00
253 lines
8.7 KiB
C++
253 lines
8.7 KiB
C++
/*
|
|
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
|
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
|
*
|
|
* This code is free software; you can redistribute it and/or modify it
|
|
* under the terms of the GNU General Public License version 2 only, as
|
|
* published by the Free Software Foundation.
|
|
*
|
|
* This code is distributed in the hope that it will be useful, but WITHOUT
|
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
* version 2 for more details (a copy is included in the LICENSE file that
|
|
* accompanied this code).
|
|
*
|
|
* You should have received a copy of the GNU General Public License version
|
|
* 2 along with this work; if not, write to the Free Software Foundation,
|
|
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
|
*
|
|
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
|
* or visit www.oracle.com if you need additional information or have any
|
|
* questions.
|
|
*
|
|
*/
|
|
|
|
#include "precompiled.hpp"
|
|
#include "code/codeBlob.hpp"
|
|
#include "code/codeCache.hpp"
|
|
#include "code/stubs.hpp"
|
|
#include "memory/allocation.inline.hpp"
|
|
#include "oops/oop.inline.hpp"
|
|
#include "runtime/mutexLocker.hpp"
|
|
#include "utilities/align.hpp"
|
|
#include "utilities/checkedCast.hpp"
|
|
|
|
|
|
// Implementation of StubQueue
|
|
//
|
|
// Standard wrap-around queue implementation; the queue dimensions
|
|
// are specified by the _queue_begin & _queue_end indices. The queue
|
|
// can be in two states (transparent to the outside):
|
|
//
|
|
// a) contiguous state: all queue entries in one block (or empty)
|
|
//
|
|
// Queue: |...|XXXXXXX|...............|
|
|
// ^0 ^begin ^end ^size = limit
|
|
// |_______|
|
|
// one block
|
|
//
|
|
// b) non-contiguous state: queue entries in two blocks
|
|
//
|
|
// Queue: |XXX|.......|XXXXXXX|.......|
|
|
// ^0 ^end ^begin ^limit ^size
|
|
// |___| |_______|
|
|
// 1st block 2nd block
|
|
//
|
|
// In the non-contiguous state, the wrap-around point is
|
|
// indicated via the _buffer_limit index since the last
|
|
// queue entry may not fill up the queue completely in
|
|
// which case we need to know where the 2nd block's end
|
|
// is to do the proper wrap-around. When removing the
|
|
// last entry of the 2nd block, _buffer_limit is reset
|
|
// to _buffer_size.
|
|
//
|
|
// CAUTION: DO NOT MESS WITH THIS CODE IF YOU CANNOT PROVE
|
|
// ITS CORRECTNESS! THIS CODE IS MORE SUBTLE THAN IT LOOKS!
|
|
|
|
|
|
// Construct a stub queue backed by a freshly allocated BufferBlob.
// The queue starts in the contiguous, empty state (_queue_begin ==
// _queue_end == 0, _buffer_limit == _buffer_size).  Exits the VM if
// the code cache cannot provide the buffer.
StubQueue::StubQueue(StubInterface* stub_interface, int buffer_size,
                     Mutex* lock, const char* name) : _mutex(lock) {
  // Round the requested size up to a double-word boundary before
  // allocating the backing blob in the code cache.
  intptr_t size = align_up(buffer_size, 2*BytesPerWord);
  BufferBlob* blob = BufferBlob::create(name, checked_cast<int>(size));
  if( blob == nullptr) {
    vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "CodeCache: no room for %s", name);
  }
  _stub_interface  = stub_interface;

  // The code blob alignment can be smaller than the requested stub alignment.
  // Make sure we put the stubs at their requested alignment by aligning the buffer base and limits.
  address aligned_start = align_up(blob->content_begin(), stub_alignment());
  address aligned_end = align_down(blob->content_end(), stub_alignment());
  // checked_cast guards against silent narrowing of the pointer difference,
  // consistent with the checked_cast on 'size' above.
  int aligned_size = checked_cast<int>(aligned_end - aligned_start);
  _buffer_size = aligned_size;
  _buffer_limit = aligned_size;
  _stub_buffer = aligned_start;
  _queue_begin = 0;
  _queue_end = 0;
  _number_of_stubs = 0;
}
|
|
|
|
|
|
// Destructor: deliberately unimplemented — see note below.
StubQueue::~StubQueue() {
  // Note: Currently StubQueues are never destroyed so nothing needs to be done here.
  // If we want to implement the destructor, we need to release the BufferBlob
  // allocated in the constructor (i.e., we need to keep it around or look it
  // up via CodeCache::find_blob(...).
  Unimplemented();
}
|
|
|
|
void StubQueue::deallocate_unused_tail() {
|
|
CodeBlob* blob = CodeCache::find_blob((void*)_stub_buffer);
|
|
CodeCache::free_unused_tail(blob, used_space());
|
|
// Update the limits to the new, trimmed CodeBlob size
|
|
address aligned_start = align_up(blob->content_begin(), stub_alignment());
|
|
address aligned_end = align_down(blob->content_end(), stub_alignment());
|
|
int aligned_size = aligned_end - aligned_start;
|
|
_buffer_size = aligned_size;
|
|
_buffer_limit = aligned_size;
|
|
}
|
|
|
|
// Return the queued stub whose code range covers pc, or nullptr if
// pc does not fall inside any stub in this queue.
Stub* StubQueue::stub_containing(address pc) const {
  // A pc outside the stub buffer cannot belong to any queued stub.
  if (!contains(pc)) {
    return nullptr;
  }
  // Linear scan over the queued stubs in queue order.
  for (Stub* s = first(); s != nullptr; s = next(s)) {
    if (stub_contains(s, pc)) {
      return s;
    }
  }
  return nullptr;
}
|
|
|
|
|
|
// Allocate a stub of code_size bytes and immediately commit it.
// Returns nullptr (without committing) when no space is available.
Stub* StubQueue::request_committed(int code_size) {
  Stub* result = request(code_size);
  if (result == nullptr) {
    return nullptr;
  }
  // commit() finalizes the stub size and releases the queue lock
  // taken by request().
  commit(code_size);
  return result;
}
|
|
|
|
// Compute the total footprint of a stub holding code_size bytes of
// code: the distance from the stub header to the end of its code,
// rounded up so the next stub starts at the required alignment.
int StubQueue::compute_stub_size(Stub* stub, int code_size) {
  address header_begin     = (address) stub;
  address aligned_code_end = align_up(stub_code_begin(stub) + code_size, stub_alignment());
  return (int)(aligned_code_end - header_begin);
}
|
|
|
|
// Reserve space for a stub with requested_code_size bytes of code.
// On success, returns the stub WITH the queue lock still held — the
// caller must follow up with commit(), which releases the lock.
// On failure, returns nullptr with the lock released.
Stub* StubQueue::request(int requested_code_size) {
  assert(requested_code_size > 0, "requested_code_size must be > 0");
  // NOTE: on the success paths below the lock is intentionally NOT
  // released here; the matching commit() call unlocks it.
  if (_mutex != nullptr) _mutex->lock_without_safepoint_check();
  Stub* s = current_stub();
  // Full stub footprint: header + code, rounded up to stub alignment.
  int requested_size = compute_stub_size(s, requested_code_size);
  if (requested_size <= available_space()) {
    if (is_contiguous()) {
      // Queue: |...|XXXXXXX|.............|
      //        ^0  ^begin  ^end          ^size = limit
      assert(_buffer_limit == _buffer_size, "buffer must be fully usable");
      if (_queue_end + requested_size <= _buffer_size) {
        // code fits in at the end => nothing to do
        stub_initialize(s, requested_size);
        return s;
      } else {
        // stub doesn't fit in at the queue end
        // => reduce buffer limit & wrap around
        assert(!is_empty(), "just checkin'");
        _buffer_limit = _queue_end;
        _queue_end = 0;
      }
    }
  }
  // Second attempt: either the queue was already non-contiguous, or we
  // just wrapped around above (so available_space() is re-evaluated
  // against the new indices).
  if (requested_size <= available_space()) {
    assert(!is_contiguous(), "just checkin'");
    assert(_buffer_limit <= _buffer_size, "queue invariant broken");
    // Queue: |XXX|.......|XXXXXXX|.......|
    //        ^0  ^end    ^begin  ^limit  ^size
    // Re-read the current stub: _queue_end may have moved to 0 above.
    s = current_stub();
    stub_initialize(s, requested_size);
    return s;
  }
  // Not enough space left
  if (_mutex != nullptr) _mutex->unlock();
  return nullptr;
}
|
|
|
|
|
|
// Finalize the stub reserved by the preceding request() call, shrinking
// it to the size actually used, and release the queue lock that
// request() left held.
void StubQueue::commit(int committed_code_size) {
  assert(committed_code_size > 0, "committed_code_size must be > 0");
  Stub* s = current_stub();
  int committed_size = compute_stub_size(s, committed_code_size);
  assert(committed_size <= stub_size(s), "committed size must not exceed requested size");
  // Re-initialize the stub with its final (possibly smaller) size and
  // advance the queue end past it.
  stub_initialize(s, committed_size);
  _queue_end += committed_size;
  _number_of_stubs++;
  // Release the lock acquired by the matching request() call.
  if (_mutex != nullptr) _mutex->unlock();
  debug_only(stub_verify(s);)
}
|
|
|
|
|
|
// Remove the oldest stub (the one at _queue_begin); no-op on an empty
// queue.  Handles both the "queue becomes empty" and the "begin reaches
// the wrap-around point" transitions described in the file header.
void StubQueue::remove_first() {
  if (number_of_stubs() == 0) return;
  Stub* s = first();
  debug_only(stub_verify(s);)
  stub_finalize(s);
  _queue_begin += stub_size(s);
  assert(_queue_begin <= _buffer_limit, "sanity check");
  if (_queue_begin == _queue_end) {
    // buffer empty
    // => reset queue indices
    _queue_begin = 0;
    _queue_end = 0;
    _buffer_limit = _buffer_size;
  } else if (_queue_begin == _buffer_limit) {
    // buffer limit reached
    // => reset buffer limit & wrap around
    // (this removes the last stub of the 2nd block, making the queue
    // contiguous again)
    _buffer_limit = _buffer_size;
    _queue_begin = 0;
  }
  _number_of_stubs--;
}
|
|
|
|
|
|
void StubQueue::remove_first(int n) {
|
|
int i = MIN2(n, number_of_stubs());
|
|
while (i-- > 0) remove_first();
|
|
}
|
|
|
|
|
|
void StubQueue::remove_all(){
|
|
debug_only(verify();)
|
|
remove_first(number_of_stubs());
|
|
assert(number_of_stubs() == 0, "sanity check");
|
|
}
|
|
|
|
|
|
// Consistency check of the queue's index invariants, alignment, and
// contents.  Guarantees (not asserts) so this fires in product builds
// too when called.
void StubQueue::verify() {
  // verify only if initialized
  if (_stub_buffer == nullptr) return;
  MutexLocker lock(_mutex, Mutex::_no_safepoint_check_flag);
  // verify index boundaries
  guarantee(0 <= _buffer_size, "buffer size must be positive");
  guarantee(0 <= _buffer_limit && _buffer_limit <= _buffer_size , "_buffer_limit out of bounds");
  // NOTE(review): _queue_begin uses a strict '<' bound while _queue_end
  // allows '=='; presumably because _queue_begin wraps to 0 on reaching
  // _buffer_limit (see remove_first) — confirm before changing.
  guarantee(0 <= _queue_begin && _queue_begin < _buffer_limit, "_queue_begin out of bounds");
  guarantee(0 <= _queue_end && _queue_end <= _buffer_limit, "_queue_end out of bounds");
  // verify alignment
  guarantee(_queue_begin % stub_alignment() == 0, "_queue_begin not aligned");
  guarantee(_queue_end % stub_alignment() == 0, "_queue_end not aligned");
  // verify buffer limit/size relationship
  if (is_contiguous()) {
    guarantee(_buffer_limit == _buffer_size, "_buffer_limit must equal _buffer_size");
  }
  // verify contents: every stub passes its own checks and the count
  // matches _number_of_stubs
  int n = 0;
  for (Stub* s = first(); s != nullptr; s = next(s)) {
    stub_verify(s);
    n++;
  }
  guarantee(n == number_of_stubs(), "number of stubs inconsistent");
  // equal begin/end indices are only legal for an empty queue
  guarantee(_queue_begin != _queue_end || n == 0, "buffer indices must be the same");
}
|
|
|
|
|
|
void StubQueue::print() {
|
|
ConditionalMutexLocker lock(_mutex, _mutex != nullptr, Mutex::_no_safepoint_check_flag);
|
|
for (Stub* s = first(); s != nullptr; s = next(s)) {
|
|
stub_print(s);
|
|
}
|
|
}
|