8352251: Implement JEP 518: JFR Cooperative Sampling

Co-authored-by: Aleksey Shipilev <shade@openjdk.org>
Co-authored-by: Erik Österlund <eosterlund@openjdk.org>
Co-authored-by: Boris Ulasevich <bulasevich@openjdk.org>
Co-authored-by: Patricio Chilano Mateo <pchilanomate@openjdk.org>
Co-authored-by: Martin Doerr <mdoerr@openjdk.org>
Co-authored-by: Fei Yang <fyang@openjdk.org>
Co-authored-by: Amit Kumar <amitkumar@openjdk.org>
Reviewed-by: eosterlund, egahlin
Markus Grönlund 2025-05-26 18:37:55 +00:00
parent e8eff4d25b
commit bbceab0725
112 changed files with 2776 additions and 1340 deletions
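JEP 518 replaces purely asynchronous JFR stack sampling with a cooperative scheme: the sampled thread marks the short windows in which its frames are being torn down, and polls on method return so that sampler-requested work is always observed before a frame disappears. The recurring pattern across the per-CPU diffs below is a per-thread byte at SAMPLING_CRITICAL_SECTION_OFFSET_JFR that brackets frame teardown. A minimal C++ sketch of that handshake, with illustrative names, types, and memory ordering (not the HotSpot implementation):

#include <atomic>

// Stand-in for the per-thread byte the assembler sets via
// SAMPLING_CRITICAL_SECTION_OFFSET_JFR; type and ordering are assumptions.
struct ThreadSampleState {
  std::atomic<bool> in_sampling_critical_section{false};
};

void enter_jfr_critical_section(ThreadSampleState& t) {
  t.in_sampling_critical_section.store(true, std::memory_order_relaxed);
}

void leave_jfr_critical_section(ThreadSampleState& t) {
  t.in_sampling_critical_section.store(false, std::memory_order_relaxed);
}

// The sampler consults the flag and skips threads whose frames are in a
// transient state (e.g. mid remove_activation).
bool safe_to_sample(const ThreadSampleState& t) {
  return !t.in_sampling_critical_section.load(std::memory_order_relaxed);
}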


@ -828,7 +828,6 @@ void JavaFrameAnchor::make_walkable() {
// already walkable?
if (walkable()) return;
vmassert(last_Java_sp() != nullptr, "not called from Java code?");
vmassert(last_Java_pc() == nullptr, "already walkable");
_last_Java_pc = (address)_last_Java_sp[-1];
vmassert(walkable(), "something went wrong");
}
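The dropped vmassert(last_Java_pc() == nullptr, "already walkable") recurs on several platforms in this commit, plausibly because with a cooperative sampler the pc slot may legitimately be populated already, leaving make_walkable() only needing to be idempotent. A hedged model of the resulting behavior (Anchor is a stand-in, not the HotSpot class):

#include <cstdint>

struct Anchor {
  intptr_t*      last_Java_sp = nullptr;
  unsigned char* last_Java_pc = nullptr;

  bool walkable() const { return last_Java_pc != nullptr; }

  void make_walkable() {
    if (walkable()) return;  // may already be set; just return
    // Read the return pc from the word below last_Java_sp, mirroring
    // _last_Java_pc = (address)_last_Java_sp[-1] above.
    last_Java_pc = reinterpret_cast<unsigned char*>(last_Java_sp[-1]);
  }
};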


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -35,6 +35,53 @@
// Inline functions for AArch64 frames:
#if INCLUDE_JFR
// Static helper routines
inline address frame::interpreter_bcp(const intptr_t* fp) {
assert(fp != nullptr, "invariant");
return reinterpret_cast<address>(fp[frame::interpreter_frame_bcp_offset]);
}
inline address frame::interpreter_return_address(const intptr_t* fp) {
assert(fp != nullptr, "invariant");
return reinterpret_cast<address>(fp[frame::return_addr_offset]);
}
inline intptr_t* frame::interpreter_sender_sp(const intptr_t* fp) {
assert(fp != nullptr, "invariant");
return reinterpret_cast<intptr_t*>(fp[frame::interpreter_frame_sender_sp_offset]);
}
inline bool frame::is_interpreter_frame_setup_at(const intptr_t* fp, const void* sp) {
assert(fp != nullptr, "invariant");
assert(sp != nullptr, "invariant");
return sp <= fp + frame::interpreter_frame_initial_sp_offset;
}
inline intptr_t* frame::sender_sp(intptr_t* fp) {
assert(fp != nullptr, "invariant");
return fp + frame::sender_sp_offset;
}
inline intptr_t* frame::link(const intptr_t* fp) {
assert(fp != nullptr, "invariant");
return reinterpret_cast<intptr_t*>(fp[frame::link_offset]);
}
inline address frame::return_address(const intptr_t* sp) {
assert(sp != nullptr, "invariant");
return reinterpret_cast<address>(sp[-1]);
}
inline intptr_t* frame::fp(const intptr_t* sp) {
assert(sp != nullptr, "invariant");
return reinterpret_cast<intptr_t*>(sp[-2]);
}
#endif // INCLUDE_JFR
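These JFR-only helpers let the sampler decode a frame from raw sp/fp values captured out of a signal context, without materializing a frame object; near-identical sets appear below for ARM, PPC, RISC-V, s390, and x86. Roughly, a walker built on them might step to the sender like this (a sketch; the offsets are stand-ins for the per-platform constants above):

#include <cstdint>
using address = unsigned char*;

// Hypothetical stand-ins for the per-platform frame offsets.
constexpr int link_offset        = 0;  // saved fp
constexpr int return_addr_offset = 1;  // saved pc
constexpr int sender_sp_offset   = 2;

struct RawFrame { intptr_t* sp; intptr_t* fp; address pc; };

RawFrame sender_of(const RawFrame& f) {
  RawFrame s;
  s.sp = f.fp + sender_sp_offset;                              // frame::sender_sp
  s.fp = reinterpret_cast<intptr_t*>(f.fp[link_offset]);       // frame::link
  s.pc = reinterpret_cast<address>(f.fp[return_addr_offset]);  // return address
  return s;
}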
// Constructors:
inline frame::frame() {


@ -458,9 +458,10 @@ void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
// remove activation
//
// Apply stack watermark barrier.
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Apply stack watermark barrier.
// Notify JVMTI.
// Remove the activation from the stack.
//
// If there are locked Java monitors
@ -470,30 +471,14 @@ void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
// installs IllegalMonitorStateException
// Else
// no error processing
void InterpreterMacroAssembler::remove_activation(
TosState state,
bool throw_monitor_exception,
bool install_monitor_exception,
bool notify_jvmdi) {
void InterpreterMacroAssembler::remove_activation(TosState state,
bool throw_monitor_exception,
bool install_monitor_exception,
bool notify_jvmdi) {
// Note: Registers r3 xmm0 may be in use for the
// result check if synchronized method
Label unlocked, unlock, no_unlock;
// The poll below is for the stack watermark barrier. It allows lazily fixing up
// frames that would normally not be safe to use. Such bad returns into unsafe
// territory of the stack will call InterpreterRuntime::at_unwind.
Label slow_path;
Label fast_path;
safepoint_poll(slow_path, true /* at_return */, false /* acquire */, false /* in_nmethod */);
br(Assembler::AL, fast_path);
bind(slow_path);
push(state);
set_last_Java_frame(esp, rfp, (address)pc(), rscratch1);
super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind), rthread);
reset_last_Java_frame(true);
pop(state);
bind(fast_path);
// get the value of _do_not_unlock_if_synchronized into r3
const Address do_not_unlock_if_synchronized(rthread,
in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
@ -611,7 +596,24 @@ void InterpreterMacroAssembler::remove_activation(
bind(no_unlock);
// jvmti support
JFR_ONLY(enter_jfr_critical_section();)
// The poll below is for the stack watermark barrier. It allows lazily fixing up
// frames that would normally not be safe to use. Such bad returns into unsafe
// territory of the stack will call InterpreterRuntime::at_unwind.
Label slow_path;
Label fast_path;
safepoint_poll(slow_path, true /* at_return */, false /* acquire */, false /* in_nmethod */);
br(Assembler::AL, fast_path);
bind(slow_path);
push(state);
set_last_Java_frame(esp, rfp, pc(), rscratch1);
super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind), rthread);
reset_last_Java_frame(true);
pop(state);
bind(fast_path);
// JVMTI support. Make sure the safepoint poll test is issued prior.
if (notify_jvmdi) {
notify_method_exit(state, NotifyJVMTI); // preserve TOSCA
} else {
@ -638,6 +640,8 @@ void InterpreterMacroAssembler::remove_activation(
cmp(rscratch2, rscratch1);
br(Assembler::LS, no_reserved_zone_enabling);
JFR_ONLY(leave_jfr_critical_section();)
call_VM_leaf(
CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
call_VM(noreg, CAST_FROM_FN_PTR(address,
@ -647,10 +651,14 @@ void InterpreterMacroAssembler::remove_activation(
bind(no_reserved_zone_enabling);
}
// restore sender esp
mov(esp, rscratch2);
// remove frame anchor
leave();
JFR_ONLY(leave_jfr_critical_section();)
// restore sender esp
mov(esp, rscratch2);
// If we're returning to interpreted code we will shortly be
// adjusting SP to allow some space for ESP. If we're returning to
// compiled code the saved sender SP was saved in sender_sp, so this
@ -658,6 +666,19 @@ void InterpreterMacroAssembler::remove_activation(
andr(sp, esp, -16);
}
#if INCLUDE_JFR
void InterpreterMacroAssembler::enter_jfr_critical_section() {
const Address sampling_critical_section(rthread, in_bytes(SAMPLING_CRITICAL_SECTION_OFFSET_JFR));
mov(rscratch1, true);
strb(rscratch1, sampling_critical_section);
}
void InterpreterMacroAssembler::leave_jfr_critical_section() {
const Address sampling_critical_section(rthread, in_bytes(SAMPLING_CRITICAL_SECTION_OFFSET_JFR));
strb(zr, sampling_critical_section);
}
#endif // INCLUDE_JFR
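The net reordering in remove_activation() is the point of this hunk: the safepoint poll and the JVMTI notification now happen inside the JFR critical section, and the flag is cleared only once the frame anchor is gone. A compilable outline of the new sequence (free functions stand in for the assembler sequences; a reading aid, not the generated code):

static void enter_jfr_critical_section() {}
static void poll_for_stack_watermark_barrier() {}  // slow path -> at_unwind
static void notify_jvmti_method_exit() {}
static void remove_frame_anchor() {}               // leave()
static void leave_jfr_critical_section() {}
static void restore_sender_sp_and_return() {}

void remove_activation_outline() {
  enter_jfr_critical_section();
  poll_for_stack_watermark_barrier();  // must precede the JVMTI notify
  notify_jvmti_method_exit();
  remove_frame_anchor();
  leave_jfr_critical_section();        // frame is gone; sampling is safe again
  restore_sender_sp_and_return();
}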
// Lock object
//
// Args:


@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -310,6 +310,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
void notify_method_entry();
void notify_method_exit(TosState state, NotifyMethodExitMode mode);
JFR_ONLY(void enter_jfr_critical_section();)
JFR_ONLY(void leave_jfr_critical_section();)
virtual void _call_Unimplemented(address call_site) {
save_bcp();
set_last_Java_frame(esp, rfp, (address) pc(), rscratch1);


@ -1985,6 +1985,23 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ leave();
#if INCLUDE_JFR
// We need to do a poll test after unwind in case the sampler
// managed to sample the native frame after returning to Java.
Label L_return;
__ ldr(rscratch1, Address(rthread, JavaThread::polling_word_offset()));
address poll_test_pc = __ pc();
__ relocate(relocInfo::poll_return_type);
__ tbz(rscratch1, log2i_exact(SafepointMechanism::poll_bit()), L_return);
assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
"polling page return stub not created yet");
address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
__ adr(rscratch1, InternalAddress(poll_test_pc));
__ str(rscratch1, Address(rthread, JavaThread::saved_exception_pc_offset()));
__ far_jump(RuntimeAddress(stub));
__ bind(L_return);
#endif // INCLUDE_JFR
// Any exception pending?
__ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
__ cbnz(rscratch1, exception_pending);
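The new block gives the native wrapper the same return poll that compiled frames already perform: load the polling word, test the poll bit at a pc tagged with a poll_return relocation, and, on the slow path, record that pc in saved_exception_pc before jumping to the shared handler blob. Functionally (a C++ model with assumed types, not the emitted code):

#include <atomic>
#include <cstdint>

std::atomic<uintptr_t> polling_word{0};
uintptr_t saved_exception_pc = 0;

constexpr uintptr_t poll_bit = 1;  // SafepointMechanism::poll_bit() analogue

void return_poll(uintptr_t poll_test_pc, void (*polling_page_return_handler)()) {
  if (polling_word.load(std::memory_order_relaxed) & poll_bit) {
    saved_exception_pc = poll_test_pc;  // lets the handler find the poll site
    polling_page_return_handler();      // far_jump to the shared blob
  }
  // else: fall through to the normal return path
}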


@ -1593,6 +1593,30 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
__ bind(L);
}
#if INCLUDE_JFR
__ enter_jfr_critical_section();
// This poll test is to uphold the invariant that a JFR sampled frame
// must not return to its caller without a prior safepoint poll check.
// The earlier poll check in this routine is insufficient for this purpose
// because the thread has transitioned back to Java.
Label slow_path;
Label fast_path;
__ safepoint_poll(slow_path, true /* at_return */, false /* acquire */, false /* in_nmethod */);
__ br(Assembler::AL, fast_path);
__ bind(slow_path);
__ push(dtos);
__ push(ltos);
__ set_last_Java_frame(esp, rfp, __ pc(), rscratch1);
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind), rthread);
__ reset_last_Java_frame(true);
__ pop(ltos);
__ pop(dtos);
__ bind(fast_path);
#endif // INCLUDE_JFR
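The push(dtos)/push(ltos) pair around the at_unwind call is there because a native method's result may be live in either the floating-point or the integer return register, and the runtime call may clobber both. As a toy model of the save/restore discipline (illustrative only):

#include <cstdint>

struct ReturnRegs { int64_t integral; double fp; };  // ltos / dtos analogues

template <typename Call>
void with_result_preserved(ReturnRegs& regs, Call runtime_call) {
  ReturnRegs saved = regs;  // push(dtos); push(ltos);
  runtime_call();           // super_call_VM_leaf(at_unwind, thread)
  regs = saved;             // pop(ltos); pop(dtos); -- reverse order
}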
// jvmti support
// Note: This must happen _after_ handling/throwing any exceptions since
// the exception handler code notifies the runtime of method exits
@ -1615,6 +1639,8 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
// remove frame anchor
__ leave();
JFR_ONLY(__ leave_jfr_critical_section();)
// restore sender sp
__ mov(sp, esp);


@ -1890,6 +1890,8 @@ void TemplateTable::branch(bool is_jsr, bool is_wide)
__ mov(r19, r0); // save the nmethod
JFR_ONLY(__ enter_jfr_critical_section();)
call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
// r0 is OSR buffer, move it to expected parameter location
@ -1901,6 +1903,9 @@ void TemplateTable::branch(bool is_jsr, bool is_wide)
Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
// remove frame anchor
__ leave();
JFR_ONLY(__ leave_jfr_critical_section();)
// Ensure compiled code always sees stack at proper alignment
__ andr(sp, esp, -16);


@ -108,6 +108,9 @@
frame(intptr_t* sp, intptr_t* fp);
frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb, bool allow_cb_null = false);
void setup(address pc);
void init(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc);
// accessors for the instance variables


@ -27,9 +27,58 @@
#include "code/codeCache.hpp"
#include "code/vmreg.inline.hpp"
#include "runtime/sharedRuntime.hpp"
// Inline functions for ARM frames:
#if INCLUDE_JFR
// Static helper routines
inline address frame::interpreter_bcp(const intptr_t* fp) {
assert(fp != nullptr, "invariant");
return reinterpret_cast<address>(fp[frame::interpreter_frame_bcp_offset]);
}
inline address frame::interpreter_return_address(const intptr_t* fp) {
assert(fp != nullptr, "invariant");
return reinterpret_cast<address>(fp[frame::return_addr_offset]);
}
inline intptr_t* frame::interpreter_sender_sp(const intptr_t* fp) {
assert(fp != nullptr, "invariant");
return reinterpret_cast<intptr_t*>(fp[frame::interpreter_frame_sender_sp_offset]);
}
inline bool frame::is_interpreter_frame_setup_at(const intptr_t* fp, const void* sp) {
assert(fp != nullptr, "invariant");
assert(sp != nullptr, "invariant");
return sp <= fp + frame::interpreter_frame_initial_sp_offset;
}
inline intptr_t* frame::sender_sp(intptr_t* fp) {
assert(fp != nullptr, "invariant");
return fp + frame::sender_sp_offset;
}
inline intptr_t* frame::link(const intptr_t* fp) {
assert(fp != nullptr, "invariant");
return reinterpret_cast<intptr_t*>(fp[frame::link_offset]);
}
inline address frame::return_address(const intptr_t* sp) {
assert(sp != nullptr, "invariant");
return reinterpret_cast<address>(sp[-1]);
}
inline intptr_t* frame::fp(const intptr_t* sp) {
assert(sp != nullptr, "invariant");
return reinterpret_cast<intptr_t*>(sp[-2]);
}
#endif // INCLUDE_JFR
// Constructors:
inline frame::frame() {
@ -54,21 +103,30 @@ inline void frame::init(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, add
_fp = fp;
_pc = pc;
assert(pc != nullptr, "no pc?");
_on_heap = false;
_oop_map = nullptr;
_cb = CodeCache::find_blob(pc);
adjust_unextended_sp();
DEBUG_ONLY(_frame_index = -1;)
setup(pc);
}
inline void frame::setup(address pc) {
adjust_unextended_sp();
address original_pc = get_deopt_original_pc();
if (original_pc != nullptr) {
_pc = original_pc;
assert(_cb->as_nmethod()->insts_contains_inclusive(_pc),
"original PC must be in the main code section of the compiled method (or must be immediately following it)");
_deopt_state = is_deoptimized;
assert(_cb == nullptr || _cb->as_nmethod()->insts_contains_inclusive(_pc),
"original PC must be in the main code section of the compiled method (or must be immediately following it)");
} else {
_deopt_state = not_deoptimized;
if (_cb == SharedRuntime::deopt_blob()) {
_deopt_state = is_deoptimized;
} else {
_deopt_state = not_deoptimized;
}
}
_on_heap = false;
_oop_map = nullptr;
}
inline frame::frame(intptr_t* sp, intptr_t* fp, address pc) {
@ -85,6 +143,22 @@ inline frame::frame(intptr_t* sp, intptr_t* fp) {
init(sp, sp, fp, pc);
}
inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb, bool allow_cb_null) {
intptr_t a = intptr_t(sp);
intptr_t b = intptr_t(fp);
_sp = sp;
_unextended_sp = unextended_sp;
_fp = fp;
_pc = pc;
assert(pc != nullptr, "no pc?");
_cb = cb;
_oop_map = nullptr;
assert(_cb != nullptr || allow_cb_null, "pc: " INTPTR_FORMAT, p2i(pc));
_on_heap = false;
DEBUG_ONLY(_frame_index = -1;)
setup(pc);
}
// Accessors


@ -41,25 +41,8 @@ void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
if (UseSIGTRAP) {
DEBUG_ONLY( __ should_not_reach_here("C1SafepointPollStub::emit_code"); )
} else {
assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
"polling page return stub not created yet");
address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
__ bind(_entry);
// Using pc relative address computation.
{
Label next_pc;
__ bl(next_pc);
__ bind(next_pc);
}
int current_offset = __ offset();
__ mflr(R12);
__ add_const_optimized(R12, R12, safepoint_offset() - current_offset);
__ std(R12, in_bytes(JavaThread::saved_exception_pc_offset()), R16_thread);
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
__ mtctr(R0);
__ bctr();
__ jump_to_polling_page_return_handler_blob(safepoint_offset());
}
}


@ -1,6 +1,6 @@
/*
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2022, SAP SE. All rights reserved.
* Copyright (c) 2021, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,26 +34,8 @@ int C2SafepointPollStub::max_size() const {
}
void C2SafepointPollStub::emit(C2_MacroAssembler& masm) {
assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
"polling page return stub not created yet");
address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
__ bind(entry());
// Using pc relative address computation.
{
Label next_pc;
__ bl(next_pc);
__ bind(next_pc);
}
int current_offset = __ offset();
// Code size should not depend on offset: see _stub_size computation in output.cpp
__ load_const32(R12, _safepoint_offset - current_offset);
__ mflr(R0);
__ add(R12, R12, R0);
__ std(R12, in_bytes(JavaThread::saved_exception_pc_offset()), R16_thread);
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
__ mtctr(R0);
__ bctr();
__ jump_to_polling_page_return_handler_blob(_safepoint_offset, true);
}
#undef __


@ -363,7 +363,7 @@
inline frame(intptr_t* sp, intptr_t* fp, address pc);
inline frame(intptr_t* sp, address pc, kind knd = kind::nmethod);
inline frame(intptr_t* sp, address pc, intptr_t* unextended_sp, intptr_t* fp = nullptr, CodeBlob* cb = nullptr);
inline frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb, const ImmutableOopMap* oop_map);
inline frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb, const ImmutableOopMap* oop_map = nullptr);
inline frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb, const ImmutableOopMap* oop_map, bool on_heap);
private:


@ -1,6 +1,6 @@
/*
* Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2024 SAP SE. All rights reserved.
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -391,4 +391,43 @@ void frame::update_map_with_saved_link(RegisterMapT* map, intptr_t** link_addr)
// Nothing to do.
}
#if INCLUDE_JFR
// Static helper routines
inline intptr_t* frame::sender_sp(intptr_t* fp) { return fp; }
// Extract common_abi parts.
inline intptr_t* frame::fp(const intptr_t* sp) {
assert(sp != nullptr, "invariant");
return reinterpret_cast<intptr_t*>(((common_abi*)sp)->callers_sp);
}
inline intptr_t* frame::link(const intptr_t* fp) { return frame::fp(fp); }
inline address frame::return_address(const intptr_t* sp) {
assert(sp != nullptr, "invariant");
return reinterpret_cast<address>(((common_abi*)sp)->lr);
}
inline address frame::interpreter_return_address(const intptr_t* fp) { return frame::return_address(fp); }
// Extract java interpreter state parts.
inline address frame::interpreter_bcp(const intptr_t* fp) {
assert(fp != nullptr, "invariant");
return reinterpret_cast<address>(*(fp + ijava_idx(bcp)));
}
inline intptr_t* frame::interpreter_sender_sp(const intptr_t* fp) {
assert(fp != nullptr, "invariant");
return reinterpret_cast<intptr_t*>(*(fp + ijava_idx(sender_sp)));
}
inline bool frame::is_interpreter_frame_setup_at(const intptr_t* fp, const void* sp) {
assert(fp != nullptr, "invariant");
assert(sp != nullptr, "invariant");
return sp <= fp - ((frame::ijava_state_size + frame::top_ijava_frame_abi_size) >> LogBytesPerWord);
}
#endif // INCLUDE_JFR
#endif // CPU_PPC_FRAME_PPC_INLINE_HPP


@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2023 SAP SE. All rights reserved.
* Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -170,7 +170,11 @@ class InterpreterMacroAssembler: public MacroAssembler {
void remove_activation(TosState state,
bool throw_monitor_exception = true,
bool install_monitor_exception = true);
void merge_frames(Register Rtop_frame_sp, Register return_pc, Register Rscratch1, Register Rscratch2); // merge top frames
JFR_ONLY(void enter_jfr_critical_section();)
JFR_ONLY(void leave_jfr_critical_section();)
void load_fp(Register fp);
void remove_top_frame_given_fp(Register fp, Register sender_sp, Register sender_fp, Register return_pc, Register temp);
void merge_frames(Register sender_sp, Register return_pc, Register temp1, Register temp2); // merge top frames
void add_monitor_to_stack(bool stack_is_empty, Register Rtemp1, Register Rtemp2);


@ -783,19 +783,27 @@ void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
}
// Support function for remove_activation & Co.
void InterpreterMacroAssembler::merge_frames(Register Rsender_sp, Register return_pc,
Register Rscratch1, Register Rscratch2) {
// Pop interpreter frame.
ld(Rscratch1, 0, R1_SP); // *SP
ld(Rsender_sp, _ijava_state_neg(sender_sp), Rscratch1); // top_frame_sp
ld(Rscratch2, 0, Rscratch1); // **SP
if (return_pc!=noreg) {
ld(return_pc, _abi0(lr), Rscratch1); // LR
}
void InterpreterMacroAssembler::load_fp(Register fp) {
ld(fp, _abi0(callers_sp), R1_SP); // *SP
}
// Merge top frames.
subf(Rscratch1, R1_SP, Rsender_sp); // top_frame_sp - SP
stdux(Rscratch2, R1_SP, Rscratch1); // atomically set *(SP = top_frame_sp) = **SP
void InterpreterMacroAssembler::remove_top_frame_given_fp(Register fp, Register sender_sp, Register sender_fp,
Register return_pc, Register temp) {
assert_different_registers(sender_sp, sender_fp, return_pc, temp);
ld(sender_sp, _ijava_state_neg(sender_sp), fp);
ld(sender_fp, _abi0(callers_sp), fp); // **SP
if (return_pc != noreg) {
ld(return_pc, _abi0(lr), fp); // last usage of fp, register can be reused
}
subf(temp, R1_SP, sender_sp); // sender_sp - SP
stdux(sender_fp, R1_SP, temp); // atomically set *(SP = sender_sp) = sender_fp
}
void InterpreterMacroAssembler::merge_frames(Register sender_sp, Register return_pc,
Register temp1, Register temp2) {
Register fp = temp1, sender_fp = temp2;
load_fp(fp);
remove_top_frame_given_fp(fp, sender_sp, sender_fp, return_pc, /* temp */ fp);
}
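Splitting merge_frames() into load_fp() plus remove_top_frame_given_fp() lets remove_activation() read the frame pointer once, before entering the JFR critical section, and reuse it for both the reserved-stack check and the final frame pop. A toy model of the two-step pop (PPC back-chain layout collapsed to index 0; illustrative, not PPC asm):

#include <cstdint>

// Back chain: on PPC, *SP is the caller's SP (common_abi::callers_sp).
static intptr_t* load_fp(intptr_t* sp) {
  return reinterpret_cast<intptr_t*>(sp[0]);  // ld(fp, _abi0(callers_sp), R1_SP)
}

// Pop the interpreter frame: SP moves to the saved sender_sp, and the new
// *SP must point at the sender's own back chain (sender_fp).
static intptr_t* pop_top_frame(intptr_t* sender_sp, intptr_t* sender_fp) {
  sender_sp[0] = reinterpret_cast<intptr_t>(sender_fp);  // stdux analogue
  return sender_sp;                                      // new R1_SP
}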
void InterpreterMacroAssembler::narrow(Register result) {
@ -854,11 +862,16 @@ void InterpreterMacroAssembler::remove_activation(TosState state,
bool install_monitor_exception) {
BLOCK_COMMENT("remove_activation {");
unlock_if_synchronized_method(state, throw_monitor_exception, install_monitor_exception);
// The poll below is for the stack watermark barrier. It allows lazily fixing up
// frames that would normally not be safe to use. Such bad returns into unsafe
// territory of the stack will call InterpreterRuntime::at_unwind.
Label slow_path;
Label fast_path;
Label slow_path, fast_path;
Register fp = R22_tmp2;
load_fp(fp);
JFR_ONLY(enter_jfr_critical_section();)
safepoint_poll(slow_path, R11_scratch1, true /* at_return */, false /* in_nmethod */);
b(fast_path);
bind(slow_path);
@ -870,8 +883,6 @@ void InterpreterMacroAssembler::remove_activation(TosState state,
align(32);
bind(fast_path);
unlock_if_synchronized_method(state, throw_monitor_exception, install_monitor_exception);
// Save result (push state before jvmti call and pop it afterwards) and notify jvmti.
notify_method_exit(false, state, NotifyJVMTI, true);
@ -891,10 +902,11 @@ void InterpreterMacroAssembler::remove_activation(TosState state,
// call could have a smaller SP, so that this compare succeeds for an
// inner call of the method annotated with ReservedStack.
ld_ptr(R0, JavaThread::reserved_stack_activation_offset(), R16_thread);
ld_ptr(R11_scratch1, _abi0(callers_sp), R1_SP); // Load frame pointer.
cmpld(CR0, R11_scratch1, R0);
cmpld(CR0, fp, R0);
blt_predict_taken(CR0, no_reserved_zone_enabling);
JFR_ONLY(leave_jfr_critical_section();)
// Enable reserved zone again, throw stack overflow exception.
call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), R16_thread);
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_delayed_StackOverflowError));
@ -906,12 +918,26 @@ void InterpreterMacroAssembler::remove_activation(TosState state,
verify_oop(R17_tos, state);
merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);
remove_top_frame_given_fp(fp, R21_sender_SP, R23_tmp3, /*return_pc*/ R0, R11_scratch1);
mtlr(R0);
pop_cont_fastpath();
JFR_ONLY(leave_jfr_critical_section();)
BLOCK_COMMENT("} remove_activation");
}
#if INCLUDE_JFR
void InterpreterMacroAssembler::enter_jfr_critical_section() {
li(R0, 1);
stb(R0, in_bytes(SAMPLING_CRITICAL_SECTION_OFFSET_JFR), R16_thread);
}
void InterpreterMacroAssembler::leave_jfr_critical_section() {
li(R0, 0);
stb(R0, in_bytes(SAMPLING_CRITICAL_SECTION_OFFSET_JFR), R16_thread);
}
#endif // INCLUDE_JFR
// Lock object
//
// Registers alive


@ -1,6 +1,6 @@
/*
* Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2014 SAP SE. All rights reserved.
* Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -73,6 +73,8 @@ public:
address last_Java_pc(void) { return _last_Java_pc; }
intptr_t* last_Java_fp() const { return *(intptr_t**)_last_Java_sp; }
void set_last_Java_sp(intptr_t* sp) { OrderAccess::release(); _last_Java_sp = sp; }
#endif // CPU_PPC_JAVAFRAMEANCHOR_PPC_HPP


@ -3277,6 +3277,35 @@ void MacroAssembler::safepoint_poll(Label& slow_path, Register temp, bool at_ret
}
}
void MacroAssembler::jump_to_polling_page_return_handler_blob(int safepoint_offset, bool fixed_size) {
assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
"polling page return stub not created yet");
address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
// Determine saved exception pc using pc relative address computation.
{
Label next_pc;
bl(next_pc);
bind(next_pc);
}
int current_offset = offset();
if (fixed_size) {
// Code size must not depend on offsets.
load_const32(R12, safepoint_offset - current_offset);
mflr(R0);
add(R12, R12, R0);
} else {
mflr(R12);
add_const_optimized(R12, R12, safepoint_offset - current_offset);
}
std(R12, in_bytes(JavaThread::saved_exception_pc_offset()), R16_thread);
add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
mtctr(R0);
bctr();
}
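The bl/mflr pair materializes the current pc, from which the address of the earlier poll instruction is reconstructed as pc + (safepoint_offset - current_offset); that address is what the polling-page return handler expects to find in saved_exception_pc. The arithmetic in isolation (a model, not PPC code):

#include <cstdint>

// current_pc:       address obtained via the bl/mflr trick
// current_offset:   code-buffer offset at that same point
// safepoint_offset: code-buffer offset of the return poll
uintptr_t poll_site_pc(uintptr_t current_pc, int current_offset, int safepoint_offset) {
  return current_pc + static_cast<intptr_t>(safepoint_offset - current_offset);
}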
void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp2,
MacroAssembler::PreservationLevel preservation_level) {
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();


@ -729,6 +729,7 @@ class MacroAssembler: public Assembler {
// Check if safepoint requested and if so branch
void safepoint_poll(Label& slow_path, Register temp, bool at_return, bool in_nmethod);
void jump_to_polling_page_return_handler_blob(int safepoint_offset, bool fixed_size = false);
void resolve_jobject(Register value, Register tmp1, Register tmp2,
MacroAssembler::PreservationLevel preservation_level);


@ -2740,6 +2740,21 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ li(r_temp_2, 0);
__ stw(r_temp_2, in_bytes(JNIHandleBlock::top_offset()), r_temp_1);
// Prepare for return
// --------------------------------------------------------------------------
__ pop_frame();
__ restore_LR(R11);
#if INCLUDE_JFR
// We need to do a poll test after unwind in case the sampler
// managed to sample the native frame after returning to Java.
Label L_stub;
int safepoint_offset = __ offset();
if (!UseSIGTRAP) {
__ relocate(relocInfo::poll_return_type);
}
__ safepoint_poll(L_stub, r_temp_2, true /* at_return */, true /* in_nmethod: frame already popped */);
#endif // INCLUDE_JFR
// Check for pending exceptions.
// --------------------------------------------------------------------------
@ -2747,13 +2762,16 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ cmpdi(CR0, r_temp_2, 0);
__ bne(CR0, handle_pending_exception);
// Return
// --------------------------------------------------------------------------
__ pop_frame();
__ restore_LR(R11);
// Return.
__ blr();
// Handler for return safepoint (out-of-line).
#if INCLUDE_JFR
if (!UseSIGTRAP) {
__ bind(L_stub);
__ jump_to_polling_page_return_handler_blob(safepoint_offset);
}
#endif // INCLUDE_JFR
// Handler for pending exceptions (out-of-line).
// --------------------------------------------------------------------------
@ -2761,9 +2779,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// is the empty function. We just pop this frame and then jump to
// forward_exception_entry.
__ bind(handle_pending_exception);
__ pop_frame();
__ restore_LR(R11);
__ b64_patchable((address)StubRoutines::forward_exception_entry(),
relocInfo::runtime_call_type);


@ -1584,6 +1584,24 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
__ st_ptr(R0, JavaThread::pending_jni_exception_check_fn_offset(), R16_thread);
}
#if INCLUDE_JFR
__ enter_jfr_critical_section();
// This poll test is to uphold the invariant that a JFR sampled frame
// must not return to its caller without a prior safepoint poll check.
// The earlier poll check in this routine is insufficient for this purpose
// because the thread has transitioned back to Java.
Label slow_path, fast_path;
__ safepoint_poll(slow_path, R11_scratch1, true /* at_return */, false /* in_nmethod */);
__ b(fast_path);
__ bind(slow_path);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind), R16_thread);
__ align(32);
__ bind(fast_path);
#endif // INCLUDE_JFR
__ reset_last_Java_frame();
// Jvmdi/jvmpi support. Whether we've got an exception pending or
@ -1625,11 +1643,12 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
__ lfd(F1_RET, _ijava_state_neg(fresult), R11_scratch1);
__ call_stub(result_handler_addr);
__ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);
__ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R12_scratch2, R11_scratch1, R0);
JFR_ONLY(__ leave_jfr_critical_section();)
// Must use the return pc which was loaded from the caller's frame
// as the VM uses return-pc-patching for deoptimization.
__ mtlr(R0);
__ mtlr(R12_scratch2);
__ blr();
//-----------------------------------------------------------------------------


@ -1728,16 +1728,18 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
const Register osr_nmethod = R31;
__ mr(osr_nmethod, R3_RET);
__ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R11_scratch1);
JFR_ONLY(__ enter_jfr_critical_section();)
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), R16_thread);
__ reset_last_Java_frame();
// OSR buffer is in ARG1.
// Remove the interpreter frame.
__ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);
__ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R12_scratch2, R11_scratch1, R0);
JFR_ONLY(__ leave_jfr_critical_section();)
// Jump to the osr code.
__ ld(R11_scratch1, nmethod::osr_entry_point_offset(), osr_nmethod);
__ mtlr(R0);
__ mtlr(R12_scratch2);
__ mtctr(R11_scratch1);
__ bctr();


@ -670,7 +670,6 @@ void JavaFrameAnchor::make_walkable() {
// already walkable?
if (walkable()) { return; }
vmassert(last_Java_sp() != nullptr, "not called from Java code?");
vmassert(last_Java_pc() == nullptr, "already walkable");
_last_Java_pc = (address)_last_Java_sp[-1];
vmassert(walkable(), "something went wrong");
}


@ -35,6 +35,53 @@
// Inline functions for RISCV frames:
#if INCLUDE_JFR
// Static helper routines
inline address frame::interpreter_bcp(const intptr_t* fp) {
assert(fp != nullptr, "invariant");
return reinterpret_cast<address>(fp[frame::interpreter_frame_bcp_offset]);
}
inline address frame::interpreter_return_address(const intptr_t* fp) {
assert(fp != nullptr, "invariant");
return reinterpret_cast<address>(fp[frame::return_addr_offset]);
}
inline intptr_t* frame::interpreter_sender_sp(const intptr_t* fp) {
assert(fp != nullptr, "invariant");
return reinterpret_cast<intptr_t*>(fp[frame::interpreter_frame_sender_sp_offset]);
}
inline bool frame::is_interpreter_frame_setup_at(const intptr_t* fp, const void* sp) {
assert(fp != nullptr, "invariant");
assert(sp != nullptr, "invariant");
return sp <= fp + frame::interpreter_frame_initial_sp_offset;
}
inline intptr_t* frame::sender_sp(intptr_t* fp) {
assert(fp != nullptr, "invariant");
return fp + frame::sender_sp_offset;
}
inline intptr_t* frame::link(const intptr_t* fp) {
assert(fp != nullptr, "invariant");
return reinterpret_cast<intptr_t*>(fp[frame::link_offset]);
}
inline address frame::return_address(const intptr_t* sp) {
assert(sp != nullptr, "invariant");
return reinterpret_cast<address>(sp[-1]);
}
inline intptr_t* frame::fp(const intptr_t* sp) {
assert(sp != nullptr, "invariant");
return reinterpret_cast<intptr_t*>(sp[-2]);
}
#endif // INCLUDE_JFR
// Constructors:
inline frame::frame() {


@ -497,9 +497,10 @@ void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
// remove activation
//
// Apply stack watermark barrier.
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Apply stack watermark barrier.
// Notify JVMTI.
// Remove the activation from the stack.
//
// If there are locked Java monitors
@ -509,32 +510,14 @@ void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
// installs IllegalMonitorStateException
// Else
// no error processing
void InterpreterMacroAssembler::remove_activation(
TosState state,
bool throw_monitor_exception,
bool install_monitor_exception,
bool notify_jvmdi) {
void InterpreterMacroAssembler::remove_activation(TosState state,
bool throw_monitor_exception,
bool install_monitor_exception,
bool notify_jvmdi) {
// Note: Registers x13 may be in use for the
// result check if synchronized method
Label unlocked, unlock, no_unlock;
// The poll below is for the stack watermark barrier. It allows lazily fixing up
// frames that would normally not be safe to use. Such bad returns into unsafe
// territory of the stack will call InterpreterRuntime::at_unwind.
Label slow_path;
Label fast_path;
safepoint_poll(slow_path, true /* at_return */, false /* acquire */, false /* in_nmethod */);
j(fast_path);
bind(slow_path);
push(state);
set_last_Java_frame(esp, fp, (address)pc(), t0);
super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind), xthread);
reset_last_Java_frame(true);
pop(state);
bind(fast_path);
// get the value of _do_not_unlock_if_synchronized into x13
const Address do_not_unlock_if_synchronized(xthread,
in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
@ -655,10 +638,27 @@ void InterpreterMacroAssembler::remove_activation(
bind(no_unlock);
// jvmti support
if (notify_jvmdi) {
notify_method_exit(state, NotifyJVMTI); // preserve TOSCA
JFR_ONLY(enter_jfr_critical_section();)
// The poll below is for the stack watermark barrier. It allows lazily fixing up
// frames that would normally not be safe to use. Such bad returns into unsafe
// territory of the stack will call InterpreterRuntime::at_unwind.
Label slow_path;
Label fast_path;
safepoint_poll(slow_path, true /* at_return */, false /* acquire */, false /* in_nmethod */);
j(fast_path);
bind(slow_path);
push(state);
set_last_Java_frame(esp, fp, pc(), t0);
super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind), xthread);
reset_last_Java_frame(true);
pop(state);
bind(fast_path);
// JVMTI support. Make sure the safepoint poll test is issued prior.
if (notify_jvmdi) {
notify_method_exit(state, NotifyJVMTI); // preserve TOSCA
} else {
notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
}
@ -677,9 +677,13 @@ void InterpreterMacroAssembler::remove_activation(
subw(t0, t0, StackOverflow::stack_guard_enabled);
beqz(t0, no_reserved_zone_enabling);
// look for an overflow into the stack reserved zone, i.e.
// interpreter_frame_sender_sp <= JavaThread::reserved_stack_activation
ld(t0, Address(xthread, JavaThread::reserved_stack_activation_offset()));
ble(t1, t0, no_reserved_zone_enabling);
JFR_ONLY(leave_jfr_critical_section();)
call_VM_leaf(
CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), xthread);
call_VM(noreg, CAST_FROM_FN_PTR(address,
@ -689,11 +693,14 @@ void InterpreterMacroAssembler::remove_activation(
bind(no_reserved_zone_enabling);
}
// remove frame anchor
leave();
JFR_ONLY(leave_jfr_critical_section();)
// restore sender esp
mv(esp, t1);
// remove frame anchor
leave();
// If we're returning to interpreted code we will shortly be
// adjusting SP to allow some space for ESP. If we're returning to
// compiled code the saved sender SP was saved in sender_sp, so this
@ -701,6 +708,19 @@ void InterpreterMacroAssembler::remove_activation(
andi(sp, esp, -16);
}
#if INCLUDE_JFR
void InterpreterMacroAssembler::enter_jfr_critical_section() {
const Address sampling_critical_section(xthread, in_bytes(SAMPLING_CRITICAL_SECTION_OFFSET_JFR));
mv(t0, true);
sb(t0, sampling_critical_section);
}
void InterpreterMacroAssembler::leave_jfr_critical_section() {
const Address sampling_critical_section(xthread, in_bytes(SAMPLING_CRITICAL_SECTION_OFFSET_JFR));
sb(zr, sampling_critical_section);
}
#endif // INCLUDE_JFR
// Lock object
//
// Args:
@ -1515,7 +1535,7 @@ void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
int number_of_arguments) {
// interpreter specific
//
// Note: No need to save/restore rbcp & rlocals pointer since these
// Note: No need to save/restore xbcp & xlocals pointer since these
// are callee saved registers and no blocking/ GC can happen
// in leaf calls.
#ifdef ASSERT


@ -290,6 +290,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
void notify_method_entry();
void notify_method_exit(TosState state, NotifyMethodExitMode mode);
JFR_ONLY(void enter_jfr_critical_section();)
JFR_ONLY(void leave_jfr_critical_section();)
virtual void _call_Unimplemented(address call_site) {
save_bcp();
set_last_Java_frame(esp, fp, (address) pc(), t0);


@ -3739,16 +3739,16 @@ void MacroAssembler::check_klass_subtype(Register sub_klass,
bind(L_failure);
}
void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod) {
ld(t0, Address(xthread, JavaThread::polling_word_offset()));
void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod, Register tmp_reg) {
ld(tmp_reg, Address(xthread, JavaThread::polling_word_offset()));
if (acquire) {
membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
}
if (at_return) {
bgtu(in_nmethod ? sp : fp, t0, slow_path, /* is_far */ true);
bgtu(in_nmethod ? sp : fp, tmp_reg, slow_path, /* is_far */ true);
} else {
test_bit(t0, t0, exact_log2(SafepointMechanism::poll_bit()));
bnez(t0, slow_path, true /* is_far */);
test_bit(tmp_reg, tmp_reg, exact_log2(SafepointMechanism::poll_bit()));
bnez(tmp_reg, slow_path, /* is_far */ true);
}
}


@ -44,7 +44,7 @@ class MacroAssembler: public Assembler {
MacroAssembler(CodeBuffer* code) : Assembler(code) {}
void safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod);
void safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod, Register tmp_reg = t0);
// Alignment
int align(int modulus, int extra_offset = 0);


@ -1894,6 +1894,24 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ leave();
#if INCLUDE_JFR
// We need to do a poll test after unwind in case the sampler
// managed to sample the native frame after returning to Java.
Label L_return;
__ ld(t0, Address(xthread, JavaThread::polling_word_offset()));
address poll_test_pc = __ pc();
__ relocate(relocInfo::poll_return_type);
__ test_bit(t0, t0, log2i_exact(SafepointMechanism::poll_bit()));
__ beqz(t0, L_return);
assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
"polling page return stub not created yet");
address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
__ la(t0, InternalAddress(poll_test_pc));
__ sd(t0, Address(xthread, JavaThread::saved_exception_pc_offset()));
__ far_jump(RuntimeAddress(stub));
__ bind(L_return);
#endif // INCLUDE_JFR
// Any exception pending?
Label exception_pending;
__ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));


@ -1372,6 +1372,31 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
__ bind(L);
}
#if INCLUDE_JFR
__ enter_jfr_critical_section();
// This poll test is to uphold the invariant that a JFR sampled frame
// must not return to its caller without a prior safepoint poll check.
// The earlier poll check in this routine is insufficient for this purpose
// because the thread has transitioned back to Java.
Label slow_path;
Label fast_path;
__ safepoint_poll(slow_path, true /* at_return */, false /* acquire */, false /* in_nmethod */);
__ j(fast_path);
__ bind(slow_path);
__ push(dtos);
__ push(ltos);
__ set_last_Java_frame(esp, fp, __ pc(), t0);
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind), xthread);
__ reset_last_Java_frame(true);
__ pop(ltos);
__ pop(dtos);
__ bind(fast_path);
#endif // INCLUDE_JFR
// jvmti support
// Note: This must happen _after_ handling/throwing any exceptions since
// the exception handler code notifies the runtime of method exits
@ -1385,10 +1410,13 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
__ jalr(result_handler);
// remove activation
__ ld(esp, Address(fp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
// get sender sp
__ ld(esp, Address(fp, frame::interpreter_frame_sender_sp_offset * wordSize));
// remove frame anchor
__ leave();
JFR_ONLY(__ leave_jfr_critical_section();)
// restore sender sp
__ mv(sp, esp);


@ -1757,6 +1757,8 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
__ mv(x9, x10); // save the nmethod
JFR_ONLY(__ enter_jfr_critical_section();)
call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
// x10 is OSR buffer, move it to expected parameter location
@ -1765,9 +1767,12 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
// remove activation
// get sender esp
__ ld(esp,
Address(fp, frame::interpreter_frame_sender_sp_offset * wordSize));
Address(fp, frame::interpreter_frame_sender_sp_offset * wordSize));
// remove frame anchor
__ leave();
JFR_ONLY(__ leave_jfr_critical_section();)
// Ensure compiled code always sees stack at proper alignment
__ andi(sp, esp, -16);


@ -475,6 +475,7 @@
public:
// To be used if sp was not extended to match callee's calling convention.
inline frame(intptr_t* sp, address pc, intptr_t* unextended_sp = nullptr, intptr_t* fp = nullptr, CodeBlob* cb = nullptr);
inline frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb, const ImmutableOopMap* oop_map = nullptr);
// Access frame via stack pointer.
inline intptr_t* sp_addr_at(int index) const { return &sp()[index]; }


@ -87,6 +87,11 @@ inline frame::frame(intptr_t* sp, address pc, intptr_t* unextended_sp, intptr_t*
inline frame::frame(intptr_t* sp) : frame(sp, nullptr) {}
inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address pc, CodeBlob* cb, const ImmutableOopMap* oop_map)
:_sp(sp), _pc(pc), _cb(cb), _oop_map(oop_map), _on_heap(false), DEBUG_ONLY(_frame_index(-1) COMMA) _unextended_sp(unextended_sp), _fp(fp) {
setup();
}
// Generic constructor. Used by pns() in debug.cpp only
#ifndef PRODUCT
inline frame::frame(void* sp, void* pc, void* unextended_sp)
@ -371,4 +376,42 @@ void frame::update_map_with_saved_link(RegisterMapT* map, intptr_t** link_addr)
Unimplemented();
}
#if INCLUDE_JFR
// Static helper routines
inline intptr_t* frame::sender_sp(intptr_t* fp) { return fp; }
// Extract common_abi parts.
inline intptr_t* frame::fp(const intptr_t* sp) {
assert(sp != nullptr, "invariant");
return reinterpret_cast<intptr_t*>(((z_common_abi*)sp)->callers_sp);
}
inline intptr_t* frame::link(const intptr_t* fp) { return frame::fp(fp); }
inline address frame::return_address(const intptr_t* sp) {
assert(sp != nullptr, "invariant");
return reinterpret_cast<address>(((z_common_abi*)sp)->return_pc);
}
inline address frame::interpreter_return_address(const intptr_t* fp) { return frame::return_address(fp); }
inline address frame::interpreter_bcp(const intptr_t* fp) {
assert(fp != nullptr, "invariant");
return reinterpret_cast<address>(*(fp + _z_ijava_idx(bcp)));
}
inline intptr_t* frame::interpreter_sender_sp(const intptr_t* fp) {
assert(fp != nullptr, "invariant");
return reinterpret_cast<intptr_t*>(*(fp + _z_ijava_idx(sender_sp)));
}
inline bool frame::is_interpreter_frame_setup_at(const intptr_t* fp, const void* sp) {
assert(fp != nullptr, "invariant");
assert(sp != nullptr, "invariant");
return sp <= fp - ((frame::z_ijava_state_size + frame::z_top_ijava_frame_abi_size) >> LogBytesPerWord);
}
#endif // INCLUDE_JFR
#endif // CPU_S390_FRAME_S390_INLINE_HPP


@ -701,7 +701,6 @@ void JavaFrameAnchor::make_walkable() {
if (last_Java_sp() == nullptr) return;
// already walkable?
if (walkable()) return;
vmassert(last_Java_pc() == nullptr, "already walkable");
_last_Java_pc = (address)_last_Java_sp[-1];
vmassert(walkable(), "something went wrong");
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,6 +35,53 @@
// Inline functions for Intel frames:
#if INCLUDE_JFR
// Static helper routines
inline address frame::interpreter_bcp(const intptr_t* fp) {
assert(fp != nullptr, "invariant");
return reinterpret_cast<address>(fp[frame::interpreter_frame_bcp_offset]);
}
inline address frame::interpreter_return_address(const intptr_t* fp) {
assert(fp != nullptr, "invariant");
return reinterpret_cast<address>(fp[frame::return_addr_offset]);
}
inline intptr_t* frame::interpreter_sender_sp(const intptr_t* fp) {
assert(fp != nullptr, "invariant");
return reinterpret_cast<intptr_t*>(fp[frame::interpreter_frame_sender_sp_offset]);
}
inline bool frame::is_interpreter_frame_setup_at(const intptr_t* fp, const void* sp) {
assert(fp != nullptr, "invariant");
assert(sp != nullptr, "invariant");
return sp <= fp + frame::interpreter_frame_initial_sp_offset;
}
inline intptr_t* frame::sender_sp(intptr_t* fp) {
assert(fp != nullptr, "invariant");
return fp + frame::sender_sp_offset;
}
inline intptr_t* frame::link(const intptr_t* fp) {
assert(fp != nullptr, "invariant");
return reinterpret_cast<intptr_t*>(fp[frame::link_offset]);
}
inline address frame::return_address(const intptr_t* sp) {
assert(sp != nullptr, "invariant");
return reinterpret_cast<address>(sp[-1]);
}
inline intptr_t* frame::fp(const intptr_t* sp) {
assert(sp != nullptr, "invariant");
return reinterpret_cast<intptr_t*>(sp[-2]);
}
#endif // INCLUDE_JFR
// Constructors:
inline frame::frame() {


@ -778,9 +778,10 @@ void InterpreterMacroAssembler::narrow(Register result) {
// remove activation
//
// Apply stack watermark barrier.
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Apply stack watermark barrier.
// Notify JVMTI.
// Remove the activation from the stack.
//
// If there are locked Java monitors
@ -790,12 +791,11 @@ void InterpreterMacroAssembler::narrow(Register result) {
// installs IllegalMonitorStateException
// Else
// no error processing
void InterpreterMacroAssembler::remove_activation(
TosState state,
Register ret_addr,
bool throw_monitor_exception,
bool install_monitor_exception,
bool notify_jvmdi) {
void InterpreterMacroAssembler::remove_activation(TosState state,
Register ret_addr,
bool throw_monitor_exception,
bool install_monitor_exception,
bool notify_jvmdi) {
// Note: Registers rdx xmm0 may be in use for the
// result check if synchronized method
Label unlocked, unlock, no_unlock;
@ -804,21 +804,6 @@ void InterpreterMacroAssembler::remove_activation(
const Register robj = c_rarg1;
const Register rmon = c_rarg1;
// The poll below is for the stack watermark barrier. It allows lazily fixing up
// frames that would normally not be safe to use. Such bad returns into unsafe
// territory of the stack will call InterpreterRuntime::at_unwind.
Label slow_path;
Label fast_path;
safepoint_poll(slow_path, true /* at_return */, false /* in_nmethod */);
jmp(fast_path);
bind(slow_path);
push(state);
set_last_Java_frame(noreg, rbp, (address)pc(), rscratch1);
super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind), rthread);
reset_last_Java_frame(true);
pop(state);
bind(fast_path);
// get the value of _do_not_unlock_if_synchronized into rdx
const Address do_not_unlock_if_synchronized(rthread,
in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
@ -940,7 +925,24 @@ void InterpreterMacroAssembler::remove_activation(
bind(no_unlock);
// jvmti support
JFR_ONLY(enter_jfr_critical_section();)
// The poll below is for the stack watermark barrier. It allows lazily fixing up
// frames that would normally not be safe to use. Such bad returns into unsafe
// territory of the stack will call InterpreterRuntime::at_unwind.
Label slow_path;
Label fast_path;
safepoint_poll(slow_path, true /* at_return */, false /* in_nmethod */);
jmp(fast_path);
bind(slow_path);
push(state);
set_last_Java_frame(noreg, rbp, (address)pc(), rscratch1);
super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind), r15_thread);
reset_last_Java_frame(true);
pop(state);
bind(fast_path);
// JVMTI support. Make sure the safepoint poll test is issued prior.
if (notify_jvmdi) {
notify_method_exit(state, NotifyJVMTI); // preserve TOSCA
} else {
@ -964,6 +966,8 @@ void InterpreterMacroAssembler::remove_activation(
cmpptr(rbx, Address(rthread, JavaThread::reserved_stack_activation_offset()));
jcc(Assembler::lessEqual, no_reserved_zone_enabling);
JFR_ONLY(leave_jfr_critical_section();)
call_VM_leaf(
CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
call_VM(noreg, CAST_FROM_FN_PTR(address,
@ -972,12 +976,29 @@ void InterpreterMacroAssembler::remove_activation(
bind(no_reserved_zone_enabling);
}
leave(); // remove frame anchor
JFR_ONLY(leave_jfr_critical_section();)
pop(ret_addr); // get return address
mov(rsp, rbx); // set sp to sender sp
pop_cont_fastpath();
}
#if INCLUDE_JFR
void InterpreterMacroAssembler::enter_jfr_critical_section() {
const Address sampling_critical_section(r15_thread, in_bytes(SAMPLING_CRITICAL_SECTION_OFFSET_JFR));
movbool(sampling_critical_section, true);
}
void InterpreterMacroAssembler::leave_jfr_critical_section() {
const Address sampling_critical_section(r15_thread, in_bytes(SAMPLING_CRITICAL_SECTION_OFFSET_JFR));
movbool(sampling_critical_section, false);
}
#endif // INCLUDE_JFR
void InterpreterMacroAssembler::get_method_counters(Register method,
Register mcs, Label& skip) {
Label has_counters;


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -262,6 +262,9 @@ class InterpreterMacroAssembler: public MacroAssembler {
void notify_method_entry();
void notify_method_exit(TosState state, NotifyMethodExitMode mode);
JFR_ONLY(void enter_jfr_critical_section();)
JFR_ONLY(void leave_jfr_critical_section();)
private:
Register _locals_register; // register that contains the pointer to the locals


@ -2425,6 +2425,23 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ leave();
#if INCLUDE_JFR
// We need to do a poll test after unwind in case the sampler
// managed to sample the native frame after returning to Java.
Label L_return;
address poll_test_pc = __ pc();
__ relocate(relocInfo::poll_return_type);
__ testb(Address(r15_thread, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit());
__ jccb(Assembler::zero, L_return);
__ lea(rscratch1, InternalAddress(poll_test_pc));
__ movptr(Address(r15_thread, JavaThread::saved_exception_pc_offset()), rscratch1);
assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
"polling page return stub not created yet");
address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
__ jump(RuntimeAddress(stub));
__ bind(L_return);
#endif // INCLUDE_JFR
// Any exception pending?
__ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
__ jcc(Assembler::notEqual, exception_pending);


@ -1147,6 +1147,30 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
__ bind(L);
}
#if INCLUDE_JFR
__ enter_jfr_critical_section();
// This poll test is to uphold the invariant that a JFR sampled frame
// must not return to its caller without a prior safepoint poll check.
// The earlier poll check in this routine is insufficient for this purpose
// because the thread has transitioned back to Java.
Label slow_path;
Label fast_path;
__ safepoint_poll(slow_path, true /* at_return */, false /* in_nmethod */);
__ jmp(fast_path);
__ bind(slow_path);
__ push(dtos);
__ push(ltos);
__ set_last_Java_frame(noreg, rbp, (address)__ pc(), rscratch1);
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind), r15_thread);
__ reset_last_Java_frame(true);
__ pop(ltos);
__ pop(dtos);
__ bind(fast_path);
#endif // INCLUDE_JFR
// jvmti support
// Note: This must happen _after_ handling/throwing any exceptions since
// the exception handler code notifies the runtime of method exits
@ -1169,8 +1193,12 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
frame::interpreter_frame_sender_sp_offset *
wordSize)); // get sender sp
__ leave(); // remove frame anchor
JFR_ONLY(__ leave_jfr_critical_section();)
__ pop(rdi); // get return address
__ mov(rsp, t); // set sp to sender sp
__ jmp(rdi);
if (inc_counter) {


@ -1825,6 +1825,8 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
// it will be preserved in rbx.
__ mov(rbx, rax);
JFR_ONLY(__ enter_jfr_critical_section();)
call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
// rax is OSR buffer, move it to expected parameter location
@ -1839,14 +1841,12 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
// pop the interpreter frame
__ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
__ leave(); // remove frame anchor
JFR_ONLY(__ leave_jfr_critical_section();)
__ pop(retaddr); // get return address
__ mov(rsp, sender_sp); // set sp to sender sp
__ mov(rsp, sender_sp); // set sp to sender sp
// Ensure compiled code always sees stack at proper alignment
__ andptr(rsp, -(StackAlignmentInBytes));
// unlike x86 we need no specialized return from compiled code
// to the interpreter or the call stub.
// push the return address
__ push(retaddr);


@ -23,6 +23,7 @@
*/
#include "classfile/classLoader.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "jvmtifiles/jvmti.h"
#include "logging/log.hpp"
@ -1330,6 +1331,15 @@ int os::Posix::clock_tics_per_second() {
return clock_tics_per_sec;
}
#ifdef ASSERT
bool os::Posix::ucontext_is_interpreter(const ucontext_t* uc) {
assert(uc != nullptr, "invariant");
address pc = os::Posix::ucontext_get_pc(uc);
assert(pc != nullptr, "invariant");
return Interpreter::contains(pc);
}
#endif
// Utility to convert the given timeout to an absolute timespec
// (based on the appropriate clock) to use with pthread_cond_timewait,
// and sem_timedwait().


@ -89,6 +89,8 @@ public:
static address ucontext_get_pc(const ucontext_t* ctx);
static void ucontext_set_pc(ucontext_t* ctx, address pc);
DEBUG_ONLY(static bool ucontext_is_interpreter(const ucontext_t* ctx);)
static void to_RTC_abstime(timespec* abstime, int64_t millis);
// clock ticks per second of the system


@ -5840,61 +5840,34 @@ ssize_t os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
return ::send(fd, buf, (int)nBytes, flags);
}
// returns true if thread could be suspended,
// false otherwise
static bool do_suspend(HANDLE* h) {
if (h != nullptr) {
if (SuspendThread(*h) != ~0) {
return true;
}
}
return false;
}
// WINDOWS CONTEXT Flags for THREAD_SAMPLING
#if defined(AMD64) || defined(_M_ARM64)
#define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
#endif
// resume the thread
// calling resume on an active thread is a no-op
static void do_resume(HANDLE* h) {
if (h != nullptr) {
ResumeThread(*h);
}
}
// retrieve a suspend/resume context capable handle
// from the tid. Caller validates handle return value.
void get_thread_handle_for_extended_context(HANDLE* h,
DWORD tid) {
if (h != nullptr) {
*h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
}
// Retrieve a suspend/resume context capable handle for the tid.
// Caller validates handle return value.
static inline HANDLE get_thread_handle_for_extended_context(DWORD tid) {
return OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
}
// Thread sampling implementation
//
void SuspendedThreadTask::internal_do_task() {
CONTEXT ctxt;
HANDLE h = nullptr;
// get context capable handle for thread
get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());
// sanity
if (h == nullptr || h == INVALID_HANDLE_VALUE) {
const HANDLE h = get_thread_handle_for_extended_context(_thread->osthread()->thread_id());
if (h == nullptr) {
return;
}
// suspend the thread
if (do_suspend(&h)) {
ctxt.ContextFlags = (CONTEXT_FULL | CONTEXT_FLOATING_POINT);
// get thread context
GetThreadContext(h, &ctxt);
SuspendedThreadTaskContext context(_thread, &ctxt);
// pass context to Thread Sampling impl
do_task(context);
// resume thread
do_resume(&h);
CONTEXT ctxt;
ctxt.ContextFlags = sampling_context_flags;
if (SuspendThread(h) != OS_ERR) {
if (GetThreadContext(h, &ctxt)) {
const SuspendedThreadTaskContext context(_thread, &ctxt);
// Pass context to Thread Sampling implementation.
do_task(context);
}
ResumeThread(h);
}
// close handle
CloseHandle(h);
}
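For readers unfamiliar with the Win32 calls involved, here is a self-contained editorial sketch of the same suspend/capture/resume pattern. It uses only standard Win32 APIs and is an illustration, not part of this change.

#include <windows.h>

// Editorial sketch: capture the CPU context of another thread by thread id.
// SuspendThread returns (DWORD)-1 on failure; always resume what was
// suspended and close the handle.
static bool capture_context(DWORD tid, CONTEXT* out) {
  const HANDLE h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT |
                              THREAD_QUERY_INFORMATION, FALSE, tid);
  if (h == nullptr) {
    return false;
  }
  bool ok = false;
  out->ContextFlags = CONTEXT_FULL | CONTEXT_FLOATING_POINT;
  if (SuspendThread(h) != (DWORD)-1) {
    ok = GetThreadContext(h, out) != 0; // registers captured while stopped
    ResumeThread(h);
  }
  CloseHandle(h);
  return ok;
}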

View File

@ -138,6 +138,13 @@ frame os::fetch_compiled_frame_from_context(const void* ucVoid) {
return frame(sp, lr, frame::kind::unknown);
}
intptr_t* os::fetch_bcp_from_context(const void* ucVoid) {
assert(ucVoid != nullptr, "invariant");
const ucontext_t* uc = (const ucontext_t*)ucVoid;
assert(os::Posix::ucontext_is_interpreter(uc), "invariant");
return reinterpret_cast<intptr_t*>(uc->uc_mcontext.jmp_context.gpr[14]); // R14_bcp
}
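Each platform file in this change adds the same accessor, returning the interpreter bytecode-pointer register from the saved signal context. As a reading aid, a hedged sketch of how the context accessors combine; the flow mirrors build_from_context() in jfrSampleRequest.cpp later in this commit.

// Editorial sketch, not commit code: seed a sample from a signal context.
intptr_t* sp = nullptr;
intptr_t* fp = nullptr;
const address pc = os::fetch_frame_from_context(ucontext, &sp, &fp);
if (Interpreter::contains(pc)) {
  // Only meaningful while the thread is stopped in interpreter code,
  // which is what the ucontext_is_interpreter() assertions verify.
  intptr_t* const bcp = os::fetch_bcp_from_context(ucontext);
  // sp, fp, pc and bcp now describe the interrupted interpreter frame.
}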
frame os::get_sender_for_C_frame(frame* fr) {
if (*fr->sp() == (intptr_t) nullptr) {
// fr is the last C frame

View File

@ -98,6 +98,8 @@
#define context_cpsr uc_mcontext->DU3_PREFIX(ss,cpsr)
#define context_esr uc_mcontext->DU3_PREFIX(es,esr)
#define REG_BCP context_x[22]
address os::current_stack_pointer() {
#if defined(__clang__) || defined(__llvm__)
void *sp;
@ -179,6 +181,13 @@ frame os::fetch_compiled_frame_from_context(const void* ucVoid) {
return frame(sp, fp, pc);
}
intptr_t* os::fetch_bcp_from_context(const void* ucVoid) {
assert(ucVoid != nullptr, "invariant");
const ucontext_t* uc = (const ucontext_t*)ucVoid;
assert(os::Posix::ucontext_is_interpreter(uc), "invariant");
return reinterpret_cast<intptr_t*>(uc->REG_BCP);
}
// JVM compiled with -fno-omit-frame-pointer, so RFP is saved on the stack.
frame os::get_sender_for_C_frame(frame* fr) {
return frame(fr->sender_sp(), fr->link(), fr->sender_pc());

View File

@ -89,6 +89,7 @@
#ifdef AMD64
#define SPELL_REG_SP "rsp"
#define SPELL_REG_FP "rbp"
#define REG_BCP context_r13
#else
#define SPELL_REG_SP "esp"
#define SPELL_REG_FP "ebp"
@ -349,6 +350,13 @@ frame os::fetch_compiled_frame_from_context(const void* ucVoid) {
return frame(fr.sp() + 1, fr.fp(), (address)*(fr.sp()));
}
intptr_t* os::fetch_bcp_from_context(const void* ucVoid) {
assert(ucVoid != nullptr, "invariant");
const ucontext_t* uc = (const ucontext_t*)ucVoid;
assert(os::Posix::ucontext_is_interpreter(uc), "invariant");
return reinterpret_cast<intptr_t*>(uc->REG_BCP);
}
// By default, gcc always save frame pointer (%ebp/%rbp) on stack. It may get
// turned off by -fomit-frame-pointer,
frame os::get_sender_for_C_frame(frame* fr) {

View File

@ -109,6 +109,11 @@ frame os::fetch_frame_from_context(const void* ucVoid) {
return frame();
}
intptr_t* os::fetch_bcp_from_context(const void* ucVoid) {
ShouldNotCallThis();
return nullptr;
}
bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
ucontext_t* uc, JavaThread* thread) {

View File

@ -75,6 +75,7 @@
#define REG_FP 29
#define REG_LR 30
#define REG_BCP 22
NOINLINE address os::current_stack_pointer() {
return (address)__builtin_frame_address(0);
@ -148,6 +149,13 @@ frame os::fetch_compiled_frame_from_context(const void* ucVoid) {
return frame(sp, fp, pc);
}
intptr_t* os::fetch_bcp_from_context(const void* ucVoid) {
assert(ucVoid != nullptr, "invariant");
const ucontext_t* uc = (const ucontext_t*)ucVoid;
assert(os::Posix::ucontext_is_interpreter(uc), "invariant");
return reinterpret_cast<intptr_t*>(uc->uc_mcontext.regs[REG_BCP]);
}
// By default, gcc always saves frame pointer rfp on this stack. This
// may get turned off by -fomit-frame-pointer.
// The "Procedure Call Standard for the Arm 64-bit Architecture" doesn't

View File

@ -208,6 +208,11 @@ frame os::fetch_compiled_frame_from_context(const void* ucVoid) {
return frame(sp, fp, pc);
}
intptr_t* os::fetch_bcp_from_context(const void* ucVoid) {
Unimplemented();
return nullptr;
}
frame os::get_sender_for_C_frame(frame* fr) {
#ifdef __thumb__
// We can't reliably get anything from a thumb C frame.

View File

@ -170,6 +170,13 @@ frame os::fetch_compiled_frame_from_context(const void* ucVoid) {
return frame(sp, lr, frame::kind::unknown);
}
intptr_t* os::fetch_bcp_from_context(const void* ucVoid) {
assert(ucVoid != nullptr, "invariant");
const ucontext_t* uc = (const ucontext_t*)ucVoid;
assert(os::Posix::ucontext_is_interpreter(uc), "invariant");
return reinterpret_cast<intptr_t*>(uc->uc_mcontext.regs->gpr[14]); // R14_bcp
}
frame os::get_sender_for_C_frame(frame* fr) {
if (*fr->sp() == 0) {
// fr is the last C frame

View File

@ -77,6 +77,7 @@
#define REG_LR 1
#define REG_FP 8
#define REG_BCP 22
NOINLINE address os::current_stack_pointer() {
return (address)__builtin_frame_address(0);
@ -157,6 +158,13 @@ frame os::fetch_frame_from_context(const void* ucVoid) {
return frame(frame_sp, frame_fp, epc);
}
intptr_t* os::fetch_bcp_from_context(const void* ucVoid) {
assert(ucVoid != nullptr, "invariant");
const ucontext_t* uc = (const ucontext_t*)ucVoid;
assert(os::Posix::ucontext_is_interpreter(uc), "invariant");
return reinterpret_cast<intptr_t*>(uc->uc_mcontext.__gregs[REG_BCP]);
}
// By default, gcc always saves frame pointer rfp on this stack. This
// may get turned off by -fomit-frame-pointer.
frame os::get_sender_for_C_frame(frame* fr) {

View File

@ -155,6 +155,11 @@ frame os::fetch_compiled_frame_from_context(const void* ucVoid) {
return frame(sp, lr);
}
intptr_t* os::fetch_bcp_from_context(const void* ucVoid) {
Unimplemented();
return nullptr;
}
frame os::get_sender_for_C_frame(frame* fr) {
if (*fr->sp() == 0) {
// fr is the last C frame.

View File

@ -80,6 +80,7 @@
#define REG_SP REG_RSP
#define REG_PC REG_RIP
#define REG_FP REG_RBP
#define REG_BCP REG_R13
#define SPELL_REG_SP "rsp"
#define SPELL_REG_FP "rbp"
#else
@ -157,6 +158,13 @@ frame os::fetch_compiled_frame_from_context(const void* ucVoid) {
return frame(sp + 1, fp, (address)*sp);
}
intptr_t* os::fetch_bcp_from_context(const void* ucVoid) {
assert(ucVoid != nullptr, "invariant");
const ucontext_t* uc = (const ucontext_t*)ucVoid;
assert(os::Posix::ucontext_is_interpreter(uc), "invariant");
return reinterpret_cast<intptr_t*>(uc->uc_mcontext.gregs[REG_BCP]);
}
// By default, gcc always save frame pointer (%ebp/%rbp) on stack. It may get
// turned off by -fomit-frame-pointer,
frame os::get_sender_for_C_frame(frame* fr) {

View File

@ -211,6 +211,11 @@ frame os::fetch_frame_from_context(const void* ucVoid) {
}
}
intptr_t* os::fetch_bcp_from_context(const void* ucVoid) {
ShouldNotCallThis();
return nullptr;
}
bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
ucontext_t* uc, JavaThread* thread) {

View File

@ -58,6 +58,8 @@
# include <stdio.h>
# include <intrin.h>
#define REG_BCP X22
void os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method, JavaCallArguments* args, JavaThread* thread) {
f(value, method, args, thread);
}
@ -97,6 +99,22 @@ frame os::fetch_frame_from_context(const void* ucVoid) {
return frame(sp, fp, epc);
}
#ifdef ASSERT
static bool is_interpreter(const CONTEXT* uc) {
assert(uc != nullptr, "invariant");
address pc = reinterpret_cast<address>(uc->Pc);
assert(pc != nullptr, "invariant");
return Interpreter::contains(pc);
}
#endif
intptr_t* os::fetch_bcp_from_context(const void* ucVoid) {
assert(ucVoid != nullptr, "invariant");
CONTEXT* uc = (CONTEXT*)ucVoid;
assert(is_interpreter(uc), "invariant");
return reinterpret_cast<intptr_t*>(uc->REG_BCP);
}
bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread,
struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) {
PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;

View File

@ -58,6 +58,7 @@
#define REG_SP Rsp
#define REG_FP Rbp
#define REG_PC Rip
#define REG_BCP R13
JNIEXPORT
extern LONG WINAPI topLevelExceptionFilter(_EXCEPTION_POINTERS* );
@ -320,6 +321,22 @@ frame os::fetch_frame_from_context(const void* ucVoid) {
return frame(sp, fp, epc);
}
#ifdef ASSERT
static bool is_interpreter(const CONTEXT* uc) {
assert(uc != nullptr, "invariant");
address pc = reinterpret_cast<address>(uc->REG_PC);
assert(pc != nullptr, "invariant");
return Interpreter::contains(pc);
}
#endif
intptr_t* os::fetch_bcp_from_context(const void* ucVoid) {
assert(ucVoid != nullptr, "invariant");
const CONTEXT* const uc = (CONTEXT*)ucVoid;
assert(is_interpreter(uc), "invariant");
return reinterpret_cast<intptr_t*>(uc->REG_BCP);
}
// Returns the current stack pointer. Accurate value needed for
// os::verify_stack_alignment().
address os::current_stack_pointer() {

View File

@ -75,6 +75,9 @@
#include "utilities/checkedCast.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#if INCLUDE_JFR
#include "jfr/jfr.inline.hpp"
#endif
// Helper class to access current interpreter state
class LastFrameAccessor : public StackObj {
@ -1167,6 +1170,7 @@ JRT_END
JRT_LEAF(void, InterpreterRuntime::at_unwind(JavaThread* current))
assert(current == JavaThread::current(), "pre-condition");
JFR_ONLY(Jfr::check_and_process_sample_request(current);)
// This function is called by the interpreter when the return poll found a reason
// to call the VM. The reason could be that we are returning into a not yet safe
// to access frame. We handle that below.

View File

@ -34,6 +34,7 @@
#include "jfr/support/jfrResolution.hpp"
#include "jfr/support/jfrThreadLocal.hpp"
#include "runtime/java.hpp"
#include "runtime/javaThread.hpp"
bool Jfr::is_enabled() {
return JfrRecorder::is_enabled();

View File

@ -25,7 +25,6 @@
#ifndef SHARE_JFR_JFR_HPP
#define SHARE_JFR_JFR_HPP
#include "jni.h"
#include "memory/allStatic.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/exceptions.hpp"
@ -36,6 +35,7 @@ class ciKlass;
class ciMethod;
class GraphBuilder;
class JavaThread;
struct JavaVMOption;
class Klass;
class outputStream;
class Parse;
@ -72,6 +72,8 @@ class Jfr : AllStatic {
static bool on_start_flight_recording_option(const JavaVMOption** option, char* delimiter);
static void on_backpatching(const Method* callee_method, JavaThread* jt);
static void initialize_main_thread(JavaThread* jt);
static bool has_sample_request(JavaThread* jt);
static void check_and_process_sample_request(JavaThread* jt);
};
#endif // SHARE_JFR_JFR_HPP

View File

@ -0,0 +1,44 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_JFR_JFR_INLINE_HPP
#define SHARE_JFR_JFR_INLINE_HPP
#include "jfr/jfr.hpp"
#include "jfr/periodic/sampling/jfrThreadSampling.hpp"
#include "runtime/javaThread.hpp"
inline bool Jfr::has_sample_request(JavaThread* jt) {
assert(jt != nullptr, "invariant");
return jt->jfr_thread_local()->has_sample_request();
}
inline void Jfr::check_and_process_sample_request(JavaThread* jt) {
if (has_sample_request(jt)) {
JfrThreadSampling::process_sample_request(jt);
}
}
#endif // SHARE_JFR_JFR_INLINE_HPP
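These two inline functions are the sampled thread's half of the cooperative protocol. To make the hand-off concrete, here is a minimal, self-contained analogue using std::atomic. It is an editorial model of the arm-the-poll/check-at-poll idea, not HotSpot code; all names are invented for illustration.

#include <atomic>

struct ThreadLocalSampling {
  std::atomic<bool> poll_armed{false};
  std::atomic<bool> has_request{false};
};

// Sampler side: publish the request, then arm the poll with release
// ordering, so a worker that observes the armed poll also observes
// the request.
void request_sample(ThreadLocalSampling& tl) {
  tl.has_request.store(true, std::memory_order_relaxed);
  tl.poll_armed.store(true, std::memory_order_release);
}

// Worker side: called at each "safepoint poll". The request is processed
// on the worker's own stack, at a point where that stack is walkable by
// construction.
void poll(ThreadLocalSampling& tl) {
  if (tl.poll_armed.load(std::memory_order_acquire)) {
    tl.poll_armed.store(false, std::memory_order_relaxed);
    if (tl.has_request.exchange(false, std::memory_order_acq_rel)) {
      // take the stack trace here
    }
  }
}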

View File

@ -277,9 +277,9 @@ JVM_ENTRY_NO_ENV(void, jfr_set_method_sampling_period(JNIEnv* env, jclass jvm, j
assert(EventExecutionSample::eventId == typed_event_id || EventNativeMethodSample::eventId == typed_event_id, "invariant");
JfrEventSetting::set_enabled(typed_event_id, periodMillis > 0);
if (EventExecutionSample::eventId == type) {
JfrThreadSampling::set_java_sample_period(periodMillis);
JfrThreadSampler::set_java_sample_period(periodMillis);
} else {
JfrThreadSampling::set_native_sample_period(periodMillis);
JfrThreadSampler::set_native_sample_period(periodMillis);
}
JVM_END

View File

@ -334,10 +334,11 @@ void ObjectSampleCheckpoint::write_stacktrace(const JfrStackTrace* trace, JfrChe
// JfrStackTrace
writer.write(trace->id());
writer.write((u1)!trace->_reached_root);
writer.write(trace->_nr_of_frames);
const int number_of_frames = trace->number_of_frames();
writer.write<u4>(number_of_frames);
// JfrStackFrames
for (u4 i = 0; i < trace->_nr_of_frames; ++i) {
const JfrStackFrame& frame = trace->_frames[i];
for (int i = 0; i < number_of_frames; ++i) {
const JfrStackFrame& frame = trace->_frames->at(i);
frame.write(writer);
add_to_leakp_set(frame._klass, frame._methodid);
}

View File

@ -710,6 +710,11 @@
<Field type="ulong" name="safepointId" label="Safepoint Identifier" relation="SafepointId" />
</Event>
<Event name="SafepointLatency" category="Java Virtual Machine, Runtime, Safepoint" label="Safepoint Latency"
description="The delay for a thread to reach its next safepoint poll instruction after receiving an asynchronous sampling interrupt" thread="true" stackTrace="true" throttle="true">
<Field type="VMThreadState" name="threadState" label="VM Thread State" />
</Event>
<Event name="ExecuteVMOperation" category="Java Virtual Machine, Runtime" label="VM Operation" description="Execution of a VM Operation" thread="true">
<Field type="VMOperationType" name="operation" label="Operation" />
<Field type="boolean" name="safepoint" label="At Safepoint" description="If the operation occurred at a safepoint" />
@ -1295,6 +1300,10 @@
<Field type="string" name="action" label="Action" />
</Type>
<Type name="VMThreadState" label="JVM Thread State">
<Field type="string" name="state" label="State" />
</Type>
<Type name="Bytecode" label="Bytecode Instruction">
<Field type="string" name="bytecode" label="Instruction" />
</Type>

View File

@ -1,120 +0,0 @@
/*
* Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "code/debugInfoRec.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "jfr/periodic/sampling/jfrCallTrace.hpp"
#include "jfr/utilities/jfrTypes.hpp"
#include "oops/method.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/registerMap.hpp"
bool JfrGetCallTrace::find_top_frame(frame& top_frame, Method** method, frame& first_frame) {
assert(top_frame.cb() != nullptr, "invariant");
RegisterMap map(_thread,
RegisterMap::UpdateMap::skip,
RegisterMap::ProcessFrames::skip,
RegisterMap::WalkContinuation::skip);
frame candidate = top_frame;
for (u4 i = 0; i < MAX_STACK_DEPTH * 2; ++i) {
if (candidate.is_entry_frame()) {
JavaCallWrapper *jcw = candidate.entry_frame_call_wrapper_if_safe(_thread);
if (jcw == nullptr || jcw->is_first_frame()) {
return false;
}
}
if (candidate.is_interpreted_frame()) {
JavaThreadState state = _thread->thread_state();
const bool known_valid = (state == _thread_in_native || state == _thread_in_vm || state == _thread_blocked);
if (known_valid || candidate.is_interpreted_frame_valid(_thread)) {
Method* im = candidate.interpreter_frame_method();
if (known_valid && !Method::is_valid_method(im)) {
return false;
}
*method = im;
first_frame = candidate;
return true;
}
}
if (candidate.cb()->is_nmethod()) {
// first check to make sure that we have a sane stack,
// the PC is actually inside the code part of the codeBlob,
// and we are past is_frame_complete_at (stack has been setup)
if (!candidate.safe_for_sender(_thread)) {
return false;
}
nmethod* nm = (nmethod*)candidate.cb();
*method = nm->method();
if (_in_java) {
PcDesc* pc_desc = nm->pc_desc_near(candidate.pc() + 1);
if (pc_desc == nullptr || pc_desc->scope_decode_offset() == DebugInformationRecorder::serialized_null) {
return false;
}
candidate.set_pc(pc_desc->real_pc(nm));
assert(nm->pc_desc_at(candidate.pc()) != nullptr, "invalid pc");
}
first_frame = candidate;
return true;
}
if (!candidate.safe_for_sender(_thread) ||
candidate.is_stub_frame() ||
candidate.cb()->frame_size() <= 0) {
return false;
}
candidate = candidate.sender(&map);
if (candidate.cb() == nullptr) {
return false;
}
}
return false;
}
bool JfrGetCallTrace::get_topframe(void* ucontext, frame& topframe) {
if (!_thread->pd_get_top_frame_for_profiling(&topframe, ucontext, _in_java)) {
return false;
}
if (topframe.cb() == nullptr) {
return false;
}
frame first_java_frame;
Method* method = nullptr;
if (find_top_frame(topframe, &method, first_java_frame)) {
if (method == nullptr) {
return false;
}
topframe = first_java_frame;
return true;
}
return false;
}

View File

@ -0,0 +1,305 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "asm/codeBuffer.hpp"
#include "interpreter/interpreter.hpp"
#include "jfr/periodic/sampling/jfrSampleRequest.hpp"
#include "runtime/continuationEntry.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/stubRoutines.hpp"
static inline bool is_entry_frame(address pc) {
return StubRoutines::returns_to_call_stub(pc);
}
static inline bool is_entry_frame(const JfrSampleRequest& request) {
return is_entry_frame(static_cast<address>(request._sample_pc));
}
static inline bool is_interpreter(address pc) {
return Interpreter::contains(pc);
}
static inline bool is_interpreter(const JfrSampleRequest& request) {
return is_interpreter(static_cast<address>(request._sample_pc));
}
static inline address interpreter_frame_bcp(const JfrSampleRequest& request) {
assert(is_interpreter(request), "invariant");
return frame::interpreter_bcp(static_cast<intptr_t*>(request._sample_bcp));
}
static inline bool in_stack(intptr_t* ptr, JavaThread* jt) {
assert(jt != nullptr, "invariant");
return jt->is_in_full_stack_checked(reinterpret_cast<address>(ptr));
}
static inline bool sp_in_stack(const JfrSampleRequest& request, JavaThread* jt) {
return in_stack(static_cast<intptr_t*>(request._sample_sp), jt);
}
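// Editorial note: for interpreter frames, this commit temporarily reuses the
// _sample_bcp slot to carry the frame pointer until the real bcp has been
// resolved (see build() and build_from_context() below), which is why
// fp_in_stack() validates the value stored in _sample_bcp.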
static inline bool fp_in_stack(const JfrSampleRequest& request, JavaThread* jt) {
return in_stack(static_cast<intptr_t*>(request._sample_bcp), jt);
}
static inline void update_interpreter_frame_sender_pc(JfrSampleRequest& request, intptr_t* fp) {
request._sample_pc = frame::interpreter_return_address(fp);
}
static inline void update_interpreter_frame_pc(JfrSampleRequest& request, JavaThread* jt) {
assert(fp_in_stack(request, jt), "invariant");
assert(is_interpreter(request), "invariant");
request._sample_pc = frame::interpreter_return_address(static_cast<intptr_t*>(request._sample_bcp));
}
static inline address interpreter_frame_return_address(const JfrSampleRequest& request) {
assert(is_interpreter(request), "invariant");
return frame::interpreter_return_address(static_cast<intptr_t*>(request._sample_bcp));
}
static inline intptr_t* frame_sender_sp(const JfrSampleRequest& request, JavaThread* jt) {
assert(fp_in_stack(request, jt), "invariant");
return frame::sender_sp(static_cast<intptr_t*>(request._sample_bcp));
}
static inline void update_frame_sender_sp(JfrSampleRequest& request, JavaThread* jt) {
request._sample_sp = frame_sender_sp(request, jt);
}
static inline void update_frame_sender_sp(JfrSampleRequest& request, intptr_t* fp) {
request._sample_sp = frame::sender_sp(fp);
}
static inline intptr_t* frame_link(const JfrSampleRequest& request) {
return frame::link(static_cast<intptr_t*>(request._sample_bcp));
}
static inline void update_sp(JfrSampleRequest& request, int frame_size) {
assert(frame_size >= 0, "invariant");
request._sample_sp = static_cast<intptr_t*>(request._sample_sp) + frame_size;
}
static inline void update_pc(JfrSampleRequest& request) {
assert(request._sample_sp != nullptr, "invariant");
request._sample_pc = frame::return_address(static_cast<intptr_t*>(request._sample_sp));
}
static inline void update_fp(JfrSampleRequest& request) {
assert(request._sample_sp != nullptr, "invariant");
request._sample_bcp = is_interpreter(request) ? frame::fp(static_cast<intptr_t*>(request._sample_sp)) : nullptr;
}
// Less extensive sanity checks for an interpreter frame.
static bool is_valid_interpreter_frame(const JfrSampleRequest& request, JavaThread* jt) {
assert(sp_in_stack(request, jt), "invariant");
assert(fp_in_stack(request, jt), "invariant");
return frame::is_interpreter_frame_setup_at(static_cast<intptr_t*>(request._sample_bcp), request._sample_sp);
}
static inline bool is_continuation_frame(address pc) {
return ContinuationEntry::return_pc() == pc;
}
static inline bool is_continuation_frame(const JfrSampleRequest& request) {
return is_continuation_frame(static_cast<address>(request._sample_pc));
}
static intptr_t* sender_for_interpreter_frame(JfrSampleRequest& request, JavaThread* jt) {
update_interpreter_frame_pc(request, jt); // pick up return address
if (is_continuation_frame(request) || is_entry_frame(request)) {
request._sample_pc = nullptr;
return nullptr;
}
update_frame_sender_sp(request, jt);
intptr_t* fp = nullptr;
if (is_interpreter(request)) {
fp = frame_link(request);
}
request._sample_bcp = nullptr;
return fp;
}
static bool build(JfrSampleRequest& request, intptr_t* fp, JavaThread* jt);
static bool build_for_interpreter(JfrSampleRequest& request, JavaThread* jt) {
assert(is_interpreter(request), "invariant");
assert(jt != nullptr, "invariant");
if (!fp_in_stack(request, jt)) {
return false;
}
if (is_valid_interpreter_frame(request, jt)) {
// Set fp as sp for interpreter frames.
request._sample_sp = request._sample_bcp;
// Get real bcp.
void* const bcp = interpreter_frame_bcp(request);
// Setting bcp = 1 marks the sample request to represent a native method.
request._sample_bcp = bcp != nullptr ? bcp : reinterpret_cast<address>(1);
return true;
}
intptr_t* fp = sender_for_interpreter_frame(request, jt);
if (request._sample_pc == nullptr || request._sample_sp == nullptr) {
return false;
}
return build(request, fp, jt);
}
// Attempt to build a Jfr sample request.
static bool build(JfrSampleRequest& request, intptr_t* fp, JavaThread* jt) {
assert(request._sample_sp != nullptr, "invariant");
assert(request._sample_pc != nullptr, "invariant");
assert(jt != nullptr, "invariant");
assert(jt->thread_state() == _thread_in_Java, "invariant");
// 1. Interpreter frame?
if (is_interpreter(request)) {
request._sample_bcp = fp;
return build_for_interpreter(request, jt);
}
const CodeBlob* const cb = CodeCache::find_blob(request._sample_pc);
if (cb != nullptr) {
// 2. Is nmethod?
return cb->is_nmethod();
// 3. What kind of CodeBlob or Stub?
// The longer-term plan is to make stubs and blobs parsable; we will
// then have a case here for each blob type, describing how to locate
// the sender. We cannot get to the sender of a blob or stub until they
// have a standardized layout and proper metadata descriptions.
}
return false;
}
static bool build_from_ljf(JfrSampleRequest& request,
const JfrThreadLocal* tl,
JavaThread* jt) {
assert(tl != nullptr, "invariant");
assert(jt != nullptr, "invariant");
assert(jt->jfr_thread_local() == tl, "invariant");
assert(sp_in_stack(request, jt), "invariant");
// The last Java frame is available but might not be walkable; fix it up.
address last_pc = jt->last_Java_pc();
if (last_pc == nullptr) {
last_pc = frame::return_address(static_cast<intptr_t*>(request._sample_sp));
if (last_pc == nullptr) {
return false;
}
}
assert(last_pc != nullptr, "invariant");
if (is_interpreter(last_pc)) {
if (tl->in_sampling_critical_section()) {
return false;
}
request._sample_pc = last_pc;
request._sample_bcp = jt->frame_anchor()->last_Java_fp();
return build_for_interpreter(request, jt);
}
request._sample_pc = last_pc;
return build(request, nullptr, jt);
}
static bool build_from_context(JfrSampleRequest& request,
const void* ucontext,
const JfrThreadLocal* tl,
JavaThread* jt) {
assert(ucontext != nullptr, "invariant");
assert(tl != nullptr, "invariant");
assert(jt != nullptr, "invariant");
assert(jt->jfr_thread_local() == tl, "invariant");
assert(!jt->has_last_Java_frame(), "invariant");
intptr_t* fp;
request._sample_pc = os::fetch_frame_from_context(ucontext, reinterpret_cast<intptr_t**>(&request._sample_sp), &fp);
assert(sp_in_stack(request, jt), "invariant");
if (is_interpreter(request)) {
if (tl->in_sampling_critical_section() || !in_stack(fp, jt)) {
return false;
}
if (frame::is_interpreter_frame_setup_at(fp, request._sample_sp)) {
// Set fp as sp for interpreter frames.
request._sample_sp = fp;
void* bcp = os::fetch_bcp_from_context(ucontext);
// Setting bcp = 1 marks the sample request to represent a native method.
request._sample_bcp = bcp != nullptr ? bcp : reinterpret_cast<void*>(1);
return true;
}
request._sample_bcp = fp;
fp = sender_for_interpreter_frame(request, jt);
if (request._sample_pc == nullptr || request._sample_sp == nullptr) {
return false;
}
}
return build(request, fp, jt);
}
static inline JfrSampleResult set_request_and_arm_local_poll(JfrSampleRequest& request, JfrThreadLocal* tl, JavaThread* jt) {
assert(tl != nullptr, "invariant");
assert(jt->jfr_thread_local() == tl, "invariant");
tl->set_sample_state(JAVA_SAMPLE);
SafepointMechanism::arm_local_poll_release(jt);
// For a Java sample, request._sample_ticks is also the start time for the SafepointLatency event.
request._sample_ticks = JfrTicks::now();
tl->set_sample_request(request);
return SAMPLE_JAVA;
}
// A biased sample request is denoted by an empty bcp and an empty pc.
static inline JfrSampleResult set_biased_java_sample(JfrSampleRequest& request, JfrThreadLocal* tl, JavaThread* jt) {
request._sample_bcp = nullptr;
request._sample_pc = nullptr;
return set_request_and_arm_local_poll(request, tl, jt);
}
static inline JfrSampleResult set_unbiased_java_sample(JfrSampleRequest& request, JfrThreadLocal* tl, JavaThread* jt) {
assert(request._sample_sp != nullptr, "invariant");
assert(sp_in_stack(request, jt), "invariant");
assert(request._sample_bcp != nullptr || !is_interpreter(request), "invariant");
return set_request_and_arm_local_poll(request, tl, jt);
}
JfrSampleResult JfrSampleRequestBuilder::build_java_sample_request(const void* ucontext,
JfrThreadLocal* tl,
JavaThread* jt) {
assert(ucontext != nullptr, "invariant");
assert(tl != nullptr, "invariant");
assert(tl->sample_state() == NO_SAMPLE, "invariant");
assert(jt != nullptr, "invariant");
assert(jt->thread_state() == _thread_in_Java, "invariant");
JfrSampleRequest request;
// Prioritize the last Java frame (ljf), if one exists.
request._sample_sp = jt->last_Java_sp();
if (request._sample_sp != nullptr) {
if (build_from_ljf(request, tl, jt)) {
return set_unbiased_java_sample(request, tl, jt);
}
} else if (build_from_context(request, ucontext, tl, jt)) {
return set_unbiased_java_sample(request, tl, jt);
}
return set_biased_java_sample(request, tl, jt);
}
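For orientation, this builder runs on the sampler thread while the target thread is suspended; condensed from OSThreadSampler::do_task later in this commit:

// Condensed from OSThreadSampler::do_task (this commit): a request is only
// built for a thread caught in _thread_in_Java with no sample pending.
void do_task(const SuspendedThreadTaskContext& context) {
  JavaThread* const jt = JavaThread::cast(context.thread());
  if (jt->thread_state() == _thread_in_Java) {
    JfrThreadLocal* const tl = jt->jfr_thread_local();
    if (tl->sample_state() == NO_SAMPLE) {
      _result = JfrSampleRequestBuilder::build_java_sample_request(context.ucontext(), tl, jt);
    }
  }
}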

View File

@ -0,0 +1,85 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_JFR_PERIODIC_SAMPLING_JFRSAMPLEREQUEST_HPP
#define SHARE_JFR_PERIODIC_SAMPLING_JFRSAMPLEREQUEST_HPP
#include "jfr/utilities/jfrTime.hpp"
#include "memory/allocation.hpp"
#include "utilities/growableArray.hpp"
class JavaThread;
class JfrThreadLocal;
enum JfrSampleResult {
THREAD_SUSPENSION_ERROR,
WRONG_THREAD_STATE,
UNPARSABLE_TOP_FRAME,
INVALID_STACK_TRACE,
CRASH,
NO_LAST_JAVA_FRAME,
UNKNOWN,
FAIL,
SKIP,
SAMPLE_NATIVE,
SAMPLE_JAVA,
NOF_SAMPLING_RESULTS
};
enum JfrSampleRequestType {
NO_SAMPLE = 0,
NATIVE_SAMPLE = 1,
JAVA_SAMPLE = 2,
NOF_SAMPLE_TYPES
};
struct JfrSampleRequest {
void* _sample_sp;
void* _sample_pc;
void* _sample_bcp;
JfrTicks _sample_ticks;
JfrSampleRequest() :
_sample_sp(nullptr),
_sample_pc(nullptr),
_sample_bcp(nullptr),
_sample_ticks() {}
JfrSampleRequest(const JfrTicks& ticks) :
_sample_sp(nullptr),
_sample_pc(nullptr),
_sample_bcp(nullptr),
_sample_ticks(ticks) {}
};
typedef GrowableArrayCHeap<JfrSampleRequest, mtTracing> JfrSampleRequestQueue;
class JfrSampleRequestBuilder : AllStatic {
public:
static JfrSampleResult build_java_sample_request(const void* ucontext,
JfrThreadLocal* tl,
JavaThread* jt);
};
#endif // SHARE_JFR_PERIODIC_SAMPLING_JFRSAMPLEREQUEST_HPP
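As a reading aid, the field conventions this commit applies to JfrSampleRequest, gathered from jfrSampleRequest.cpp and expressed as two hypothetical helpers (editorial, not part of the change):

// Editorial helpers summarizing the conventions; not commit code.
static bool is_biased(const JfrSampleRequest& r) {
  // A biased request carries neither pc nor bcp; the sampled thread
  // resolves its own top frame when it reaches its safepoint poll.
  return r._sample_pc == nullptr && r._sample_bcp == nullptr;
}

static bool is_native_method_marker(const JfrSampleRequest& r) {
  // bcp == 1 marks a sample request that represents a native method.
  return r._sample_bcp == reinterpret_cast<void*>(1);
}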

View File

@ -22,340 +22,48 @@
*
*/
#include "classfile/javaThreadStatus.hpp"
#include "jfr/jfrEvents.hpp"
#include "jfr/recorder/jfrRecorder.hpp"
#include "jfr/periodic/sampling/jfrCallTrace.hpp"
#include "jfr/periodic/sampling/jfrThreadSampler.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdLoadBarrier.inline.hpp"
#include "jfr/metadata/jfrSerializer.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
#include "jfr/recorder/storage/jfrBuffer.hpp"
#include "jfr/support/jfrThreadLocal.hpp"
#include "jfr/periodic/sampling/jfrSampleRequest.hpp"
#include "jfr/periodic/sampling/jfrThreadSampling.hpp"
#include "jfr/periodic/sampling/jfrThreadSampler.hpp"
#include "jfr/utilities/jfrTime.hpp"
#include "jfrfiles/jfrEventClasses.hpp"
#include "jfr/utilities/jfrTryLock.hpp"
#include "jfr/utilities/jfrTypes.hpp"
#include "logging/log.hpp"
#include "runtime/atomic.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/semaphore.hpp"
#include "runtime/stackWatermark.hpp"
#include "runtime/suspendedThreadTask.hpp"
#include "runtime/threadCrashProtection.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "utilities/systemMemoryBarrier.hpp"
enum JfrSampleType {
NO_SAMPLE = 0,
JAVA_SAMPLE = 1,
NATIVE_SAMPLE = 2
};
static bool thread_state_in_java(JavaThread* thread) {
assert(thread != nullptr, "invariant");
switch(thread->thread_state()) {
case _thread_new:
case _thread_uninitialized:
case _thread_new_trans:
case _thread_in_vm_trans:
case _thread_blocked_trans:
case _thread_in_native_trans:
case _thread_blocked:
case _thread_in_vm:
case _thread_in_native:
case _thread_in_Java_trans:
break;
case _thread_in_Java:
return true;
default:
ShouldNotReachHere();
break;
}
return false;
}
static bool thread_state_in_native(JavaThread* thread) {
assert(thread != nullptr, "invariant");
switch(thread->thread_state()) {
case _thread_new:
case _thread_uninitialized:
case _thread_new_trans:
case _thread_blocked_trans:
case _thread_blocked:
case _thread_in_vm:
case _thread_in_vm_trans:
case _thread_in_Java_trans:
case _thread_in_Java:
case _thread_in_native_trans:
break;
case _thread_in_native:
return true;
default:
ShouldNotReachHere();
break;
}
return false;
}
class JfrThreadSampleClosure {
public:
JfrThreadSampleClosure(EventExecutionSample* events, EventNativeMethodSample* events_native);
~JfrThreadSampleClosure() {}
EventExecutionSample* next_event() { return &_events[_added_java++]; }
EventNativeMethodSample* next_event_native() { return &_events_native[_added_native++]; }
void commit_events(JfrSampleType type);
bool do_sample_thread(JavaThread* thread, JfrStackFrame* frames, u4 max_frames, JfrSampleType type);
uint java_entries() { return _added_java; }
uint native_entries() { return _added_native; }
private:
bool sample_thread_in_java(JavaThread* thread, JfrStackFrame* frames, u4 max_frames);
bool sample_thread_in_native(JavaThread* thread, JfrStackFrame* frames, u4 max_frames);
EventExecutionSample* _events;
EventNativeMethodSample* _events_native;
Thread* _self;
uint _added_java;
uint _added_native;
};
class OSThreadSampler : public SuspendedThreadTask {
public:
OSThreadSampler(JavaThread* thread,
JfrThreadSampleClosure& closure,
JfrStackFrame *frames,
u4 max_frames) : SuspendedThreadTask((Thread*)thread),
_success(false),
_thread_oop(thread->threadObj()),
_stacktrace(frames, max_frames),
_closure(closure),
_suspend_time() {}
void take_sample();
void do_task(const SuspendedThreadTaskContext& context);
void protected_task(const SuspendedThreadTaskContext& context);
bool success() const { return _success; }
const JfrStackTrace& stacktrace() const { return _stacktrace; }
private:
bool _success;
oop _thread_oop;
JfrStackTrace _stacktrace;
JfrThreadSampleClosure& _closure;
JfrTicks _suspend_time;
};
class OSThreadSamplerCallback : public CrashProtectionCallback {
public:
OSThreadSamplerCallback(OSThreadSampler& sampler, const SuspendedThreadTaskContext &context) :
_sampler(sampler), _context(context) {
}
virtual void call() {
_sampler.protected_task(_context);
}
private:
OSThreadSampler& _sampler;
const SuspendedThreadTaskContext& _context;
};
void OSThreadSampler::do_task(const SuspendedThreadTaskContext& context) {
#ifndef ASSERT
guarantee(JfrOptionSet::sample_protection(), "Sample Protection should be on in product builds");
#endif
assert(_suspend_time.value() == 0, "already timestamped!");
_suspend_time = JfrTicks::now();
if (JfrOptionSet::sample_protection()) {
OSThreadSamplerCallback cb(*this, context);
ThreadCrashProtection crash_protection;
if (!crash_protection.call(cb)) {
log_error(jfr)("Thread method sampler crashed");
}
} else {
protected_task(context);
}
}
/*
* From this method and down the call tree we attempt to protect against crashes
* using a signal handler / __try block. Don't take locks, don't rely on
* destructors, and don't leave memory (in case of signal / exception) in an
* inconsistent state. */
void OSThreadSampler::protected_task(const SuspendedThreadTaskContext& context) {
JavaThread* const jt = JavaThread::cast(context.thread());
// Skip the sample if we signaled a thread that moved to another state
if (!thread_state_in_java(jt)) {
return;
}
JfrGetCallTrace trace(true, jt);
frame topframe;
if (trace.get_topframe(context.ucontext(), topframe)) {
if (_stacktrace.record_async(jt, topframe)) {
/* If we managed to get a topframe and a stacktrace, create an event
* and put it into our array. We can't call Jfr::_stacktraces.add()
* here since it would allocate memory using malloc. Doing so while
* the stopped thread is inside malloc would deadlock. */
_success = true;
EventExecutionSample *ev = _closure.next_event();
ev->set_starttime(_suspend_time);
ev->set_endtime(_suspend_time); // fake to not take an end time
ev->set_sampledThread(JfrThreadLocal::thread_id(jt));
ev->set_state(static_cast<u8>(JavaThreadStatus::RUNNABLE));
}
}
}
void OSThreadSampler::take_sample() {
run();
}
class JfrNativeSamplerCallback : public CrashProtectionCallback {
public:
JfrNativeSamplerCallback(JfrThreadSampleClosure& closure, JavaThread* jt, JfrStackFrame* frames, u4 max_frames) :
_closure(closure), _jt(jt), _thread_oop(jt->threadObj()), _stacktrace(frames, max_frames), _success(false) {
}
virtual void call();
bool success() { return _success; }
JfrStackTrace& stacktrace() { return _stacktrace; }
private:
JfrThreadSampleClosure& _closure;
JavaThread* _jt;
oop _thread_oop;
JfrStackTrace _stacktrace;
bool _success;
};
static void write_native_event(JfrThreadSampleClosure& closure, JavaThread* jt, oop thread_oop) {
EventNativeMethodSample *ev = closure.next_event_native();
ev->set_starttime(JfrTicks::now());
ev->set_sampledThread(JfrThreadLocal::thread_id(jt));
ev->set_state(static_cast<u8>(JavaThreadStatus::RUNNABLE));
}
void JfrNativeSamplerCallback::call() {
// When a thread is only attached, it is in native without a last Java frame
if (!_jt->has_last_Java_frame()) {
return;
}
frame topframe = _jt->last_frame();
frame first_java_frame;
Method* method = nullptr;
JfrGetCallTrace gct(false, _jt);
if (!gct.find_top_frame(topframe, &method, first_java_frame)) {
return;
}
if (method == nullptr) {
return;
}
topframe = first_java_frame;
_success = _stacktrace.record_async(_jt, topframe);
if (_success) {
write_native_event(_closure, _jt, _thread_oop);
}
}
bool JfrThreadSampleClosure::sample_thread_in_java(JavaThread* thread, JfrStackFrame* frames, u4 max_frames) {
// Process the oops in the thread head before calling into code that wants to
// stack walk over Loom continuations. The stack walking code will otherwise
// skip frames in stack chunks on the Java heap.
StackWatermarkSet::start_processing(thread, StackWatermarkKind::gc);
OSThreadSampler sampler(thread, *this, frames, max_frames);
sampler.take_sample();
/* We don't want to allocate any memory using malloc/etc while the thread
* is stopped, so everything is stored in stack allocated memory until this
* point where the thread has been resumed again, if the sampling was a success
* we need to store the stacktrace in the stacktrace repository and update
* the event with the id that was returned. */
if (!sampler.success()) {
return false;
}
EventExecutionSample *event = &_events[_added_java - 1];
traceid id = JfrStackTraceRepository::add(sampler.stacktrace());
assert(id != 0, "Stacktrace id should not be 0");
event->set_stackTrace(id);
return true;
}
bool JfrThreadSampleClosure::sample_thread_in_native(JavaThread* thread, JfrStackFrame* frames, u4 max_frames) {
// Process the oops in the thread head before calling into code that wants to
// stack walk over Loom continuations. The stack walking code will otherwise
// skip frames in stack chunks on the Java heap.
StackWatermarkSet::start_processing(thread, StackWatermarkKind::gc);
JfrNativeSamplerCallback cb(*this, thread, frames, max_frames);
if (JfrOptionSet::sample_protection()) {
ThreadCrashProtection crash_protection;
if (!crash_protection.call(cb)) {
log_error(jfr)("Thread method sampler crashed for native");
}
} else {
cb.call();
}
if (!cb.success()) {
return false;
}
EventNativeMethodSample *event = &_events_native[_added_native - 1];
traceid id = JfrStackTraceRepository::add(cb.stacktrace());
assert(id != 0, "Stacktrace id should not be 0");
event->set_stackTrace(id);
return true;
}
static const uint MAX_NR_OF_JAVA_SAMPLES = 5;
static const uint MAX_NR_OF_NATIVE_SAMPLES = 1;
void JfrThreadSampleClosure::commit_events(JfrSampleType type) {
if (JAVA_SAMPLE == type) {
assert(_added_java > 0 && _added_java <= MAX_NR_OF_JAVA_SAMPLES, "invariant");
if (EventExecutionSample::is_enabled()) {
for (uint i = 0; i < _added_java; ++i) {
_events[i].commit();
}
}
} else {
assert(NATIVE_SAMPLE == type, "invariant");
assert(_added_native > 0 && _added_native <= MAX_NR_OF_NATIVE_SAMPLES, "invariant");
if (EventNativeMethodSample::is_enabled()) {
for (uint i = 0; i < _added_native; ++i) {
_events_native[i].commit();
}
}
}
}
JfrThreadSampleClosure::JfrThreadSampleClosure(EventExecutionSample* events, EventNativeMethodSample* events_native) :
_events(events),
_events_native(events_native),
_self(Thread::current()),
_added_java(0),
_added_native(0) {
}
class JfrThreadSampler : public NonJavaThread {
friend class JfrThreadSampling;
// The JfrSamplerThread suspends, if necessary, JavaThreads for sampling.
// It creates a sample description of the top Java frame, called a Jfr Sample Request.
// The request is installed into a thread-local queue associated with the sampled thread.
// Before resuming the sampled thread, its thread-local poll page is armed.
// This mechanism lets the sampled thread discover and process the installed
// sample request at its next safepoint poll instruction.
class JfrSamplerThread : public NonJavaThread {
friend class JfrThreadSampler;
private:
Semaphore _sample;
Thread* _sampler_thread;
JfrStackFrame* const _frames;
JavaThread* _last_thread_java;
JavaThread* _last_thread_native;
int64_t _java_period_millis;
int64_t _native_period_millis;
const size_t _min_size; // for enqueue buffer monitoring
int _cur_index;
const u4 _max_frames;
volatile bool _disenrolled;
const JfrBuffer* get_enqueue_buffer();
const JfrBuffer* renew_if_full(const JfrBuffer* enqueue_buffer);
JavaThread* next_thread(ThreadsList* t_list, JavaThread* first_sampled, JavaThread* current);
void task_stacktrace(JfrSampleType type, JavaThread** last_thread);
JfrThreadSampler(int64_t java_period_millis, int64_t native_period_millis, u4 max_frames);
~JfrThreadSampler();
void task_stacktrace(JfrSampleRequestType type, JavaThread** last_thread);
JfrSamplerThread(int64_t java_period_millis, int64_t native_period_millis, u4 max_frames);
void start_thread();
@ -363,68 +71,27 @@ class JfrThreadSampler : public NonJavaThread {
void disenroll();
void set_java_period(int64_t period_millis);
void set_native_period(int64_t period_millis);
bool sample_java_thread(JavaThread* jt);
bool sample_native_thread(JavaThread* jt);
protected:
virtual void post_run();
public:
virtual const char* name() const { return "JFR Thread Sampler"; }
virtual const char* type_name() const { return "JfrThreadSampler"; }
bool is_JfrSampler_thread() const { return true; }
void run();
static Monitor* transition_block() { return JfrThreadSampler_lock; }
static void on_javathread_suspend(JavaThread* thread);
int64_t get_java_period() const { return Atomic::load(&_java_period_millis); };
int64_t get_native_period() const { return Atomic::load(&_native_period_millis); };
virtual void post_run();
public:
virtual const char* name() const { return "JFR Sampler Thread"; }
virtual const char* type_name() const { return "JfrSamplerThread"; }
bool is_JfrSampler_thread() const { return true; }
int64_t java_period() const { return Atomic::load(&_java_period_millis); };
int64_t native_period() const { return Atomic::load(&_native_period_millis); };
};
static void clear_transition_block(JavaThread* jt) {
assert(Threads_lock->owned_by_self(), "Holding the thread table lock.");
jt->clear_trace_flag();
JfrThreadLocal* const tl = jt->jfr_thread_local();
MutexLocker ml(JfrThreadSampler::transition_block(), Mutex::_no_safepoint_check_flag);
if (tl->is_trace_block()) {
JfrThreadSampler::transition_block()->notify();
}
}
static bool is_excluded(JavaThread* thread) {
assert(thread != nullptr, "invariant");
return thread->is_hidden_from_external_view() || thread->in_deopt_handler() || thread->jfr_thread_local()->is_excluded();
}
bool JfrThreadSampleClosure::do_sample_thread(JavaThread* thread, JfrStackFrame* frames, u4 max_frames, JfrSampleType type) {
assert(Threads_lock->owned_by_self(), "Holding the thread table lock.");
if (is_excluded(thread)) {
return false;
}
bool ret = false;
thread->set_trace_flag(); // Provides StoreLoad, needed to keep read of thread state from floating up.
if (UseSystemMemoryBarrier) {
SystemMemoryBarrier::emit();
}
if (JAVA_SAMPLE == type) {
if (thread_state_in_java(thread)) {
ret = sample_thread_in_java(thread, frames, max_frames);
}
} else {
assert(NATIVE_SAMPLE == type, "invariant");
if (thread_state_in_native(thread)) {
ret = sample_thread_in_native(thread, frames, max_frames);
}
}
clear_transition_block(thread);
return ret;
}
JfrThreadSampler::JfrThreadSampler(int64_t java_period_millis, int64_t native_period_millis, u4 max_frames) :
JfrSamplerThread::JfrSamplerThread(int64_t java_period_millis, int64_t native_period_millis, u4 max_frames) :
_sample(),
_sampler_thread(nullptr),
_frames(JfrCHeapObj::new_array<JfrStackFrame>(max_frames)),
_last_thread_java(nullptr),
_last_thread_native(nullptr),
_java_period_millis(java_period_millis),
_native_period_millis(native_period_millis),
_min_size(max_frames * 2 * wordSize), // each frame tags at most 2 words, min size is a full stacktrace
_cur_index(-1),
_max_frames(max_frames),
_disenrolled(true) {
@ -432,54 +99,12 @@ JfrThreadSampler::JfrThreadSampler(int64_t java_period_millis, int64_t native_pe
assert(_native_period_millis >= 0, "invariant");
}
JfrThreadSampler::~JfrThreadSampler() {
JfrCHeapObj::free(_frames, sizeof(JfrStackFrame) * _max_frames);
void JfrSamplerThread::post_run() {
this->NonJavaThread::post_run();
delete this;
}
void JfrThreadSampler::set_java_period(int64_t period_millis) {
assert(period_millis >= 0, "invariant");
Atomic::store(&_java_period_millis, period_millis);
}
void JfrThreadSampler::set_native_period(int64_t period_millis) {
assert(period_millis >= 0, "invariant");
Atomic::store(&_native_period_millis, period_millis);
}
static inline bool is_released(JavaThread* jt) {
return !jt->is_trace_suspend();
}
void JfrThreadSampler::on_javathread_suspend(JavaThread* thread) {
if (is_released(thread)) {
return;
}
JfrThreadLocal* const tl = thread->jfr_thread_local();
MonitorLocker ml(transition_block(), Mutex::_no_safepoint_check_flag);
tl->set_trace_block();
while (!is_released(thread)) {
ml.wait();
}
tl->clear_trace_block();
}
JavaThread* JfrThreadSampler::next_thread(ThreadsList* t_list, JavaThread* first_sampled, JavaThread* current) {
assert(t_list != nullptr, "invariant");
assert(Threads_lock->owned_by_self(), "Holding the thread table lock.");
assert(_cur_index >= -1 && (uint)_cur_index + 1 <= t_list->length(), "invariant");
assert((current == nullptr && -1 == _cur_index) || (t_list->find_index_of_JavaThread(current) == _cur_index), "invariant");
if ((uint)_cur_index + 1 == t_list->length()) {
// wrap
_cur_index = 0;
} else {
_cur_index++;
}
assert(_cur_index >= 0 && (uint)_cur_index < t_list->length(), "invariant");
JavaThread* const next = t_list->thread_at(_cur_index);
return next != first_sampled ? next : nullptr;
}
void JfrThreadSampler::start_thread() {
void JfrSamplerThread::start_thread() {
if (os::create_thread(this, os::os_thread)) {
os::start_thread(this);
} else {
@ -487,7 +112,7 @@ void JfrThreadSampler::start_thread() {
}
}
void JfrThreadSampler::enroll() {
void JfrSamplerThread::enroll() {
if (_disenrolled) {
log_trace(jfr)("Enrolling thread sampler");
_sample.signal();
@ -495,7 +120,7 @@ void JfrThreadSampler::enroll() {
}
}
void JfrThreadSampler::disenroll() {
void JfrSamplerThread::disenroll() {
if (!_disenrolled) {
_sample.wait();
_disenrolled = true;
@ -503,14 +128,23 @@ void JfrThreadSampler::disenroll() {
}
}
static int64_t get_monotonic_ms() {
// Currently we only need to serialize a single thread state
// _thread_in_Java for the SafepointLatency event.
class VMThreadStateSerializer : public JfrSerializer {
public:
void serialize(JfrCheckpointWriter& writer) {
writer.write_count(1);
writer.write_key(_thread_in_Java);
writer.write("_thread_in_Java");
}
};
static inline int64_t get_monotonic_ms() {
return os::javaTimeNanos() / 1000000;
}
void JfrThreadSampler::run() {
assert(_sampler_thread == nullptr, "invariant");
_sampler_thread = this;
void JfrSamplerThread::run() {
JfrSerializer::register_serializer(TYPE_VMTHREADSTATE, true, new VMThreadStateSerializer());
int64_t last_java_ms = get_monotonic_ms();
int64_t last_native_ms = last_java_ms;
@ -523,9 +157,9 @@ void JfrThreadSampler::run() {
}
_sample.signal();
int64_t java_period_millis = get_java_period();
int64_t java_period_millis = java_period();
java_period_millis = java_period_millis == 0 ? max_jlong : MAX2<int64_t>(java_period_millis, 1);
int64_t native_period_millis = get_native_period();
int64_t native_period_millis = native_period();
native_period_millis = native_period_millis == 0 ? max_jlong : MAX2<int64_t>(native_period_millis, 1);
// If both periods are max_jlong, it implies the sampler is in the process of
@ -567,110 +201,201 @@ void JfrThreadSampler::run() {
}
}
void JfrThreadSampler::post_run() {
this->NonJavaThread::post_run();
delete this;
JavaThread* JfrSamplerThread::next_thread(ThreadsList* t_list, JavaThread* first_sampled, JavaThread* current) {
assert(t_list != nullptr, "invariant");
assert(_cur_index >= -1 && (uint)_cur_index + 1 <= t_list->length(), "invariant");
assert((current == nullptr && -1 == _cur_index) || (t_list->find_index_of_JavaThread(current) == _cur_index), "invariant");
if ((uint)_cur_index + 1 == t_list->length()) {
// wrap
_cur_index = 0;
} else {
_cur_index++;
}
assert(_cur_index >= 0 && (uint)_cur_index < t_list->length(), "invariant");
JavaThread* const next = t_list->thread_at(_cur_index);
return next != first_sampled ? next : nullptr;
}
const JfrBuffer* JfrThreadSampler::get_enqueue_buffer() {
const JfrBuffer* buffer = JfrTraceIdLoadBarrier::get_sampler_enqueue_buffer(this);
return buffer != nullptr ? renew_if_full(buffer) : JfrTraceIdLoadBarrier::renew_sampler_enqueue_buffer(this);
static inline bool is_excluded(JavaThread* jt) {
assert(jt != nullptr, "invariant");
return jt->is_Compiler_thread() || jt->is_hidden_from_external_view() || jt->is_JfrRecorder_thread() || jt->jfr_thread_local()->is_excluded();
}
const JfrBuffer* JfrThreadSampler::renew_if_full(const JfrBuffer* enqueue_buffer) {
assert(enqueue_buffer != nullptr, "invariant");
return enqueue_buffer->free_size() < _min_size ? JfrTraceIdLoadBarrier::renew_sampler_enqueue_buffer(this) : enqueue_buffer;
}
void JfrThreadSampler::task_stacktrace(JfrSampleType type, JavaThread** last_thread) {
ResourceMark rm;
EventExecutionSample samples[MAX_NR_OF_JAVA_SAMPLES];
EventNativeMethodSample samples_native[MAX_NR_OF_NATIVE_SAMPLES];
JfrThreadSampleClosure sample_task(samples, samples_native);
const uint sample_limit = JAVA_SAMPLE == type ? MAX_NR_OF_JAVA_SAMPLES : MAX_NR_OF_NATIVE_SAMPLES;
void JfrSamplerThread::task_stacktrace(JfrSampleRequestType type, JavaThread** last_thread) {
const uint sample_limit = JAVA_SAMPLE == type ? 5 : 1;
uint num_samples = 0;
JavaThread* start = nullptr;
elapsedTimer sample_time;
sample_time.start();
{
elapsedTimer sample_time;
sample_time.start();
{
MutexLocker tlock(Threads_lock);
ThreadsListHandle tlh;
// Resolve a sample session relative start position index into the thread list array.
// In cases where the last sampled thread is null or not-null but stale, find_index() returns -1.
_cur_index = tlh.list()->find_index_of_JavaThread(*last_thread);
JavaThread* current = _cur_index != -1 ? *last_thread : nullptr;
MutexLocker tlock(Threads_lock);
ThreadsListHandle tlh;
// Resolve a sample session relative start position index into the thread list array.
// In cases where the last sampled thread is null or not-null but stale, find_index() returns -1.
_cur_index = tlh.list()->find_index_of_JavaThread(*last_thread);
JavaThread* current = _cur_index != -1 ? *last_thread : nullptr;
// Explicitly monitor the available space of the thread-local buffer used by the load barrier
// for enqueuing klasses as part of tagging methods. We do this because if space becomes sparse,
// we cannot rely on the implicit allocation of a new buffer as part of the regular tag mechanism.
// If the free list is empty, a malloc could result, and the problem with that is that the thread
// we have suspended could be the holder of the malloc lock. Instead, the buffer is pre-emptively
// renewed before thread suspension.
const JfrBuffer* enqueue_buffer = get_enqueue_buffer();
assert(enqueue_buffer != nullptr, "invariant");
while (num_samples < sample_limit) {
current = next_thread(tlh.list(), start, current);
if (current == nullptr) {
break;
}
if (start == nullptr) {
start = current; // remember the thread where we started to attempt sampling
}
if (current->is_Compiler_thread()) {
continue;
}
assert(enqueue_buffer->free_size() >= _min_size, "invariant");
if (sample_task.do_sample_thread(current, _frames, _max_frames, type)) {
num_samples++;
}
enqueue_buffer = renew_if_full(enqueue_buffer);
// while (num_samples < sample_limit) {
while (true) {
current = next_thread(tlh.list(), start, current);
if (current == nullptr) {
break;
}
if (is_excluded(current)) {
continue;
}
if (start == nullptr) {
start = current; // remember the thread where we started to attempt sampling
}
bool success;
if (JAVA_SAMPLE == type) {
success = sample_java_thread(current);
} else {
assert(type == NATIVE_SAMPLE, "invariant");
success = sample_native_thread(current);
}
if (success) {
num_samples++;
}
*last_thread = current; // remember the thread we last attempted to sample
}
sample_time.stop();
log_trace(jfr)("JFR thread sampling done in %3.7f secs with %d java %d native samples",
sample_time.seconds(), sample_task.java_entries(), sample_task.native_entries());
}
if (num_samples > 0) {
sample_task.commit_events(type);
*last_thread = current; // remember the thread we last attempted to sample
}
sample_time.stop();
log_trace(jfr)("JFR thread sampling done in %3.7f secs with %d java %d native samples",
sample_time.seconds(), type == JAVA_SAMPLE ? num_samples : 0, type == NATIVE_SAMPLE ? num_samples : 0);
}
static JfrThreadSampling* _instance = nullptr;
// Platform-specific thread suspension and CPU context retrieval.
class OSThreadSampler : public SuspendedThreadTask {
private:
JfrSampleResult _result;
public:
OSThreadSampler(JavaThread* jt) : SuspendedThreadTask(jt),
_result(THREAD_SUSPENSION_ERROR) {}
void request_sample() { run(); }
JfrSampleResult result() const { return _result; }
JfrThreadSampling& JfrThreadSampling::instance() {
void do_task(const SuspendedThreadTaskContext& context) {
JavaThread* const jt = JavaThread::cast(context.thread());
assert(jt != nullptr, "invariant");
if (jt->thread_state() == _thread_in_Java) {
JfrThreadLocal* const tl = jt->jfr_thread_local();
if (tl->sample_state() == NO_SAMPLE) {
_result = JfrSampleRequestBuilder::build_java_sample_request(context.ucontext(), tl, jt);
}
}
}
};
// Sampling a thread in state _thread_in_Java
// involves a platform-specific thread suspend and CPU context retrieval.
bool JfrSamplerThread::sample_java_thread(JavaThread* jt) {
if (jt->thread_state() != _thread_in_Java) {
return false;
}
OSThreadSampler sampler(jt);
sampler.request_sample();
if (sampler.result() != SAMPLE_JAVA) {
// Wrong thread state or suspension error.
return false;
}
// If we get to it before the sampled thread does, we install
// the new JFR sample request into the thread-local queue
// associated with the sampled thread. This makes the just
// sampled thread eligible for yet another sample.
JfrThreadLocal* const tl = jt->jfr_thread_local();
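// Try-lock only: the sampled thread may hold its own sample monitor while
// draining requests at a safepoint poll, and the sampler must never block
// behind the thread it is sampling.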
JfrMutexTryLock lock(tl->sample_monitor());
if (lock.acquired() && tl->sample_state() == JAVA_SAMPLE) {
tl->enqueue_request();
assert(tl->sample_state() == NO_SAMPLE, "invariant");
}
return true;
}
static JfrSamplerThread* _sampler_thread = nullptr;
// We can sample a JavaThread running in state _thread_in_native
// without thread suspension and CPU context retrieval,
// if we carefully order the loads of the thread state.
bool JfrSamplerThread::sample_native_thread(JavaThread* jt) {
if (jt->thread_state() != _thread_in_native) {
return false;
}
JfrThreadLocal* const tl = jt->jfr_thread_local();
assert(tl != nullptr, "invariant");
if (tl->sample_state() != NO_SAMPLE) {
return false;
}
tl->set_sample_state(NATIVE_SAMPLE);
SafepointMechanism::arm_local_poll_release(jt);
// Barriers needed to keep the next read of thread state from floating up.
if (UseSystemMemoryBarrier) {
SystemMemoryBarrier::emit();
} else {
OrderAccess::storeload();
}
if (jt->thread_state() != _thread_in_native || !jt->has_last_Java_frame()) {
MonitorLocker lock(tl->sample_monitor(), Monitor::_no_safepoint_check_flag);
tl->set_sample_state(NO_SAMPLE);
lock.notify_all();
return false;
}
return JfrThreadSampling::process_native_sample_request(tl, jt, _sampler_thread);
}
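// The arming and ordering above form one half of a Dekker-style handshake
// with the sampled thread (an illustrative sketch, not code from this change):
//
//   sampler thread:                        sampled thread:
//     set_sample_state(NATIVE_SAMPLE)        leaves _thread_in_native
//     arm local poll; StoreLoad fence        (state transition is fenced)
//     re-read thread state                   hits poll -> process_sample_request()
//
// Either the sampler observes that the thread has left native and backs out,
// or the armed poll guarantees the thread observes the pending request.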
void JfrSamplerThread::set_java_period(int64_t period_millis) {
assert(period_millis >= 0, "invariant");
Atomic::store(&_java_period_millis, period_millis);
}
void JfrSamplerThread::set_native_period(int64_t period_millis) {
assert(period_millis >= 0, "invariant");
Atomic::store(&_native_period_millis, period_millis);
}
// JfrThreadSampler;
static JfrThreadSampler* _instance = nullptr;
JfrThreadSampler& JfrThreadSampler::instance() {
return *_instance;
}
JfrThreadSampler::JfrThreadSampler() : _sampler_thread(nullptr) {}
JfrThreadSampler::~JfrThreadSampler() {
if (_sampler_thread != nullptr) {
_sampler_thread->disenroll();
}
}
JfrThreadSampler* JfrThreadSampler::create() {
assert(_instance == nullptr, "invariant");
_instance = new JfrThreadSampler();
return _instance;
}
void JfrThreadSampler::destroy() {
if (_instance != nullptr) {
delete _instance;
_instance = nullptr;
}
}
#ifdef ASSERT
static void assert_periods(const JfrSamplerThread* sampler_thread, int64_t java_period_millis, int64_t native_period_millis) {
assert(sampler_thread != nullptr, "invariant");
assert(sampler_thread->java_period() == java_period_millis, "invariant");
assert(sampler_thread->native_period() == native_period_millis, "invariant");
}
#endif
@ -678,66 +403,62 @@ static void log(int64_t java_period_millis, int64_t native_period_millis) {
log_trace(jfr)("Updated thread sampler for java: " INT64_FORMAT " ms, native " INT64_FORMAT " ms", java_period_millis, native_period_millis);
}
void JfrThreadSampler::create_sampler(int64_t java_period_millis, int64_t native_period_millis) {
assert(_sampler_thread == nullptr, "invariant");
log_trace(jfr)("Creating thread sampler for java:" INT64_FORMAT " ms, native " INT64_FORMAT " ms", java_period_millis, native_period_millis);
_sampler_thread = new JfrSamplerThread(java_period_millis, native_period_millis, JfrOptionSet::stackdepth());
_sampler_thread->start_thread();
_sampler_thread->enroll();
}
void JfrThreadSampler::update_run_state(int64_t java_period_millis, int64_t native_period_millis) {
if (java_period_millis > 0 || native_period_millis > 0) {
if (_sampler_thread == nullptr) {
create_sampler(java_period_millis, native_period_millis);
} else {
_sampler_thread->enroll();
}
DEBUG_ONLY(assert_periods(_sampler_thread, java_period_millis, native_period_millis);)
log(java_period_millis, native_period_millis);
return;
}
if (_sampler_thread != nullptr) {
DEBUG_ONLY(assert_periods(_sampler_thread, java_period_millis, native_period_millis);)
_sampler_thread->disenroll();
}
}
void JfrThreadSampler::set_period(bool is_java_period, int64_t period_millis) {
int64_t java_period_millis = 0;
int64_t native_period_millis = 0;
if (is_java_period) {
java_period_millis = period_millis;
if (_sampler_thread != nullptr) {
_sampler_thread->set_java_period(java_period_millis);
native_period_millis = _sampler_thread->native_period();
}
} else {
native_period_millis = period_millis;
if (_sampler_thread != nullptr) {
_sampler_thread->set_native_period(native_period_millis);
java_period_millis = _sampler_thread->java_period();
}
}
update_run_state(java_period_millis, native_period_millis);
}
void JfrThreadSampler::set_java_sample_period(int64_t period_millis) {
assert(period_millis >= 0, "invariant");
if (_instance == nullptr && 0 == period_millis) {
return;
}
instance().set_period(true, period_millis);
}
void JfrThreadSampler::set_native_sample_period(int64_t period_millis) {
assert(period_millis >= 0, "invariant");
if (_instance == nullptr && 0 == period_millis) {
return;
}
instance().set_period(false, period_millis);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,28 +27,23 @@
#include "jfr/utilities/jfrAllocation.hpp"
class JavaThread;
class JfrSamplerThread;
class JfrThreadSampler : public JfrCHeapObj {
friend class JfrRecorder;
private:
JfrSamplerThread* _sampler_thread;
void create_sampler(int64_t java_period_millis, int64_t native_period_millis);
void update_run_state(int64_t java_period_millis, int64_t native_period_millis);
void set_period(bool is_java_period, int64_t period_millis);
JfrThreadSampler();
~JfrThreadSampler();
static JfrThreadSampler& instance();
static JfrThreadSampler* create();
static void destroy();
public:
static void set_java_sample_period(int64_t period_millis);
static void set_native_sample_period(int64_t period_millis);
};
#endif // SHARE_JFR_PERIODIC_SAMPLING_JFRTHREADSAMPLER_HPP

View File

@ -0,0 +1,397 @@
/*
* Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "classfile/javaThreadStatus.hpp"
#include "code/codeCache.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/nmethod.hpp"
#include "interpreter/interpreter.hpp"
#include "jfr/jfrEvents.hpp"
#include "jfr/periodic/sampling/jfrSampleRequest.hpp"
#include "jfr/periodic/sampling/jfrThreadSampling.hpp"
#include "jfr/recorder/stacktrace/jfrStackTrace.hpp"
#include "jfr/utilities/jfrTypes.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "runtime/continuation.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/stackFrameStream.inline.hpp"
template <typename EventType>
static inline void send_sample_event(const JfrTicks& start_time, const JfrTicks& end_time, traceid sid, traceid tid) {
EventType event(UNTIMED);
event.set_starttime(start_time);
event.set_endtime(end_time);
event.set_sampledThread(tid);
event.set_state(static_cast<u8>(JavaThreadStatus::RUNNABLE));
event.set_stackTrace(sid);
event.commit();
}
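// Instantiated below for EventExecutionSample (Java samples) and
// EventNativeMethodSample (native samples).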
static inline void send_safepoint_latency_event(const JfrSampleRequest& request, const JfrTicks& end_time, traceid sid, const JavaThread* jt) {
assert(jt != nullptr, "invariant");
assert(!jt->jfr_thread_local()->has_cached_stack_trace(), "invariant");
EventSafepointLatency event(UNTIMED);
event.set_starttime(request._sample_ticks);
event.set_endtime(end_time);
if (event.should_commit()) {
event.set_threadState(_thread_in_Java);
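// Caching the stack trace id lets the commit path reuse the trace already
// recorded for the execution sample instead of walking the stack again.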
jt->jfr_thread_local()->set_cached_stack_trace_id(sid);
event.commit();
jt->jfr_thread_local()->clear_cached_stack_trace();
}
}
static inline bool is_interpreter(address pc) {
return Interpreter::contains(pc);
}
static inline bool is_interpreter(const JfrSampleRequest& request) {
return request._sample_bcp != nullptr;
}
static inline bool is_in_continuation(const frame& frame, JavaThread* jt) {
return JfrThreadLocal::is_vthread(jt) &&
(Continuation::is_frame_in_continuation(jt, frame) || Continuation::is_continuation_enterSpecial(frame));
}
// A sampled interpreter frame is handled differently from a sampled compiler frame.
//
// The JfrSampleRequest description partially describes a _potential_ interpreter Java frame.
// It's partial because the sampler thread only sets the fp and bcp fields.
//
// We want to ensure that what we discovered inside interpreter code _really_ is what we assume, a valid interpreter frame.
//
// Therefore, instead of letting the sampler thread read what it believes to be a Method*, we delay until we are at a safepoint to ensure the Method* is valid.
//
// If the JfrSampleRequest represents a valid interpreter frame, the Method* is retrieved and the sender frame is returned in sender_frame.
//
// If it is not a valid interpreter frame, the JfrSampleRequest is invalidated, and the current frame is returned in sender_frame.
//
static bool compute_sender_frame(JfrSampleRequest& request, frame& sender_frame, bool& in_continuation, JavaThread* jt) {
assert(is_interpreter(request), "invariant");
assert(jt != nullptr, "invariant");
assert(jt->has_last_Java_frame(), "invariant");
// For a request representing an interpreter frame, request._sample_sp is actually the frame pointer, fp.
const void* const sampled_fp = request._sample_sp;
StackFrameStream stream(jt, false, false);
// Search for the sampled interpreter frame and get its Method*.
while (!stream.is_done()) {
const frame* const frame = stream.current();
assert(frame != nullptr, "invariant");
const intptr_t* const real_fp = frame->real_fp();
assert(real_fp != nullptr, "invariant");
if (real_fp == sampled_fp && frame->is_interpreted_frame()) {
Method* const method = frame->interpreter_frame_method();
assert(method != nullptr, "invariant");
request._sample_pc = method;
// Got the Method*. Validate bcp.
if (!method->is_native() && !method->contains(static_cast<address>(request._sample_bcp))) {
request._sample_bcp = frame->interpreter_frame_bcp();
}
in_continuation = is_in_continuation(*frame, jt);
break;
}
if (real_fp >= sampled_fp) {
// What we sampled is not an official interpreter frame.
// Invalidate the sample request and use current.
request._sample_bcp = nullptr;
sender_frame = *stream.current();
in_continuation = is_in_continuation(sender_frame, jt);
return true;
}
stream.next();
}
assert(!stream.is_done(), "invariant");
// Step to sender.
stream.next();
// If the top frame is in a continuation, check that the sender frame is too.
if (in_continuation && !is_in_continuation(*stream.current(), jt)) {
// Leave sender frame empty.
return true;
}
sender_frame = *stream.current();
assert(request._sample_pc != nullptr, "invariant");
assert(request._sample_bcp != nullptr, "invariant");
assert(Method::is_valid_method(static_cast<const Method*>(request._sample_pc)), "invariant");
assert(static_cast<const Method*>(request._sample_pc)->is_native() ||
static_cast<const Method*>(request._sample_pc)->contains(static_cast<address>(request._sample_bcp)), "invariant");
return true;
}
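// Post-condition for a validated interpreter sample: request._sample_pc holds
// a checked Method* (not a program counter) and request._sample_bcp a bcp
// within that method; an invalidated request has _sample_bcp == nullptr.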
static inline const PcDesc* get_pc_desc(nmethod* nm, void* pc) {
assert(nm != nullptr, "invariant");
assert(pc != nullptr, "invariant");
return nm->pc_desc_near(static_cast<address>(pc));
}
static inline bool is_valid(const PcDesc* pc_desc) {
return pc_desc != nullptr && pc_desc->scope_decode_offset() != DebugInformationRecorder::serialized_null;
}
static bool compute_top_frame(const JfrSampleRequest& request, frame& top_frame, bool& in_continuation, JavaThread* jt) {
assert(jt != nullptr, "invariant");
if (!jt->has_last_Java_frame()) {
return false;
}
if (is_interpreter(request)) {
return compute_sender_frame(const_cast<JfrSampleRequest&>(request), top_frame, in_continuation, jt);
}
void* const sampled_pc = request._sample_pc;
CodeBlob* sampled_cb;
if (sampled_pc == nullptr || (sampled_cb = CodeCache::find_blob(sampled_pc)) == nullptr) {
// A biased sample is requested or no code blob.
top_frame = jt->last_frame();
in_continuation = is_in_continuation(top_frame, jt);
return true;
}
// We will never describe a sample request that represents an unparsable stub or blob.
assert(sampled_cb->frame_complete_offset() != CodeOffsets::frame_never_safe, "invariant");
const void* const sampled_sp = request._sample_sp;
assert(sampled_sp != nullptr, "invariant");
nmethod* const sampled_nm = sampled_cb->as_nmethod_or_null();
StackFrameStream stream(jt, false /* update registers */, false /* process frames */);
if (stream.current()->is_safepoint_blob_frame()) {
if (sampled_nm != nullptr) {
// Move to the physical sender frame of the SafepointBlob stub frame using the frame size, not the logical iterator.
const int safepoint_blob_stub_frame_size = stream.current()->cb()->frame_size();
intptr_t* const sender_sp = stream.current()->unextended_sp() + safepoint_blob_stub_frame_size;
if (sender_sp > sampled_sp) {
const address saved_exception_pc = jt->saved_exception_pc();
assert(saved_exception_pc != nullptr, "invariant");
const nmethod* const exception_nm = CodeCache::find_blob(saved_exception_pc)->as_nmethod();
assert(exception_nm != nullptr, "invariant");
if (exception_nm == sampled_nm && sampled_nm->is_at_poll_return(saved_exception_pc)) {
// We sit at the poll return site in the sampled compiled nmethod with only the return address on the stack.
// The sampled_nm compiled frame is no longer extant, but we might be able to reconstruct a synthetic
// compiled frame at this location. We do this by overlaying a reconstructed frame on top of
// the huge SafepointBlob stub frame. Of course, the synthetic frame only contains random stack memory,
// but it is safe because stack walking cares only about the form of the frame (i.e., an sp and a pc).
// We also do not have to worry about stack banging because we currently have a huge SafepointBlob stub frame
// on the stack. For extra assurance, we know that we can create this frame size at this
// very location because we just popped such a frame before we hit the return poll site.
//
// Let's attempt to correct for the safepoint bias.
const PcDesc* const pc_desc = get_pc_desc(sampled_nm, sampled_pc);
if (is_valid(pc_desc)) {
intptr_t* const synthetic_sp = sender_sp - sampled_nm->frame_size();
top_frame = frame(synthetic_sp, synthetic_sp, sender_sp, pc_desc->real_pc(sampled_nm), sampled_nm);
in_continuation = is_in_continuation(top_frame, jt);
return true;
}
}
}
}
stream.next(); // skip the SafepointBlob stub frame
}
assert(!stream.current()->is_safepoint_blob_frame(), "invariant");
// Search for the first frame that is above the sampled sp.
for (; !stream.is_done(); stream.next()) {
frame* const current = stream.current();
if (current->real_fp() <= sampled_sp) {
// Continue searching for a matching frame.
continue;
}
if (sampled_nm == nullptr) {
// The sample didn't have an nmethod; we decide to trace from its sender.
// Another instance of safepoint bias.
top_frame = *current;
break;
}
// Check for a matching compiled method.
if (current->cb()->as_nmethod_or_null() == sampled_nm) {
if (current->pc() != sampled_pc) {
// Let's adjust for the safepoint bias if we can.
const PcDesc* const pc_desc = get_pc_desc(sampled_nm, sampled_pc);
if (is_valid(pc_desc)) {
current->adjust_pc(pc_desc->real_pc(sampled_nm));
}
}
}
// Either a hit, or a mismatched sample in which case we trace from the sender.
// Yet another instance of safepoint bias, to be addressed with
// more exact and stricter versions when parsable blobs become available.
top_frame = *current;
break;
}
in_continuation = is_in_continuation(top_frame, jt);
return true;
}
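// Taken together: once a last Java frame exists, compute_top_frame always
// succeeds, either reconstructing the sampled frame (correcting safepoint
// bias via the PcDesc) or falling back to a nearby caller frame, possibly
// with an empty sender frame when a continuation has been exited.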
static void record_thread_in_java(const JfrSampleRequest& request, const JfrTicks& now, const JfrThreadLocal* tl, JavaThread* jt, Thread* current) {
assert(jt != nullptr, "invariant");
assert(tl != nullptr, "invariant");
assert(current != nullptr, "invariant");
frame top_frame;
bool in_continuation;
if (!compute_top_frame(request, top_frame, in_continuation, jt)) {
return;
}
traceid sid;
{
ResourceMark rm(current);
JfrStackTrace stacktrace;
if (!stacktrace.record(jt, top_frame, in_continuation, request)) {
// Unable to record stacktrace. Fail.
return;
}
sid = JfrStackTraceRepository::add(stacktrace);
}
assert(sid != 0, "invariant");
const traceid tid = in_continuation ? tl->vthread_id_with_epoch_update(jt) : JfrThreadLocal::jvm_thread_id(jt);
send_sample_event<EventExecutionSample>(request._sample_ticks, now, sid, tid);
if (current == jt) {
send_safepoint_latency_event(request, now, sid, jt);
}
}
static void drain_enqueued_requests(const JfrTicks& now, JfrThreadLocal* tl, JavaThread* jt, Thread* current) {
assert(tl != nullptr, "invariant");
assert(jt != nullptr, "invariant");
assert(current != nullptr, "invariant");
assert(jt->jfr_thread_local() == tl, "invariant");
assert_lock_strong(tl->sample_monitor());
if (tl->has_enqueued_requests()) {
for (const JfrSampleRequest& request : *tl->sample_requests()) {
record_thread_in_java(request, now, tl, jt, current);
}
tl->clear_enqueued_requests();
}
assert(!tl->has_enqueued_requests(), "invariant");
}
class SampleMonitor : public StackObj {
private:
JfrThreadLocal* const _tl;
Monitor* const _sample_monitor;
public:
SampleMonitor(JfrThreadLocal* tl) : _tl(tl), _sample_monitor(tl->sample_monitor()) {
assert(tl != nullptr, "invariant");
assert(_sample_monitor != nullptr, "invariant");
_sample_monitor->lock_without_safepoint_check();
}
~SampleMonitor() {
assert_lock_strong(_sample_monitor);
_tl->set_sample_state(NO_SAMPLE);
_sample_monitor->notify_all();
_sample_monitor->unlock();
}
};
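// RAII pairing: the destructor resets the sample state and notifies the
// sampled thread, which may be waiting in process_sample_request() below.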
// Only entered by the JfrSampler thread.
bool JfrThreadSampling::process_native_sample_request(JfrThreadLocal* tl, JavaThread* jt, Thread* sampler_thread) {
assert(tl != nullptr, "invairant");
assert(jt != nullptr, "invariant");
assert(sampler_thread != nullptr, "invariant");
assert(sampler_thread->is_JfrSampler_thread(), "invariant");
assert(tl == jt->jfr_thread_local(), "invariant");
assert(jt != sampler_thread, "only asynchronous processing of native samples");
assert(jt->has_last_Java_frame(), "invariant");
assert(tl->sample_state() == NATIVE_SAMPLE, "invariant");
const JfrTicks start_time = JfrTicks::now();
traceid tid;
traceid sid;
{
SampleMonitor sm(tl);
// Because the thread was in native, its last Java frame is walkable, and the
// thread will hit a safepoint poll on its way back from native. To ensure
// timely progress, any requests in the queue can be safely processed now.
drain_enqueued_requests(start_time, tl, jt, sampler_thread);
// Process the current stacktrace using the last Java frame (ljf).
{
ResourceMark rm(sampler_thread);
JfrStackTrace stacktrace;
const frame top_frame = jt->last_frame();
if (!stacktrace.record_inner(jt, top_frame, is_in_continuation(top_frame, jt), 0 /* skip level */)) {
// Unable to record stacktrace. Fail.
return false;
}
sid = JfrStackTraceRepository::add(stacktrace);
}
// Read the tid under the monitor to ensure that if it's a virtual thread,
// it is not unmounted until we are done with it.
tid = JfrThreadLocal::thread_id(jt);
}
assert(tl->sample_state() == NO_SAMPLE, "invariant");
send_sample_event<EventNativeMethodSample>(start_time, start_time, sid, tid);
return true;
}
// Entry point for a sampled thread that discovered pending Jfr Sample Requests as part of a safepoint poll.
void JfrThreadSampling::process_sample_request(JavaThread* jt) {
assert(JavaThread::current() == jt, "should be current thread");
assert(jt->thread_state() == _thread_in_vm || jt->thread_state() == _thread_in_Java, "invariant");
const JfrTicks now = JfrTicks::now();
JfrThreadLocal* const tl = jt->jfr_thread_local();
assert(tl != nullptr, "invariant");
MonitorLocker ml(tl->sample_monitor(), Monitor::_no_safepoint_check_flag);
for (;;) {
const int sample_state = tl->sample_state();
if (sample_state == NATIVE_SAMPLE) {
// Wait until stack trace is processed.
ml.wait();
} else if (sample_state == JAVA_SAMPLE) {
tl->enqueue_request();
} else {
// State has been processed.
break;
}
}
drain_enqueued_requests(now, tl, jt, jt);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,24 +22,21 @@
*
*/
#ifndef SHARE_JFR_PERIODIC_SAMPLING_JFRTHREADSAMPLING_HPP
#define SHARE_JFR_PERIODIC_SAMPLING_JFRTHREADSAMPLING_HPP
#include "memory/allocation.hpp"
class JavaThread;
class JfrThreadLocal;
class Thread;
class JfrThreadSampling : AllStatic {
friend class JfrSamplerThread;
private:
static bool process_native_sample_request(JfrThreadLocal* tl, JavaThread* jt, Thread* sampler_thread);
public:
static void process_sample_request(JavaThread* jt);
};
#endif // SHARE_JFR_PERIODIC_SAMPLING_JFRTHREADSAMPLING_HPP

View File

@ -301,7 +301,7 @@ bool JfrRecorder::create_components() {
if (!create_stringpool()) {
return false;
}
if (!create_thread_sampler()) {
return false;
}
if (!create_event_throttler()) {
@ -317,7 +317,7 @@ static JfrRepository* _repository = nullptr;
static JfrStackTraceRepository* _stack_trace_repository;
static JfrStringPool* _stringpool = nullptr;
static JfrOSInterface* _os_interface = nullptr;
static JfrThreadSampler* _thread_sampler = nullptr;
static JfrCheckpointManager* _checkpoint_manager = nullptr;
bool JfrRecorder::create_java_event_writer() {
@ -384,10 +384,10 @@ bool JfrRecorder::create_stringpool() {
return _stringpool != nullptr && _stringpool->initialize();
}
bool JfrRecorder::create_thread_sampler() {
assert(_thread_sampler == nullptr, "invariant");
_thread_sampler = JfrThreadSampler::create();
return _thread_sampler != nullptr;
}
bool JfrRecorder::create_event_throttler() {
@ -424,15 +424,15 @@ void JfrRecorder::destroy_components() {
JfrOSInterface::destroy();
_os_interface = nullptr;
}
if (_thread_sampler != nullptr) {
JfrThreadSampler::destroy();
_thread_sampler = nullptr;
}
JfrEventThrottler::destroy();
}
bool JfrRecorder::create_recorder_thread() {
return JfrRecorderThreadEntry::start(_checkpoint_manager, _post_box, JavaThread::current());
}
void JfrRecorder::destroy() {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -53,7 +53,7 @@ class JfrRecorder : public JfrCHeapObj {
static bool create_stacktrace_repository();
static bool create_storage();
static bool create_stringpool();
static bool create_thread_sampler();
static bool create_event_throttler();
static bool create_components();
static void destroy_components();

View File

@ -34,7 +34,8 @@ constexpr static const JfrSamplerParams _disabled_params = {
false // reconfigure
};
static JfrEventThrottler* _object_allocation_throttler = nullptr;
static JfrEventThrottler* _safepoint_latency_throttler = nullptr;
JfrEventThrottler::JfrEventThrottler(JfrEventId event_id) :
JfrAdaptiveSampler(),
@ -47,30 +48,49 @@ JfrEventThrottler::JfrEventThrottler(JfrEventId event_id) :
_update(false) {}
bool JfrEventThrottler::create() {
assert(_object_allocation_throttler == nullptr, "invariant");
_object_allocation_throttler = new JfrEventThrottler(JfrObjectAllocationSampleEvent);
if (_object_allocation_throttler == nullptr || !_object_allocation_throttler->initialize()) {
return false;
}
assert(_safepoint_latency_throttler == nullptr, "invariant");
_safepoint_latency_throttler = new JfrEventThrottler(JfrSafepointLatencyEvent);
return _safepoint_latency_throttler != nullptr && _safepoint_latency_throttler->initialize();
}
void JfrEventThrottler::destroy() {
delete _object_allocation_throttler;
_object_allocation_throttler = nullptr;
delete _safepoint_latency_throttler;
_safepoint_latency_throttler = nullptr;
}
// There are currently only two throttler instances, one for the jdk.ObjectAllocationSample event
// and another for the SafepointLatency event.
// When introducing many more throttlers, consider adding a lookup map keyed by event id.
JfrEventThrottler* JfrEventThrottler::for_event(JfrEventId event_id) {
assert(_object_allocation_throttler != nullptr, "ObjectAllocation throttler has not been properly initialized");
assert(_safepoint_latency_throttler != nullptr, "SafepointLatency throttler has not been properly initialized");
assert(event_id == JfrObjectAllocationSampleEvent || event_id == JfrSafepointLatencyEvent, "Event type has an unconfigured throttler");
if (event_id == JfrObjectAllocationSampleEvent) {
return _object_allocation_throttler;
}
if (event_id == JfrSafepointLatencyEvent) {
return _safepoint_latency_throttler;
}
return nullptr;
}
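// Should many more throttlers be introduced, the if-chain above could become
// a table indexed by event id; a hypothetical sketch (the bound NUMBER_OF_EVENTS
// is assumed, not part of this change):
//
//   static JfrEventThrottler* _throttlers[NUMBER_OF_EVENTS] = {};
//
//   JfrEventThrottler* JfrEventThrottler::for_event(JfrEventId event_id) {
//     assert(static_cast<size_t>(event_id) < NUMBER_OF_EVENTS, "invariant");
//     return _throttlers[event_id];
//   }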
void JfrEventThrottler::configure(JfrEventId event_id, int64_t sample_size, int64_t period_ms) {
if (event_id == JfrObjectAllocationSampleEvent) {
assert(_object_allocation_throttler != nullptr, "ObjectAllocation throttler has not been properly initialized");
_object_allocation_throttler->configure(sample_size, period_ms);
return;
}
if (event_id == JfrSafepointLatencyEvent) {
assert(_safepoint_latency_throttler != nullptr, "SafepointLatency throttler has not been properly initialized");
_safepoint_latency_throttler->configure(sample_size, period_ms);
}
}
/*
@ -92,8 +112,8 @@ void JfrEventThrottler::configure(int64_t sample_size, int64_t period_ms) {
// Predicate for event selection.
bool JfrEventThrottler::accept(JfrEventId event_id, int64_t timestamp /* 0 */) {
JfrEventThrottler* const throttler = for_event(event_id);
assert(throttler != nullptr, "invariant");
return throttler->_disabled ? true : throttler->sample(timestamp);
}
/*

View File

@ -36,11 +36,19 @@
#include "utilities/preserveException.hpp"
#include "utilities/macros.hpp"
class JfrRecorderThread : public JavaThread {
public:
JfrRecorderThread(ThreadFunction entry_point) : JavaThread(entry_point) {}
virtual ~JfrRecorderThread() {}
virtual bool is_JfrRecorder_thread() const { return true; }
};
static Thread* start_thread(instanceHandle thread_oop, ThreadFunction proc, TRAPS) {
assert(thread_oop.not_null(), "invariant");
assert(proc != nullptr, "invariant");
JfrRecorderThread* new_thread = new JfrRecorderThread(proc);
// At this point it may be possible that no
// osthread was created for the JavaThread due to lack of resources.
@ -54,16 +62,16 @@ static Thread* start_thread(instanceHandle thread_oop, ThreadFunction proc, TRAP
}
}
JfrPostBox* JfrRecorderThreadEntry::_post_box = nullptr;
JfrPostBox& JfrRecorderThreadEntry::post_box() {
return *_post_box;
}
// defined in JfrRecorderThreadLoop.cpp
void recorderthread_entry(JavaThread*, JavaThread*);
bool JfrRecorderThreadEntry::start(JfrCheckpointManager* cp_manager, JfrPostBox* post_box, TRAPS) {
assert(cp_manager != nullptr, "invariant");
assert(post_box != nullptr, "invariant");
_post_box = post_box;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -33,7 +33,7 @@ class JfrCheckpointManager;
class JfrPostBox;
class Thread;
class JfrRecorderThreadEntry : AllStatic {
private:
static JfrPostBox* _post_box;

View File

@ -46,7 +46,7 @@ void recorderthread_entry(JavaThread* thread, JavaThread* unused) {
#define FLUSHPOINT (msgs & (MSGBIT(MSG_FLUSHPOINT)))
#define PROCESS_FULL_BUFFERS (msgs & (MSGBIT(MSG_ROTATE)|MSGBIT(MSG_STOP)|MSGBIT(MSG_FULLBUFFER)))
JfrPostBox& post_box = JfrRecorderThreadEntry::post_box();
log_debug(jfr, system)("Recorder thread STARTED");
{

View File

@ -77,9 +77,7 @@ int64_t JfrStackFilterRegistry::add(const JfrStackFilter* filter) {
}
const JfrStackFilter* JfrStackFilterRegistry::lookup(int64_t id) {
assert(id >= 0, "invariant");
assert(range_check(id), "invariant");
return _elements[id];
}

View File

@ -0,0 +1,66 @@
/*
* Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
#include "jfr/recorder/repository/jfrChunkWriter.hpp"
#include "jfr/recorder/stacktrace/jfrStackFrame.hpp"
#include "jfr/support/jfrMethodLookup.hpp"
#include "oops/method.inline.hpp"
JfrStackFrame::JfrStackFrame() : _klass(nullptr), _methodid(0), _line(0), _bci(0), _type(0) {}
JfrStackFrame::JfrStackFrame(const traceid& id, int bci, u1 type, const InstanceKlass* ik) :
_klass(ik), _methodid(id), _line(0), _bci(bci), _type(type) {}
JfrStackFrame::JfrStackFrame(const traceid& id, int bci, u1 type, int lineno, const InstanceKlass* ik) :
_klass(ik), _methodid(id), _line(lineno), _bci(bci), _type(type) {}
template <typename Writer>
static void write_frame(Writer& w, traceid methodid, int line, int bci, u1 type) {
w.write(methodid);
w.write(static_cast<u4>(line));
w.write(static_cast<u4>(bci));
w.write(static_cast<u8>(type));
}
void JfrStackFrame::write(JfrChunkWriter& cw) const {
write_frame(cw, _methodid, _line, _bci, _type);
}
void JfrStackFrame::write(JfrCheckpointWriter& cpw) const {
write_frame(cpw, _methodid, _line, _bci, _type);
}
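// Note that _line and _klass deliberately do not participate in equality:
// the line number is resolved lazily from _bci, and the method id already
// identifies the holder klass.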
bool JfrStackFrame::equals(const JfrStackFrame& rhs) const {
return _methodid == rhs._methodid && _bci == rhs._bci && _type == rhs._type;
}
void JfrStackFrame::resolve_lineno() const {
assert(_klass, "no klass pointer");
assert(_line == 0, "already have linenumber");
const Method* const method = JfrMethodLookup::lookup(_klass, _methodid);
assert(method != nullptr, "invariant");
assert(method->method_holder() == _klass, "invariant");
_line = method->line_number_from_bci(_bci);
}

View File

@ -0,0 +1,67 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_JFR_RECORDER_STACKTRACE_JFRSTACKFRAME_HPP
#define SHARE_JFR_RECORDER_STACKTRACE_JFRSTACKFRAME_HPP
#include "jfr/utilities/jfrTypes.hpp"
class JfrCheckpointWriter;
class JfrChunkWriter;
class InstanceKlass;
class JfrStackFrame {
friend class ObjectSampleCheckpoint;
private:
const InstanceKlass* _klass;
traceid _methodid;
mutable int _line;
int _bci;
u1 _type;
public:
JfrStackFrame();
JfrStackFrame(const traceid& id, int bci, u1 type, const InstanceKlass* klass);
JfrStackFrame(const traceid& id, int bci, u1 type, int lineno, const InstanceKlass* klass);
bool equals(const JfrStackFrame& rhs) const;
void write(JfrChunkWriter& cw) const;
void write(JfrCheckpointWriter& cpw) const;
void resolve_lineno() const;
enum : u1 {
FRAME_INTERPRETER = 0,
FRAME_JIT,
FRAME_INLINE,
FRAME_NATIVE,
NUM_FRAME_TYPES
};
};
template <typename>
class GrowableArray;
typedef GrowableArray<JfrStackFrame> JfrStackFrames;
#endif // SHARE_JFR_RECORDER_STACKTRACE_JFRSTACKFRAME_HPP

View File

@ -26,40 +26,37 @@
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
#include "jfr/recorder/repository/jfrChunkWriter.hpp"
#include "jfr/recorder/stacktrace/jfrStackTrace.hpp"
#include "jfr/recorder/stacktrace/jfrVframeStream.inline.hpp"
#include "jfr/recorder/storage/jfrBuffer.hpp"
#include "jfr/support/jfrMethodLookup.hpp"
#include "jfr/support/jfrThreadLocal.hpp"
#include "jfrStackFilter.hpp"
#include "jfrStackFilterRegistry.hpp"
#include "memory/allocation.inline.hpp"
#include "nmt/memTag.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "runtime/continuation.hpp"
#include "runtime/continuationEntry.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/vframe.inline.hpp"
#include "utilities/growableArray.hpp"
static inline void copy_frames(JfrStackFrames* lhs_frames, const JfrStackFrames* rhs_frames) {
assert(lhs_frames != nullptr, "invariant");
assert(rhs_frames != nullptr, "invariant");
assert(rhs_frames->length() > 0, "invariant");
assert(lhs_frames->capacity() == rhs_frames->length(), "invariant");
assert(lhs_frames->length() == rhs_frames->length(), "invariant");
assert(lhs_frames->capacity() == lhs_frames->length(), "invariant");
memcpy(lhs_frames->adr_at(0), rhs_frames->adr_at(0), rhs_frames->length() * sizeof(JfrStackFrame));
}
JfrStackTrace::JfrStackTrace() :
_next(nullptr),
_frames(new JfrStackFrames(JfrOptionSet::stackdepth())), // ResourceArea
_id(0),
_hash(0),
_count(0),
_max_frames(JfrOptionSet::stackdepth()),
_frames_ownership(false),
_reached_root(false),
_lineno(false),
@ -67,237 +64,128 @@ JfrStackTrace::JfrStackTrace(JfrStackFrame* frames, u4 max_frames) :
JfrStackTrace::JfrStackTrace(traceid id, const JfrStackTrace& trace, const JfrStackTrace* next) :
_next(next),
_frames(new (mtTracing) JfrStackFrames(trace.number_of_frames(), trace.number_of_frames(), mtTracing)), // CHeap
_id(id),
_hash(trace._hash),
_count(trace._count),
_max_frames(trace._max_frames),
_frames_ownership(true),
_reached_root(trace._reached_root),
_lineno(trace._lineno),
_written(false) {
copy_frames(_frames, trace._frames);
}
JfrStackTrace::~JfrStackTrace() {
if (_frames_ownership) {
delete _frames;
}
}
int JfrStackTrace::number_of_frames() const {
assert(_frames != nullptr, "invariant");
return _frames->length();
}
template <typename Writer>
static void write_stacktrace(Writer& w, traceid id, bool reached_root, const JfrStackFrames* frames) {
w.write(static_cast<u8>(id));
w.write(static_cast<u1>(!reached_root));
const int nr_of_frames = frames->length();
w.write(static_cast<u4>(nr_of_frames));
for (int i = 0; i < nr_of_frames; ++i) {
frames->at(i).write(w);
}
}
void JfrStackTrace::write(JfrChunkWriter& sw) const {
assert(!_written, "invariant");
write_stacktrace(sw, _id, _reached_root, _frames);
_written = true;
}
void JfrStackTrace::write(JfrCheckpointWriter& cpw) const {
assert(!_written, "invariant");
write_stacktrace(cpw, _id, _reached_root, _frames);
_written = true;
}
bool JfrStackTrace::equals(const JfrStackTrace& rhs) const {
if (_reached_root != rhs._reached_root || _frames->length() != rhs.number_of_frames() || _hash != rhs._hash) {
return false;
}
for (int i = 0; i < _frames->length(); ++i) {
if (!_frames->at(i).equals(rhs._frames->at(i))) {
return false;
}
}
return true;
}
static inline bool is_in_continuation(const frame& frame, JavaThread* jt) {
return JfrThreadLocal::is_vthread(jt) &&
(Continuation::is_frame_in_continuation(jt, frame) || Continuation::is_continuation_enterSpecial(frame));
}
static inline bool is_interpreter(const JfrSampleRequest& request) {
return request._sample_bcp != nullptr;
}
void JfrStackTrace::record_interpreter_top_frame(const JfrSampleRequest& request) {
assert(_hash == 0, "invariant");
assert(_count == 0, "invariant");
assert(_frames != nullptr, "invariant");
assert(_frames->length() == 0, "invariant");
_hash = 1;
const Method* method = reinterpret_cast<Method*>(request._sample_pc);
assert(method != nullptr, "invariant");
const traceid mid = JfrTraceId::load(method);
const int bci = method->is_native() ? 0 : method->bci_from(reinterpret_cast<address>(request._sample_bcp));
const u1 type = method->is_native() ? JfrStackFrame::FRAME_NATIVE : JfrStackFrame::FRAME_INTERPRETER;
_hash = (_hash * 31) + mid;
_hash = (_hash * 31) + bci;
_hash = (_hash * 31) + type;
_frames->append(JfrStackFrame(mid, bci, type, method->method_holder()));
_count++;
}
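// The 31-based rolling hash above matches the combine step in record_inner
// below, so a trace seeded with an interpreter top frame hashes consistently
// with the frames appended when the walk continues from the sender.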
bool JfrStackTrace::record(JavaThread* jt, const frame& frame, bool in_continuation, const JfrSampleRequest& request) {
if (is_interpreter(request)) {
record_interpreter_top_frame(request);
if (frame.pc() == nullptr) {
// No sender frame. Done.
return true;
}
}
return record(jt, frame, in_continuation, 0);
}
bool JfrStackTrace::record(JavaThread* jt, int skip, int64_t stack_filter_id) {
assert(jt != nullptr, "invariant");
assert(jt == Thread::current(), "invariant");
assert(jt->thread_state() != _thread_in_native, "invariant");
assert(!_lineno, "invariant");
assert(jt == JavaThread::current(), "invariant");
if (!jt->has_last_Java_frame()) {
return false;
}
const frame last_frame = jt->last_frame();
return record(jt, last_frame, is_in_continuation(last_frame, jt), skip, stack_filter_id);
}
bool JfrStackTrace::record(JavaThread* jt, const frame& frame, bool in_continuation, int skip, int64_t stack_filter_id /* -1 */) {
// Must use ResetNoHandleMark here to bypass if any NoHandleMark exist on stack.
// This is because RegisterMap uses Handles to support continuations.
ResetNoHandleMark rnhm;
return record_inner(jt, frame, in_continuation, skip, stack_filter_id);
}
bool JfrStackTrace::record_inner(JavaThread* jt, const frame& frame, bool in_continuation, int skip, int64_t stack_filter_id /* -1 */) {
assert(jt != nullptr, "invariant");
assert(!_lineno, "invariant");
assert(_frames != nullptr, "invariant");
assert(_frames->length() == 0 || _frames->length() == 1, "invariant");
assert(!in_continuation || is_in_continuation(frame, jt), "invariant");
Thread* const current_thread = Thread::current();
HandleMark hm(current_thread); // RegisterMap uses Handles to support continuations.
JfrVframeStream vfs(jt, frame, in_continuation, false);
_reached_root = true;
for (int i = 0; i < skip; ++i) {
if (vfs.at_end()) {
@ -305,10 +193,12 @@ bool JfrStackTrace::record(JavaThread* jt, const frame& frame, int skip, int64_t
}
vfs.next_vframe();
}
const JfrStackFilter* stack_filter = stack_filter_id < 0 ? nullptr : JfrStackFilterRegistry::lookup(stack_filter_id);
if (_hash == 0) {
_hash = 1;
}
while (!vfs.at_end()) {
if (_count >= _max_frames) {
_reached_root = false;
break;
}
@ -328,7 +218,7 @@ bool JfrStackTrace::record(JavaThread* jt, const frame& frame, int skip, int64_t
bci = vfs.bci();
}
const intptr_t* const frame_id = vfs.frame_id();
vfs.next_vframe();
if (type == JfrStackFrame::FRAME_JIT && !vfs.at_end() && frame_id == vfs.frame_id()) {
// This frame and the caller frame are both the same physical
@ -338,35 +228,16 @@ bool JfrStackTrace::record(JavaThread* jt, const frame& frame, int skip, int64_t
_hash = (_hash * 31) + mid;
_hash = (_hash * 31) + bci;
_hash = (_hash * 31) + type;
_frames->append(JfrStackFrame(mid, bci, type, method->method_holder()));
_count++;
}
return _count > 0;
}
void JfrStackTrace::resolve_linenos() const {
assert(!_lineno, "invariant");
for (int i = 0; i < _frames->length(); i++) {
_frames->at(i).resolve_lineno();
}
_lineno = true;
}

View File

@ -25,6 +25,7 @@
#ifndef SHARE_JFR_RECORDER_STACKTRACE_JFRSTACKTRACE_HPP
#define SHARE_JFR_RECORDER_STACKTRACE_JFRSTACKTRACE_HPP
#include "jfr/recorder/stacktrace/jfrStackFrame.hpp"
#include "jfr/utilities/jfrAllocation.hpp"
#include "jfr/utilities/jfrTypes.hpp"
@ -33,48 +34,22 @@ class InstanceKlass;
class JavaThread;
class JfrCheckpointWriter;
class JfrChunkWriter;
struct JfrSampleRequest;
class JfrStackTrace : public JfrCHeapObj {
friend class JfrNativeSamplerCallback;
friend class JfrStackTraceRepository;
friend class LeakProfilerStackTraceWriter;
friend class JfrThreadSampling;
friend class ObjectSampleCheckpoint;
friend class ObjectSampler;
friend class OSThreadSampler;
friend class StackTraceResolver;
private:
const JfrStackTrace* _next;
JfrStackFrames* _frames;
traceid _id;
traceid _hash;
u4 _count;
u4 _max_frames;
bool _frames_ownership;
bool _reached_root;
@ -88,25 +63,29 @@ class JfrStackTrace : public JfrCHeapObj {
bool equals(const JfrStackTrace& rhs) const;
void set_id(traceid id) { _id = id; }
void set_hash(unsigned int hash) { _hash = hash; }
void set_reached_root(bool reached_root) { _reached_root = reached_root; }
void resolve_linenos() const;
int number_of_frames() const;
bool have_lineno() const { return _lineno; }
bool full_stacktrace() const { return _reached_root; }
bool record_inner(JavaThread* jt, const frame& frame, bool in_continuation, int skip, int64_t stack_filter_id = -1);
bool record(JavaThread* jt, const frame& frame, bool in_continuation, int skip, int64_t stack_filter_id = -1);
void record_interpreter_top_frame(const JfrSampleRequest& request);
JfrStackTrace(traceid id, const JfrStackTrace& trace, const JfrStackTrace* next);
JfrStackTrace(JfrStackFrame* frames, u4 max_frames);
~JfrStackTrace();
public:
// ResourceArea allocation, remember ResourceMark.
JfrStackTrace();
~JfrStackTrace();
traceid hash() const { return _hash; }
traceid id() const { return _id; }
bool record(JavaThread* current_thread, int skip, int64_t stack_filter_id);
bool record(JavaThread* jt, const frame& frame, bool in_continuation, const JfrSampleRequest& request);
bool should_write() const { return !_written; }
};


@ -25,6 +25,7 @@
#include "jfr/metadata/jfrSerializer.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
#include "jfr/recorder/repository/jfrChunkWriter.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
#include "jfr/support/jfrThreadLocal.hpp"
#include "runtime/mutexLocker.hpp"
@ -65,7 +66,7 @@ JfrStackTraceRepository* JfrStackTraceRepository::create() {
return _instance;
}
class JfrFrameType : public JfrSerializer {
class JfrFrameTypeSerializer : public JfrSerializer {
public:
void serialize(JfrCheckpointWriter& writer) {
writer.write_count(JfrStackFrame::NUM_FRAME_TYPES);
@ -81,7 +82,7 @@ class JfrFrameType : public JfrSerializer {
};
bool JfrStackTraceRepository::initialize() {
return JfrSerializer::register_serializer(TYPE_FRAMETYPE, true, new JfrFrameType());
return JfrSerializer::register_serializer(TYPE_FRAMETYPE, true, new JfrFrameTypeSerializer());
}
void JfrStackTraceRepository::destroy() {
@ -150,19 +151,9 @@ traceid JfrStackTraceRepository::record(Thread* current_thread, int skip /* 0 */
if (!current_thread->is_Java_thread() || current_thread->is_hidden_from_external_view()) {
return 0;
}
JfrStackFrame* frames = tl->stackframes();
if (frames == nullptr) {
// pending oom
return 0;
}
assert(frames != nullptr, "invariant");
assert(tl->stackframes() == frames, "invariant");
return instance().record(JavaThread::cast(current_thread), skip, stack_filter_id, frames, tl->stackdepth());
}
traceid JfrStackTraceRepository::record(JavaThread* current_thread, int skip, int64_t stack_filter_id, JfrStackFrame *frames, u4 max_frames) {
JfrStackTrace stacktrace(frames, max_frames);
return stacktrace.record(current_thread, skip, stack_filter_id) ? add(instance(), stacktrace) : 0;
ResourceMark rm(current_thread);
JfrStackTrace stacktrace;
return stacktrace.record(JavaThread::cast(current_thread), skip, stack_filter_id) ? add(instance(), stacktrace) : 0;
}
traceid JfrStackTraceRepository::add(JfrStackTraceRepository& repo, const JfrStackTrace& stacktrace) {
@ -185,7 +176,8 @@ void JfrStackTraceRepository::record_for_leak_profiler(JavaThread* current_threa
JfrThreadLocal* const tl = current_thread->jfr_thread_local();
assert(tl != nullptr, "invariant");
assert(!tl->has_cached_stack_trace(), "invariant");
JfrStackTrace stacktrace(tl->stackframes(), tl->stackdepth());
ResourceMark rm(current_thread);
JfrStackTrace stacktrace;
stacktrace.record(current_thread, skip, -1);
const traceid hash = stacktrace.hash();
if (hash != 0) {
@ -195,7 +187,7 @@ void JfrStackTraceRepository::record_for_leak_profiler(JavaThread* current_threa
traceid JfrStackTraceRepository::add_trace(const JfrStackTrace& stacktrace) {
MutexLocker lock(JfrStacktrace_lock, Mutex::_no_safepoint_check_flag);
assert(stacktrace._nr_of_frames > 0, "invariant");
assert(stacktrace.number_of_frames() > 0, "invariant");
const size_t index = stacktrace._hash % TABLE_SIZE;
const JfrStackTrace* table_entry = _table[index];
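As a hedged sketch of what the bucket probe in add_trace() presumably looks like (the next() accessor is an assumption for this sketch; equals() and hash() are declared on JfrStackTrace above):

static const JfrStackTrace* find_in_bucket(const JfrStackTrace* head, const JfrStackTrace& trace) {
  // Traces are chained per bucket and deduplicated by hash plus full equality.
  for (const JfrStackTrace* entry = head; entry != nullptr; entry = entry->next()) {
    if (entry->hash() == trace.hash() && entry->equals(trace)) {
      return entry;
    }
  }
  return nullptr;
}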


@ -31,12 +31,14 @@
class JavaThread;
class JfrChunkWriter;
class JfrStackTrace;
class JfrStackTraceRepository : public JfrCHeapObj {
friend class JfrDeprecatedEdge;
friend class JfrRecorder;
friend class JfrRecorderService;
friend class JfrThreadSampleClosure;
friend class JfrThreadSampler;
friend class ObjectSampleCheckpoint;
friend class ObjectSampler;
friend class RecordStackTrace;
@ -68,13 +70,11 @@ class JfrStackTraceRepository : public JfrCHeapObj {
static void iterate_leakprofiler(Callback& cb);
static traceid next_id();
traceid add_trace(const JfrStackTrace& stacktrace);
static traceid add(JfrStackTraceRepository& repo, const JfrStackTrace& stacktrace);
static traceid add(const JfrStackTrace& stacktrace);
traceid record(JavaThread* current_thread, int skip, int64_t stack_filter_id, JfrStackFrame* frames, u4 max_frames);
traceid add_trace(const JfrStackTrace& stacktrace);
public:
static traceid add(const JfrStackTrace& stacktrace);
static traceid record(Thread* current_thread, int skip = 0, int64_t stack_filter_id = -1);
};


@ -0,0 +1,55 @@
/*
* Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "jfr/recorder/stacktrace/jfrVframeStream.inline.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/registerMap.hpp"
#include "runtime/stackWatermarkSet.inline.hpp"
static inline RegisterMap::WalkContinuation walk_continuation(JavaThread* jt) {
// NOTE: WalkContinuation::skip, because of interactions with ZGC relocation
// and load barriers. This code is run while generating stack traces for
// the ZPage allocation event, even when ZGC is relocating objects.
// When ZGC is relocating, it is forbidden to run code that performs
// load barriers. With WalkContinuation::include, we visit heap stack
// chunks and could be using load barriers.
//
// NOTE: Shenandoah GC also seems to require this check; the exact reason
// is not yet known and remains to be documented.
return ((UseZGC || UseShenandoahGC) && !StackWatermarkSet::processing_started(jt))
? RegisterMap::WalkContinuation::skip
: RegisterMap::WalkContinuation::include;
}
JfrVframeStream::JfrVframeStream(JavaThread* jt, const frame& fr, bool in_continuation, bool stop_at_java_call_stub) :
vframeStreamCommon(jt, RegisterMap::UpdateMap::skip, RegisterMap::ProcessFrames::skip, walk_continuation(jt)),
_vthread(in_continuation), _cont_entry(_vthread ? jt->last_continuation() : nullptr) {
assert(!_vthread || JfrThreadLocal::is_vthread(jt), "invariant");
assert(!_vthread || _cont_entry != nullptr, "invariant");
_frame = fr;
_stop_at_java_call_stub = stop_at_java_call_stub;
while (!fill_from_frame()) {
_frame = _frame.sender(&_reg_map);
}
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,10 +22,20 @@
*
*/
#include "runtime/atomic.hpp"
#include "runtime/suspendedThreadTask.hpp"
#ifndef SHARE_JFR_RECORDER_STACKTRACE_JFRVFRAMESTREAM_HPP
#define SHARE_JFR_RECORDER_STACKTRACE_JFRVFRAMESTREAM_HPP
void SuspendedThreadTask::run() {
internal_do_task();
_done = true;
}
#include "runtime/vframe.hpp"
class JfrVframeStream : public vframeStreamCommon {
private:
bool _vthread;
const ContinuationEntry* _cont_entry;
void step_to_sender();
void next_frame();
public:
JfrVframeStream(JavaThread* jt, const frame& fr, bool in_continuation, bool stop_at_java_call_stub);
void next_vframe();
};
#endif // SHARE_JFR_RECORDER_STACKTRACE_JFRVFRAMESTREAM_HPP


@ -0,0 +1,56 @@
/*
* Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_JFR_RECORDER_STACKTRACE_JFRVFRAMESTREAM_INLINE_HPP
#define SHARE_JFR_RECORDER_STACKTRACE_JFRVFRAMESTREAM_INLINE_HPP
#include "jfr/recorder/stacktrace/jfrVframeStream.hpp"
#include "runtime/continuationEntry.inline.hpp"
#include "runtime/vframe.inline.hpp"
inline void JfrVframeStream::next_frame() {
do {
if (_vthread && Continuation::is_continuation_enterSpecial(_frame)) {
if (_cont_entry->is_virtual_thread()) {
// An entry of a vthread continuation is a termination point.
_mode = at_end_mode;
break;
}
_cont_entry = _cont_entry->parent();
}
_frame = _frame.sender(&_reg_map);
} while (!fill_from_frame());
}
inline void JfrVframeStream::next_vframe() {
// handle frames with inlining
if (_mode == compiled_mode && fill_in_compiled_inlined_sender()) {
return;
}
next_frame();
}
#endif // SHARE_JFR_RECORDER_STACKTRACE_JFRVFRAMESTREAM_INLINE_HPP
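A minimal usage sketch (not from the commit): driving the stream with next_vframe() until the standard vframeStreamCommon end marker is reached. The counting helper itself is an assumption:

static int count_jfr_frames(JavaThread* jt, const frame& top, bool in_continuation) {
  JfrVframeStream stream(jt, top, in_continuation, false /* stop_at_java_call_stub */);
  int frames = 0;
  while (!stream.at_end()) {  // at_end() tests for at_end_mode
    frames++;
    stream.next_vframe();     // steps over inlined frames, then physical frames
  }
  return frames;
}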


@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,7 +25,6 @@
#ifndef SHARE_JFR_SUPPORT_JFRTHREADEXTENSION_HPP
#define SHARE_JFR_SUPPORT_JFRTHREADEXTENSION_HPP
#include "jfr/periodic/sampling/jfrThreadSampler.hpp"
#include "jfr/recorder/storage/jfrBuffer.hpp"
#include "jfr/support/jfrThreadId.hpp"
@ -65,6 +64,10 @@
#define THREAD_LOCAL_WRITER_OFFSET_JFR \
JfrThreadLocal::java_event_writer_offset() + THREAD_LOCAL_OFFSET_JFR
#define SUSPEND_THREAD_CONDITIONAL(thread) if ((thread)->is_trace_suspend()) JfrThreadSampling::on_javathread_suspend(thread)
#define SAMPLE_STATE_OFFSET_JFR \
JfrThreadLocal::sample_state_offset() + THREAD_LOCAL_OFFSET_JFR
#define SAMPLING_CRITICAL_SECTION_OFFSET_JFR \
JfrThreadLocal::sampling_critical_section_offset() + THREAD_LOCAL_OFFSET_JFR
#endif // SHARE_JFR_SUPPORT_JFRTHREADEXTENSION_HPP


@ -46,6 +46,9 @@
#include "utilities/sizes.hpp"
JfrThreadLocal::JfrThreadLocal() :
_sample_request(),
_sample_request_queue(8),
_sample_monitor(Monitor::nosafepoint, "jfr thread sample monitor"),
_java_event_writer(nullptr),
_java_buffer(nullptr),
_native_buffer(nullptr),
@ -54,7 +57,7 @@ JfrThreadLocal::JfrThreadLocal() :
_load_barrier_buffer_epoch_1(nullptr),
_checkpoint_buffer_epoch_0(nullptr),
_checkpoint_buffer_epoch_1(nullptr),
_stackframes(nullptr),
_sample_state(0),
_dcmd_arena(nullptr),
_thread(),
_vthread_id(0),
@ -68,12 +71,11 @@ JfrThreadLocal::JfrThreadLocal() :
_user_time(0),
_cpu_time(0),
_wallclock_time(os::javaTimeNanos()),
_stackdepth(0),
_entering_suspend_flag(0),
_non_reentrant_nesting(0),
_vthread_epoch(0),
_vthread_excluded(false),
_jvm_thread_excluded(false),
_enqueued_requests(false),
_vthread(false),
_notified(false),
_dead(false) {
@ -165,10 +167,6 @@ void JfrThreadLocal::release(Thread* t) {
JfrStorage::release_thread_local(java_buffer(), t);
_java_buffer = nullptr;
}
if (_stackframes != nullptr) {
FREE_C_HEAP_ARRAY(JfrStackFrame, _stackframes);
_stackframes = nullptr;
}
if (_load_barrier_buffer_epoch_0 != nullptr) {
_load_barrier_buffer_epoch_0->set_retired();
_load_barrier_buffer_epoch_0 = nullptr;
@ -245,12 +243,6 @@ JfrBuffer* JfrThreadLocal::install_java_buffer() const {
return _java_buffer;
}
JfrStackFrame* JfrThreadLocal::install_stackframes() const {
assert(_stackframes == nullptr, "invariant");
_stackframes = NEW_C_HEAP_ARRAY(JfrStackFrame, stackdepth(), mtTracing);
return _stackframes;
}
ByteSize JfrThreadLocal::java_event_writer_offset() {
return byte_offset_of(JfrThreadLocal, _java_event_writer);
}
@ -279,6 +271,14 @@ ByteSize JfrThreadLocal::notified_offset() {
return byte_offset_of(JfrThreadLocal, _notified);
}
ByteSize JfrThreadLocal::sample_state_offset() {
return byte_offset_of(JfrThreadLocal, _sample_state);
}
ByteSize JfrThreadLocal::sampling_critical_section_offset() {
return byte_offset_of(JfrThreadLocal, _sampling_critical_section);
}
void JfrThreadLocal::set(bool* exclusion_field, bool state) {
assert(exclusion_field != nullptr, "invariant");
*exclusion_field = state;
@ -337,10 +337,6 @@ bool JfrThreadLocal::is_included(const Thread* t) {
return t->jfr_thread_local()->is_included();
}
u4 JfrThreadLocal::stackdepth() const {
return _stackdepth != 0 ? _stackdepth : (u4)JfrOptionSet::stackdepth();
}
bool JfrThreadLocal::is_impersonating(const Thread* t) {
return t->jfr_thread_local()->_thread_id_alias != max_julong;
}
@ -397,6 +393,19 @@ traceid JfrThreadLocal::vthread_id(const Thread* t) {
return Atomic::load(&t->jfr_thread_local()->_vthread_id);
}
traceid JfrThreadLocal::vthread_id_with_epoch_update(const JavaThread* jt) const {
assert(is_vthread(jt), "invariant");
const traceid tid = vthread_id(jt);
assert(tid != 0, "invariant");
if (!is_vthread_excluded()) {
const u2 current_epoch = AccessThreadTraceId::current_epoch();
if (vthread_epoch(jt) != current_epoch) {
set_vthread_epoch_checked(jt, tid, current_epoch);
}
}
return tid;
}
u2 JfrThreadLocal::vthread_epoch(const JavaThread* jt) {
assert(jt != nullptr, "invariant");
return Atomic::load(&jt->jfr_thread_local()->_vthread_epoch);
@ -412,19 +421,7 @@ traceid JfrThreadLocal::thread_id(const Thread* t) {
return jvm_thread_id(tl);
}
const JavaThread* jt = JavaThread::cast(t);
if (!is_vthread(jt)) {
return jvm_thread_id(tl);
}
// virtual thread
const traceid tid = vthread_id(jt);
assert(tid != 0, "invariant");
if (!tl->is_vthread_excluded()) {
const u2 current_epoch = AccessThreadTraceId::current_epoch();
if (vthread_epoch(jt) != current_epoch) {
set_vthread_epoch_checked(jt, tid, current_epoch);
}
}
return tid;
return is_vthread(jt) ? tl->vthread_id_with_epoch_update(jt) : jvm_thread_id(tl);
}
// When not recording, there is no checkpoint system


@ -25,13 +25,17 @@
#ifndef SHARE_JFR_SUPPORT_JFRTHREADLOCAL_HPP
#define SHARE_JFR_SUPPORT_JFRTHREADLOCAL_HPP
#include "jfr/periodic/sampling/jfrSampleRequest.hpp"
#include "jfr/utilities/jfrAllocation.hpp"
#include "jfr/utilities/jfrBlob.hpp"
#include "jfr/utilities/jfrTime.hpp"
#include "jfr/utilities/jfrTypes.hpp"
#include "runtime/atomic.hpp"
#include "runtime/mutexLocker.hpp"
class Arena;
class JavaThread;
class JfrBuffer;
class JfrStackFrame;
class Thread;
class JfrThreadLocal {
@ -40,6 +44,9 @@ class JfrThreadLocal {
friend class JfrJavaSupport;
friend class JVMCIVMStructs;
private:
mutable JfrSampleRequest _sample_request;
JfrSampleRequestQueue _sample_request_queue;
Monitor _sample_monitor;
jobject _java_event_writer;
mutable JfrBuffer* _java_buffer;
mutable JfrBuffer* _native_buffer;
@ -48,7 +55,7 @@ class JfrThreadLocal {
JfrBuffer* _load_barrier_buffer_epoch_1;
JfrBuffer* _checkpoint_buffer_epoch_0;
JfrBuffer* _checkpoint_buffer_epoch_1;
mutable JfrStackFrame* _stackframes;
volatile int _sample_state;
Arena* _dcmd_arena;
JfrBlobHandle _thread;
mutable traceid _vthread_id;
@ -62,19 +69,18 @@ class JfrThreadLocal {
jlong _user_time;
jlong _cpu_time;
jlong _wallclock_time;
mutable u4 _stackdepth;
volatile jint _entering_suspend_flag;
int32_t _non_reentrant_nesting;
u2 _vthread_epoch;
bool _vthread_excluded;
bool _jvm_thread_excluded;
volatile bool _enqueued_requests;
bool _vthread;
bool _notified;
bool _dead;
bool _sampling_critical_section;
JfrBuffer* install_native_buffer() const;
JfrBuffer* install_java_buffer() const;
JfrStackFrame* install_stackframes() const;
void release(Thread* t);
static void release(JfrThreadLocal* tl, Thread* t);
static void initialize_main_thread(JavaThread* jt);
@ -140,18 +146,78 @@ class JfrThreadLocal {
_java_event_writer = java_event_writer;
}
JfrStackFrame* stackframes() const {
return _stackframes != nullptr ? _stackframes : install_stackframes();
int sample_state() const {
return Atomic::load_acquire(&_sample_state);
}
void set_stackframes(JfrStackFrame* frames) {
_stackframes = frames;
void set_sample_state(int state) {
Atomic::release_store(&_sample_state, state);
}
u4 stackdepth() const;
Monitor* sample_monitor() {
return &_sample_monitor;
}
void set_stackdepth(u4 depth) {
_stackdepth = depth;
JfrSampleRequestQueue* sample_requests() {
return &_sample_request_queue;
}
JfrSampleRequest sample_request() const {
return _sample_request;
}
void set_sample_request(JfrSampleRequest request) {
_sample_request = request;
}
void set_sample_ticks() {
_sample_request._sample_ticks = JfrTicks::now();
}
void set_sample_ticks(const JfrTicks& ticks) {
_sample_request._sample_ticks = ticks;
}
bool has_sample_ticks() const {
return _sample_request._sample_ticks.value() != 0;
}
const JfrTicks& sample_ticks() const {
return _sample_request._sample_ticks;
}
bool has_enqueued_requests() const {
return Atomic::load_acquire(&_enqueued_requests);
}
void enqueue_request() {
assert_lock_strong(sample_monitor());
assert(sample_state() == JAVA_SAMPLE, "invariant");
if (_sample_request_queue.append(_sample_request) == 0) {
Atomic::release_store(&_enqueued_requests, true);
}
set_sample_state(NO_SAMPLE);
}
void clear_enqueued_requests() {
assert_lock_strong(sample_monitor());
assert(has_enqueued_requests(), "invariant");
assert(_sample_request_queue.is_nonempty(), "invariant");
_sample_request_queue.clear();
Atomic::release_store(&_enqueued_requests, false);
}
bool has_native_sample_request() const {
return sample_state() == NATIVE_SAMPLE;
}
bool has_java_sample_request() const {
return sample_state() == JAVA_SAMPLE || has_enqueued_requests();
}
bool has_sample_request() const {
return sample_state() != NO_SAMPLE || has_enqueued_requests();
}
int64_t last_allocated_bytes() const {
@ -171,6 +237,7 @@ class JfrThreadLocal {
static traceid thread_id(const Thread* t);
static bool is_vthread(const JavaThread* jt);
static u2 vthread_epoch(const JavaThread* jt);
traceid vthread_id_with_epoch_update(const JavaThread* jt) const;
// Exposed to external code that uses a thread id unconditionally.
// Jfr might not even be running.
@ -211,18 +278,6 @@ class JfrThreadLocal {
return _stack_trace_hash;
}
void set_trace_block() {
_entering_suspend_flag = 1;
}
void clear_trace_block() {
_entering_suspend_flag = 0;
}
bool is_trace_block() const {
return _entering_suspend_flag != 0;
}
u8 data_lost() const {
return _data_lost;
}
@ -269,6 +324,10 @@ class JfrThreadLocal {
return _dead;
}
bool in_sampling_critical_section() const {
return _sampling_critical_section;
}
static int32_t make_non_reentrant(Thread* thread);
static void make_reentrant(Thread* thread, int32_t previous_nesting);
@ -297,6 +356,8 @@ class JfrThreadLocal {
static ByteSize vthread_epoch_offset();
static ByteSize vthread_excluded_offset();
static ByteSize notified_offset();
static ByteSize sample_state_offset();
static ByteSize sampling_critical_section_offset();
friend class JfrJavaThread;
friend class JfrCheckpointManager;
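To make the sample-state handshake concrete, a hedged sketch of how a sampler thread might post a Java sample request; post_java_sample_request() and the exact locking discipline are assumptions, while sample_monitor(), sample_state(), the setters and the JAVA_SAMPLE/NO_SAMPLE states all appear above:

static void post_java_sample_request(JavaThread* jt, const JfrSampleRequest& request) {
  JfrThreadLocal* const tl = jt->jfr_thread_local();
  MonitorLocker ml(tl->sample_monitor(), Mutex::_no_safepoint_check_flag);
  if (tl->sample_state() == NO_SAMPLE) {   // do not clobber an in-flight request
    tl->set_sample_request(request);
    tl->set_sample_ticks();                // timestamp at request time, not at processing time
    tl->set_sample_state(JAVA_SAMPLE);     // target processes or enqueues it cooperatively
  }
}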


@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -56,6 +56,7 @@
JFR_LOG_TAG(jfr, system, streaming) \
JFR_LOG_TAG(jfr, system, throttle) \
JFR_LOG_TAG(jfr, system, periodic) \
JFR_LOG_TAG(jfr, system, sampling) \
JFR_LOG_TAG(jfr, periodic) \
JFR_LOG_TAG(jfr, metadata) \
JFR_LOG_TAG(jfr, event) \


@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -42,11 +42,11 @@ class ContinuationEntry {
friend class JVMCIVMStructs;
ContinuationEntryPD _pd;
#ifdef ASSERT
private:
static const int COOKIE_VALUE = 0x1234;
int cookie;
public:
static int cookie_value() { return COOKIE_VALUE; }
static ByteSize cookie_offset() { return byte_offset_of(ContinuationEntry, cookie); }
@ -55,7 +55,7 @@ public:
}
#endif
public:
static int _return_pc_offset; // friend gen_continuation_enter
static int _thaw_call_pc_offset;
static int _cleanup_offset;
@ -63,14 +63,14 @@ public:
static void set_enter_code(nmethod* nm, int interpreted_entry_offset);
static bool is_interpreted_call(address call_address);
private:
static address _return_pc;
static address _thaw_call_pc;
static address _cleanup_pc;
static nmethod* _enter_special;
static int _interpreted_entry_offset;
private:
ContinuationEntry* _parent;
oopDesc* _cont;
oopDesc* _chunk;
@ -86,7 +86,7 @@ private:
#endif
uint32_t _pin_count;
public:
static ByteSize parent_offset() { return byte_offset_of(ContinuationEntry, _parent); }
static ByteSize cont_offset() { return byte_offset_of(ContinuationEntry, _cont); }
static ByteSize chunk_offset() { return byte_offset_of(ContinuationEntry, _chunk); }
@ -96,7 +96,10 @@ public:
static ByteSize parent_cont_fastpath_offset() { return byte_offset_of(ContinuationEntry, _parent_cont_fastpath); }
static ByteSize parent_held_monitor_count_offset() { return byte_offset_of(ContinuationEntry, _parent_held_monitor_count); }
public:
static address return_pc() { return _return_pc; }
static address return_pc_address() { return (address)&_return_pc; }
public:
static size_t size() { return align_up((int)sizeof(ContinuationEntry), 2*wordSize); }
ContinuationEntry* parent() const { return _parent; }


@ -70,6 +70,9 @@
#if INCLUDE_ZGC
#include "gc/z/zStackChunkGCData.inline.hpp"
#endif
#if INCLUDE_JFR
#include "jfr/jfr.inline.hpp"
#endif
#include <type_traits>
@ -608,6 +611,7 @@ void FreezeBase::unwind_frames() {
ContinuationEntry* entry = _cont.entry();
entry->flush_stack_processing(_thread);
assert_frames_in_continuation_are_safe(_thread);
JFR_ONLY(Jfr::check_and_process_sample_request(_thread);)
assert(LockingMode != LM_LEGACY || !monitors_on_stack(_thread), "unexpected monitors on stack");
set_anchor_to_entry(_thread, entry);
}


@ -101,6 +101,7 @@
#include "utilities/preserveException.hpp"
#include "utilities/xmlstream.hpp"
#if INCLUDE_JFR
#include "jfr/jfr.inline.hpp"
#include "jfr/jfrEvents.hpp"
#include "jfr/metadata/jfrSerializer.hpp"
#endif
@ -473,6 +474,7 @@ bool Deoptimization::deoptimize_objects_internal(JavaThread* thread, GrowableArr
// This is factored, since it is both called from a JRT_LEAF (deoptimization) and a JRT_ENTRY (uncommon_trap)
Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread* current, int exec_mode) {
JFR_ONLY(Jfr::check_and_process_sample_request(current);)
// When we get here we are about to unwind the deoptee frame. In order to
// catch not yet safe to use frames, the following stack watermark barrier
// poll will make such frames safe to use.


@ -229,7 +229,15 @@ void frame::set_pc(address newpc) {
_deopt_state = unknown;
_pc = newpc;
_cb = CodeCache::find_blob(_pc);
}
// This is optimized for intra-blob pc adjustments only.
void frame::adjust_pc(address newpc) {
assert(_cb != nullptr, "invariant");
assert(_cb == CodeCache::find_blob(newpc), "invariant");
// Unsafe to use the is_deoptimized tester after changing pc
_deopt_state = unknown;
_pc = newpc;
}
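A hedged example of the intended use: when a sampler re-points a frame at a pc known to lie in the same code blob, the CodeCache lookup done by set_pc() can be skipped. The caller is hypothetical:

static void refine_sampled_pc(frame& fr, address precise_pc) {
  // Legal only for intra-blob adjustments; adjust_pc() asserts that both
  // pcs resolve to the same code blob.
  fr.adjust_pc(precise_pc);
}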
// type testers


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -127,6 +127,7 @@ class frame {
address get_deopt_original_pc() const;
void set_pc(address newpc);
void adjust_pc(address newpc);
intptr_t* sp() const { assert_absolute(); return _sp; }
void set_sp( intptr_t* newsp ) { _sp = newsp; }
@ -505,6 +506,18 @@ class frame {
// assert(frame::verify_return_pc(return_address), "must be a return pc");
#endif
#if INCLUDE_JFR
// Static helper routines
static address interpreter_bcp(const intptr_t* fp);
static address interpreter_return_address(const intptr_t* fp);
static intptr_t* interpreter_sender_sp(const intptr_t* fp);
static bool is_interpreter_frame_setup_at(const intptr_t* fp, const void* sp);
static intptr_t* sender_sp(intptr_t* fp);
static intptr_t* link(const intptr_t* fp);
static address return_address(const intptr_t* sp);
static intptr_t* fp(const intptr_t* sp);
#endif
#include CPU_HEADER(frame)
};
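These helpers let JFR read raw fp/sp slots without materializing a frame object, which matters when sampling a thread whose top frame may be half-built. A sketch of asynchronous use; the snapshot struct and helper are assumptions:

struct TopFrameSnapshot {
  address bcp;          // interpreter bytecode pointer
  address ret_pc;       // return address slot
  intptr_t* sender_sp;  // caller's stack pointer
};

static bool snapshot_interpreter_frame(const intptr_t* fp, const void* sp, TopFrameSnapshot* out) {
  if (!frame::is_interpreter_frame_setup_at(fp, sp)) {
    return false;  // frame not fully constructed yet; its slots are not safe to read
  }
  out->bcp = frame::interpreter_bcp(fp);
  out->ret_pc = frame::interpreter_return_address(fp);
  out->sender_sp = frame::interpreter_sender_sp(fp);
  return true;
}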


@ -1078,7 +1078,6 @@ void JavaThread::handle_special_runtime_exit_condition() {
frame_anchor()->make_walkable();
wait_for_object_deoptimization();
}
JFR_ONLY(SUSPEND_THREAD_CONDITIONAL(this);)
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -216,7 +216,6 @@ class JavaThread: public Thread {
enum SuspendFlags {
// NOTE: avoid using the sign-bit as cc generates different test code
// when the sign-bit is used, and sometimes incorrectly - see CR 6398077
_trace_flag = 0x00000004U, // call tracing backend
_obj_deopt = 0x00000008U // suspend for object reallocation and relocking for JVMTI agent
};
@ -227,11 +226,8 @@ class JavaThread: public Thread {
inline void clear_suspend_flag(SuspendFlags f);
public:
inline void set_trace_flag();
inline void clear_trace_flag();
inline void set_obj_deopt_flag();
inline void clear_obj_deopt_flag();
bool is_trace_suspend() { return (_suspend_flags & _trace_flag) != 0; }
bool is_obj_deopt_suspend() { return (_suspend_flags & _obj_deopt) != 0; }
// Asynchronous exception support
@ -751,7 +747,7 @@ private:
// Support for object deoptimization and JFR suspension
void handle_special_runtime_exit_condition();
bool has_special_runtime_exit_condition() {
return (_suspend_flags & (_obj_deopt JFR_ONLY(| _trace_flag))) != 0;
return (_suspend_flags & _obj_deopt) != 0;
}
// Stack-locking support (not for LM_LIGHTWEIGHT)


@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -57,12 +57,6 @@ inline void JavaThread::clear_suspend_flag(SuspendFlags f) {
while (Atomic::cmpxchg(&_suspend_flags, flags, (flags & ~f)) != flags);
}
inline void JavaThread::set_trace_flag() {
set_suspend_flag(_trace_flag);
}
inline void JavaThread::clear_trace_flag() {
clear_suspend_flag(_trace_flag);
}
inline void JavaThread::set_obj_deopt_flag() {
set_suspend_flag(_obj_deopt);
}


@ -117,7 +117,6 @@ Mutex* Verify_lock = nullptr;
Mutex* JfrStacktrace_lock = nullptr;
Monitor* JfrMsg_lock = nullptr;
Mutex* JfrBuffer_lock = nullptr;
Monitor* JfrThreadSampler_lock = nullptr;
#endif
Mutex* CodeHeapStateAnalytics_lock = nullptr;
@ -282,7 +281,6 @@ void mutex_init() {
MUTEX_DEFN(JfrBuffer_lock , PaddedMutex , event);
MUTEX_DEFN(JfrMsg_lock , PaddedMonitor, event);
MUTEX_DEFN(JfrStacktrace_lock , PaddedMutex , event);
MUTEX_DEFN(JfrThreadSampler_lock , PaddedMonitor, nosafepoint);
#endif
MUTEX_DEFN(ContinuationRelativize_lock , PaddedMonitor, nosafepoint-3);


@ -136,7 +136,6 @@ extern Mutex* FinalImageRecipes_lock; // Protecting the tables used b
extern Mutex* JfrStacktrace_lock; // used to guard access to the JFR stacktrace table
extern Monitor* JfrMsg_lock; // protects JFR messaging
extern Mutex* JfrBuffer_lock; // protects JFR buffer operations
extern Monitor* JfrThreadSampler_lock; // used to suspend/resume JFR thread sampler
#endif
extern Mutex* Metaspace_lock; // protects Metaspace virtualspace and chunk expansions


@ -623,6 +623,7 @@ class os: AllStatic {
static address fetch_frame_from_context(const void* ucVoid, intptr_t** sp, intptr_t** fp);
static frame fetch_frame_from_context(const void* ucVoid);
static frame fetch_compiled_frame_from_context(const void* ucVoid);
static intptr_t* fetch_bcp_from_context(const void* ucVoid);
// For saving an os specific context generated by an assert or guarantee.
static void save_assert_context(const void* ucVoid);
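For illustration, a hedged sketch combining the existing context fetchers with the new fetch_bcp_from_context() hook; the handler fragment and the recording step are assumptions:

static void capture_sample_from_context(const void* ucVoid) {
  intptr_t* sp = nullptr;
  intptr_t* fp = nullptr;
  address pc = os::fetch_frame_from_context(ucVoid, &sp, &fp);
  intptr_t* bcp = os::fetch_bcp_from_context(ucVoid);  // interpreter bcp, new in this change
  // ... stash {pc, sp, fp, bcp} in a JfrSampleRequest for cooperative processing ...
}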
