8371388: [BACKOUT] JDK-8365047: Remove exception handler stub code in C2

Reviewed-by: chagedorn, epeter
Tobias Hartmann 2025-11-07 09:17:21 +00:00
parent 205a163a90
commit 48bbc950f1
41 changed files with 302 additions and 354 deletions

View File

@ -1,7 +1,6 @@
//
// Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2014, 2024, Red Hat, Inc. All rights reserved.
// Copyright 2025 Arm Limited and/or its affiliates.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@ -1195,10 +1194,15 @@ class HandlerImpl {
public:
static int emit_exception_handler(C2_MacroAssembler *masm);
static int emit_deopt_handler(C2_MacroAssembler* masm);
static uint size_exception_handler() {
return MacroAssembler::far_codestub_branch_size();
}
static uint size_deopt_handler() {
// count one branch instruction and one far call instruction sequence
// count one adr and one far branch instruction
return NativeInstruction::instruction_size + MacroAssembler::far_codestub_branch_size();
}
};
@ -2257,6 +2261,25 @@ uint MachUEPNode::size(PhaseRegAlloc* ra_) const
//=============================================================================
// Emit exception handler code.
int HandlerImpl::emit_exception_handler(C2_MacroAssembler* masm)
{
// mov rscratch1 #exception_blob_entry_point
// br rscratch1
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a handler.
address base = __ start_a_stub(size_exception_handler());
if (base == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return 0; // CodeBuffer::expand failed
}
int offset = __ offset();
__ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
__ end_a_stub();
return offset;
}
// Emit deopt handler code.
int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm)
{
@ -2267,18 +2290,14 @@ int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm)
ciEnv::current()->record_failure("CodeCache is full");
return 0; // CodeBuffer::expand failed
}
int offset = __ offset();
Label start;
__ bind(start);
__ far_call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
int entry_offset = __ offset();
__ b(start);
__ adr(lr, __ pc());
__ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
assert(__ offset() - offset == (int) size_deopt_handler(), "overflow");
__ end_a_stub();
return entry_offset;
return offset;
}
// REQUIRED MATCHER CODE

View File

@ -449,18 +449,12 @@ int LIR_Assembler::emit_deopt_handler() {
int offset = code_offset();
Label start;
__ bind(start);
__ far_call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
int entry_offset = __ offset();
__ b(start);
__ adr(lr, pc());
__ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
__ end_a_stub();
return entry_offset;
return offset;
}
void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {

View File

@ -71,7 +71,7 @@ friend class ArrayCopyStub;
// CompiledDirectCall::to_trampoline_stub_size()
_call_stub_size = 13 * NativeInstruction::instruction_size,
_exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(175),
_deopt_handler_size = 4 * NativeInstruction::instruction_size
_deopt_handler_size = 7 * NativeInstruction::instruction_size
};
public:

View File

@ -394,6 +394,12 @@ void NativePostCallNop::make_deopt() {
NativeDeoptInstruction::insert(addr_at(0));
}
#ifdef ASSERT
static bool is_movk_to_zr(uint32_t insn) {
return ((insn & 0xffe0001f) == 0xf280001f);
}
#endif
bool NativePostCallNop::patch(int32_t oopmap_slot, int32_t cb_offset) {
if (((oopmap_slot & 0xff) != oopmap_slot) || ((cb_offset & 0xffffff) != cb_offset)) {
return false; // cannot encode

View File

@ -526,24 +526,14 @@ inline NativeLdSt* NativeLdSt_at(address addr) {
// can store an offset from the initial nop to the nmethod.
class NativePostCallNop: public NativeInstruction {
private:
static bool is_movk_to_zr(uint32_t insn) {
return ((insn & 0xffe0001f) == 0xf280001f);
}
public:
bool check() const {
// Check the first instruction is NOP.
if (is_nop()) {
uint32_t insn = *(uint32_t*)addr_at(4);
// Check next instruction is MOVK zr, xx.
// These instructions only ever appear together in a post-call
// NOP, so it's unnecessary to check that the third instruction is
// a MOVK as well.
return is_movk_to_zr(insn);
}
return false;
uint64_t insns = *(uint64_t*)addr_at(0);
// Check for two instructions: nop; movk zr, xx
// These instructions only ever appear together in a post-call
// NOP, so it's unnecessary to check that the third instruction is
// a MOVK as well.
return (insns & 0xffe0001fffffffff) == 0xf280001fd503201f;
}
bool decode(int32_t& oopmap_slot, int32_t& cb_offset) const {

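The single 64-bit comparison shown in this hunk folds the NOP test and the MOVK-to-zr test into one masked compare. Below is a minimal stand-alone C++ sketch of that mask arithmetic, not part of the patch; the constants are taken from the hunk, and the little-endian instruction layout is an assumption of the illustration.

#include <cstdint>
#include <cstdio>

// Stand-alone illustration: with a little-endian 64-bit load, the first
// instruction (NOP, 0xd503201f) sits in the low 32 bits and the second
// (MOVK xzr, #imm16, LSL #0) in the high 32 bits.
static bool is_post_call_nop_pair(uint64_t insns) {
  // Low word must be exactly NOP; high word must match the MOVK-to-zr
  // pattern 0xf280001f under mask 0xffe0001f (the 16-bit immediate is ignored).
  return (insns & 0xffe0001fffffffffULL) == 0xf280001fd503201fULL;
}

int main() {
  uint32_t nop  = 0xd503201f;
  uint32_t movk = 0xf280001f | (0x1234u << 5);  // movk xzr, #0x1234
  uint64_t pair = ((uint64_t)movk << 32) | nop;
  printf("%d %d\n", is_post_call_nop_pair(pair), is_post_call_nop_pair(~pair));  // 1 0
  return 0;
}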
View File

@ -260,6 +260,8 @@ UncommonTrapBlob* OptoRuntime::generate_uncommon_trap_blob() {
//------------------------------generate_exception_blob---------------------------
// creates exception blob at the end
// Using exception blob, this code is jumped from a compiled method.
// (see emit_exception_handler in aarch64.ad file)
//
// Given an exception pc at a call we call into the runtime for the
// handler in this method. This handler might merely restore state

View File

@ -105,8 +105,14 @@ class HandlerImpl {
public:
static int emit_exception_handler(C2_MacroAssembler *masm);
static int emit_deopt_handler(C2_MacroAssembler* masm);
static uint size_exception_handler() {
return ( 3 * 4 );
}
static uint size_deopt_handler() {
return ( 9 * 4 );
}
@ -870,6 +876,26 @@ uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
//=============================================================================
// Emit exception handler code.
int HandlerImpl::emit_exception_handler(C2_MacroAssembler* masm) {
address base = __ start_a_stub(size_exception_handler());
if (base == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return 0; // CodeBuffer::expand failed
}
int offset = __ offset();
// OK to trash LR, because exception blob will kill it
__ jump(OptoRuntime::exception_blob()->entry_point(), relocInfo::runtime_call_type, LR_tmp);
assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
__ end_a_stub();
return offset;
}
int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm) {
// Can't use any of the current frame's registers as we may have deopted
// at a poll and everything can be live.
@ -880,26 +906,19 @@ int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm) {
}
int offset = __ offset();
Label start;
__ bind(start);
__ jump(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type, noreg);
int entry_offset = __ offset();
address deopt_pc = __ pc();
// Preserve R0 and reserve space for the address of the entry point
__ push(RegisterSet(R0) | RegisterSet(R1));
// Store the entry point address
__ mov_relative_address(R0, deopt_pc);
__ str(R0, Address(SP, wordSize));
__ pop(R0); // restore R0
__ b(start);
__ sub(SP, SP, wordSize); // make room for saved PC
__ push(LR); // save LR that may be live when we get here
__ mov_relative_address(LR, deopt_pc);
__ str(LR, Address(SP, wordSize)); // save deopt PC
__ pop(LR); // restore LR
__ jump(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type, noreg);
assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
__ end_a_stub();
return entry_offset;
return offset;
}
bool Matcher::match_rule_supported(int opcode) {

View File

@ -272,20 +272,14 @@ int LIR_Assembler::emit_deopt_handler() {
int offset = code_offset();
Label start;
__ bind(start);
__ jump(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type, noreg);
int entry_offset = __ offset();
__ mov_relative_address(LR, __ pc());
__ push(LR); // stub expects LR to be saved
__ b(start);
__ jump(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type, noreg);
assert(code_offset() - offset <= deopt_handler_size(), "overflow");
__ end_a_stub();
return entry_offset;
return offset;
}

View File

@ -54,7 +54,7 @@
enum {
_call_stub_size = 16,
_exception_handler_size = PRODUCT_ONLY(68) NOT_PRODUCT(68+60),
_deopt_handler_size = 20
_deopt_handler_size = 16
};
public:

View File

@ -182,6 +182,8 @@ UncommonTrapBlob* OptoRuntime::generate_uncommon_trap_blob() {
//------------------------------ generate_exception_blob ---------------------------
// creates exception blob at the end
// Using exception blob, this code is jumped from a compiled method.
// (see emit_exception_handler in sparc.ad file)
//
// Given an exception pc at a call we call into the runtime for the
// handler in this method. This handler might merely restore state

View File

@ -264,17 +264,12 @@ int LIR_Assembler::emit_deopt_handler() {
}
int offset = code_offset();
Label start;
__ bind(start);
__ bl64_patchable(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type);
int entry_offset = __ offset();
__ b(start);
guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
__ end_a_stub();
return entry_offset;
return offset;
}

View File

@ -63,7 +63,7 @@ enum {
_static_call_stub_size = 4 * BytesPerInstWord + MacroAssembler::b64_patchable_size, // or smaller
_call_stub_size = _static_call_stub_size + MacroAssembler::trampoline_stub_size, // or smaller
_exception_handler_size = MacroAssembler::b64_patchable_size, // or smaller
_deopt_handler_size = MacroAssembler::bl64_patchable_size + BytesPerInstWord
_deopt_handler_size = MacroAssembler::bl64_patchable_size
};
// '_static_call_stub_size' is only used on ppc (see LIR_Assembler::emit_static_call_stub()

View File

@ -2088,11 +2088,17 @@ class HandlerImpl {
public:
static int emit_exception_handler(C2_MacroAssembler *masm);
static int emit_deopt_handler(C2_MacroAssembler* masm);
static uint size_exception_handler() {
// The exception_handler is a b64_patchable.
return MacroAssembler::b64_patchable_size;
}
static uint size_deopt_handler() {
// The deopt_handler is a bl64_patchable.
return MacroAssembler::bl64_patchable_size + BytesPerInstWord;
return MacroAssembler::bl64_patchable_size;
}
};
@ -2108,6 +2114,22 @@ public:
source %{
int HandlerImpl::emit_exception_handler(C2_MacroAssembler *masm) {
address base = __ start_a_stub(size_exception_handler());
if (base == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return 0; // CodeBuffer::expand failed
}
int offset = __ offset();
__ b64_patchable((address)OptoRuntime::exception_blob()->content_begin(),
relocInfo::runtime_call_type);
assert(__ offset() - offset == (int)size_exception_handler(), "must be fixed size");
__ end_a_stub();
return offset;
}
// The deopt_handler is like the exception handler, but it calls to
// the deoptimization blob instead of jumping to the exception blob.
int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm) {
@ -2118,21 +2140,12 @@ int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm) {
}
int offset = __ offset();
Label start;
__ bind(start);
__ bl64_patchable((address)SharedRuntime::deopt_blob()->unpack(),
relocInfo::runtime_call_type);
int entry_offset = __ offset();
__ b(start);
assert(__ offset() - offset == (int) size_deopt_handler(), "must be fixed size");
__ end_a_stub();
return entry_offset;
return offset;
}
//=============================================================================

View File

@ -46,6 +46,7 @@
//------------------------------generate_exception_blob---------------------------
// Creates exception blob at the end.
// Using exception blob, this code is jumped from a compiled method.
//
// Given an exception pc at a call we call into the runtime for the
// handler in this method. This handler might merely restore state

View File

@ -83,6 +83,7 @@ class RegisterSaver {
static OopMap* push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
int* out_frame_size_in_bytes,
bool generate_oop_map,
int return_pc_adjustment,
ReturnPCLocation return_pc_location,
bool save_vectors = false);
static void restore_live_registers_and_pop_frame(MacroAssembler* masm,
@ -261,6 +262,7 @@ static const RegisterSaver::LiveRegType RegisterSaver_LiveVecRegs[] = {
OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
int* out_frame_size_in_bytes,
bool generate_oop_map,
int return_pc_adjustment,
ReturnPCLocation return_pc_location,
bool save_vectors) {
// Push an abi_reg_args-frame and store all registers which may be live.
@ -269,6 +271,7 @@ OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssemble
// propagated to the RegisterMap of the caller frame during
// StackFrameStream construction (needed for deoptimization; see
// compiledVFrame::create_stack_value).
// If return_pc_adjustment != 0 adjust the return pc by return_pc_adjustment.
// Updated return pc is returned in R31 (if not return_pc_is_pre_saved).
// calculate frame size
@ -302,11 +305,14 @@ OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssemble
// Do the save_LR by hand and adjust the return pc if requested.
switch (return_pc_location) {
case return_pc_is_lr: __ mflr(R31); break;
case return_pc_is_pre_saved: break;
case return_pc_is_pre_saved: assert(return_pc_adjustment == 0, "unsupported"); break;
case return_pc_is_thread_saved_exception_pc: __ ld(R31, thread_(saved_exception_pc)); break;
default: ShouldNotReachHere();
}
if (return_pc_location != return_pc_is_pre_saved) {
if (return_pc_adjustment != 0) {
__ addi(R31, R31, return_pc_adjustment);
}
__ std(R31, frame_size_in_bytes + _abi0(lr), R1_SP);
}
@ -2901,15 +2907,22 @@ void SharedRuntime::generate_deopt_blob() {
// deopt_handler: call_deopt_stub
// cur. return pc --> ...
//
// So currently SR_LR points behind the call in the deopt handler.
// We adjust it such that it points to the start of the deopt handler.
// The return_pc has been stored in the frame of the deoptee and
// will replace the address of the deopt_handler in the call
// to Deoptimization::fetch_unroll_info below.
// We can't grab a free register here, because all registers may
// contain live values, so let the RegisterSaver do the adjustment
// of the return pc.
const int return_pc_adjustment_no_exception = -MacroAssembler::bl64_patchable_size;
// Push the "unpack frame"
// Save everything in sight.
map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
&first_frame_size_in_bytes,
/*generate_oop_map=*/ true,
return_pc_adjustment_no_exception,
RegisterSaver::return_pc_is_lr);
assert(map != nullptr, "OopMap must have been created");
@ -2944,6 +2957,7 @@ void SharedRuntime::generate_deopt_blob() {
RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
&first_frame_size_in_bytes,
/*generate_oop_map=*/ false,
/*return_pc_adjustment_exception=*/ 0,
RegisterSaver::return_pc_is_pre_saved);
// Deopt during an exception. Save exec mode for unpack_frames.
@ -2961,6 +2975,7 @@ void SharedRuntime::generate_deopt_blob() {
RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
&first_frame_size_in_bytes,
/*generate_oop_map=*/ false,
/*return_pc_adjustment_reexecute=*/ 0,
RegisterSaver::return_pc_is_pre_saved);
__ li(exec_mode_reg, Deoptimization::Unpack_reexecute);
#endif
@ -3251,6 +3266,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(StubId id, address call_ptr)
map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
&frame_size_in_bytes,
/*generate_oop_map=*/ true,
/*return_pc_adjustment=*/0,
return_pc_location, save_vectors);
// The following is basically a call_VM. However, we need the precise
@ -3351,6 +3367,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(StubId id, address destination
map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm,
&frame_size_in_bytes,
/*generate_oop_map*/ true,
/*return_pc_adjustment*/ 0,
RegisterSaver::return_pc_is_lr);
// Use noreg as last_Java_pc, the return pc will be reconstructed

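The return_pc_adjustment_no_exception re-added in this file rewinds the saved return pc by the size of the bl64_patchable call so that it points at the start of the deopt handler again. A small stand-alone sketch of that arithmetic, with a hypothetical 16-byte call-sequence size and addresses used only for illustration:

#include <cstdint>
#include <cassert>

int main() {
  const int bl64_patchable_size = 16;        // hypothetical size of the patchable call
  uint64_t deopt_handler_start  = 0x2000;    // hypothetical address of the handler
  // After the bl, LR points right behind the call sequence.
  uint64_t lr = deopt_handler_start + bl64_patchable_size;
  // return_pc_adjustment_no_exception = -MacroAssembler::bl64_patchable_size
  int return_pc_adjustment = -bl64_patchable_size;
  uint64_t adjusted = lr + return_pc_adjustment;
  assert(adjusted == deopt_handler_start);   // the saved pc is the handler start again
  return 0;
}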
View File

@ -377,18 +377,12 @@ int LIR_Assembler::emit_deopt_handler() {
int offset = code_offset();
Label start;
__ bind(start);
__ far_call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
int entry_offset = __ offset();
__ j(start);
__ auipc(ra, 0);
__ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
__ end_a_stub();
return entry_offset;
return offset;
}
void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {

View File

@ -72,7 +72,7 @@ private:
// See emit_exception_handler for detail
_exception_handler_size = DEBUG_ONLY(256) NOT_DEBUG(32), // or smaller
// See emit_deopt_handler for detail
// far_call (2) + j (1)
// auipc (1) + far_jump (2)
_deopt_handler_size = 1 * MacroAssembler::instruction_size +
2 * MacroAssembler::instruction_size
};

View File

@ -1049,10 +1049,15 @@ class HandlerImpl {
public:
static int emit_exception_handler(C2_MacroAssembler *masm);
static int emit_deopt_handler(C2_MacroAssembler* masm);
static uint size_exception_handler() {
return MacroAssembler::far_branch_size();
}
static uint size_deopt_handler() {
// count far call + j
// count auipc + far branch
return NativeInstruction::instruction_size + MacroAssembler::far_branch_size();
}
};
@ -1833,6 +1838,25 @@ uint MachUEPNode::size(PhaseRegAlloc* ra_) const
//=============================================================================
// Emit exception handler code.
int HandlerImpl::emit_exception_handler(C2_MacroAssembler* masm)
{
// auipc t1, #exception_blob_entry_point
// jr (offset)t1
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a handler.
address base = __ start_a_stub(size_exception_handler());
if (base == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return 0; // CodeBuffer::expand failed
}
int offset = __ offset();
__ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
__ end_a_stub();
return offset;
}
// Emit deopt handler code.
int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm)
{
@ -1843,17 +1867,12 @@ int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm)
}
int offset = __ offset();
Label start;
__ bind(start);
__ far_call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
int entry_offset = __ offset();
__ j(start);
__ auipc(ra, 0);
__ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
__ end_a_stub();
return entry_offset;
return offset;
}
// REQUIRED MATCHER CODE

View File

@ -249,6 +249,8 @@ UncommonTrapBlob* OptoRuntime::generate_uncommon_trap_blob() {
//------------------------------generate_exception_blob---------------------------
// creates exception blob at the end
// Using exception blob, this code is jumped from a compiled method.
// (see emit_exception_handler in riscv.ad file)
//
// Given an exception pc at a call we call into the runtime for the
// handler in this method. This handler might merely restore state

View File

@ -272,25 +272,14 @@ int LIR_Assembler::emit_deopt_handler() {
// Not enough space left for the handler.
bailout("deopt handler overflow");
return -1;
}
int offset = code_offset();
Label start;
__ bind(start);
}
int offset = code_offset();
// Size must be constant (see HandlerImpl::emit_deopt_handler).
__ load_const(Z_R1_scratch, SharedRuntime::deopt_blob()->unpack());
__ call(Z_R1_scratch);
int entry_offset = __ offset();
__ z_bru(start);
guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
__ end_a_stub();
return entry_offset;
return offset;
}
void LIR_Assembler::jobject2reg(jobject o, Register reg) {

View File

@ -43,6 +43,8 @@
//------------------------------generate_exception_blob---------------------------
// creates exception blob at the end
// Using exception blob, this code is jumped from a compiled method.
// (see emit_exception_handler in s390.ad file)
//
// Given an exception pc at a call we call into the runtime for the
// handler in this method. This handler might merely restore state

View File

@ -1649,10 +1649,15 @@ source_hpp %{ // Header information of the source block.
class HandlerImpl {
public:
static int emit_exception_handler(C2_MacroAssembler *masm);
static int emit_deopt_handler(C2_MacroAssembler* masm);
static uint size_exception_handler() {
return NativeJump::max_instruction_size();
}
static uint size_deopt_handler() {
return NativeCall::max_instruction_size() + MacroAssembler::jump_pcrelative_size();
return NativeCall::max_instruction_size();
}
};
@ -1667,6 +1672,43 @@ public:
source %{
// This exception handler code snippet is placed after the method's
// code. It is the return point if an exception occurred. it jumps to
// the exception blob.
//
// If the method gets deoptimized, the method and this code snippet
// get patched.
//
// 1) Trampoline code gets patched into the end of this exception
// handler. the trampoline code jumps to the deoptimization blob.
//
// 2) The return address in the method's code will get patched such
// that it jumps to the trampoline.
//
// 3) The handler will get patched such that it does not jump to the
// exception blob, but to an entry in the deoptimization blob being
// aware of the exception.
int HandlerImpl::emit_exception_handler(C2_MacroAssembler *masm) {
Register temp_reg = Z_R1;
address base = __ start_a_stub(size_exception_handler());
if (base == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return 0; // CodeBuffer::expand failed
}
int offset = __ offset();
// Use unconditional pc-relative jump with 32-bit range here.
__ load_const_optimized(temp_reg, (address)OptoRuntime::exception_blob()->content_begin());
__ z_br(temp_reg);
assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
__ end_a_stub();
return offset;
}
// Emit deopt handler code.
int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm) {
address base = __ start_a_stub(size_deopt_handler());
@ -1678,22 +1720,14 @@ int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm) {
int offset = __ offset();
Label start;
__ bind(start);
// Size_deopt_handler() must be exact on zarch, so for simplicity
// we do not use load_const_opt here.
__ load_const(Z_R1, SharedRuntime::deopt_blob()->unpack());
__ call(Z_R1);
int entry_offset = __ offset();
__ z_bru(start);
assert(__ offset() - offset == (int) size_deopt_handler(), "must be fixed size");
__ end_a_stub();
return entry_offset;
return offset;
}
//=============================================================================

View File

@ -2544,10 +2544,14 @@ void SharedRuntime::generate_deopt_blob() {
// Normal entry (non-exception case)
//
// We have been called from the deopt handler of the deoptee.
// Z_R14 points to the entry point of the deopt handler.
// Z_R14 points behind the call in the deopt handler. We adjust
// it such that it points to the start of the deopt handler.
// The return_pc has been stored in the frame of the deoptee and
// will replace the address of the deopt_handler in the call
// to Deoptimization::fetch_unroll_info below.
// The (int) cast is necessary, because -((unsigned int)14)
// is an unsigned int.
__ add2reg(Z_R14, -(int)NativeCall::max_instruction_size());
const Register exec_mode_reg = Z_tmp_1;

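The add2reg in this hunk carries a comment about the (int) cast: negating an unsigned value in C++ stays unsigned and wraps around, which is exactly what the cast avoids. A tiny stand-alone example (the value 14 comes from the comment above):

#include <cstdio>

int main() {
  unsigned int size = 14;            // stand-in for NativeCall::max_instruction_size()
  long long without_cast = -size;    // -(unsigned int)14 wraps to 4294967282
  long long with_cast    = -(int)size;
  printf("%lld %lld\n", without_cast, with_cast);   // prints: 4294967282 -14
  return 0;
}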
View File

@ -450,20 +450,14 @@ int LIR_Assembler::emit_deopt_handler() {
}
int offset = code_offset();
InternalAddress here(__ pc());
Label start;
__ bind(start);
__ call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
int entry_offset = __ offset();
__ jmp(start);
__ pushptr(here.addr(), rscratch1);
__ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
__ end_a_stub();
return entry_offset;
return offset;
}
void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {

View File

@ -48,7 +48,7 @@
enum {
_call_stub_size = 28,
_exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(175),
_deopt_handler_size = 10
_deopt_handler_size = 17
};
public:

View File

@ -242,6 +242,8 @@ UncommonTrapBlob* OptoRuntime::generate_uncommon_trap_blob() {
//------------------------------generate_exception_blob---------------------------
// creates exception blob at the end
// Using exception blob, this code is jumped from a compiled method.
// (see emit_exception_handler in x86_64.ad file)
//
// Given an exception pc at a call we call into the runtime for the
// handler in this method. This handler might merely restore state

View File

@ -2767,11 +2767,21 @@ class HandlerImpl {
public:
static int emit_exception_handler(C2_MacroAssembler *masm);
static int emit_deopt_handler(C2_MacroAssembler* masm);
static uint size_exception_handler() {
// NativeCall instruction size is the same as NativeJump.
// exception handler starts out as jump and can be patched to
// a call be deoptimization. (4932387)
// Note that this value is also credited (in output.cpp) to
// the size of the code section.
return NativeJump::instruction_size;
}
static uint size_deopt_handler() {
// one call and one jmp.
return 10;
// three 5 byte instructions plus one move for unreachable address.
return 15+3;
}
};
@ -2863,6 +2873,24 @@ int MachNode::compute_padding(int current_offset) const {
}
}
// Emit exception handler code.
// Stuff framesize into a register and call a VM stub routine.
int HandlerImpl::emit_exception_handler(C2_MacroAssembler* masm) {
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a handler.
address base = __ start_a_stub(size_exception_handler());
if (base == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return 0; // CodeBuffer::expand failed
}
int offset = __ offset();
__ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
__ end_a_stub();
return offset;
}
// Emit deopt handler code.
int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm) {
@ -2875,18 +2903,21 @@ int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm) {
}
int offset = __ offset();
Label start;
__ bind(start);
address the_pc = (address) __ pc();
Label next;
// push a "the_pc" on the stack without destroying any registers
// as they all may be live.
__ call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
int entry_offset = __ offset();
__ jmp(start);
// push address of "next"
__ call(next, relocInfo::none); // reloc none is fine since it is a disp32
__ bind(next);
// adjust it so it matches "the_pc"
__ subptr(Address(rsp, 0), __ offset() - offset);
__ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow %d", (__ offset() - offset));
__ end_a_stub();
return entry_offset;
return offset;
}
static Assembler::Width widthForType(BasicType bt) {

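The x86 deopt handler shown in this hunk pushes its own start address without clobbering any register: the call pushes the address of the next instruction, and the subptr then subtracts the bytes emitted since the handler start. A stand-alone sketch of that fix-up arithmetic, assuming a 5-byte near call purely for illustration:

#include <cstdint>
#include <cassert>

int main() {
  uint64_t the_pc    = 0x3000;   // hypothetical address where the handler starts
  const int call_len = 5;        // assumed length of the `call next` (disp32 form)
  // `call next` leaves the address of the instruction after the call on the stack...
  uint64_t pushed = the_pc + call_len;
  // ...and subptr(Address(rsp, 0), __ offset() - offset) subtracts exactly the
  // bytes emitted so far, so the slot ends up holding "the_pc".
  uint64_t fixed = pushed - call_len;
  assert(fixed == the_pc);
  return 0;
}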
View File

@ -621,7 +621,7 @@ int JVM_HANDLE_XXX_SIGNAL(int sig, siginfo_t* info,
if (cb != nullptr && cb->is_nmethod()) {
nmethod* nm = cb->as_nmethod();
assert(nm->insts_contains_inclusive(pc), "");
address deopt = nm->deopt_handler_entry();
address deopt = nm->deopt_handler_begin();
assert(deopt != nullptr, "");
frame fr = os::fetch_frame_from_context(uc);

View File

@ -2795,7 +2795,7 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
if (cb != nullptr && cb->is_nmethod()) {
nmethod* nm = cb->as_nmethod();
frame fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
address deopt = nm->deopt_handler_entry();
address deopt = nm->deopt_handler_begin();
assert(nm->insts_contains_inclusive(pc), "");
nm->set_original_pc(&fr, pc);
// Set pc to handler

View File

@ -1057,9 +1057,7 @@ void ciEnv::register_method(ciMethod* target,
}
assert(offsets->value(CodeOffsets::Deopt) != -1, "must have deopt entry");
assert(compiler->type() == compiler_c2 ||
offsets->value(CodeOffsets::Exceptions) != -1, "must have exception entry");
assert(offsets->value(CodeOffsets::Exceptions) != -1, "must have exception entry");
nm = nmethod::new_nmethod(method,
compile_id(),

View File

@ -1302,7 +1302,7 @@ nmethod::nmethod(
}
// Native wrappers do not have deopt handlers. Make the values
// something that will never match a pc like the nmethod vtable entry
_deopt_handler_entry_offset = 0;
_deopt_handler_offset = 0;
_unwind_handler_offset = 0;
CHECKED_CAST(_oops_size, uint16_t, align_up(code_buffer->total_oop_size(), oopSize));
@ -1442,7 +1442,7 @@ nmethod::nmethod(const nmethod &nm) : CodeBlob(nm._name, nm._kind, nm._size, nm.
_skipped_instructions_size = nm._skipped_instructions_size;
_stub_offset = nm._stub_offset;
_exception_offset = nm._exception_offset;
_deopt_handler_entry_offset = nm._deopt_handler_entry_offset;
_deopt_handler_offset = nm._deopt_handler_offset;
_unwind_handler_offset = nm._unwind_handler_offset;
_num_stack_arg_slots = nm._num_stack_arg_slots;
_oops_size = nm._oops_size;
@ -1704,26 +1704,19 @@ nmethod::nmethod(
_exception_offset = -1;
}
if (offsets->value(CodeOffsets::Deopt) != -1) {
_deopt_handler_entry_offset = code_offset() + offsets->value(CodeOffsets::Deopt);
_deopt_handler_offset = code_offset() + offsets->value(CodeOffsets::Deopt);
} else {
_deopt_handler_entry_offset = -1;
_deopt_handler_offset = -1;
}
} else
#endif
{
// Exception handler and deopt handler are in the stub section
assert(offsets->value(CodeOffsets::Exceptions) != -1, "must be set");
assert(offsets->value(CodeOffsets::Deopt ) != -1, "must be set");
bool has_exception_handler = (offsets->value(CodeOffsets::Exceptions) != -1);
assert(has_exception_handler == (compiler->type() != compiler_c2),
"C2 compiler doesn't provide exception handler stub code.");
if (has_exception_handler) {
_exception_offset = _stub_offset + offsets->value(CodeOffsets::Exceptions);
} else {
_exception_offset = -1;
}
_deopt_handler_entry_offset = _stub_offset + offsets->value(CodeOffsets::Deopt);
_exception_offset = _stub_offset + offsets->value(CodeOffsets::Exceptions);
_deopt_handler_offset = _stub_offset + offsets->value(CodeOffsets::Deopt);
}
if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
// C1 generates UnwindHandler at the end of instructions section.
@ -4031,7 +4024,7 @@ const char* nmethod::nmethod_section_label(address pos) const {
// Check stub_code before checking exception_handler or deopt_handler.
if (pos == this->stub_begin()) label = "[Stub Code]";
if (JVMCI_ONLY(_exception_offset >= 0 &&) pos == exception_begin()) label = "[Exception Handler]";
if (JVMCI_ONLY(_deopt_handler_entry_offset != -1 &&) pos == deopt_handler_entry()) label = "[Deopt Handler Entry Point]";
if (JVMCI_ONLY(_deopt_handler_offset != -1 &&) pos == deopt_handler_begin()) label = "[Deopt Handler Code]";
return label;
}

View File

@ -229,7 +229,7 @@ class nmethod : public CodeBlob {
int _exception_offset;
// All deoptee's will resume execution at this location described by
// this offset.
int _deopt_handler_entry_offset;
int _deopt_handler_offset;
// Offset (from insts_end) of the unwind handler if it exists
int16_t _unwind_handler_offset;
// Number of arguments passed on the stack
@ -617,7 +617,7 @@ public:
address stub_begin () const { return header_begin() + _stub_offset ; }
address stub_end () const { return code_end() ; }
address exception_begin () const { return header_begin() + _exception_offset ; }
address deopt_handler_entry () const { return header_begin() + _deopt_handler_entry_offset ; }
address deopt_handler_begin () const { return header_begin() + _deopt_handler_offset ; }
address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (insts_end() - _unwind_handler_offset) : nullptr; }
oop* oops_begin () const { return (oop*) data_begin(); }
oop* oops_end () const { return (oop*) data_end(); }

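The accessors in this hunk all compute section starts as header_begin() plus a stored int offset; deopt_handler_begin() is just one more such field. A hypothetical, simplified sketch of that layout scheme (names and offsets are illustrative, not the real nmethod layout):

#include <cstdint>

struct NMethodLayoutSketch {
  const uint8_t* header;       // stand-in for header_begin()
  int stub_offset;
  int exception_offset;
  int deopt_handler_offset;

  const uint8_t* stub_begin()          const { return header + stub_offset; }
  const uint8_t* exception_begin()     const { return header + exception_offset; }
  const uint8_t* deopt_handler_begin() const { return header + deopt_handler_offset; }
};

int main() {
  static uint8_t blob[256];
  NMethodLayoutSketch nm{blob, 128, 160, 176};   // arbitrary example offsets
  return nm.deopt_handler_begin() >= nm.stub_begin() ? 0 : 1;
}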
View File

@ -34,7 +34,7 @@
inline bool nmethod::is_deopt_pc(address pc) { return is_deopt_entry(pc); }
inline bool nmethod::is_deopt_entry(address pc) {
return pc == deopt_handler_entry();
return pc == deopt_handler_begin();
}
// class ExceptionCache methods

View File

@ -1347,18 +1347,20 @@ CodeBuffer* PhaseOutput::init_buffer() {
// nmethod and CodeBuffer count stubs & constants as part of method's code.
// class HandlerImpl is platform-specific and defined in the *.ad files.
int exception_handler_req = HandlerImpl::size_exception_handler() + MAX_stubs_size; // add marginal slop for handler
int deopt_handler_req = HandlerImpl::size_deopt_handler() + MAX_stubs_size; // add marginal slop for handler
stub_req += MAX_stubs_size; // ensure per-stub margin
code_req += MAX_inst_size; // ensure per-instruction margin
if (StressCodeBuffers)
code_req = const_req = stub_req = deopt_handler_req = 0x10; // force expansion
code_req = const_req = stub_req = exception_handler_req = deopt_handler_req = 0x10; // force expansion
int total_req =
const_req +
code_req +
pad_req +
stub_req +
exception_handler_req +
deopt_handler_req; // deopt handler
CodeBuffer* cb = code_buffer();
@ -1787,6 +1789,8 @@ void PhaseOutput::fill_buffer(C2_MacroAssembler* masm, uint* blk_starts) {
// Only java methods have exception handlers and deopt handlers
// class HandlerImpl is platform-specific and defined in the *.ad files.
if (C->method()) {
// Emit the exception handler code.
_code_offsets.set_value(CodeOffsets::Exceptions, HandlerImpl::emit_exception_handler(masm));
if (C->failing()) {
return; // CodeBuffer::expand failed
}

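The sizing code in this hunk reserves a separate exception_handler_req (handler size plus stub slop) and adds it back into total_req. A toy calculation of that bookkeeping, with made-up sizes standing in for the real compiler values:

#include <cstdio>

int main() {
  // Made-up request sizes; only the structure of the sum mirrors the hunk above.
  int const_req = 64, code_req = 4096, pad_req = 32, stub_req = 256;
  const int MAX_stubs_size = 128;
  int exception_handler_req = 16 + MAX_stubs_size;  // size_exception_handler() + slop
  int deopt_handler_req     = 24 + MAX_stubs_size;  // size_deopt_handler() + slop
  int total_req = const_req + code_req + pad_req + stub_req +
                  exception_handler_req + deopt_handler_req;
  printf("total_req = %d\n", total_req);
  return 0;
}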
View File

@ -498,9 +498,6 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread
RegisterMap::WalkContinuation::skip);
// Now get the deoptee with a valid map
frame deoptee = stub_frame.sender(&map);
if (exec_mode == Unpack_deopt) {
assert(deoptee.is_deoptimized_frame(), "frame is not marked for deoptimization");
}
// Set the deoptee nmethod
assert(current->deopt_compiled_method() == nullptr, "Pending deopt!");
nmethod* nm = deoptee.cb()->as_nmethod_or_null();

View File

@ -206,7 +206,7 @@ address frame::raw_pc() const {
if (is_deoptimized_frame()) {
nmethod* nm = cb()->as_nmethod_or_null();
assert(nm != nullptr, "only nmethod is expected here");
return nm->deopt_handler_entry() - pc_return_offset;
return nm->deopt_handler_begin() - pc_return_offset;
} else {
return (pc() - pc_return_offset);
}
@ -355,7 +355,7 @@ void frame::deoptimize(JavaThread* thread) {
// If the call site is a MethodHandle call site use the MH deopt handler.
nmethod* nm = _cb->as_nmethod();
address deopt = nm->deopt_handler_entry();
address deopt = nm->deopt_handler_begin();
NativePostCallNop* inst = nativePostCallNop_at(pc());

View File

@ -87,9 +87,6 @@
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#if INCLUDE_JFR
#include "jfr/jfr.inline.hpp"
#endif
@ -604,11 +601,6 @@ address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* curr
// The deferred StackWatermarkSet::after_unwind check will be performed in
// * OptoRuntime::handle_exception_C_helper for C2 code
// * exception_handler_for_pc_helper via Runtime1::handle_exception_from_callee_id for C1 code
#ifdef COMPILER2
if (nm->compiler_type() == compiler_c2) {
return OptoRuntime::exception_blob()->entry_point();
}
#endif // COMPILER2
return nm->exception_begin();
}
}

View File

@ -535,7 +535,7 @@
nonstatic_field(nmethod, _osr_link, nmethod*) \
nonstatic_field(nmethod, _state, volatile signed char) \
nonstatic_field(nmethod, _exception_offset, int) \
nonstatic_field(nmethod, _deopt_handler_entry_offset, int) \
nonstatic_field(nmethod, _deopt_handler_offset, int) \
nonstatic_field(nmethod, _orig_pc_offset, int) \
nonstatic_field(nmethod, _stub_offset, int) \
nonstatic_field(nmethod, _immutable_data_ref_count_offset, int) \

View File

@ -48,7 +48,7 @@ public class NMethod extends CodeBlob {
/** Offsets for different nmethod parts */
private static CIntegerField exceptionOffsetField;
private static CIntegerField deoptHandlerEntryOffsetField;
private static CIntegerField deoptHandlerOffsetField;
private static CIntegerField origPCOffsetField;
private static CIntegerField stubOffsetField;
private static CIntField handlerTableOffsetField;
@ -86,7 +86,7 @@ public class NMethod extends CodeBlob {
immutableDataField = type.getAddressField("_immutable_data");
immutableDataSizeField = type.getCIntegerField("_immutable_data_size");
exceptionOffsetField = type.getCIntegerField("_exception_offset");
deoptHandlerEntryOffsetField = type.getCIntegerField("_deopt_handler_entry_offset");
deoptHandlerOffsetField = type.getCIntegerField("_deopt_handler_offset");
origPCOffsetField = type.getCIntegerField("_orig_pc_offset");
stubOffsetField = type.getCIntegerField("_stub_offset");
scopesPCsOffsetField = type.getCIntegerField("_scopes_pcs_offset");
@ -121,16 +121,16 @@ public class NMethod extends CodeBlob {
public boolean isOSRMethod() { return getEntryBCI() != VM.getVM().getInvocationEntryBCI(); }
/** Boundaries for different parts */
public Address constantsBegin() { return contentBegin(); }
public Address constantsEnd() { return codeBegin(); }
public Address instsBegin() { return codeBegin(); }
public Address instsEnd() { return headerBegin().addOffsetTo(getStubOffset()); }
public Address exceptionBegin() { return headerBegin().addOffsetTo(getExceptionOffset()); }
public Address deoptHandlerEntry() { return headerBegin().addOffsetTo(getDeoptHandlerEntryOffset()); }
public Address stubBegin() { return headerBegin().addOffsetTo(getStubOffset()); }
public Address stubEnd() { return dataBegin(); }
public Address oopsBegin() { return dataBegin(); }
public Address oopsEnd() { return dataEnd(); }
public Address constantsBegin() { return contentBegin(); }
public Address constantsEnd() { return codeBegin(); }
public Address instsBegin() { return codeBegin(); }
public Address instsEnd() { return headerBegin().addOffsetTo(getStubOffset()); }
public Address exceptionBegin() { return headerBegin().addOffsetTo(getExceptionOffset()); }
public Address deoptHandlerBegin() { return headerBegin().addOffsetTo(getDeoptHandlerOffset()); }
public Address stubBegin() { return headerBegin().addOffsetTo(getStubOffset()); }
public Address stubEnd() { return dataBegin(); }
public Address oopsBegin() { return dataBegin(); }
public Address oopsEnd() { return dataEnd(); }
public Address immutableDataBegin() { return immutableDataField.getValue(addr); }
public Address immutableDataEnd() { return immutableDataBegin().addOffsetTo(getImmutableDataSize()); }
@ -262,7 +262,7 @@ public class NMethod extends CodeBlob {
// Deopt
// Return true is the PC is one would expect if the frame is being deopted.
public boolean isDeoptPc (Address pc) { return isDeoptEntry(pc); }
public boolean isDeoptEntry (Address pc) { return pc == deoptHandlerEntry(); }
public boolean isDeoptEntry (Address pc) { return pc == deoptHandlerBegin(); }
/** Tells whether frames described by this nmethod can be
deoptimized. Note: native wrappers cannot be deoptimized. */
@ -490,7 +490,7 @@ public class NMethod extends CodeBlob {
private int getEntryBCI() { return (int) entryBCIField .getValue(addr); }
private int getExceptionOffset() { return (int) exceptionOffsetField .getValue(addr); }
private int getDeoptHandlerEntryOffset() { return (int) deoptHandlerEntryOffsetField .getValue(addr); }
private int getDeoptHandlerOffset() { return (int) deoptHandlerOffsetField .getValue(addr); }
private int getStubOffset() { return (int) stubOffsetField .getValue(addr); }
private int getScopesDataOffset() { return (int) scopesDataOffsetField .getValue(addr); }
private int getScopesPCsOffset() { return (int) scopesPCsOffsetField .getValue(addr); }

View File

@ -87,7 +87,7 @@ public abstract class Frame implements Cloneable {
CodeBlob cb = VM.getVM().getCodeCache().findBlob(pc);
if (cb != null && cb.isJavaMethod()) {
NMethod nm = (NMethod) cb;
if (pc.equals(nm.deoptHandlerEntry())) {
if (pc.equals(nm.deoptHandlerBegin())) {
if (Assert.ASSERTS_ENABLED) {
Assert.that(this.getUnextendedSP() != null, "null SP in Java frame");
}

View File

@ -1,159 +0,0 @@
/*
* Copyright 2025 Arm Limited and/or its affiliates.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/**
* @test id=vthread-deopt-c1
* @summary Deoptimization test for virtual threads (C1)
* @requires vm.continuations
* @requires vm.compiler1.enabled
* @requires vm.opt.TieredStopAtLevel != 0
* @library /test/lib
* @build jdk.test.whitebox.WhiteBox
* @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox
* @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
* -XX:-BackgroundCompilation
* -XX:TieredStopAtLevel=1
* Deoptimization
*/
/**
* @test id=vthread-deopt-c2
* @summary Deoptimization test for virtual threads (C2)
* @requires vm.continuations
* @requires vm.compiler2.enabled
* @library /test/lib
* @build jdk.test.whitebox.WhiteBox
* @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox
* @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
* -XX:-BackgroundCompilation
* -XX:-TieredCompilation
* Deoptimization
*/
import java.lang.reflect.Method;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;
import java.util.Objects;
import jdk.test.whitebox.WhiteBox;
public class Deoptimization {
static final WhiteBox white_box = WhiteBox.getWhiteBox();
static class TestTask implements Runnable {
CyclicBarrier start_barrier = null;
AtomicInteger completed_number = new AtomicInteger(0);
public void reset(int barrier_parties) {
start_barrier = new CyclicBarrier(barrier_parties);
completed_number.set(0);
}
public int getNumberWaiting() {
return start_barrier.getNumberWaiting();
}
public int getNumberCompleted() {
return completed_number.get();
}
public void await() throws BrokenBarrierException, InterruptedException {
start_barrier.await();
}
public void run() {
try {
await();
} catch(BrokenBarrierException e) {
return;
} catch(InterruptedException e) {
return;
}
completed_number.getAndIncrement();
}
}
static void test(TestTask task, Method method, int vthreads_num) throws Exception {
task.reset(vthreads_num + 1 /* 1 for the main thread */);
Thread[] vthreads = new Thread[vthreads_num];
for (int i = 0; i < vthreads_num; i++) {
vthreads[i] = Thread.startVirtualThread(task);
}
while (task.getNumberWaiting() != vthreads_num) {
Thread.onSpinWait();
}
if (method != null) {
if (!white_box.isMethodCompiled(method, false)) {
throw new Error("Unexpectedly, it is not compiled.");
}
white_box.deoptimizeMethod(method);
if (white_box.isMethodCompiled(method, false)) {
throw new Error("Unexpectedly, it is compiled.");
}
}
task.await();
for (int i = 0; i < vthreads_num; i++) {
vthreads[i].join();
}
if (task.getNumberCompleted() != vthreads_num) {
throw new Error("Some threads didn't reach completion");
}
}
static int getIntegerOption(String option_name) {
Object option_object = white_box.getVMFlag(option_name);
String option_string = Objects.toString(option_object);
return Integer.parseInt(option_string);
}
public static void main(String[] args) throws Exception {
int tiered_stop_at_level = getIntegerOption("TieredStopAtLevel");
Method method_run = TestTask.class.getMethod("run");
white_box.testSetDontInlineMethod(method_run, true);
Method method_await = TestTask.class.getMethod("await");
white_box.testSetDontInlineMethod(method_await, true);
TestTask task = new TestTask();
// Warm-up
test(task, null, 2);
white_box.enqueueMethodForCompilation(method_run, tiered_stop_at_level);
// Deoptimization test
test(task, method_run, 10000);
}
}