8337987: Relocate jfr and throw_exception stubs from StubGenerator to SharedRuntime

Reviewed-by: fyang, kvn, yzheng
This commit is contained in:
Andrew Dinn 2024-08-19 09:00:19 +00:00
parent 15b20cb1fd
commit f0374a0bc1
43 changed files with 1392 additions and 1545 deletions

View File

@ -744,7 +744,7 @@ void MacroAssembler::reserved_stack_check() {
// We have already removed our own frame.
// throw_delayed_StackOverflowError will think that it's been
// called by our caller.
lea(rscratch1, RuntimeAddress(StubRoutines::throw_delayed_StackOverflowError_entry()));
lea(rscratch1, RuntimeAddress(SharedRuntime::throw_delayed_StackOverflowError_entry()));
br(rscratch1);
should_not_reach_here();

View File

@ -120,7 +120,7 @@ void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register meth
__ ldr(rscratch1,Address(method, entry_offset));
__ br(rscratch1);
__ bind(L_no_such_method);
__ far_jump(RuntimeAddress(StubRoutines::throw_AbstractMethodError_entry()));
__ far_jump(RuntimeAddress(SharedRuntime::throw_AbstractMethodError_entry()));
}
void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
@ -451,7 +451,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
jump_from_method_handle(_masm, rmethod, temp1, for_compiler_entry);
if (iid == vmIntrinsics::_linkToInterface) {
__ bind(L_incompatible_class_change_error);
__ far_jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
__ far_jump(RuntimeAddress(SharedRuntime::throw_IncompatibleClassChangeError_entry()));
}
}
}

View File

@ -66,6 +66,12 @@
#define __ masm->
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif
const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
// FIXME -- this is used by C1
@ -2764,3 +2770,193 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
// frame_size_words or bytes??
return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
}
// Continuation point for throwing of implicit exceptions that are
// not handled in the current activation. Fabricates an exception
// oop and initiates normal exception dispatching in this
// frame. Since we need to preserve callee-saved values (currently
// only for C2, but done for C1 as well) we need a callee-saved oop
// map and therefore have to make these stubs into RuntimeStubs
// rather than BufferBlobs. If the compiler needs all registers to
// be preserved between the fault point and the exception handler
// then it must assume responsibility for that in
// AbstractCompiler::continuation_for_implicit_null_exception or
// continuation_for_implicit_division_by_zero_exception. All other
// implicit exceptions (e.g., NullPointerException or
// AbstractMethodError on entry) are either at call sites or
// otherwise assume that stack unwinding will be initiated, so
// caller saved registers were assumed volatile in the compiler.
// Builds a RuntimeStub that fabricates an implicit exception (e.g. StackOverflowError)
// by calling runtime_entry(thread) and then jumping to the shared forward_exception
// stub, which dispatches the now-pending exception.
RuntimeStub* SharedRuntime::generate_throw_exception(const char* name, address runtime_entry) {
// Information about frame layout at time of blocking runtime call.
// Note that we only have to preserve callee-saved registers since
// the compilers are responsible for supplying a continuation point
// if they expect all registers to be preserved.
// n.b. aarch64 asserts that frame::arg_reg_save_area_bytes == 0
enum layout {
rfp_off = 0,
rfp_off2,
return_off,
return_off2,
framesize // inclusive of return address
};
int insts_size = 512;
int locs_size = 64;
// TraceTime logs stub generation cost under -Xlog:startuptime.
ResourceMark rm;
const char* timer_msg = "SharedRuntime generate_throw_exception";
TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
CodeBuffer code(name, insts_size, locs_size);
OopMapSet* oop_maps = new OopMapSet();
MacroAssembler* masm = new MacroAssembler(&code);
address start = __ pc();
// This is an inlined and slightly modified version of call_VM
// which has the ability to fetch the return PC out of
// thread-local storage and also sets up last_Java_sp slightly
// differently than the real call_VM
__ enter(); // Save FP and LR before call
assert(is_even(framesize/2), "sp not 16-byte aligned");
// lr and fp are already in place
__ sub(sp, rfp, ((uint64_t)framesize-4) << LogBytesPerInt); // prolog
int frame_complete = __ pc() - start;
// Set up last_Java_sp and last_Java_fp
address the_pc = __ pc();
__ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
// Thread is the single argument to the runtime throw routine.
__ mov(c_rarg0, rthread);
BLOCK_COMMENT("call runtime_entry");
__ mov(rscratch1, runtime_entry);
__ blr(rscratch1);
// Generate oop map; registered at the_pc, the PC recorded in the Java frame anchor.
OopMap* map = new OopMap(framesize, 0);
oop_maps->add_gc_map(the_pc - start, map);
__ reset_last_Java_frame(true);
// Reinitialize the ptrue predicate register, in case the external runtime
// call clobbers ptrue reg, as we may return to SVE compiled code.
__ reinitialize_ptrue();
__ leave();
// check for pending exceptions
#ifdef ASSERT
Label L;
__ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
__ cbnz(rscratch1, L);
__ should_not_reach_here();
__ bind(L);
#endif // ASSERT
// The runtime call installed a pending exception; dispatch it.
__ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
// codeBlob framesize is in words (not VMRegImpl::slot_size)
RuntimeStub* stub =
RuntimeStub::new_runtime_stub(name,
&code,
frame_complete,
(framesize >> (LogBytesPerWord - LogBytesPerInt)),
oop_maps, false);
return stub;
}
#if INCLUDE_JFR
// Records the Java frame anchor (sp/fp/pc) and loads the current thread as the
// first C argument before calling into the JFR runtime.
static void jfr_prologue(address the_pc, MacroAssembler* masm, Register thread) {
__ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
__ mov(c_rarg0, thread);
}
// The handle is dereferenced through a load barrier.
// Clears the Java frame anchor after the JFR runtime call returns.
static void jfr_epilogue(MacroAssembler* masm) {
__ reset_last_Java_frame(true);
}
// For c2: c_rarg0 is junk, call to runtime to write a checkpoint.
// It returns a jobject handle to the event writer.
// The handle is dereferenced and the return value is the event writer oop.
// Builds the JFR checkpoint-writing RuntimeStub: calls JfrIntrinsicSupport::write_checkpoint
// and converts the returned jobject handle into the event-writer oop in r0.
RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
enum layout {
rbp_off,
rbpH_off,
return_off,
return_off2,
framesize // inclusive of return address
};
int insts_size = 1024;
int locs_size = 64;
CodeBuffer code("jfr_write_checkpoint", insts_size, locs_size);
OopMapSet* oop_maps = new OopMapSet();
MacroAssembler* masm = new MacroAssembler(&code);
address start = __ pc();
__ enter();
int frame_complete = __ pc() - start;
address the_pc = __ pc();
jfr_prologue(the_pc, masm, rthread);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), 1);
jfr_epilogue(masm);
// r0 holds the jobject handle result; unpack it through the GC barrier.
__ resolve_global_jobject(r0, rscratch1, rscratch2);
__ leave();
__ ret(lr);
// Oop map is registered at the anchor PC recorded before the runtime call.
OopMap* map = new OopMap(framesize, 1); // rfp
oop_maps->add_gc_map(the_pc - start, map);
RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
RuntimeStub::new_runtime_stub("jfr_write_checkpoint", &code, frame_complete,
(framesize >> (LogBytesPerWord - LogBytesPerInt)),
oop_maps, false);
return stub;
}
// For c2: call to return a leased buffer.
// Builds the JFR lease-return RuntimeStub: calls JfrIntrinsicSupport::return_lease
// bracketed by the jfr prologue/epilogue; no result value is produced.
RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
enum layout {
rbp_off,
rbpH_off,
return_off,
return_off2,
framesize // inclusive of return address
};
int insts_size = 1024;
int locs_size = 64;
CodeBuffer code("jfr_return_lease", insts_size, locs_size);
OopMapSet* oop_maps = new OopMapSet();
MacroAssembler* masm = new MacroAssembler(&code);
address start = __ pc();
__ enter();
int frame_complete = __ pc() - start;
address the_pc = __ pc();
jfr_prologue(the_pc, masm, rthread);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), 1);
jfr_epilogue(masm);
__ leave();
__ ret(lr);
// Oop map is registered at the anchor PC recorded before the runtime call.
OopMap* map = new OopMap(framesize, 1); // rfp
oop_maps->add_gc_map(the_pc - start, map);
RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
RuntimeStub::new_runtime_stub("jfr_return_lease", &code, frame_complete,
(framesize >> (LogBytesPerWord - LogBytesPerInt)),
oop_maps, false);
return stub;
}
#endif // INCLUDE_JFR

View File

@ -7045,7 +7045,7 @@ class StubGenerator: public StubCodeGenerator {
Label thaw_success;
// rscratch2 contains the size of the frames to thaw, 0 if overflow or no more frames
__ cbnz(rscratch2, thaw_success);
__ lea(rscratch1, RuntimeAddress(StubRoutines::throw_StackOverflowError_entry()));
__ lea(rscratch1, RuntimeAddress(SharedRuntime::throw_StackOverflowError_entry()));
__ br(rscratch1);
__ bind(thaw_success);
@ -7305,98 +7305,6 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
#if INCLUDE_JFR
// Pre-relocation StubGenerator copy (removed by this commit in favour of the
// SharedRuntime version): records the Java frame anchor and loads the thread arg.
static void jfr_prologue(address the_pc, MacroAssembler* _masm, Register thread) {
__ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
__ mov(c_rarg0, thread);
}
// The handle is dereferenced through a load barrier.
// Pre-relocation StubGenerator copy: clears the Java frame anchor after the call.
static void jfr_epilogue(MacroAssembler* _masm) {
__ reset_last_Java_frame(true);
}
// For c2: c_rarg0 is junk, call to runtime to write a checkpoint.
// It returns a jobject handle to the event writer.
// The handle is dereferenced and the return value is the event writer oop.
// Pre-relocation StubGenerator copy of the JFR checkpoint stub, removed by this
// commit; functionally identical to SharedRuntime::generate_jfr_write_checkpoint.
static RuntimeStub* generate_jfr_write_checkpoint() {
enum layout {
rbp_off,
rbpH_off,
return_off,
return_off2,
framesize // inclusive of return address
};
int insts_size = 1024;
int locs_size = 64;
CodeBuffer code("jfr_write_checkpoint", insts_size, locs_size);
OopMapSet* oop_maps = new OopMapSet();
MacroAssembler* masm = new MacroAssembler(&code);
// _masm alias required because the local __ macro expands to _masm here.
MacroAssembler* _masm = masm;
address start = __ pc();
__ enter();
int frame_complete = __ pc() - start;
address the_pc = __ pc();
jfr_prologue(the_pc, _masm, rthread);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), 1);
jfr_epilogue(_masm);
// r0 holds the jobject handle result; unpack it through the GC barrier.
__ resolve_global_jobject(r0, rscratch1, rscratch2);
__ leave();
__ ret(lr);
OopMap* map = new OopMap(framesize, 1); // rfp
oop_maps->add_gc_map(the_pc - start, map);
RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
RuntimeStub::new_runtime_stub("jfr_write_checkpoint", &code, frame_complete,
(framesize >> (LogBytesPerWord - LogBytesPerInt)),
oop_maps, false);
return stub;
}
// For c2: call to return a leased buffer.
// Pre-relocation StubGenerator copy of the JFR lease-return stub, removed by this
// commit; functionally identical to SharedRuntime::generate_jfr_return_lease.
static RuntimeStub* generate_jfr_return_lease() {
enum layout {
rbp_off,
rbpH_off,
return_off,
return_off2,
framesize // inclusive of return address
};
int insts_size = 1024;
int locs_size = 64;
CodeBuffer code("jfr_return_lease", insts_size, locs_size);
OopMapSet* oop_maps = new OopMapSet();
MacroAssembler* masm = new MacroAssembler(&code);
// _masm alias required because the local __ macro expands to _masm here.
MacroAssembler* _masm = masm;
address start = __ pc();
__ enter();
int frame_complete = __ pc() - start;
address the_pc = __ pc();
jfr_prologue(the_pc, _masm, rthread);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), 1);
jfr_epilogue(_masm);
__ leave();
__ ret(lr);
OopMap* map = new OopMap(framesize, 1); // rfp
oop_maps->add_gc_map(the_pc - start, map);
RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
RuntimeStub::new_runtime_stub("jfr_return_lease", &code, frame_complete,
(framesize >> (LogBytesPerWord - LogBytesPerInt)),
oop_maps, false);
return stub;
}
#endif // INCLUDE_JFR
// exception handler for upcall stubs
address generate_upcall_stub_exception_handler() {
StubCodeMark mark(this, "StubRoutines", "upcall stub exception handler");
@ -7412,115 +7320,9 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
// Continuation point for throwing of implicit exceptions that are
// not handled in the current activation. Fabricates an exception
// oop and initiates normal exception dispatching in this
// frame. Since we need to preserve callee-saved values (currently
// only for C2, but done for C1 as well) we need a callee-saved oop
// map and therefore have to make these stubs into RuntimeStubs
// rather than BufferBlobs. If the compiler needs all registers to
// be preserved between the fault point and the exception handler
// then it must assume responsibility for that in
// AbstractCompiler::continuation_for_implicit_null_exception or
// continuation_for_implicit_division_by_zero_exception. All other
// implicit exceptions (e.g., NullPointerException or
// AbstractMethodError on entry) are either at call sites or
// otherwise assume that stack unwinding will be initiated, so
// caller saved registers were assumed volatile in the compiler.
#undef __
#define __ masm->
// Pre-relocation StubGenerator copy, removed by this commit. Unlike the
// SharedRuntime replacement it returned the stub's entry point and supported
// two optional register arguments (arg1/arg2) for the runtime routine;
// the SharedRuntime version drops those unused parameters.
address generate_throw_exception(const char* name,
address runtime_entry,
Register arg1 = noreg,
Register arg2 = noreg) {
// Information about frame layout at time of blocking runtime call.
// Note that we only have to preserve callee-saved registers since
// the compilers are responsible for supplying a continuation point
// if they expect all registers to be preserved.
// n.b. aarch64 asserts that frame::arg_reg_save_area_bytes == 0
enum layout {
rfp_off = 0,
rfp_off2,
return_off,
return_off2,
framesize // inclusive of return address
};
int insts_size = 512;
int locs_size = 64;
CodeBuffer code(name, insts_size, locs_size);
OopMapSet* oop_maps = new OopMapSet();
MacroAssembler* masm = new MacroAssembler(&code);
address start = __ pc();
// This is an inlined and slightly modified version of call_VM
// which has the ability to fetch the return PC out of
// thread-local storage and also sets up last_Java_sp slightly
// differently than the real call_VM
__ enter(); // Save FP and LR before call
assert(is_even(framesize/2), "sp not 16-byte aligned");
// lr and fp are already in place
__ sub(sp, rfp, ((uint64_t)framesize-4) << LogBytesPerInt); // prolog
int frame_complete = __ pc() - start;
// Set up last_Java_sp and last_Java_fp
address the_pc = __ pc();
__ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
// Call runtime
if (arg1 != noreg) {
assert(arg2 != c_rarg1, "clobbered");
__ mov(c_rarg1, arg1);
}
if (arg2 != noreg) {
__ mov(c_rarg2, arg2);
}
__ mov(c_rarg0, rthread);
BLOCK_COMMENT("call runtime_entry");
__ mov(rscratch1, runtime_entry);
__ blr(rscratch1);
// Generate oop map
OopMap* map = new OopMap(framesize, 0);
oop_maps->add_gc_map(the_pc - start, map);
__ reset_last_Java_frame(true);
// Reinitialize the ptrue predicate register, in case the external runtime
// call clobbers ptrue reg, as we may return to SVE compiled code.
__ reinitialize_ptrue();
__ leave();
// check for pending exceptions
#ifdef ASSERT
Label L;
__ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
__ cbnz(rscratch1, L);
__ should_not_reach_here();
__ bind(L);
#endif // ASSERT
__ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
// codeBlob framesize is in words (not VMRegImpl::slot_size)
RuntimeStub* stub =
RuntimeStub::new_runtime_stub(name,
&code,
frame_complete,
(framesize >> (LogBytesPerWord - LogBytesPerInt)),
oop_maps, false);
return stub->entry_point();
}
class MontgomeryMultiplyGenerator : public MacroAssembler {
Register Pa_base, Pb_base, Pn_base, Pm_base, inv, Rlen, Ra, Rb, Rm, Rn,
@ -8363,16 +8165,6 @@ class StubGenerator: public StubCodeGenerator {
// is referenced by megamorphic call
StubRoutines::_catch_exception_entry = generate_catch_exception();
// Build this early so it's available for the interpreter.
StubRoutines::_throw_StackOverflowError_entry =
generate_throw_exception("StackOverflowError throw_exception",
CAST_FROM_FN_PTR(address,
SharedRuntime::throw_StackOverflowError));
StubRoutines::_throw_delayed_StackOverflowError_entry =
generate_throw_exception("delayed StackOverflowError throw_exception",
CAST_FROM_FN_PTR(address,
SharedRuntime::throw_delayed_StackOverflowError));
// Initialize table for copy memory (arraycopy) check.
if (UnsafeMemoryAccess::_table == nullptr) {
UnsafeMemoryAccess::create_table(8 + 4); // 8 for copyMemory; 4 for setMemory
@ -8408,41 +8200,13 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_cont_thaw = generate_cont_thaw();
StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
JFR_ONLY(generate_jfr_stubs();)
}
#if INCLUDE_JFR
// Removed by this commit: generated both JFR stubs and published their blobs
// and entry points into StubRoutines.
void generate_jfr_stubs() {
StubRoutines::_jfr_write_checkpoint_stub = generate_jfr_write_checkpoint();
StubRoutines::_jfr_write_checkpoint = StubRoutines::_jfr_write_checkpoint_stub->entry_point();
StubRoutines::_jfr_return_lease_stub = generate_jfr_return_lease();
StubRoutines::_jfr_return_lease = StubRoutines::_jfr_return_lease_stub->entry_point();
}
#endif // INCLUDE_JFR
void generate_final_stubs() {
// support for verify_oop (must happen after universe_init)
if (VerifyOops) {
StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
}
StubRoutines::_throw_AbstractMethodError_entry =
generate_throw_exception("AbstractMethodError throw_exception",
CAST_FROM_FN_PTR(address,
SharedRuntime::
throw_AbstractMethodError));
StubRoutines::_throw_IncompatibleClassChangeError_entry =
generate_throw_exception("IncompatibleClassChangeError throw_exception",
CAST_FROM_FN_PTR(address,
SharedRuntime::
throw_IncompatibleClassChangeError));
StubRoutines::_throw_NullPointerException_at_call_entry =
generate_throw_exception("NullPointerException at call throw_exception",
CAST_FROM_FN_PTR(address,
SharedRuntime::
throw_NullPointerException_at_call));
// arraycopy stubs used by compilers
generate_arraycopy_stubs();

View File

@ -752,8 +752,8 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
// Note: the restored frame is not necessarily interpreted.
// Use the shared runtime version of the StackOverflowError.
assert(StubRoutines::throw_StackOverflowError_entry() != nullptr, "stub not yet generated");
__ far_jump(RuntimeAddress(StubRoutines::throw_StackOverflowError_entry()));
assert(SharedRuntime::throw_StackOverflowError_entry() != nullptr, "stub not yet generated");
__ far_jump(RuntimeAddress(SharedRuntime::throw_StackOverflowError_entry()));
// all done with frame size check
__ bind(after_frame_check);

View File

@ -39,6 +39,7 @@
#include "prims/jvmtiExport.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/preserveException.hpp"
@ -140,7 +141,7 @@ void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, bool for_comp
__ bind(L_no_such_method);
// throw exception
__ jump(StubRoutines::throw_AbstractMethodError_entry(), relocInfo::runtime_call_type, Rtemp);
__ jump(SharedRuntime::throw_AbstractMethodError_entry(), relocInfo::runtime_call_type, Rtemp);
}
void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
@ -461,7 +462,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
if (iid == vmIntrinsics::_linkToInterface) {
__ bind(L_incompatible_class_change_error);
__ jump(StubRoutines::throw_IncompatibleClassChangeError_entry(), relocInfo::runtime_call_type, Rtemp);
__ jump(SharedRuntime::throw_IncompatibleClassChangeError_entry(), relocInfo::runtime_call_type, Rtemp);
}
}
}

View File

@ -1728,3 +1728,143 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
}
//------------------------------------------------------------------------------------------------------------------------
// Continuation point for throwing of implicit exceptions that are not handled in
// the current activation. Fabricates an exception oop and initiates normal
// exception dispatching in this frame.
// ARM32 variant: fabricates an implicit exception by calling runtime_entry(thread)
// and then tail-jumping to the shared forward_exception stub.
RuntimeStub* SharedRuntime::generate_throw_exception(const char* name, address runtime_entry) {
int insts_size = 128;
int locs_size = 32;
// TraceTime logs stub generation cost under -Xlog:startuptime.
ResourceMark rm;
const char* timer_msg = "SharedRuntime generate_throw_exception";
TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
CodeBuffer code(name, insts_size, locs_size);
OopMapSet* oop_maps;
int frame_size;
int frame_complete;
oop_maps = new OopMapSet();
MacroAssembler* masm = new MacroAssembler(&code);
address start = __ pc();
// Two-word frame: saved FP and LR.
frame_size = 2;
// Preserve the faulting/return PC for exception dispatch.
__ mov(Rexception_pc, LR);
__ raw_push(FP, LR);
frame_complete = __ pc() - start;
// Any extra arguments are already supposed to be R1 and R2
__ mov(R0, Rthread);
int pc_offset = __ set_last_Java_frame(SP, FP, false, Rtemp);
assert(((__ pc()) - start) == __ offset(), "warning: start differs from code_begin");
__ call(runtime_entry);
if (pc_offset == -1) {
pc_offset = __ offset();
}
// Generate oop map
OopMap* map = new OopMap(frame_size*VMRegImpl::slots_per_word, 0);
oop_maps->add_gc_map(pc_offset, map);
__ reset_last_Java_frame(Rtemp); // Rtemp free since scratched by far call
__ raw_pop(FP, LR);
// The runtime call installed a pending exception; dispatch it.
__ jump(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type, Rtemp);
RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete,
frame_size, oop_maps, false);
return stub;
}
#if INCLUDE_JFR
// For c2: c_rarg0 is junk, call to runtime to write a checkpoint.
// It returns a jobject handle to the event writer.
// The handle is dereferenced and the return value is the event writer oop.
// ARM32 variant of the JFR checkpoint stub: calls write_checkpoint and resolves
// the returned jobject handle into the event-writer oop in R0.
RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
enum layout {
r1_off,
r2_off,
return_off,
framesize // inclusive of return address
};
CodeBuffer code("jfr_write_checkpoint", 512, 64);
MacroAssembler* masm = new MacroAssembler(&code);
address start = __ pc();
// Preserve caller's R1/R2 and the return address across the runtime call.
__ raw_push(R1, R2, LR);
address the_pc = __ pc();
int frame_complete = the_pc - start;
__ set_last_Java_frame(SP, FP, true, Rtemp);
__ mov(c_rarg0, Rthread);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), c_rarg0);
__ reset_last_Java_frame(Rtemp);
// R0 is jobject handle result, unpack and process it through a barrier.
__ resolve_global_jobject(R0, Rtemp, R1);
__ raw_pop(R1, R2, LR);
__ ret();
OopMapSet* oop_maps = new OopMapSet();
OopMap* map = new OopMap(framesize, 1);
oop_maps->add_gc_map(frame_complete, map);
RuntimeStub* stub =
RuntimeStub::new_runtime_stub(code.name(),
&code,
frame_complete,
(framesize >> (LogBytesPerWord - LogBytesPerInt)),
oop_maps,
false);
return stub;
}
// For c2: call to return a leased buffer.
// ARM32 variant of the JFR lease-return stub: calls return_lease with the current
// thread; no result value is produced.
RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
enum layout {
r1_off,
r2_off,
return_off,
framesize // inclusive of return address
};
CodeBuffer code("jfr_return_lease", 512, 64);
MacroAssembler* masm = new MacroAssembler(&code);
address start = __ pc();
// Preserve caller's R1/R2 and the return address across the runtime call.
__ raw_push(R1, R2, LR);
address the_pc = __ pc();
int frame_complete = the_pc - start;
__ set_last_Java_frame(SP, FP, true, Rtemp);
__ mov(c_rarg0, Rthread);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), c_rarg0);
__ reset_last_Java_frame(Rtemp);
__ raw_pop(R1, R2, LR);
__ ret();
OopMapSet* oop_maps = new OopMapSet();
OopMap* map = new OopMap(framesize, 1);
oop_maps->add_gc_map(frame_complete, map);
RuntimeStub* stub =
RuntimeStub::new_runtime_stub(code.name(),
&code,
frame_complete,
(framesize >> (LogBytesPerWord - LogBytesPerInt)),
oop_maps,
false);
return stub;
}
#endif // INCLUDE_JFR

View File

@ -2961,52 +2961,6 @@ class StubGenerator: public StubCodeGenerator {
#undef __
#define __ masm->
//------------------------------------------------------------------------------------------------------------------------
// Continuation point for throwing of implicit exceptions that are not handled in
// the current activation. Fabricates an exception oop and initiates normal
// exception dispatching in this frame.
// Pre-relocation arm StubGenerator copy, removed by this commit; unlike the
// SharedRuntime replacement it returned the stub's entry point address rather
// than the RuntimeStub itself.
address generate_throw_exception(const char* name, address runtime_entry) {
int insts_size = 128;
int locs_size = 32;
CodeBuffer code(name, insts_size, locs_size);
OopMapSet* oop_maps;
int frame_size;
int frame_complete;
oop_maps = new OopMapSet();
MacroAssembler* masm = new MacroAssembler(&code);
address start = __ pc();
frame_size = 2;
// Preserve the faulting/return PC for exception dispatch.
__ mov(Rexception_pc, LR);
__ raw_push(FP, LR);
frame_complete = __ pc() - start;
// Any extra arguments are already supposed to be R1 and R2
__ mov(R0, Rthread);
int pc_offset = __ set_last_Java_frame(SP, FP, false, Rtemp);
assert(((__ pc()) - start) == __ offset(), "warning: start differs from code_begin");
__ call(runtime_entry);
if (pc_offset == -1) {
pc_offset = __ offset();
}
// Generate oop map
OopMap* map = new OopMap(frame_size*VMRegImpl::slots_per_word, 0);
oop_maps->add_gc_map(pc_offset, map);
__ reset_last_Java_frame(Rtemp); // Rtemp free since scratched by far call
__ raw_pop(FP, LR);
__ jump(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type, Rtemp);
RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete,
frame_size, oop_maps, false);
return stub->entry_point();
}
address generate_cont_thaw(const char* label, Continuation::thaw_kind kind) {
if (!Continuations::enabled()) return nullptr;
Unimplemented();
@ -3025,95 +2979,6 @@ class StubGenerator: public StubCodeGenerator {
return generate_cont_thaw("Cont thaw return barrier exception", Continuation::thaw_return_barrier_exception);
}
#if INCLUDE_JFR
// For c2: c_rarg0 is junk, call to runtime to write a checkpoint.
// It returns a jobject handle to the event writer.
// The handle is dereferenced and the return value is the event writer oop.
// Pre-relocation arm StubGenerator copy, removed by this commit; functionally
// identical to SharedRuntime::generate_jfr_write_checkpoint.
static RuntimeStub* generate_jfr_write_checkpoint() {
enum layout {
r1_off,
r2_off,
return_off,
framesize // inclusive of return address
};
CodeBuffer code("jfr_write_checkpoint", 512, 64);
MacroAssembler* masm = new MacroAssembler(&code);
address start = __ pc();
// Preserve caller's R1/R2 and the return address across the runtime call.
__ raw_push(R1, R2, LR);
address the_pc = __ pc();
int frame_complete = the_pc - start;
__ set_last_Java_frame(SP, FP, true, Rtemp);
__ mov(c_rarg0, Rthread);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), c_rarg0);
__ reset_last_Java_frame(Rtemp);
// R0 is jobject handle result, unpack and process it through a barrier.
__ resolve_global_jobject(R0, Rtemp, R1);
__ raw_pop(R1, R2, LR);
__ ret();
OopMapSet* oop_maps = new OopMapSet();
OopMap* map = new OopMap(framesize, 1);
oop_maps->add_gc_map(frame_complete, map);
RuntimeStub* stub =
RuntimeStub::new_runtime_stub(code.name(),
&code,
frame_complete,
(framesize >> (LogBytesPerWord - LogBytesPerInt)),
oop_maps,
false);
return stub;
}
// For c2: call to return a leased buffer.
// Pre-relocation arm StubGenerator copy, removed by this commit; functionally
// identical to SharedRuntime::generate_jfr_return_lease.
static RuntimeStub* generate_jfr_return_lease() {
enum layout {
r1_off,
r2_off,
return_off,
framesize // inclusive of return address
};
CodeBuffer code("jfr_return_lease", 512, 64);
MacroAssembler* masm = new MacroAssembler(&code);
address start = __ pc();
// Preserve caller's R1/R2 and the return address across the runtime call.
__ raw_push(R1, R2, LR);
address the_pc = __ pc();
int frame_complete = the_pc - start;
__ set_last_Java_frame(SP, FP, true, Rtemp);
__ mov(c_rarg0, Rthread);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), c_rarg0);
__ reset_last_Java_frame(Rtemp);
__ raw_pop(R1, R2, LR);
__ ret();
OopMapSet* oop_maps = new OopMapSet();
OopMap* map = new OopMap(framesize, 1);
oop_maps->add_gc_map(frame_complete, map);
RuntimeStub* stub =
RuntimeStub::new_runtime_stub(code.name(),
&code,
frame_complete,
(framesize >> (LogBytesPerWord - LogBytesPerInt)),
oop_maps,
false);
return stub;
}
#endif // INCLUDE_JFR
//---------------------------------------------------------------------------
// Initialization
@ -3132,8 +2997,6 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_catch_exception_entry = generate_catch_exception();
// stub for throwing stack overflow error used both by interpreter and compiler
StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
if (UnsafeMemoryAccess::_table == nullptr) {
UnsafeMemoryAccess::create_table(32 + 4); // 32 for copyMemory; 4 for setMemory
}
@ -3155,28 +3018,11 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_cont_thaw = generate_cont_thaw();
StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
JFR_ONLY(generate_jfr_stubs();)
}
#if INCLUDE_JFR
// Removed by this commit: generated both JFR stubs and published their blobs
// and entry points into StubRoutines.
void generate_jfr_stubs() {
StubRoutines::_jfr_write_checkpoint_stub = generate_jfr_write_checkpoint();
StubRoutines::_jfr_write_checkpoint = StubRoutines::_jfr_write_checkpoint_stub->entry_point();
StubRoutines::_jfr_return_lease_stub = generate_jfr_return_lease();
StubRoutines::_jfr_return_lease = StubRoutines::_jfr_return_lease_stub->entry_point();
}
#endif // INCLUDE_JFR
void generate_final_stubs() {
// Generates all stubs and initializes the entry points
// These entry points require SharedInfo::stack0 to be set up in non-core builds
// and need to be relocatable, so they each fabricate a RuntimeStub internally.
StubRoutines::_throw_AbstractMethodError_entry = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError));
StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError));
StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call));
//------------------------------------------------------------------------------------------------------------------------
// entry points that are platform specific

View File

@ -560,7 +560,7 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
__ cmp(Rtemp, R0);
__ mov(SP, Rsender_sp, ls); // restore SP
__ b(StubRoutines::throw_StackOverflowError_entry(), ls);
__ b(SharedRuntime::throw_StackOverflowError_entry(), ls);
}

View File

@ -1501,7 +1501,7 @@ void MacroAssembler::reserved_stack_check(Register return_pc) {
call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), R16_thread);
pop_frame();
mtlr(return_pc);
load_const_optimized(R0, StubRoutines::throw_delayed_StackOverflowError_entry());
load_const_optimized(R0, SharedRuntime::throw_delayed_StackOverflowError_entry());
mtctr(R0);
bctr();

View File

@ -158,8 +158,8 @@ void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register meth
__ bctr();
__ bind(L_no_such_method);
assert(StubRoutines::throw_AbstractMethodError_entry() != nullptr, "not yet generated!");
__ load_const_optimized(target, StubRoutines::throw_AbstractMethodError_entry());
assert(SharedRuntime::throw_AbstractMethodError_entry() != nullptr, "not yet generated!");
__ load_const_optimized(target, SharedRuntime::throw_AbstractMethodError_entry());
__ mtctr(target);
__ bctr();
}
@ -489,7 +489,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
if (iid == vmIntrinsics::_linkToInterface) {
__ BIND(L_incompatible_class_change_error);
__ load_const_optimized(temp1, StubRoutines::throw_IncompatibleClassChangeError_entry());
__ load_const_optimized(temp1, SharedRuntime::throw_IncompatibleClassChangeError_entry());
__ mtctr(temp1);
__ bctr();
}

View File

@ -44,6 +44,7 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
@ -3404,6 +3405,100 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
oop_maps, true);
}
// Continuation point for throwing of implicit exceptions that are
// not handled in the current activation. Fabricates an exception
// oop and initiates normal exception dispatching in this
// frame. Only callee-saved registers are preserved (through the
// normal register window / RegisterMap handling). If the compiler
// needs all registers to be preserved between the fault point and
// the exception handler then it must assume responsibility for that
// in AbstractCompiler::continuation_for_implicit_null_exception or
// continuation_for_implicit_division_by_zero_exception. All other
// implicit exceptions (e.g., NullPointerException or
// AbstractMethodError on entry) are either at call sites or
// otherwise assume that stack unwinding will be initiated, so
// caller saved registers were assumed volatile in the compiler.
//
// Note that we generate only this stub into a RuntimeStub, because
// it needs to be properly traversed and ignored during GC, so we
// change the meaning of the "__" macro within this method.
//
// Note: the routine set_pc_not_at_call_for_caller in
// SharedRuntime.cpp requires that this code be generated into a
// RuntimeStub.
// Generate a RuntimeStub that fabricates and dispatches an implicit
// exception (PPC): pushes a frame, records last_Java_frame, calls
// 'runtime_entry' with the current thread as the only argument, then
// forwards the now-pending exception via the shared forward_exception
// entry. The stub must be a RuntimeStub so it is properly traversed
// (and its frame walked) during GC.
//
// name          - stub name, used for the CodeBuffer and RuntimeStub.
// runtime_entry - address of the C++ runtime routine that raises the
//                 exception (takes the thread as its sole argument).
// Returns the newly created RuntimeStub (with OopMap attached).
RuntimeStub* SharedRuntime::generate_throw_exception(const char* name, address runtime_entry) {
ResourceMark rm;
const char* timer_msg = "SharedRuntime generate_throw_exception";
TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
// Extra DEBUG_ONLY space covers the assert-only pending-exception check below.
CodeBuffer code(name, 1024 DEBUG_ONLY(+ 512), 0);
MacroAssembler* masm = new MacroAssembler(&code);
OopMapSet* oop_maps = new OopMapSet();
// Frame holds only the native ABI register-argument save area; the
// OopMap is empty (no oops live across the runtime call).
int frame_size_in_bytes = frame::native_abi_reg_args_size;
OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
address start = __ pc();
__ save_LR(R11_scratch1);
// Push a frame.
__ push_frame_reg_args(0, R11_scratch1);
address frame_complete_pc = __ pc();
// Note that we always have a runtime stub frame on the top of
// stack by this point. Remember the offset of the instruction
// whose address will be moved to R11_scratch1.
address gc_map_pc = __ get_PC_trash_LR(R11_scratch1);
__ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1);
__ mr(R3_ARG1, R16_thread);
// ELFv1 ABIs call through a function descriptor; ELFv2 calls the
// entry address directly.
#if defined(ABI_ELFv2)
__ call_c(runtime_entry, relocInfo::none);
#else
__ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, runtime_entry), relocInfo::none);
#endif
// Set an oopmap for the call site.
oop_maps->add_gc_map((int)(gc_map_pc - start), map);
__ reset_last_Java_frame();
#ifdef ASSERT
// Make sure that this code is only executed if there is a pending
// exception.
{
Label L;
__ ld(R0,
in_bytes(Thread::pending_exception_offset()),
R16_thread);
__ cmpdi(CCR0, R0, 0);
__ bne(CCR0, L);
__ stop("SharedRuntime::throw_exception: no pending exception");
__ bind(L);
}
#endif
// Pop frame.
__ pop_frame();
__ restore_LR(R11_scratch1);
// Tail-jump to the common exception forwarding stub via CTR.
__ load_const(R11_scratch1, StubRoutines::forward_exception_entry());
__ mtctr(R11_scratch1);
__ bctr();
// Create runtime stub with OopMap.
RuntimeStub* stub =
RuntimeStub::new_runtime_stub(name, &code,
/*frame_complete=*/ (int)(frame_complete_pc - start),
frame_size_in_bytes/wordSize,
oop_maps,
false);
return stub;
}
//------------------------------Montgomery multiplication------------------------
//
@ -3647,3 +3742,81 @@ void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints,
reverse_words(m, (unsigned long *)m_ints, longwords);
}
#if INCLUDE_JFR
// For c2: c_rarg0 is junk, call to runtime to write a checkpoint.
// It returns a jobject handle to the event writer.
// The handle is dereferenced and the return value is the event writer oop.
// PPC JFR checkpoint stub (used by C2): calls the runtime to write a
// checkpoint; the runtime returns a jobject handle to the event writer,
// which is dereferenced here so the stub returns the event writer oop
// in R3_RET. Returns the generated RuntimeStub.
RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
CodeBuffer code("jfr_write_checkpoint", 512, 64);
MacroAssembler* masm = new MacroAssembler(&code);
Register tmp1 = R10_ARG8;
Register tmp2 = R9_ARG7;
// Frame size in VMRegImpl stack slots (native ABI reg-args area only).
int framesize = frame::native_abi_reg_args_size / VMRegImpl::stack_slot_size;
address start = __ pc();
__ mflr(tmp1);
__ std(tmp1, _abi0(lr), R1_SP); // save return pc
__ push_frame_reg_args(0, tmp1);
int frame_complete = __ pc() - start;
// No pc register passed: last_Java_pc comes from the call below.
__ set_last_Java_frame(R1_SP, noreg);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), R16_thread);
address calls_return_pc = __ last_calls_return_pc();
__ reset_last_Java_frame();
// The handle is dereferenced through a load barrier.
__ resolve_global_jobject(R3_RET, tmp1, tmp2, MacroAssembler::PRESERVATION_NONE);
__ pop_frame();
// Restore the saved return pc and return.
__ ld(tmp1, _abi0(lr), R1_SP);
__ mtlr(tmp1);
__ blr();
// Empty OopMap at the runtime call site (no oops live in the frame).
OopMapSet* oop_maps = new OopMapSet();
OopMap* map = new OopMap(framesize, 0);
oop_maps->add_gc_map(calls_return_pc - start, map);
RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
RuntimeStub::new_runtime_stub(code.name(),
&code, frame_complete,
(framesize >> (LogBytesPerWord - LogBytesPerInt)),
oop_maps, false);
return stub;
}
// For c2: call to return a leased buffer.
// PPC JFR lease-return stub (used by C2): calls the runtime to return a
// leased buffer. Mirrors generate_jfr_write_checkpoint() but has no
// result to resolve. Returns the generated RuntimeStub.
RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
CodeBuffer code("jfr_return_lease", 512, 64);
MacroAssembler* masm = new MacroAssembler(&code);
Register tmp1 = R10_ARG8;
Register tmp2 = R9_ARG7;
// Frame size in VMRegImpl stack slots (native ABI reg-args area only).
int framesize = frame::native_abi_reg_args_size / VMRegImpl::stack_slot_size;
address start = __ pc();
__ mflr(tmp1);
__ std(tmp1, _abi0(lr), R1_SP); // save return pc
__ push_frame_reg_args(0, tmp1);
int frame_complete = __ pc() - start;
// No pc register passed: last_Java_pc comes from the call below.
__ set_last_Java_frame(R1_SP, noreg);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), R16_thread);
address calls_return_pc = __ last_calls_return_pc();
__ reset_last_Java_frame();
__ pop_frame();
// Restore the saved return pc and return.
__ ld(tmp1, _abi0(lr), R1_SP);
__ mtlr(tmp1);
__ blr();
// Empty OopMap at the runtime call site (no oops live in the frame).
OopMapSet* oop_maps = new OopMapSet();
OopMap* map = new OopMap(framesize, 0);
oop_maps->add_gc_map(calls_return_pc - start, map);
RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
RuntimeStub::new_runtime_stub(code.name(),
&code, frame_complete,
(framesize >> (LogBytesPerWord - LogBytesPerInt)),
oop_maps, false);
return stub;
}
#endif // INCLUDE_JFR

View File

@ -517,109 +517,6 @@ class StubGenerator: public StubCodeGenerator {
}
#undef __
#define __ masm->
// Continuation point for throwing of implicit exceptions that are
// not handled in the current activation. Fabricates an exception
// oop and initiates normal exception dispatching in this
// frame. Only callee-saved registers are preserved (through the
// normal register window / RegisterMap handling). If the compiler
// needs all registers to be preserved between the fault point and
// the exception handler then it must assume responsibility for that
// in AbstractCompiler::continuation_for_implicit_null_exception or
// continuation_for_implicit_division_by_zero_exception. All other
// implicit exceptions (e.g., NullPointerException or
// AbstractMethodError on entry) are either at call sites or
// otherwise assume that stack unwinding will be initiated, so
// caller saved registers were assumed volatile in the compiler.
//
// Note that we generate only this stub into a RuntimeStub, because
// it needs to be properly traversed and ignored during GC, so we
// change the meaning of the "__" macro within this method.
//
// Note: the routine set_pc_not_at_call_for_caller in
// SharedRuntime.cpp requires that this code be generated into a
// RuntimeStub.
address generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc,
Register arg1 = noreg, Register arg2 = noreg) {
CodeBuffer code(name, 1024 DEBUG_ONLY(+ 512), 0);
MacroAssembler* masm = new MacroAssembler(&code);
OopMapSet* oop_maps = new OopMapSet();
int frame_size_in_bytes = frame::native_abi_reg_args_size;
OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
address start = __ pc();
__ save_LR(R11_scratch1);
// Push a frame.
__ push_frame_reg_args(0, R11_scratch1);
address frame_complete_pc = __ pc();
if (restore_saved_exception_pc) {
__ unimplemented("StubGenerator::throw_exception with restore_saved_exception_pc");
}
// Note that we always have a runtime stub frame on the top of
// stack by this point. Remember the offset of the instruction
// whose address will be moved to R11_scratch1.
address gc_map_pc = __ get_PC_trash_LR(R11_scratch1);
__ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1);
__ mr(R3_ARG1, R16_thread);
if (arg1 != noreg) {
__ mr(R4_ARG2, arg1);
}
if (arg2 != noreg) {
__ mr(R5_ARG3, arg2);
}
#if defined(ABI_ELFv2)
__ call_c(runtime_entry, relocInfo::none);
#else
__ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, runtime_entry), relocInfo::none);
#endif
// Set an oopmap for the call site.
oop_maps->add_gc_map((int)(gc_map_pc - start), map);
__ reset_last_Java_frame();
#ifdef ASSERT
// Make sure that this code is only executed if there is a pending
// exception.
{
Label L;
__ ld(R0,
in_bytes(Thread::pending_exception_offset()),
R16_thread);
__ cmpdi(CCR0, R0, 0);
__ bne(CCR0, L);
__ stop("StubRoutines::throw_exception: no pending exception");
__ bind(L);
}
#endif
// Pop frame.
__ pop_frame();
__ restore_LR(R11_scratch1);
__ load_const(R11_scratch1, StubRoutines::forward_exception_entry());
__ mtctr(R11_scratch1);
__ bctr();
// Create runtime stub with OopMap.
RuntimeStub* stub =
RuntimeStub::new_runtime_stub(name, &code,
/*frame_complete=*/ (int)(frame_complete_pc - start),
frame_size_in_bytes/wordSize,
oop_maps,
false);
return stub->entry_point();
}
#undef __
#define __ _masm->
@ -4616,7 +4513,7 @@ address generate_lookup_secondary_supers_table_stub(u1 super_klass_index) {
Label thaw_success;
__ cmpdi(CCR0, R3_RET, 0);
__ bne(CCR0, thaw_success);
__ load_const_optimized(tmp1, (StubRoutines::throw_StackOverflowError_entry()), R0);
__ load_const_optimized(tmp1, (SharedRuntime::throw_StackOverflowError_entry()), R0);
__ mtctr(tmp1); __ bctr();
__ bind(thaw_success);
@ -4675,84 +4572,6 @@ address generate_lookup_secondary_supers_table_stub(u1 super_klass_index) {
return generate_cont_thaw("Cont thaw return barrier exception", Continuation::thaw_return_barrier_exception);
}
#if INCLUDE_JFR
// For c2: c_rarg0 is junk, call to runtime to write a checkpoint.
// It returns a jobject handle to the event writer.
// The handle is dereferenced and the return value is the event writer oop.
RuntimeStub* generate_jfr_write_checkpoint() {
CodeBuffer code("jfr_write_checkpoint", 512, 64);
MacroAssembler* _masm = new MacroAssembler(&code);
Register tmp1 = R10_ARG8;
Register tmp2 = R9_ARG7;
int framesize = frame::native_abi_reg_args_size / VMRegImpl::stack_slot_size;
address start = __ pc();
__ mflr(tmp1);
__ std(tmp1, _abi0(lr), R1_SP); // save return pc
__ push_frame_reg_args(0, tmp1);
int frame_complete = __ pc() - start;
__ set_last_Java_frame(R1_SP, noreg);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), R16_thread);
address calls_return_pc = __ last_calls_return_pc();
__ reset_last_Java_frame();
// The handle is dereferenced through a load barrier.
__ resolve_global_jobject(R3_RET, tmp1, tmp2, MacroAssembler::PRESERVATION_NONE);
__ pop_frame();
__ ld(tmp1, _abi0(lr), R1_SP);
__ mtlr(tmp1);
__ blr();
OopMapSet* oop_maps = new OopMapSet();
OopMap* map = new OopMap(framesize, 0);
oop_maps->add_gc_map(calls_return_pc - start, map);
RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
RuntimeStub::new_runtime_stub(code.name(),
&code, frame_complete,
(framesize >> (LogBytesPerWord - LogBytesPerInt)),
oop_maps, false);
return stub;
}
// For c2: call to return a leased buffer.
RuntimeStub* generate_jfr_return_lease() {
CodeBuffer code("jfr_return_lease", 512, 64);
MacroAssembler* _masm = new MacroAssembler(&code);
Register tmp1 = R10_ARG8;
Register tmp2 = R9_ARG7;
int framesize = frame::native_abi_reg_args_size / VMRegImpl::stack_slot_size;
address start = __ pc();
__ mflr(tmp1);
__ std(tmp1, _abi0(lr), R1_SP); // save return pc
__ push_frame_reg_args(0, tmp1);
int frame_complete = __ pc() - start;
__ set_last_Java_frame(R1_SP, noreg);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), R16_thread);
address calls_return_pc = __ last_calls_return_pc();
__ reset_last_Java_frame();
__ pop_frame();
__ ld(tmp1, _abi0(lr), R1_SP);
__ mtlr(tmp1);
__ blr();
OopMapSet* oop_maps = new OopMapSet();
OopMap* map = new OopMap(framesize, 0);
oop_maps->add_gc_map(calls_return_pc - start, map);
RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
RuntimeStub::new_runtime_stub(code.name(),
&code, frame_complete,
(framesize >> (LogBytesPerWord - LogBytesPerInt)),
oop_maps, false);
return stub;
}
#endif // INCLUDE_JFR
// exception handler for upcall stubs
address generate_upcall_stub_exception_handler() {
StubCodeMark mark(this, "StubRoutines", "upcall stub exception handler");
@ -4786,14 +4605,6 @@ address generate_lookup_secondary_supers_table_stub(u1 super_klass_index) {
UnsafeMemoryAccess::create_table(8 + 4); // 8 for copyMemory; 4 for setMemory
}
// Build this early so it's available for the interpreter.
StubRoutines::_throw_StackOverflowError_entry =
generate_throw_exception("StackOverflowError throw_exception",
CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError), false);
StubRoutines::_throw_delayed_StackOverflowError_entry =
generate_throw_exception("delayed StackOverflowError throw_exception",
CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError), false);
// CRC32 Intrinsics.
if (UseCRC32Intrinsics) {
StubRoutines::_crc_table_adr = StubRoutines::ppc::generate_crc_constants(REVERSE_CRC32_POLY);
@ -4812,29 +4623,11 @@ address generate_lookup_secondary_supers_table_stub(u1 super_klass_index) {
StubRoutines::_cont_thaw = generate_cont_thaw();
StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
JFR_ONLY(generate_jfr_stubs();)
}
#if INCLUDE_JFR
void generate_jfr_stubs() {
StubRoutines::_jfr_write_checkpoint_stub = generate_jfr_write_checkpoint();
StubRoutines::_jfr_write_checkpoint = StubRoutines::_jfr_write_checkpoint_stub->entry_point();
StubRoutines::_jfr_return_lease_stub = generate_jfr_return_lease();
StubRoutines::_jfr_return_lease = StubRoutines::_jfr_return_lease_stub->entry_point();
}
#endif // INCLUDE_JFR
void generate_final_stubs() {
// Generates all stubs and initializes the entry points
// These entry points require SharedInfo::stack0 to be set up in
// non-core builds
StubRoutines::_throw_AbstractMethodError_entry = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError), false);
// Handle IncompatibleClassChangeError in itable stubs.
StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError), false);
StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);
// support for verify_oop (must happen after universe_init)
StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();

View File

@ -783,8 +783,8 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rmem_f
__ bgt(CCR0/*is_stack_overflow*/, done);
// The stack overflows. Load target address of the runtime stub and call it.
assert(StubRoutines::throw_StackOverflowError_entry() != nullptr, "generated in wrong order");
__ load_const_optimized(Rscratch1, (StubRoutines::throw_StackOverflowError_entry()), R0);
assert(SharedRuntime::throw_StackOverflowError_entry() != nullptr, "generated in wrong order");
__ load_const_optimized(Rscratch1, (SharedRuntime::throw_StackOverflowError_entry()), R0);
__ mtctr(Rscratch1);
// Restore caller_sp (c2i adapter may exist, but no shrinking of interpreted caller frame).
#ifdef ASSERT

View File

@ -4144,7 +4144,7 @@ void MacroAssembler::reserved_stack_check() {
// We have already removed our own frame.
// throw_delayed_StackOverflowError will think that it's been
// called by our caller.
la(t0, RuntimeAddress(StubRoutines::throw_delayed_StackOverflowError_entry()));
la(t0, RuntimeAddress(SharedRuntime::throw_delayed_StackOverflowError_entry()));
jr(t0);
should_not_reach_here();

View File

@ -120,7 +120,7 @@ void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register meth
__ ld(t0,Address(method, entry_offset));
__ jr(t0);
__ bind(L_no_such_method);
__ far_jump(RuntimeAddress(StubRoutines::throw_AbstractMethodError_entry()));
__ far_jump(RuntimeAddress(SharedRuntime::throw_AbstractMethodError_entry()));
}
void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
@ -441,7 +441,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
jump_from_method_handle(_masm, xmethod, temp1, for_compiler_entry);
if (iid == vmIntrinsics::_linkToInterface) {
__ bind(L_incompatible_class_change_error);
__ far_jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
__ far_jump(RuntimeAddress(SharedRuntime::throw_IncompatibleClassChangeError_entry()));
}
}

View File

@ -48,6 +48,7 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "utilities/formatBuffer.hpp"
@ -65,6 +66,12 @@
#define __ masm->
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif
const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
class RegisterSaver {
@ -2628,3 +2635,191 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
// return the blob
return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
}
// Continuation point for throwing of implicit exceptions that are
// not handled in the current activation. Fabricates an exception
// oop and initiates normal exception dispatching in this
// frame. Since we need to preserve callee-saved values (currently
// only for C2, but done for C1 as well) we need a callee-saved oop
// map and therefore have to make these stubs into RuntimeStubs
// rather than BufferBlobs. If the compiler needs all registers to
// be preserved between the fault point and the exception handler
// then it must assume responsibility for that in
// AbstractCompiler::continuation_for_implicit_null_exception or
// continuation_for_implicit_division_by_zero_exception. All other
// implicit exceptions (e.g., NullPointerException or
// AbstractMethodError on entry) are either at call sites or
// otherwise assume that stack unwinding will be initiated, so
// caller saved registers were assumed volatile in the compiler.
// Generate a RuntimeStub that fabricates and dispatches an implicit
// exception (RISC-V): sets up a minimal two-slot (fp/ra) frame, records
// last_Java_frame, calls 'runtime_entry' with the thread as sole
// argument, then far-jumps to the shared forward_exception entry to
// dispatch the now-pending exception.
//
// name          - stub name, used for the CodeBuffer and RuntimeStub.
// runtime_entry - address of the C++ runtime routine that raises the
//                 exception (takes the thread as its sole argument).
// Returns the newly created RuntimeStub (with OopMap attached).
RuntimeStub* SharedRuntime::generate_throw_exception(const char* name, address runtime_entry) {
// Information about frame layout at time of blocking runtime call.
// Note that we only have to preserve callee-saved registers since
// the compilers are responsible for supplying a continuation point
// if they expect all registers to be preserved.
// n.b. riscv asserts that frame::arg_reg_save_area_bytes == 0
assert_cond(runtime_entry != nullptr);
enum layout {
fp_off = 0,
fp_off2,
return_off,
return_off2,
framesize // inclusive of return address
};
const int insts_size = 1024;
const int locs_size = 64;
ResourceMark rm;
const char* timer_msg = "SharedRuntime generate_throw_exception";
TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
CodeBuffer code(name, insts_size, locs_size);
OopMapSet* oop_maps = new OopMapSet();
MacroAssembler* masm = new MacroAssembler(&code);
assert_cond(oop_maps != nullptr && masm != nullptr);
address start = __ pc();
// This is an inlined and slightly modified version of call_VM
// which has the ability to fetch the return PC out of
// thread-local storage and also sets up last_Java_sp slightly
// differently than the real call_VM
__ enter(); // Save FP and RA before call
assert(is_even(framesize / 2), "sp not 16-byte aligned");
// ra and fp are already in place
__ addi(sp, fp, 0 - ((unsigned)framesize << LogBytesPerInt)); // prolog
int frame_complete = __ pc() - start;
// Set up last_Java_sp and last_Java_fp
address the_pc = __ pc();
__ set_last_Java_frame(sp, fp, the_pc, t0);
// Call runtime
__ mv(c_rarg0, xthread);
BLOCK_COMMENT("call runtime_entry");
__ rt_call(runtime_entry);
// Generate oop map (empty: no oops live across the runtime call)
OopMap* map = new OopMap(framesize, 0);
assert_cond(map != nullptr);
oop_maps->add_gc_map(the_pc - start, map);
__ reset_last_Java_frame(true);
__ leave();
// check for pending exceptions
#ifdef ASSERT
Label L;
__ ld(t0, Address(xthread, Thread::pending_exception_offset()));
__ bnez(t0, L);
__ should_not_reach_here();
__ bind(L);
#endif // ASSERT
__ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
// codeBlob framesize is in words (not VMRegImpl::slot_size)
RuntimeStub* stub =
RuntimeStub::new_runtime_stub(name,
&code,
frame_complete,
(framesize >> (LogBytesPerWord - LogBytesPerInt)),
oop_maps, false);
assert(stub != nullptr, "create runtime stub fail!");
return stub;
}
#if INCLUDE_JFR
// Shared prologue for the RISC-V JFR stubs: record last_Java_frame at
// 'the_pc' and pass the thread as the first C argument.
static void jfr_prologue(address the_pc, MacroAssembler* masm, Register thread) {
__ set_last_Java_frame(sp, fp, the_pc, t0);
__ mv(c_rarg0, thread);
}
// Shared epilogue for the RISC-V JFR stubs: clear last_Java_frame.
static void jfr_epilogue(MacroAssembler* masm) {
__ reset_last_Java_frame(true);
}
// For c2: c_rarg0 is junk, call to runtime to write a checkpoint.
// It returns a jobject handle to the event writer.
// The handle is dereferenced and the return value is the event writer oop.
// RISC-V JFR checkpoint stub (used by C2): calls the runtime to write a
// checkpoint; the runtime returns a jobject handle to the event writer,
// which is resolved here so the stub returns the event writer oop in x10.
// Returns the generated RuntimeStub.
RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
// Minimal frame: saved fp and return address only.
enum layout {
fp_off,
fp_off2,
return_off,
return_off2,
framesize // inclusive of return address
};
int insts_size = 1024;
int locs_size = 64;
CodeBuffer code("jfr_write_checkpoint", insts_size, locs_size);
OopMapSet* oop_maps = new OopMapSet();
MacroAssembler* masm = new MacroAssembler(&code);
address start = __ pc();
__ enter();
int frame_complete = __ pc() - start;
address the_pc = __ pc();
// Record last_Java_frame and load the thread argument.
jfr_prologue(the_pc, masm, xthread);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), 1);
jfr_epilogue(masm);
// Dereference the returned jobject handle (through the load barrier).
__ resolve_global_jobject(x10, t0, t1);
__ leave();
__ ret();
OopMap* map = new OopMap(framesize, 1);
oop_maps->add_gc_map(the_pc - start, map);
RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
RuntimeStub::new_runtime_stub("jfr_write_checkpoint", &code, frame_complete,
(framesize >> (LogBytesPerWord - LogBytesPerInt)),
oop_maps, false);
return stub;
}
// For c2: call to return a leased buffer.
// RISC-V JFR lease-return stub (used by C2): calls the runtime to return
// a leased buffer. Mirrors generate_jfr_write_checkpoint() but has no
// result to resolve. Returns the generated RuntimeStub.
RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
// Minimal frame: saved fp and return address only.
enum layout {
fp_off,
fp_off2,
return_off,
return_off2,
framesize // inclusive of return address
};
int insts_size = 1024;
int locs_size = 64;
CodeBuffer code("jfr_return_lease", insts_size, locs_size);
OopMapSet* oop_maps = new OopMapSet();
MacroAssembler* masm = new MacroAssembler(&code);
address start = __ pc();
__ enter();
int frame_complete = __ pc() - start;
address the_pc = __ pc();
// Record last_Java_frame and load the thread argument.
jfr_prologue(the_pc, masm, xthread);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), 1);
jfr_epilogue(masm);
__ leave();
__ ret();
OopMap* map = new OopMap(framesize, 1);
oop_maps->add_gc_map(the_pc - start, map);
RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
RuntimeStub::new_runtime_stub("jfr_return_lease", &code, frame_complete,
(framesize >> (LogBytesPerWord - LogBytesPerInt)),
oop_maps, false);
return stub;
}
#endif // INCLUDE_JFR

View File

@ -3774,7 +3774,7 @@ class StubGenerator: public StubCodeGenerator {
Label thaw_success;
// t1 contains the size of the frames to thaw, 0 if overflow or no more frames
__ bnez(t1, thaw_success);
__ la(t0, RuntimeAddress(StubRoutines::throw_StackOverflowError_entry()));
__ la(t0, RuntimeAddress(SharedRuntime::throw_StackOverflowError_entry()));
__ jr(t0);
__ bind(thaw_success);
@ -5834,97 +5834,6 @@ static const int64_t right_3_bits = right_n_bits(3);
return start;
}
#if INCLUDE_JFR
static void jfr_prologue(address the_pc, MacroAssembler* _masm, Register thread) {
__ set_last_Java_frame(sp, fp, the_pc, t0);
__ mv(c_rarg0, thread);
}
static void jfr_epilogue(MacroAssembler* _masm) {
__ reset_last_Java_frame(true);
}
// For c2: c_rarg0 is junk, call to runtime to write a checkpoint.
// It returns a jobject handle to the event writer.
// The handle is dereferenced and the return value is the event writer oop.
static RuntimeStub* generate_jfr_write_checkpoint() {
enum layout {
fp_off,
fp_off2,
return_off,
return_off2,
framesize // inclusive of return address
};
int insts_size = 1024;
int locs_size = 64;
CodeBuffer code("jfr_write_checkpoint", insts_size, locs_size);
OopMapSet* oop_maps = new OopMapSet();
MacroAssembler* masm = new MacroAssembler(&code);
MacroAssembler* _masm = masm;
address start = __ pc();
__ enter();
int frame_complete = __ pc() - start;
address the_pc = __ pc();
jfr_prologue(the_pc, _masm, xthread);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), 1);
jfr_epilogue(_masm);
__ resolve_global_jobject(x10, t0, t1);
__ leave();
__ ret();
OopMap* map = new OopMap(framesize, 1);
oop_maps->add_gc_map(the_pc - start, map);
RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
RuntimeStub::new_runtime_stub("jfr_write_checkpoint", &code, frame_complete,
(framesize >> (LogBytesPerWord - LogBytesPerInt)),
oop_maps, false);
return stub;
}
// For c2: call to return a leased buffer.
static RuntimeStub* generate_jfr_return_lease() {
enum layout {
fp_off,
fp_off2,
return_off,
return_off2,
framesize // inclusive of return address
};
int insts_size = 1024;
int locs_size = 64;
CodeBuffer code("jfr_return_lease", insts_size, locs_size);
OopMapSet* oop_maps = new OopMapSet();
MacroAssembler* masm = new MacroAssembler(&code);
MacroAssembler* _masm = masm;
address start = __ pc();
__ enter();
int frame_complete = __ pc() - start;
address the_pc = __ pc();
jfr_prologue(the_pc, _masm, xthread);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), 1);
jfr_epilogue(_masm);
__ leave();
__ ret();
OopMap* map = new OopMap(framesize, 1);
oop_maps->add_gc_map(the_pc - start, map);
RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
RuntimeStub::new_runtime_stub("jfr_return_lease", &code, frame_complete,
(framesize >> (LogBytesPerWord - LogBytesPerInt)),
oop_maps, false);
return stub;
}
#endif // INCLUDE_JFR
// exception handler for upcall stubs
address generate_upcall_stub_exception_handler() {
StubCodeMark mark(this, "StubRoutines", "upcall stub exception handler");
@ -5939,114 +5848,6 @@ static const int64_t right_3_bits = right_n_bits(3);
return start;
}
// Continuation point for throwing of implicit exceptions that are
// not handled in the current activation. Fabricates an exception
// oop and initiates normal exception dispatching in this
// frame. Since we need to preserve callee-saved values (currently
// only for C2, but done for C1 as well) we need a callee-saved oop
// map and therefore have to make these stubs into RuntimeStubs
// rather than BufferBlobs. If the compiler needs all registers to
// be preserved between the fault point and the exception handler
// then it must assume responsibility for that in
// AbstractCompiler::continuation_for_implicit_null_exception or
// continuation_for_implicit_division_by_zero_exception. All other
// implicit exceptions (e.g., NullPointerException or
// AbstractMethodError on entry) are either at call sites or
// otherwise assume that stack unwinding will be initiated, so
// caller saved registers were assumed volatile in the compiler.
#undef __
#define __ masm->
address generate_throw_exception(const char* name,
address runtime_entry,
Register arg1 = noreg,
Register arg2 = noreg) {
// Information about frame layout at time of blocking runtime call.
// Note that we only have to preserve callee-saved registers since
// the compilers are responsible for supplying a continuation point
// if they expect all registers to be preserved.
// n.b. riscv asserts that frame::arg_reg_save_area_bytes == 0
assert_cond(runtime_entry != nullptr);
enum layout {
fp_off = 0,
fp_off2,
return_off,
return_off2,
framesize // inclusive of return address
};
const int insts_size = 1024;
const int locs_size = 64;
CodeBuffer code(name, insts_size, locs_size);
OopMapSet* oop_maps = new OopMapSet();
MacroAssembler* masm = new MacroAssembler(&code);
assert_cond(oop_maps != nullptr && masm != nullptr);
address start = __ pc();
// This is an inlined and slightly modified version of call_VM
// which has the ability to fetch the return PC out of
// thread-local storage and also sets up last_Java_sp slightly
// differently than the real call_VM
__ enter(); // Save FP and RA before call
assert(is_even(framesize / 2), "sp not 16-byte aligned");
// ra and fp are already in place
__ addi(sp, fp, 0 - ((unsigned)framesize << LogBytesPerInt)); // prolog
int frame_complete = __ pc() - start;
// Set up last_Java_sp and last_Java_fp
address the_pc = __ pc();
__ set_last_Java_frame(sp, fp, the_pc, t0);
// Call runtime
if (arg1 != noreg) {
assert(arg2 != c_rarg1, "clobbered");
__ mv(c_rarg1, arg1);
}
if (arg2 != noreg) {
__ mv(c_rarg2, arg2);
}
__ mv(c_rarg0, xthread);
BLOCK_COMMENT("call runtime_entry");
__ rt_call(runtime_entry);
// Generate oop map
OopMap* map = new OopMap(framesize, 0);
assert_cond(map != nullptr);
oop_maps->add_gc_map(the_pc - start, map);
__ reset_last_Java_frame(true);
__ leave();
// check for pending exceptions
#ifdef ASSERT
Label L;
__ ld(t0, Address(xthread, Thread::pending_exception_offset()));
__ bnez(t0, L);
__ should_not_reach_here();
__ bind(L);
#endif // ASSERT
__ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
// codeBlob framesize is in words (not VMRegImpl::slot_size)
RuntimeStub* stub =
RuntimeStub::new_runtime_stub(name,
&code,
frame_complete,
(framesize >> (LogBytesPerWord - LogBytesPerInt)),
oop_maps, false);
assert(stub != nullptr, "create runtime stub fail!");
return stub->entry_point();
}
#undef __
// Initialization
@ -6071,16 +5872,6 @@ static const int64_t right_3_bits = right_n_bits(3);
// is referenced by megamorphic call
StubRoutines::_catch_exception_entry = generate_catch_exception();
// Build this early so it's available for the interpreter.
StubRoutines::_throw_StackOverflowError_entry =
generate_throw_exception("StackOverflowError throw_exception",
CAST_FROM_FN_PTR(address,
SharedRuntime::throw_StackOverflowError));
StubRoutines::_throw_delayed_StackOverflowError_entry =
generate_throw_exception("delayed StackOverflowError throw_exception",
CAST_FROM_FN_PTR(address,
SharedRuntime::throw_delayed_StackOverflowError));
if (UseCRC32Intrinsics) {
// set table address before stub generation which use it
StubRoutines::_crc_table_adr = (address)StubRoutines::riscv::_crc_table;
@ -6093,42 +5884,14 @@ static const int64_t right_3_bits = right_n_bits(3);
StubRoutines::_cont_thaw = generate_cont_thaw();
StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
JFR_ONLY(generate_jfr_stubs();)
}
#if INCLUDE_JFR
void generate_jfr_stubs() {
StubRoutines::_jfr_write_checkpoint_stub = generate_jfr_write_checkpoint();
StubRoutines::_jfr_write_checkpoint = StubRoutines::_jfr_write_checkpoint_stub->entry_point();
StubRoutines::_jfr_return_lease_stub = generate_jfr_return_lease();
StubRoutines::_jfr_return_lease = StubRoutines::_jfr_return_lease_stub->entry_point();
}
#endif // INCLUDE_JFR
void generate_final_stubs() {
// support for verify_oop (must happen after universe_init)
if (VerifyOops) {
StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
}
StubRoutines::_throw_AbstractMethodError_entry =
generate_throw_exception("AbstractMethodError throw_exception",
CAST_FROM_FN_PTR(address,
SharedRuntime::
throw_AbstractMethodError));
StubRoutines::_throw_IncompatibleClassChangeError_entry =
generate_throw_exception("IncompatibleClassChangeError throw_exception",
CAST_FROM_FN_PTR(address,
SharedRuntime::
throw_IncompatibleClassChangeError));
StubRoutines::_throw_NullPointerException_at_call_entry =
generate_throw_exception("NullPointerException at call throw_exception",
CAST_FROM_FN_PTR(address,
SharedRuntime::
throw_NullPointerException_at_call));
// arraycopy stubs used by compilers
generate_arraycopy_stubs();

View File

@ -658,8 +658,8 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
// Note: the restored frame is not necessarily interpreted.
// Use the shared runtime version of the StackOverflowError.
assert(StubRoutines::throw_StackOverflowError_entry() != nullptr, "stub not yet generated");
__ far_jump(RuntimeAddress(StubRoutines::throw_StackOverflowError_entry()));
assert(SharedRuntime::throw_StackOverflowError_entry() != nullptr, "stub not yet generated");
__ far_jump(RuntimeAddress(SharedRuntime::throw_StackOverflowError_entry()));
// all done with frame size check
__ bind(after_frame_check);

View File

@ -2751,7 +2751,7 @@ void MacroAssembler::reserved_stack_check(Register return_pc) {
pop_frame();
restore_return_pc();
load_const_optimized(Z_R1, StubRoutines::throw_delayed_StackOverflowError_entry());
load_const_optimized(Z_R1, SharedRuntime::throw_delayed_StackOverflowError_entry());
// Don't use call() or z_basr(), they will invalidate Z_R14 which contains the return pc.
z_br(Z_R1);

View File

@ -180,8 +180,8 @@ void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register meth
__ z_br(target);
__ bind(L_no_such_method);
assert(StubRoutines::throw_AbstractMethodError_entry() != nullptr, "not yet generated!");
__ load_const_optimized(target, StubRoutines::throw_AbstractMethodError_entry());
assert(SharedRuntime::throw_AbstractMethodError_entry() != nullptr, "not yet generated!");
__ load_const_optimized(target, SharedRuntime::throw_AbstractMethodError_entry());
__ z_br(target);
}
@ -543,7 +543,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
__ bind(L_no_such_interface);
// Throw exception.
__ load_const_optimized(Z_R1, StubRoutines::throw_IncompatibleClassChangeError_entry());
__ load_const_optimized(Z_R1, SharedRuntime::throw_IncompatibleClassChangeError_entry());
__ z_br(Z_R1);
break;
}

View File

@ -3010,6 +3010,85 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
}
// Continuation point for throwing of implicit exceptions that are
// not handled in the current activation. Fabricates an exception
// oop and initiates normal exception dispatching in this
// frame. Only callee-saved registers are preserved (through the
// normal RegisterMap handling). If the compiler
// needs all registers to be preserved between the fault point and
// the exception handler then it must assume responsibility for that
// in AbstractCompiler::continuation_for_implicit_null_exception or
// continuation_for_implicit_division_by_zero_exception. All other
// implicit exceptions (e.g., NullPointerException or
// AbstractMethodError on entry) are either at call sites or
// otherwise assume that stack unwinding will be initiated, so
// caller saved registers were assumed volatile in the compiler.
// Note that we generate only this stub into a RuntimeStub, because
// it needs to be properly traversed and ignored during GC, so we
// change the meaning of the "__" macro within this method.
// Note: the routine set_pc_not_at_call_for_caller in
// SharedRuntime.cpp requires that this code be generated into a
// RuntimeStub.
// Build a RuntimeStub that calls 'runtime_entry' (with the current thread as
// its only argument) to fabricate and post an exception, then forwards to the
// shared exception-forwarding stub. The returned stub carries no oop map; the
// frame-complete offset marks the point where the pushed frame is fully set up.
RuntimeStub* SharedRuntime::generate_throw_exception(const char* name, address runtime_entry) {
int insts_size = 256;
int locs_size = 0;
ResourceMark rm;
const char* timer_msg = "SharedRuntime generate_throw_exception";
TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
CodeBuffer code(name, insts_size, locs_size);
MacroAssembler* masm = new MacroAssembler(&code);
int framesize_in_bytes;
address start = __ pc();
// Preserve the caller's return pc and push an ABI-conforming frame.
__ save_return_pc();
framesize_in_bytes = __ push_frame_abi160(0);
address frame_complete_pc = __ pc();
// Note that we always have a runtime stub frame on the top of stack at this point.
__ get_PC(Z_R1);
__ set_last_Java_frame(/*sp*/Z_SP, /*pc*/Z_R1);
// Do the call.
BLOCK_COMMENT("call runtime_entry");
__ call_VM_leaf(runtime_entry, Z_thread);
__ reset_last_Java_frame();
#ifdef ASSERT
// Make sure that this code is only executed if there is a pending exception.
{ Label L;
__ z_lg(Z_R0,
in_bytes(Thread::pending_exception_offset()),
Z_thread);
__ z_ltgr(Z_R0, Z_R0);
__ z_brne(L);
__ stop("SharedRuntime::throw_exception: no pending exception");
__ bind(L);
}
#endif
// Tear down our frame, restore the caller's return pc, and jump to the
// common forwarding code which dispatches the pending exception.
__ pop_frame();
__ restore_return_pc();
__ load_const_optimized(Z_R1, StubRoutines::forward_exception_entry());
__ z_br(Z_R1);
RuntimeStub* stub =
RuntimeStub::new_runtime_stub(name, &code,
frame_complete_pc - start,
framesize_in_bytes/wordSize,
nullptr /*oop_maps*/, false);
return stub;
}
//------------------------------Montgomery multiplication------------------------
//
@ -3263,3 +3342,18 @@ extern "C"
int SpinPause() {
return 0;
}
#if INCLUDE_JFR
// JFR checkpoint stub is only needed when Continuations are in use; it has
// not been implemented for this platform yet, so guard with Unimplemented().
RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
  if (Continuations::enabled()) {
    Unimplemented();
  }
  return nullptr;
}
// Likewise, the lease-return stub is not yet implemented on this platform.
RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
  if (Continuations::enabled()) {
    Unimplemented();
  }
  return nullptr;
}
#endif // INCLUDE_JFR

View File

@ -572,89 +572,6 @@ class StubGenerator: public StubCodeGenerator {
#undef pending_exception_offset
}
// Continuation point for throwing of implicit exceptions that are
// not handled in the current activation. Fabricates an exception
// oop and initiates normal exception dispatching in this
// frame. Only callee-saved registers are preserved (through the
// normal RegisterMap handling). If the compiler
// needs all registers to be preserved between the fault point and
// the exception handler then it must assume responsibility for that
// in AbstractCompiler::continuation_for_implicit_null_exception or
// continuation_for_implicit_division_by_zero_exception. All other
// implicit exceptions (e.g., NullPointerException or
// AbstractMethodError on entry) are either at call sites or
// otherwise assume that stack unwinding will be initiated, so
// caller saved registers were assumed volatile in the compiler.
// Note that we generate only this stub into a RuntimeStub, because
// it needs to be properly traversed and ignored during GC, so we
// change the meaning of the "__" macro within this method.
// Note: the routine set_pc_not_at_call_for_caller in
// SharedRuntime.cpp requires that this code be generated into a
// RuntimeStub.
#undef __
#define __ masm->
// Old StubGenerator variant: builds a RuntimeStub that calls 'runtime_entry'
// with the current thread (plus up to two optional register arguments) to
// raise an exception, then branches to the shared forwarding stub.
// Returns the stub's entry point rather than the RuntimeStub itself.
address generate_throw_exception(const char* name, address runtime_entry,
bool restore_saved_exception_pc,
Register arg1 = noreg, Register arg2 = noreg) {
assert_different_registers(arg1, Z_R0_scratch); // would be destroyed by push_frame()
assert_different_registers(arg2, Z_R0_scratch); // would be destroyed by push_frame()
int insts_size = 256;
int locs_size = 0;
CodeBuffer code(name, insts_size, locs_size);
MacroAssembler* masm = new MacroAssembler(&code);
int framesize_in_bytes;
address start = __ pc();
// Save caller's return pc and push an ABI-conforming frame.
__ save_return_pc();
framesize_in_bytes = __ push_frame_abi160(0);
address frame_complete_pc = __ pc();
if (restore_saved_exception_pc) {
// This path was never needed on this platform and remains unimplemented.
__ unimplemented("StubGenerator::throw_exception", 74);
}
// Note that we always have a runtime stub frame on the top of stack at this point.
__ get_PC(Z_R1);
__ set_last_Java_frame(/*sp*/Z_SP, /*pc*/Z_R1);
// Do the call.
BLOCK_COMMENT("call runtime_entry");
__ call_VM_leaf(runtime_entry, Z_thread, arg1, arg2);
__ reset_last_Java_frame();
#ifdef ASSERT
// Make sure that this code is only executed if there is a pending exception.
{ Label L;
__ z_lg(Z_R0,
in_bytes(Thread::pending_exception_offset()),
Z_thread);
__ z_ltgr(Z_R0, Z_R0);
__ z_brne(L);
__ stop("StubRoutines::throw_exception: no pending exception");
__ bind(L);
}
#endif
// Pop our frame and hand off to the common exception forwarding code.
__ pop_frame();
__ restore_return_pc();
__ load_const_optimized(Z_R1, StubRoutines::forward_exception_entry());
__ z_br(Z_R1);
RuntimeStub* stub =
RuntimeStub::new_runtime_stub(name, &code,
frame_complete_pc - start,
framesize_in_bytes/wordSize,
nullptr /*oop_maps*/, false);
return stub->entry_point();
}
#undef __
#ifdef PRODUCT
#define __ _masm->
@ -3121,21 +3038,6 @@ class StubGenerator: public StubCodeGenerator {
return nullptr;
}
#if INCLUDE_JFR
// JFR checkpoint stub: only needed when Continuations are enabled, and not
// yet implemented on this platform — Unimplemented() fatals if reached.
RuntimeStub* generate_jfr_write_checkpoint() {
if (!Continuations::enabled()) return nullptr;
Unimplemented();
return nullptr;
}
// JFR lease-return stub: same story — unimplemented on this platform.
RuntimeStub* generate_jfr_return_lease() {
if (!Continuations::enabled()) return nullptr;
Unimplemented();
return nullptr;
}
#endif // INCLUDE_JFR
// exception handler for upcall stubs
address generate_upcall_stub_exception_handler() {
StubCodeMark mark(this, "StubRoutines", "upcall stub exception handler");
@ -3164,14 +3066,6 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_call_stub_entry = generate_call_stub(StubRoutines::_call_stub_return_address);
StubRoutines::_catch_exception_entry = generate_catch_exception();
// Build this early so it's available for the interpreter.
StubRoutines::_throw_StackOverflowError_entry =
generate_throw_exception("StackOverflowError throw_exception",
CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError), false);
StubRoutines::_throw_delayed_StackOverflowError_entry =
generate_throw_exception("delayed StackOverflowError throw_exception",
CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError), false);
//----------------------------------------------------------------------
// Entry points that are platform specific.
@ -3196,29 +3090,13 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_cont_thaw = generate_cont_thaw();
StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
JFR_ONLY(generate_jfr_stubs();)
}
#if INCLUDE_JFR
// Generate both JFR stubs and publish the blobs plus their entry points
// into StubRoutines for use by the JFR intrinsics.
void generate_jfr_stubs() {
StubRoutines::_jfr_write_checkpoint_stub = generate_jfr_write_checkpoint();
StubRoutines::_jfr_write_checkpoint = StubRoutines::_jfr_write_checkpoint_stub->entry_point();
StubRoutines::_jfr_return_lease_stub = generate_jfr_return_lease();
StubRoutines::_jfr_return_lease = StubRoutines::_jfr_return_lease_stub->entry_point();
}
#endif // INCLUDE_JFR
void generate_final_stubs() {
// Generates all stubs and initializes the entry points.
StubRoutines::zarch::_partial_subtype_check = generate_partial_subtype_check();
// These entry points require SharedInfo::stack0 to be set up in non-core builds.
StubRoutines::_throw_AbstractMethodError_entry = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError), false);
StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError), false);
StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);
// Support for verify_oop (must happen after universe_init).
StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop_subroutine();

View File

@ -850,9 +850,9 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(Register frame_
// Note also that the restored frame is not necessarily interpreted.
// Use the shared runtime version of the StackOverflowError.
assert(StubRoutines::throw_StackOverflowError_entry() != nullptr, "stub not yet generated");
AddressLiteral stub(StubRoutines::throw_StackOverflowError_entry());
__ load_absolute_address(tmp1, StubRoutines::throw_StackOverflowError_entry());
assert(SharedRuntime::throw_StackOverflowError_entry() != nullptr, "stub not yet generated");
AddressLiteral stub(SharedRuntime::throw_StackOverflowError_entry());
__ load_absolute_address(tmp1, SharedRuntime::throw_StackOverflowError_entry());
__ z_br(tmp1);
// If you get to here, then there is enough stack space.

View File

@ -1303,7 +1303,7 @@ void MacroAssembler::reserved_stack_check() {
jcc(Assembler::below, no_reserved_zone_enabling);
call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), thread);
jump(RuntimeAddress(StubRoutines::throw_delayed_StackOverflowError_entry()));
jump(RuntimeAddress(SharedRuntime::throw_delayed_StackOverflowError_entry()));
should_not_reach_here();
bind(no_reserved_zone_enabling);

View File

@ -159,7 +159,7 @@ void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register meth
__ jmp(Address(method, entry_offset));
__ bind(L_no_such_method);
__ jump(RuntimeAddress(StubRoutines::throw_AbstractMethodError_entry()));
__ jump(RuntimeAddress(SharedRuntime::throw_AbstractMethodError_entry()));
}
void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
@ -510,7 +510,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
if (iid == vmIntrinsics::_linkToInterface) {
__ bind(L_incompatible_class_change_error);
__ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
__ jump(RuntimeAddress(SharedRuntime::throw_IncompatibleClassChangeError_entry()));
}
}
}

View File

@ -43,6 +43,7 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/align.hpp"
@ -56,6 +57,12 @@
#define __ masm->
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif // PRODUCT
const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
class RegisterSaver {
@ -2631,3 +2638,207 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
// frame_size_words or bytes??
return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
}
//------------------------------------------------------------------------------------------------------------------------
// Continuation point for throwing of implicit exceptions that are not handled in
// the current activation. Fabricates an exception oop and initiates normal
// exception dispatching in this frame.
//
// Previously the compiler (c2) allowed for callee save registers on Java calls.
// This is no longer true after adapter frames were removed but could possibly
// be brought back in the future if the interpreter code was reworked and it
// was deemed worthwhile. The comment below was left to describe what must
// happen here if callee saves were resurrected. As it stands now this stub
// could actually be a vanilla BufferBlob and have no oopMap at all.
// Since it doesn't make much difference we've chosen to leave it the
// way it was in the callee save days and keep the comment.
// If we need to preserve callee-saved values we need a callee-saved oop map and
// therefore have to make these stubs into RuntimeStubs rather than BufferBlobs.
// If the compiler needs all registers to be preserved between the fault
// point and the exception handler then it must assume responsibility for that in
// AbstractCompiler::continuation_for_implicit_null_exception or
// continuation_for_implicit_division_by_zero_exception. All other implicit
// exceptions (e.g., NullPointerException or AbstractMethodError on entry) are
// either at call sites or otherwise assume that stack unwinding will be initiated,
// so caller saved registers were assumed volatile in the compiler.
RuntimeStub* SharedRuntime::generate_throw_exception(const char* name, address runtime_entry) {
// Information about frame layout at time of blocking runtime call.
// Note that we only have to preserve callee-saved registers since
// the compilers are responsible for supplying a continuation point
// if they expect all registers to be preserved.
enum layout {
thread_off, // last_java_sp
arg1_off,
arg2_off,
rbp_off, // callee saved register
ret_pc,
framesize
};
int insts_size = 256;
int locs_size = 32;
ResourceMark rm;
const char* timer_msg = "SharedRuntime generate_throw_exception";
TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
CodeBuffer code(name, insts_size, locs_size);
OopMapSet* oop_maps = new OopMapSet();
MacroAssembler* masm = new MacroAssembler(&code);
address start = __ pc();
// This is an inlined and slightly modified version of call_VM
// which has the ability to fetch the return PC out of
// thread-local storage and also sets up last_Java_sp slightly
// differently than the real call_VM
Register java_thread = rbx;
__ get_thread(java_thread);
__ enter(); // required for proper stackwalking of RuntimeStub frame
// pc and rbp, already pushed
__ subptr(rsp, (framesize-2) * wordSize); // prolog
// Frame is now completed as far as size and linkage.
int frame_complete = __ pc() - start;
// push java thread (becomes first argument of C function)
__ movptr(Address(rsp, thread_off * wordSize), java_thread);
// Set up last_Java_sp and last_Java_fp
__ set_last_Java_frame(java_thread, rsp, rbp, nullptr, noreg);
// Call runtime
BLOCK_COMMENT("call runtime_entry");
__ call(RuntimeAddress(runtime_entry));
// Generate oop map
OopMap* map = new OopMap(framesize, 0);
oop_maps->add_gc_map(__ pc() - start, map);
// restore the thread (cannot use the pushed argument since arguments
// may be overwritten by C code generated by an optimizing compiler);
// however can use the register value directly if it is callee saved.
__ get_thread(java_thread);
__ reset_last_Java_frame(java_thread, true);
__ leave(); // required for proper stackwalking of RuntimeStub frame
// check for pending exceptions
#ifdef ASSERT
Label L;
__ cmpptr(Address(java_thread, Thread::pending_exception_offset()), NULL_WORD);
__ jcc(Assembler::notEqual, L);
__ should_not_reach_here();
__ bind(L);
#endif /* ASSERT */
// Hand the pending exception to the shared forwarding code.
__ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, framesize, oop_maps, false);
return stub;
}
#if INCLUDE_JFR
// Shared prologue for the JFR stubs: record a walkable last_Java_frame for
// 'the_pc' and pass the current thread as the first (stack) C argument.
static void jfr_prologue(address the_pc, MacroAssembler* masm) {
Register java_thread = rdi;
__ get_thread(java_thread);
__ set_last_Java_frame(java_thread, rsp, rbp, the_pc, noreg);
__ movptr(Address(rsp, 0), java_thread);
}
// The handle is dereferenced through a load barrier.
// Shared epilogue for the JFR stubs: clear the last_Java_frame again.
static void jfr_epilogue(MacroAssembler* masm) {
Register java_thread = rdi;
__ get_thread(java_thread);
__ reset_last_Java_frame(java_thread, true);
}
// For c2: c_rarg0 is junk, call to runtime to write a checkpoint.
// It returns a jobject handle to the event writer.
// The handle is dereferenced and the return value is the event writer oop.
RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
enum layout {
FPUState_off = 0,
rbp_off = FPUStateSizeInWords,
rdi_off,
rsi_off,
rcx_off,
rbx_off,
saved_argument_off,
saved_argument_off2, // 2nd half of double
framesize
};
int insts_size = 1024;
int locs_size = 64;
CodeBuffer code("jfr_write_checkpoint", insts_size, locs_size);
OopMapSet* oop_maps = new OopMapSet();
MacroAssembler* masm = new MacroAssembler(&code);
address start = __ pc();
__ enter();
int frame_complete = __ pc() - start;
address the_pc = __ pc();
// Record last_Java_frame, call into the JFR runtime, then clear it again.
jfr_prologue(the_pc, masm);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), 1);
jfr_epilogue(masm);
// Unpack the returned jobject handle into the event writer oop (with barrier).
__ resolve_global_jobject(rax, rdi, rdx);
__ leave();
__ ret(0);
OopMap* map = new OopMap(framesize, 1); // rbp
oop_maps->add_gc_map(the_pc - start, map);
RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
RuntimeStub::new_runtime_stub("jfr_write_checkpoint", &code, frame_complete,
(framesize >> (LogBytesPerWord - LogBytesPerInt)),
oop_maps, false);
return stub;
}
// For c2: call to return a leased buffer.
// Same frame shape as the checkpoint stub, but no handle result to unpack.
RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
enum layout {
FPUState_off = 0,
rbp_off = FPUStateSizeInWords,
rdi_off,
rsi_off,
rcx_off,
rbx_off,
saved_argument_off,
saved_argument_off2, // 2nd half of double
framesize
};
int insts_size = 1024;
int locs_size = 64;
CodeBuffer code("jfr_return_lease", insts_size, locs_size);
OopMapSet* oop_maps = new OopMapSet();
MacroAssembler* masm = new MacroAssembler(&code);
address start = __ pc();
__ enter();
int frame_complete = __ pc() - start;
address the_pc = __ pc();
// Record last_Java_frame, call into the JFR runtime, then clear it again.
jfr_prologue(the_pc, masm);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), 1);
jfr_epilogue(masm);
__ leave();
__ ret(0);
OopMap* map = new OopMap(framesize, 1); // rbp
oop_maps->add_gc_map(the_pc - start, map);
RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
RuntimeStub::new_runtime_stub("jfr_return_lease", &code, frame_complete,
(framesize >> (LogBytesPerWord - LogBytesPerInt)),
oop_maps, false);
return stub;
}
#endif // INCLUDE_JFR

View File

@ -52,6 +52,7 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/align.hpp"
@ -70,6 +71,12 @@
#define __ masm->
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif // PRODUCT
const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
class RegisterSaver {
@ -3210,6 +3217,101 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
}
// Continuation point for throwing of implicit exceptions that are
// not handled in the current activation. Fabricates an exception
// oop and initiates normal exception dispatching in this
// frame. Since we need to preserve callee-saved values (currently
// only for C2, but done for C1 as well) we need a callee-saved oop
// map and therefore have to make these stubs into RuntimeStubs
// rather than BufferBlobs. If the compiler needs all registers to
// be preserved between the fault point and the exception handler
// then it must assume responsibility for that in
// AbstractCompiler::continuation_for_implicit_null_exception or
// continuation_for_implicit_division_by_zero_exception. All other
// implicit exceptions (e.g., NullPointerException or
// AbstractMethodError on entry) are either at call sites or
// otherwise assume that stack unwinding will be initiated, so
// caller saved registers were assumed volatile in the compiler.
RuntimeStub* SharedRuntime::generate_throw_exception(const char* name, address runtime_entry) {
// Information about frame layout at time of blocking runtime call.
// Note that we only have to preserve callee-saved registers since
// the compilers are responsible for supplying a continuation point
// if they expect all registers to be preserved.
enum layout {
rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
rbp_off2,
return_off,
return_off2,
framesize // inclusive of return address
};
int insts_size = 512;
int locs_size = 64;
ResourceMark rm;
const char* timer_msg = "SharedRuntime generate_throw_exception";
TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
CodeBuffer code(name, insts_size, locs_size);
OopMapSet* oop_maps = new OopMapSet();
MacroAssembler* masm = new MacroAssembler(&code);
address start = __ pc();
// This is an inlined and slightly modified version of call_VM
// which has the ability to fetch the return PC out of
// thread-local storage and also sets up last_Java_sp slightly
// differently than the real call_VM
__ enter(); // required for proper stackwalking of RuntimeStub frame
assert(is_even(framesize/2), "sp not 16-byte aligned");
// return address and rbp are already in place
__ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog
int frame_complete = __ pc() - start;
// Set up last_Java_sp and last_Java_fp
address the_pc = __ pc();
__ set_last_Java_frame(rsp, rbp, the_pc, rscratch1);
__ andptr(rsp, -(StackAlignmentInBytes)); // Align stack
// Call runtime
__ movptr(c_rarg0, r15_thread);
BLOCK_COMMENT("call runtime_entry");
__ call(RuntimeAddress(runtime_entry));
// Generate oop map
OopMap* map = new OopMap(framesize, 0);
oop_maps->add_gc_map(the_pc - start, map);
__ reset_last_Java_frame(true);
__ leave(); // required for proper stackwalking of RuntimeStub frame
// check for pending exceptions
#ifdef ASSERT
Label L;
__ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), NULL_WORD);
__ jcc(Assembler::notEqual, L);
__ should_not_reach_here();
__ bind(L);
#endif // ASSERT
// Hand the pending exception to the shared forwarding code.
__ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
// codeBlob framesize is in words (not VMRegImpl::slot_size)
RuntimeStub* stub =
RuntimeStub::new_runtime_stub(name,
&code,
frame_complete,
(framesize >> (LogBytesPerWord - LogBytesPerInt)),
oop_maps, false);
return stub;
}
//------------------------------Montgomery multiplication------------------------
//
@ -3475,3 +3577,94 @@ void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints,
reverse_words(m, (julong *)m_ints, longwords);
}
#if INCLUDE_JFR
// For c2: c_rarg0 is junk, call to runtime to write a checkpoint.
// It returns a jobject handle to the event writer.
// The handle is dereferenced and the return value is the event writer oop.
RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
enum layout {
rbp_off,
rbpH_off,
return_off,
return_off2,
framesize // inclusive of return address
};
CodeBuffer code("jfr_write_checkpoint", 1024, 64);
MacroAssembler* masm = new MacroAssembler(&code);
address start = __ pc();
__ enter();
address the_pc = __ pc();
int frame_complete = the_pc - start;
// Record a walkable last_Java_frame, call into the JFR runtime with the
// current thread as argument, then clear the frame anchor again.
__ set_last_Java_frame(rsp, rbp, the_pc, rscratch1);
__ movptr(c_rarg0, r15_thread);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), 1);
__ reset_last_Java_frame(true);
// rax is jobject handle result, unpack and process it through a barrier.
__ resolve_global_jobject(rax, r15_thread, c_rarg0);
__ leave();
__ ret(0);
OopMapSet* oop_maps = new OopMapSet();
OopMap* map = new OopMap(framesize, 1);
oop_maps->add_gc_map(frame_complete, map);
RuntimeStub* stub =
RuntimeStub::new_runtime_stub(code.name(),
&code,
frame_complete,
(framesize >> (LogBytesPerWord - LogBytesPerInt)),
oop_maps,
false);
return stub;
}
// For c2: call to return a leased buffer.
// Same frame shape as the checkpoint stub, but no handle result to unpack.
RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
enum layout {
rbp_off,
rbpH_off,
return_off,
return_off2,
framesize // inclusive of return address
};
CodeBuffer code("jfr_return_lease", 1024, 64);
MacroAssembler* masm = new MacroAssembler(&code);
address start = __ pc();
__ enter();
address the_pc = __ pc();
int frame_complete = the_pc - start;
// Record a walkable last_Java_frame, call into the JFR runtime with the
// current thread as argument, then clear the frame anchor again.
__ set_last_Java_frame(rsp, rbp, the_pc, rscratch2);
__ movptr(c_rarg0, r15_thread);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), 1);
__ reset_last_Java_frame(true);
__ leave();
__ ret(0);
OopMapSet* oop_maps = new OopMapSet();
OopMap* map = new OopMap(framesize, 1);
oop_maps->add_gc_map(frame_complete, map);
RuntimeStub* stub =
RuntimeStub::new_runtime_stub(code.name(),
&code,
frame_complete,
(framesize >> (LogBytesPerWord - LogBytesPerInt)),
oop_maps,
false);
return stub;
}
#endif // INCLUDE_JFR

View File

@ -3843,121 +3843,8 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
public:
// Information about frame layout at time of blocking runtime call.
// Note that we only have to preserve callee-saved registers since
// the compilers are responsible for supplying a continuation point
// if they expect all registers to be preserved.
enum layout {
thread_off, // last_java_sp
arg1_off,
arg2_off,
rbp_off, // callee saved register
ret_pc,
framesize
};
private:
#undef __
#define __ masm->
//------------------------------------------------------------------------------------------------------------------------
// Continuation point for throwing of implicit exceptions that are not handled in
// the current activation. Fabricates an exception oop and initiates normal
// exception dispatching in this frame.
//
// Previously the compiler (c2) allowed for callee save registers on Java calls.
// This is no longer true after adapter frames were removed but could possibly
// be brought back in the future if the interpreter code was reworked and it
// was deemed worthwhile. The comment below was left to describe what must
// happen here if callee saves were resurrected. As it stands now this stub
// could actually be a vanilla BufferBlob and have no oopMap at all.
// Since it doesn't make much difference we've chosen to leave it the
// way it was in the callee save days and keep the comment.
// If we need to preserve callee-saved values we need a callee-saved oop map and
// therefore have to make these stubs into RuntimeStubs rather than BufferBlobs.
// If the compiler needs all registers to be preserved between the fault
// point and the exception handler then it must assume responsibility for that in
// AbstractCompiler::continuation_for_implicit_null_exception or
// continuation_for_implicit_division_by_zero_exception. All other implicit
// exceptions (e.g., NullPointerException or AbstractMethodError on entry) are
// either at call sites or otherwise assume that stack unwinding will be initiated,
// so caller saved registers were assumed volatile in the compiler.
// Old StubGenerator variant: accepts up to two extra register arguments which
// are spilled into the frame and returns the stub's entry point.
address generate_throw_exception(const char* name, address runtime_entry,
Register arg1 = noreg, Register arg2 = noreg) {
int insts_size = 256;
int locs_size = 32;
CodeBuffer code(name, insts_size, locs_size);
OopMapSet* oop_maps = new OopMapSet();
MacroAssembler* masm = new MacroAssembler(&code);
address start = __ pc();
// This is an inlined and slightly modified version of call_VM
// which has the ability to fetch the return PC out of
// thread-local storage and also sets up last_Java_sp slightly
// differently than the real call_VM
Register java_thread = rbx;
__ get_thread(java_thread);
__ enter(); // required for proper stackwalking of RuntimeStub frame
// pc and rbp, already pushed
__ subptr(rsp, (framesize-2) * wordSize); // prolog
// Frame is now completed as far as size and linkage.
int frame_complete = __ pc() - start;
// push java thread (becomes first argument of C function)
__ movptr(Address(rsp, thread_off * wordSize), java_thread);
if (arg1 != noreg) {
__ movptr(Address(rsp, arg1_off * wordSize), arg1);
}
if (arg2 != noreg) {
assert(arg1 != noreg, "missing reg arg");
__ movptr(Address(rsp, arg2_off * wordSize), arg2);
}
// Set up last_Java_sp and last_Java_fp
__ set_last_Java_frame(java_thread, rsp, rbp, nullptr, noreg);
// Call runtime
BLOCK_COMMENT("call runtime_entry");
__ call(RuntimeAddress(runtime_entry));
// Generate oop map
OopMap* map = new OopMap(framesize, 0);
oop_maps->add_gc_map(__ pc() - start, map);
// restore the thread (cannot use the pushed argument since arguments
// may be overwritten by C code generated by an optimizing compiler);
// however can use the register value directly if it is callee saved.
__ get_thread(java_thread);
__ reset_last_Java_frame(java_thread, true);
__ leave(); // required for proper stackwalking of RuntimeStub frame
// check for pending exceptions
#ifdef ASSERT
Label L;
__ cmpptr(Address(java_thread, Thread::pending_exception_offset()), NULL_WORD);
__ jcc(Assembler::notEqual, L);
__ should_not_reach_here();
__ bind(L);
#endif /* ASSERT */
// Hand the pending exception to the shared forwarding code.
__ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, framesize, oop_maps, false);
return stub->entry_point();
}
void create_control_words() {
// Round to nearest, 53-bit mode, exceptions masked
StubRoutines::x86::_fpu_cntrl_wrd_std = 0x027F;
@ -3997,109 +3884,6 @@ class StubGenerator: public StubCodeGenerator {
return nullptr;
}
#if INCLUDE_JFR
// Shared prologue for the JFR stubs: record a walkable last_Java_frame for
// 'the_pc' and pass the current thread as the first (stack) C argument.
static void jfr_prologue(address the_pc, MacroAssembler* masm) {
Register java_thread = rdi;
__ get_thread(java_thread);
__ set_last_Java_frame(java_thread, rsp, rbp, the_pc, noreg);
__ movptr(Address(rsp, 0), java_thread);
}
// The handle is dereferenced through a load barrier.
// Shared epilogue for the JFR stubs: clear the last_Java_frame again.
static void jfr_epilogue(MacroAssembler* masm) {
Register java_thread = rdi;
__ get_thread(java_thread);
__ reset_last_Java_frame(java_thread, true);
}
// For c2: c_rarg0 is junk, call to runtime to write a checkpoint.
// It returns a jobject handle to the event writer.
// The handle is dereferenced and the return value is the event writer oop.
static RuntimeStub* generate_jfr_write_checkpoint() {
enum layout {
FPUState_off = 0,
rbp_off = FPUStateSizeInWords,
rdi_off,
rsi_off,
rcx_off,
rbx_off,
saved_argument_off,
saved_argument_off2, // 2nd half of double
framesize
};
int insts_size = 1024;
int locs_size = 64;
CodeBuffer code("jfr_write_checkpoint", insts_size, locs_size);
OopMapSet* oop_maps = new OopMapSet();
MacroAssembler* masm = new MacroAssembler(&code);
// Alias for the '__' macro used by the jfr_prologue/jfr_epilogue helpers.
MacroAssembler* _masm = masm;
address start = __ pc();
__ enter();
int frame_complete = __ pc() - start;
address the_pc = __ pc();
// Record last_Java_frame, call into the JFR runtime, then clear it again.
jfr_prologue(the_pc, _masm);
__ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), 1);
jfr_epilogue(_masm);
// Unpack the returned jobject handle into the event writer oop (with barrier).
__ resolve_global_jobject(rax, rdi, rdx);
__ leave();
__ ret(0);
OopMap* map = new OopMap(framesize, 1); // rbp
oop_maps->add_gc_map(the_pc - start, map);
RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
RuntimeStub::new_runtime_stub("jfr_write_checkpoint", &code, frame_complete,
(framesize >> (LogBytesPerWord - LogBytesPerInt)),
oop_maps, false);
return stub;
}
// For c2: call to return a leased buffer.
static RuntimeStub* generate_jfr_return_lease() {
  // Same frame shape as generate_jfr_write_checkpoint: FPU state, then
  // callee-saved registers and a doubleword argument spill slot (32-bit
  // slots).
  enum layout {
    FPUState_off = 0,
    rbp_off = FPUStateSizeInWords,
    rdi_off,
    rsi_off,
    rcx_off,
    rbx_off,
    saved_argument_off,
    saved_argument_off2, // 2nd half of double
    framesize
  };

  int insts_size = 1024;
  int locs_size = 64;
  CodeBuffer code("jfr_return_lease", insts_size, locs_size);
  OopMapSet* oop_maps = new OopMapSet();
  MacroAssembler* masm = new MacroAssembler(&code);
  MacroAssembler* _masm = masm;  // target of the __ macro

  address start = __ pc();
  __ enter();
  // The frame is walkable once rbp is established by enter().
  int frame_complete = __ pc() - start;
  address the_pc = __ pc();
  // Record last_Java_frame and pass the thread as the single argument.
  jfr_prologue(the_pc, _masm);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), 1);
  jfr_epilogue(_masm);
  // Unlike the checkpoint stub there is no handle result to unpack.
  __ leave();
  __ ret(0);

  // GC map for the runtime call site recorded at 'the_pc'.
  OopMap* map = new OopMap(framesize, 1); // rbp
  oop_maps->add_gc_map(the_pc - start, map);

  RuntimeStub* stub = // codeBlob framesize is in words (not VMRegImpl::slot_size)
    RuntimeStub::new_runtime_stub("jfr_return_lease", &code, frame_complete,
                                  (framesize >> (LogBytesPerWord - LogBytesPerInt)),
                                  oop_maps, false);
  return stub;
}
#endif // INCLUDE_JFR
//---------------------------------------------------------------------------
// Initialization
@ -4130,12 +3914,6 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::x86::_d2i_wrapper = generate_d2i_wrapper(T_INT, CAST_FROM_FN_PTR(address, SharedRuntime::d2i));
StubRoutines::x86::_d2l_wrapper = generate_d2i_wrapper(T_LONG, CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
// Build this early so it's available for the interpreter
StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception",
CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
StubRoutines::_throw_delayed_StackOverflowError_entry = generate_throw_exception("delayed StackOverflowError throw_exception",
CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError));
if (UseCRC32Intrinsics) {
// set table address before stub generation which use it
StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table;
@ -4188,28 +3966,11 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_cont_thaw = generate_cont_thaw();
StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
JFR_ONLY(generate_jfr_stubs();)
}
#if INCLUDE_JFR
// Generates both JFR runtime stubs and publishes the RuntimeStub blobs as
// well as their entry points in StubRoutines (the entry point is derived
// from the blob immediately after it is created).
void generate_jfr_stubs() {
  StubRoutines::_jfr_write_checkpoint_stub = generate_jfr_write_checkpoint();
  StubRoutines::_jfr_write_checkpoint = StubRoutines::_jfr_write_checkpoint_stub->entry_point();
  StubRoutines::_jfr_return_lease_stub = generate_jfr_return_lease();
  StubRoutines::_jfr_return_lease = StubRoutines::_jfr_return_lease_stub->entry_point();
}
#endif // INCLUDE_JFR
void generate_final_stubs() {
// Generates all stubs and initializes the entry points
// These entry points require SharedInfo::stack0 to be set up in non-core builds
// and need to be relocatable, so they each fabricate a RuntimeStub internally.
StubRoutines::_throw_AbstractMethodError_entry = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError));
StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError));
StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call));
// support for verify_oop (must happen after universe_init)
StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();

View File

@ -45,9 +45,6 @@
#if INCLUDE_JVMCI
#include "jvmci/jvmci_globals.hpp"
#endif
#if INCLUDE_JFR
#include "jfr/support/jfrIntrinsics.hpp"
#endif
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp
@ -3702,7 +3699,7 @@ address StubGenerator::generate_cont_thaw(const char* label, Continuation::thaw_
Label L_thaw_success;
__ testptr(rbx, rbx);
__ jccb(Assembler::notZero, L_thaw_success);
__ jump(RuntimeAddress(StubRoutines::throw_StackOverflowError_entry()));
__ jump(RuntimeAddress(SharedRuntime::throw_StackOverflowError_entry()));
__ bind(L_thaw_success);
// Make room for the thawed frames and align the stack.
@ -3778,198 +3775,6 @@ address StubGenerator::generate_cont_returnBarrier_exception() {
return generate_cont_thaw("Cont thaw return barrier exception", Continuation::thaw_return_barrier_exception);
}
#if INCLUDE_JFR
// For c2: c_rarg0 is junk, call to runtime to write a checkpoint.
// It returns a jobject handle to the event writer.
// The handle is dereferenced and the return value is the event writer oop.
RuntimeStub* StubGenerator::generate_jfr_write_checkpoint() {
  // Minimal frame: saved rbp plus return address. Offsets are 32-bit
  // slots (hence the paired *_off/*H_off entries); the shift below
  // converts framesize back to words for the RuntimeStub.
  enum layout {
    rbp_off,
    rbpH_off,
    return_off,
    return_off2,
    framesize // inclusive of return address
  };

  CodeBuffer code("jfr_write_checkpoint", 1024, 64);
  MacroAssembler* _masm = new MacroAssembler(&code);
  address start = __ pc();

  __ enter();
  address the_pc = __ pc();

  int frame_complete = the_pc - start;

  // Make the frame walkable, then call out to the JFR runtime with the
  // current thread as the only argument (x86_64: thread in r15_thread).
  __ set_last_Java_frame(rsp, rbp, the_pc, rscratch1);
  __ movptr(c_rarg0, r15_thread);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), 1);
  __ reset_last_Java_frame(true);

  // rax is jobject handle result, unpack and process it through a barrier.
  __ resolve_global_jobject(rax, r15_thread, c_rarg0);

  __ leave();
  __ ret(0);

  // GC map for the runtime call; recorded at frame_complete == the_pc - start.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* map = new OopMap(framesize, 1);
  oop_maps->add_gc_map(frame_complete, map);

  RuntimeStub* stub =
    RuntimeStub::new_runtime_stub(code.name(),
                                  &code,
                                  frame_complete,
                                  (framesize >> (LogBytesPerWord - LogBytesPerInt)),
                                  oop_maps,
                                  false);
  return stub;
}
// For c2: call to return a leased buffer.
RuntimeStub* StubGenerator::generate_jfr_return_lease() {
  // Same minimal frame as generate_jfr_write_checkpoint: saved rbp plus
  // return address, counted in 32-bit slots.
  enum layout {
    rbp_off,
    rbpH_off,
    return_off,
    return_off2,
    framesize // inclusive of return address
  };

  CodeBuffer code("jfr_return_lease", 1024, 64);
  MacroAssembler* _masm = new MacroAssembler(&code);
  address start = __ pc();

  __ enter();
  address the_pc = __ pc();

  int frame_complete = the_pc - start;

  // Make the frame walkable, then call out to the JFR runtime with the
  // current thread as the only argument. (Uses rscratch2 here where the
  // checkpoint stub uses rscratch1 — both are just temps.)
  __ set_last_Java_frame(rsp, rbp, the_pc, rscratch2);
  __ movptr(c_rarg0, r15_thread);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), 1);
  __ reset_last_Java_frame(true);

  // No handle result to unpack, unlike the checkpoint stub.
  __ leave();
  __ ret(0);

  // GC map for the runtime call; recorded at frame_complete == the_pc - start.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* map = new OopMap(framesize, 1);
  oop_maps->add_gc_map(frame_complete, map);

  RuntimeStub* stub =
    RuntimeStub::new_runtime_stub(code.name(),
                                  &code,
                                  frame_complete,
                                  (framesize >> (LogBytesPerWord - LogBytesPerInt)),
                                  oop_maps,
                                  false);
  return stub;
}
#endif // INCLUDE_JFR
// Continuation point for throwing of implicit exceptions that are
// not handled in the current activation. Fabricates an exception
// oop and initiates normal exception dispatching in this
// frame. Since we need to preserve callee-saved values (currently
// only for C2, but done for C1 as well) we need a callee-saved oop
// map and therefore have to make these stubs into RuntimeStubs
// rather than BufferBlobs. If the compiler needs all registers to
// be preserved between the fault point and the exception handler
// then it must assume responsibility for that in
// AbstractCompiler::continuation_for_implicit_null_exception or
// continuation_for_implicit_division_by_zero_exception. All other
// implicit exceptions (e.g., NullPointerException or
// AbstractMethodError on entry) are either at call sites or
// otherwise assume that stack unwinding will be initiated, so
// caller saved registers were assumed volatile in the compiler.
//
// Returns the entry point of the generated stub. arg1/arg2 (default
// noreg) are forwarded to the runtime entry as its 2nd/3rd C arguments;
// the 1st is always the current thread. The stub never returns to its
// caller: the runtime entry is expected to install a pending exception,
// and control continues at forward_exception_entry. x86_64 variant
// (r15_thread, c_rargN).
address StubGenerator::generate_throw_exception(const char* name,
                                                address runtime_entry,
                                                Register arg1,
                                                Register arg2) {
  // Information about frame layout at time of blocking runtime call.
  // Note that we only have to preserve callee-saved registers since
  // the compilers are responsible for supplying a continuation point
  // if they expect all registers to be preserved.
  enum layout {
    rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
    rbp_off2,
    return_off,
    return_off2,
    framesize // inclusive of return address
  };

  int insts_size = 512;
  int locs_size = 64;
  CodeBuffer code(name, insts_size, locs_size);
  OopMapSet* oop_maps = new OopMapSet();
  MacroAssembler* _masm = new MacroAssembler(&code);

  address start = __ pc();

  // This is an inlined and slightly modified version of call_VM
  // which has the ability to fetch the return PC out of
  // thread-local storage and also sets up last_Java_sp slightly
  // differently than the real call_VM

  __ enter(); // required for proper stackwalking of RuntimeStub frame

  assert(is_even(framesize/2), "sp not 16-byte aligned");

  // return address and rbp are already in place
  __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog

  int frame_complete = __ pc() - start;

  // Set up last_Java_sp and last_Java_fp
  address the_pc = __ pc();
  __ set_last_Java_frame(rsp, rbp, the_pc, rscratch1);
  __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack

  // Call runtime. arg1 is moved into c_rarg1 first, so arg2 must not
  // live in c_rarg1 or it would be clobbered before being read.
  if (arg1 != noreg) {
    assert(arg2 != c_rarg1, "clobbered");
    __ movptr(c_rarg1, arg1);
  }
  if (arg2 != noreg) {
    __ movptr(c_rarg2, arg2);
  }
  __ movptr(c_rarg0, r15_thread);  // thread is always the first argument
  BLOCK_COMMENT("call runtime_entry");
  __ call(RuntimeAddress(runtime_entry));

  // Generate oop map (empty: only callee-saved state, no oops to report)
  OopMap* map = new OopMap(framesize, 0);

  oop_maps->add_gc_map(the_pc - start, map);

  __ reset_last_Java_frame(true);

  __ leave(); // required for proper stackwalking of RuntimeStub frame

  // check for pending exceptions
#ifdef ASSERT
  // The runtime entry must have installed a pending exception; trap in
  // debug builds if it did not.
  Label L;
  __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), NULL_WORD);
  __ jcc(Assembler::notEqual, L);
  __ should_not_reach_here();
  __ bind(L);
#endif // ASSERT
  // Dispatch the pending exception in the caller's frame.
  __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  // codeBlob framesize is in words (not VMRegImpl::slot_size)
  RuntimeStub* stub =
    RuntimeStub::new_runtime_stub(name,
                                  &code,
                                  frame_complete,
                                  (framesize >> (LogBytesPerWord - LogBytesPerInt)),
                                  oop_maps, false);
  // Callers only need the entry address, not the blob itself.
  return stub->entry_point();
}
// exception handler for upcall stubs
address StubGenerator::generate_upcall_stub_exception_handler() {
StubCodeMark mark(this, "StubRoutines", "upcall stub exception handler");
@ -4087,17 +3892,6 @@ void StubGenerator::generate_initial_stubs() {
StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF);
StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000);
// Build this early so it's available for the interpreter.
StubRoutines::_throw_StackOverflowError_entry =
generate_throw_exception("StackOverflowError throw_exception",
CAST_FROM_FN_PTR(address,
SharedRuntime::
throw_StackOverflowError));
StubRoutines::_throw_delayed_StackOverflowError_entry =
generate_throw_exception("delayed StackOverflowError throw_exception",
CAST_FROM_FN_PTR(address,
SharedRuntime::
throw_delayed_StackOverflowError));
if (UseCRC32Intrinsics) {
// set table address before stub generation which use it
StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table;
@ -4131,43 +3925,11 @@ void StubGenerator::generate_continuation_stubs() {
StubRoutines::_cont_thaw = generate_cont_thaw();
StubRoutines::_cont_returnBarrier = generate_cont_returnBarrier();
StubRoutines::_cont_returnBarrierExc = generate_cont_returnBarrier_exception();
JFR_ONLY(generate_jfr_stubs();)
}
#if INCLUDE_JFR
void StubGenerator::generate_jfr_stubs() {
StubRoutines::_jfr_write_checkpoint_stub = generate_jfr_write_checkpoint();
StubRoutines::_jfr_write_checkpoint = StubRoutines::_jfr_write_checkpoint_stub->entry_point();
StubRoutines::_jfr_return_lease_stub = generate_jfr_return_lease();
StubRoutines::_jfr_return_lease = StubRoutines::_jfr_return_lease_stub->entry_point();
}
#endif
void StubGenerator::generate_final_stubs() {
// Generates the rest of stubs and initializes the entry points
// These entry points require SharedInfo::stack0 to be set up in
// non-core builds and need to be relocatable, so they each
// fabricate a RuntimeStub internally.
StubRoutines::_throw_AbstractMethodError_entry =
generate_throw_exception("AbstractMethodError throw_exception",
CAST_FROM_FN_PTR(address,
SharedRuntime::
throw_AbstractMethodError));
StubRoutines::_throw_IncompatibleClassChangeError_entry =
generate_throw_exception("IncompatibleClassChangeError throw_exception",
CAST_FROM_FN_PTR(address,
SharedRuntime::
throw_IncompatibleClassChangeError));
StubRoutines::_throw_NullPointerException_at_call_entry =
generate_throw_exception("NullPointerException at call throw_exception",
CAST_FROM_FN_PTR(address,
SharedRuntime::
throw_NullPointerException_at_call));
// support for verify_oop (must happen after universe_init)
if (VerifyOops) {
StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();

View File

@ -586,16 +586,6 @@ class StubGenerator: public StubCodeGenerator {
address generate_cont_returnBarrier();
address generate_cont_returnBarrier_exception();
#if INCLUDE_JFR
void generate_jfr_stubs();
// For c2: c_rarg0 is junk, call to runtime to write a checkpoint.
// It returns a jobject handle to the event writer.
// The handle is dereferenced and the return value is the event writer oop.
RuntimeStub* generate_jfr_write_checkpoint();
// For c2: call to runtime to return a buffer lease.
RuntimeStub* generate_jfr_return_lease();
#endif // INCLUDE_JFR
// Continuation point for throwing of implicit exceptions that are
// not handled in the current activation. Fabricates an exception
// oop and initiates normal exception dispatching in this

View File

@ -545,8 +545,8 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
// Note: the restored frame is not necessarily interpreted.
// Use the shared runtime version of the StackOverflowError.
assert(StubRoutines::throw_StackOverflowError_entry() != nullptr, "stub not yet generated");
__ jump(RuntimeAddress(StubRoutines::throw_StackOverflowError_entry()));
assert(SharedRuntime::throw_StackOverflowError_entry() != nullptr, "stub not yet generated");
__ jump(RuntimeAddress(SharedRuntime::throw_StackOverflowError_entry()));
// all done with frame size check
__ bind(after_frame_check_pop);
NOT_LP64(__ pop(rsi));

View File

@ -89,7 +89,7 @@ JRT_LEAF(void, zero_stub())
ShouldNotCallThis();
JRT_END
static RuntimeStub* generate_empty_runtime_stub(const char* name) {
static RuntimeStub* generate_empty_runtime_stub() {
return CAST_FROM_FN_PTR(RuntimeStub*,zero_stub);
}
@ -101,7 +101,6 @@ static DeoptimizationBlob* generate_empty_deopt_blob() {
return CAST_FROM_FN_PTR(DeoptimizationBlob*,zero_stub);
}
// Zero port: install a placeholder deopt blob. Its entry points must never
// actually be executed (they funnel into zero_stub, which calls
// ShouldNotCallThis).
void SharedRuntime::generate_deopt_blob() {
  _deopt_blob = generate_empty_deopt_blob();
}
@ -111,7 +110,11 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
}
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
return generate_empty_runtime_stub("resolve_blob");
return generate_empty_runtime_stub();
}
// Zero port: no real throw-exception stub is generated; hand back the
// shared placeholder whose entry must never execute (zero_stub calls
// ShouldNotCallThis). 'name' and 'runtime_entry' are ignored.
RuntimeStub* SharedRuntime::generate_throw_exception(const char* name, address runtime_entry) {
  return generate_empty_runtime_stub();
}
int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
@ -127,3 +130,15 @@ int SharedRuntime::vector_calling_convention(VMRegPair *regs,
ShouldNotCallThis();
return 0;
}
#if INCLUDE_JFR
// The JFR stubs exist for the sake of C2 intrinsics (see the shared
// declarations); the Zero port generates no such code, so no blobs are
// produced. NOTE(review): callers are presumed never to dereference these
// on Zero — confirm against shared JFR intrinsic wiring.
RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
  return nullptr;
}

RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
  return nullptr;
}
#endif // INCLUDE_JFR

View File

@ -203,22 +203,6 @@ class StubGenerator: public StubCodeGenerator {
void generate_final_stubs() {
// Generates all stubs and initializes the entry points
// These entry points require SharedInfo::stack0 to be set up in
// non-core builds and need to be relocatable, so they each
// fabricate a RuntimeStub internally.
StubRoutines::_throw_AbstractMethodError_entry =
ShouldNotCallThisStub();
StubRoutines::_throw_NullPointerException_at_call_entry =
ShouldNotCallThisStub();
StubRoutines::_throw_StackOverflowError_entry =
ShouldNotCallThisStub();
// support for verify_oop (must happen after universe_init)
StubRoutines::_verify_oop_subroutine_entry =
ShouldNotCallThisStub();
// arraycopy stubs used by compilers
generate_arraycopy_stubs();

View File

@ -50,6 +50,7 @@ class CompilerToVM {
static address SharedRuntime_deopt_blob_unpack_with_exception_in_tls;
static address SharedRuntime_deopt_blob_uncommon_trap;
static address SharedRuntime_polling_page_return_handler;
static address SharedRuntime_throw_delayed_StackOverflowError_entry;
static address nmethod_entry_barrier;
static int thread_disarmed_guard_value_offset;

View File

@ -68,6 +68,7 @@ address CompilerToVM::Data::SharedRuntime_deopt_blob_unpack;
address CompilerToVM::Data::SharedRuntime_deopt_blob_unpack_with_exception_in_tls;
address CompilerToVM::Data::SharedRuntime_deopt_blob_uncommon_trap;
address CompilerToVM::Data::SharedRuntime_polling_page_return_handler;
address CompilerToVM::Data::SharedRuntime_throw_delayed_StackOverflowError_entry;
address CompilerToVM::Data::nmethod_entry_barrier;
int CompilerToVM::Data::thread_disarmed_guard_value_offset;
@ -158,6 +159,7 @@ void CompilerToVM::Data::initialize(JVMCI_TRAPS) {
SharedRuntime_deopt_blob_unpack_with_exception_in_tls = SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
SharedRuntime_deopt_blob_uncommon_trap = SharedRuntime::deopt_blob()->uncommon_trap();
SharedRuntime_polling_page_return_handler = SharedRuntime::polling_page_return_handler_blob()->entry_point();
SharedRuntime_throw_delayed_StackOverflowError_entry = SharedRuntime::throw_delayed_StackOverflowError_entry();
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs_nm != nullptr) {

View File

@ -68,6 +68,8 @@
static_field(CompilerToVM::Data, SharedRuntime_deopt_blob_uncommon_trap, address) \
static_field(CompilerToVM::Data, SharedRuntime_polling_page_return_handler, \
address) \
static_field(CompilerToVM::Data, SharedRuntime_throw_delayed_StackOverflowError_entry, \
address) \
\
static_field(CompilerToVM::Data, nmethod_entry_barrier, address) \
static_field(CompilerToVM::Data, thread_disarmed_guard_value_offset, int) \
@ -328,8 +330,6 @@
\
static_field(StubRoutines, _verify_oop_count, jint) \
\
static_field(StubRoutines, _throw_delayed_StackOverflowError_entry, address) \
\
static_field(StubRoutines, _jbyte_arraycopy, address) \
static_field(StubRoutines, _jshort_arraycopy, address) \
static_field(StubRoutines, _jint_arraycopy, address) \

View File

@ -3167,7 +3167,7 @@ bool LibraryCallKit::inline_native_jvm_commit() {
// Make a runtime call, which can safepoint, to return the leased buffer. This updates both the JfrThreadLocal and the Java event writer oop.
Node* call_return_lease = make_runtime_call(RC_NO_LEAF,
OptoRuntime::void_void_Type(),
StubRoutines::jfr_return_lease(),
SharedRuntime::jfr_return_lease(),
"return_lease", TypePtr::BOTTOM);
Node* call_return_lease_control = _gvn.transform(new ProjNode(call_return_lease, TypeFunc::Control));
@ -3366,7 +3366,7 @@ bool LibraryCallKit::inline_native_getEventWriter() {
// The call also updates the native thread local thread id and the vthread with the current epoch.
Node* call_write_checkpoint = make_runtime_call(RC_NO_LEAF,
OptoRuntime::jfr_write_checkpoint_Type(),
StubRoutines::jfr_write_checkpoint(),
SharedRuntime::jfr_write_checkpoint(),
"write_checkpoint", TypePtr::BOTTOM);
Node* call_write_checkpoint_control = _gvn.transform(new ProjNode(call_write_checkpoint, TypeFunc::Control));

View File

@ -126,7 +126,10 @@ jint init_globals() {
compilationPolicy_init();
codeCache_init();
VM_Version_init(); // depends on codeCache_init for emitting code
// stub routines in initial blob are referenced by later generated code
initial_stubs_init();
// stack overflow exception blob is referenced by the interpreter
SharedRuntime::generate_initial_stubs();
jint status = universe_init(); // dependent on codeCache_init and
// initial_stubs_init and metaspace_init.
if (status != JNI_OK)
@ -144,6 +147,9 @@ jint init_globals() {
gc_barrier_stubs_init(); // depends on universe_init, must be before interpreter_init
continuations_init(); // must precede continuation stub generation
continuation_stubs_init(); // depends on continuations_init
#if INCLUDE_JFR
SharedRuntime::generate_jfr_stubs();
#endif
interpreter_init_stub(); // before methods get loaded
accessFlags_init();
InterfaceSupport_init();

View File

@ -71,6 +71,7 @@
#include "runtime/stackWatermarkSet.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.inline.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vframe.inline.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vm_version.hpp"
@ -88,23 +89,42 @@
#include "jfr/jfr.hpp"
#endif
// Shared stub locations
// Shared runtime stub routines reside in their own unique blob with a
// single entry point
RuntimeStub* SharedRuntime::_wrong_method_blob;
RuntimeStub* SharedRuntime::_wrong_method_abstract_blob;
RuntimeStub* SharedRuntime::_ic_miss_blob;
RuntimeStub* SharedRuntime::_resolve_opt_virtual_call_blob;
RuntimeStub* SharedRuntime::_resolve_virtual_call_blob;
RuntimeStub* SharedRuntime::_resolve_static_call_blob;
address SharedRuntime::_resolve_static_call_entry;
DeoptimizationBlob* SharedRuntime::_deopt_blob;
SafepointBlob* SharedRuntime::_polling_page_vectors_safepoint_handler_blob;
SafepointBlob* SharedRuntime::_polling_page_safepoint_handler_blob;
SafepointBlob* SharedRuntime::_polling_page_return_handler_blob;
RuntimeStub* SharedRuntime::_throw_AbstractMethodError_blob;
RuntimeStub* SharedRuntime::_throw_IncompatibleClassChangeError_blob;
RuntimeStub* SharedRuntime::_throw_NullPointerException_at_call_blob;
RuntimeStub* SharedRuntime::_throw_StackOverflowError_blob;
RuntimeStub* SharedRuntime::_throw_delayed_StackOverflowError_blob;
#if INCLUDE_JFR
RuntimeStub* SharedRuntime::_jfr_write_checkpoint_blob = nullptr;
RuntimeStub* SharedRuntime::_jfr_return_lease_blob = nullptr;
#endif
nmethod* SharedRuntime::_cont_doYield_stub;
//----------------------------generate_stubs-----------------------------------

// Generates only the StackOverflowError throw stub; invoked early during
// VM init (from init_globals, right after initial_stubs_init) because the
// interpreter needs this entry before the rest of the shared stubs exist.
void SharedRuntime::generate_initial_stubs() {
  // Build this early so it's available for the interpreter.
  _throw_StackOverflowError_blob =
    generate_throw_exception("StackOverflowError throw_exception",
                             CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
}
void SharedRuntime::generate_stubs() {
_wrong_method_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method), "wrong_method_stub");
_wrong_method_abstract_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_abstract), "wrong_method_abstract_stub");
@ -112,7 +132,22 @@ void SharedRuntime::generate_stubs() {
_resolve_opt_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C), "resolve_opt_virtual_call");
_resolve_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C), "resolve_virtual_call");
_resolve_static_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C), "resolve_static_call");
_resolve_static_call_entry = _resolve_static_call_blob->entry_point();
_throw_delayed_StackOverflowError_blob =
generate_throw_exception("delayed StackOverflowError throw_exception",
CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError));
_throw_AbstractMethodError_blob =
generate_throw_exception("AbstractMethodError throw_exception",
CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError));
_throw_IncompatibleClassChangeError_blob =
generate_throw_exception("IncompatibleClassChangeError throw_exception",
CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError));
_throw_NullPointerException_at_call_blob =
generate_throw_exception("NullPointerException at call throw_exception",
CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call));
AdapterHandlerLibrary::initialize();
@ -129,6 +164,19 @@ void SharedRuntime::generate_stubs() {
generate_deopt_blob();
}
#if INCLUDE_JFR

//------------------------------generate jfr runtime stubs ------
// Generates the two platform JFR stubs and records the blobs; timed and
// logged under -Xlog:startuptime.
void SharedRuntime::generate_jfr_stubs() {
  // ResourceMark bounds any resource-area allocation performed while the
  // stubs are generated and the trace message is logged.
  ResourceMark rm;
  const char* timer_msg = "SharedRuntime generate_jfr_stubs";
  TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));

  _jfr_write_checkpoint_blob = generate_jfr_write_checkpoint();
  _jfr_return_lease_blob = generate_jfr_return_lease();
}

#endif // INCLUDE_JFR
#include <math.h>
// Implementation of SharedRuntime
@ -867,7 +915,7 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* current,
// method stack banging.
assert(current->deopt_mark() == nullptr, "no stack overflow from deopt blob/uncommon trap");
Events::log_exception(current, "StackOverflowError at " INTPTR_FORMAT, p2i(pc));
return StubRoutines::throw_StackOverflowError_entry();
return SharedRuntime::throw_StackOverflowError_entry();
}
case IMPLICIT_NULL: {
@ -893,7 +941,7 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* current,
// Assert that the signal comes from the expected location in stub code.
assert(vt_stub->is_null_pointer_exception(pc),
"obtained signal from unexpected location in stub code");
return StubRoutines::throw_NullPointerException_at_call_entry();
return SharedRuntime::throw_NullPointerException_at_call_entry();
}
} else {
CodeBlob* cb = CodeCache::find_blob(pc);
@ -914,7 +962,7 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* current,
}
Events::log_exception(current, "NullPointerException in code blob at " INTPTR_FORMAT, p2i(pc));
// There is no handler here, so we will simply unwind.
return StubRoutines::throw_NullPointerException_at_call_entry();
return SharedRuntime::throw_NullPointerException_at_call_entry();
}
// Otherwise, it's a compiled method. Consult its exception handlers.
@ -925,13 +973,13 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* current,
// is not set up yet) => use return address pushed by
// caller => don't push another return address
Events::log_exception(current, "NullPointerException in IC check " INTPTR_FORMAT, p2i(pc));
return StubRoutines::throw_NullPointerException_at_call_entry();
return SharedRuntime::throw_NullPointerException_at_call_entry();
}
if (nm->method()->is_method_handle_intrinsic()) {
// exception happened inside MH dispatch code, similar to a vtable stub
Events::log_exception(current, "NullPointerException in MH adapter " INTPTR_FORMAT, p2i(pc));
return StubRoutines::throw_NullPointerException_at_call_entry();
return SharedRuntime::throw_NullPointerException_at_call_entry();
}
#ifndef PRODUCT
@ -1467,7 +1515,7 @@ JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread*
assert(callerFrame.is_compiled_frame(), "must be");
// Install exception and return forward entry.
address res = StubRoutines::throw_AbstractMethodError_entry();
address res = SharedRuntime::throw_AbstractMethodError_entry();
JRT_BLOCK
methodHandle callee(current, invoke.static_target(current));
if (!callee.is_null()) {
@ -2387,7 +2435,7 @@ void AdapterHandlerLibrary::initialize() {
// AbstractMethodError for invalid invocations.
address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub();
_abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(0, nullptr),
StubRoutines::throw_AbstractMethodError_entry(),
SharedRuntime::throw_AbstractMethodError_entry(),
wrong_method_abstract, wrong_method_abstract);
_buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);

View File

@ -54,7 +54,6 @@ class SharedRuntime: AllStatic {
static RuntimeStub* _resolve_opt_virtual_call_blob;
static RuntimeStub* _resolve_virtual_call_blob;
static RuntimeStub* _resolve_static_call_blob;
static address _resolve_static_call_entry;
static DeoptimizationBlob* _deopt_blob;
@ -64,6 +63,17 @@ class SharedRuntime: AllStatic {
static nmethod* _cont_doYield_stub;
static RuntimeStub* _throw_AbstractMethodError_blob;
static RuntimeStub* _throw_IncompatibleClassChangeError_blob;
static RuntimeStub* _throw_NullPointerException_at_call_blob;
static RuntimeStub* _throw_StackOverflowError_blob;
static RuntimeStub* _throw_delayed_StackOverflowError_blob;
#if INCLUDE_JFR
static RuntimeStub* _jfr_write_checkpoint_blob;
static RuntimeStub* _jfr_return_lease_blob;
#endif
#ifndef PRODUCT
// Counters
static int64_t _nof_megamorphic_calls; // total # of megamorphic calls (through vtable)
@ -73,9 +83,19 @@ class SharedRuntime: AllStatic {
enum { POLL_AT_RETURN, POLL_AT_LOOP, POLL_AT_VECTOR_LOOP };
static SafepointBlob* generate_handler_blob(address call_ptr, int poll_type);
static RuntimeStub* generate_resolve_blob(address destination, const char* name);
static RuntimeStub* generate_throw_exception(const char* name, address runtime_entry);
public:
static void generate_initial_stubs(void);
static void generate_stubs(void);
#if INCLUDE_JFR
static void generate_jfr_stubs(void);
// For c2: c_rarg0 is junk, call to runtime to write a checkpoint.
// It returns a jobject handle to the event writer.
// The handle is dereferenced and the return value is the event writer oop.
static RuntimeStub* generate_jfr_write_checkpoint();
// For c2: call to runtime to return a buffer lease.
static RuntimeStub* generate_jfr_return_lease();
#endif
// max bytes for each dtrace string parameter
enum { max_dtrace_string_size = 256 };
@ -241,6 +261,18 @@ class SharedRuntime: AllStatic {
return _cont_doYield_stub;
}
// Implicit exceptions
static address throw_AbstractMethodError_entry() { return _throw_AbstractMethodError_blob->entry_point(); }
static address throw_IncompatibleClassChangeError_entry() { return _throw_IncompatibleClassChangeError_blob->entry_point(); }
static address throw_NullPointerException_at_call_entry() { return _throw_NullPointerException_at_call_blob->entry_point(); }
static address throw_StackOverflowError_entry() { return _throw_StackOverflowError_blob->entry_point(); }
static address throw_delayed_StackOverflowError_entry() { return _throw_delayed_StackOverflowError_blob->entry_point(); }
#if INCLUDE_JFR
static address jfr_write_checkpoint() { return _jfr_write_checkpoint_blob->entry_point(); }
static address jfr_return_lease() { return _jfr_return_lease_blob->entry_point(); }
#endif
// Counters
#ifndef PRODUCT
static address nof_megamorphic_calls_addr() { return (address)&_nof_megamorphic_calls; }

View File

@ -61,11 +61,6 @@ address StubRoutines::_call_stub_entry = nullptr;
address StubRoutines::_catch_exception_entry = nullptr;
address StubRoutines::_forward_exception_entry = nullptr;
address StubRoutines::_throw_AbstractMethodError_entry = nullptr;
address StubRoutines::_throw_IncompatibleClassChangeError_entry = nullptr;
address StubRoutines::_throw_NullPointerException_at_call_entry = nullptr;
address StubRoutines::_throw_StackOverflowError_entry = nullptr;
address StubRoutines::_throw_delayed_StackOverflowError_entry = nullptr;
jint StubRoutines::_verify_oop_count = 0;
address StubRoutines::_verify_oop_subroutine_entry = nullptr;
address StubRoutines::_atomic_xchg_entry = nullptr;
@ -191,11 +186,6 @@ address StubRoutines::_cont_thaw = nullptr;
address StubRoutines::_cont_returnBarrier = nullptr;
address StubRoutines::_cont_returnBarrierExc = nullptr;
JFR_ONLY(RuntimeStub* StubRoutines::_jfr_write_checkpoint_stub = nullptr;)
JFR_ONLY(address StubRoutines::_jfr_write_checkpoint = nullptr;)
JFR_ONLY(RuntimeStub* StubRoutines::_jfr_return_lease_stub = nullptr;)
JFR_ONLY(address StubRoutines::_jfr_return_lease = nullptr;)
address StubRoutines::_upcall_stub_exception_handler = nullptr;
address StubRoutines::_lookup_secondary_supers_table_slow_path_stub = nullptr;

View File

@ -36,7 +36,34 @@
// StubRoutines provides entry points to assembly routines used by
// compiled code and the run-time system. Platform-specific entry
// points are defined in the platform-specific inner class.
// points are defined in the platform-specific inner class. Most
// routines have a single (main) entry point. However, a few routines
// do provide alternative entry points.
//
// Stub routines whose entries are advertised via class StubRoutines
// are generated in batches at well-defined stages during JVM init:
// initial stubs, continuation stubs, compiler stubs, final stubs.
// Each batch is embedded in a single, associated blob (an instance of
// BufferBlob) i.e. the blob to entry relationship is 1-m.
//
// Note that this contrasts with the much smaller number of stub
// routines generated via classes SharedRuntime, c1_Runtime1 and
// OptoRuntime. The latter routines are also generated at well-defined
// points during JVM init. However, each stub routine has its own
// unique blob (various subclasses of RuntimeBlob) i.e. the blob to
// entry relationship is 1-1. The difference arises because
// SharedRuntime routines may need to be relocatable or advertise
// properties such as a frame size via their blob.
//
// Staging of stub routine generation is needed in order to manage
// init dependencies between 1) stubs and other stubs or 2) stubs and
// other runtime components. For example, some exception throw stubs
// need to be generated before compiler stubs (such as the
// deoptimization stub) so that the latter can invoke the throw routine
// in bail-out code. Likewise, stubs that access objects (such as the
// object array copy stub) need to be created after initialization of
// some GC constants and generation of the GC barrier stubs they might
// need to invoke.
//
// Class scheme:
//
@ -49,8 +76,7 @@
// | |
// | |
// stubRoutines.cpp stubRoutines_<arch>.cpp
// stubRoutines_<os_family>.cpp stubGenerator_<arch>.cpp
// stubRoutines_<os_arch>.cpp
// stubGenerator_<arch>.cpp
//
// Note 1: The important thing is a clean decoupling between stub
// entry points (interfacing to the whole vm; i.e., 1-to-n
@ -75,6 +101,8 @@
// 3. add a public accessor function to the instance variable
// 4. implement the corresponding generator function in the platform-dependent
// stubGenerator_<arch>.cpp file and call the function in generate_all() of that file
// 5. ensure the entry is generated in the right blob to satisfy initialization
// dependencies between it and other stubs or runtime components.
class UnsafeMemoryAccess : public CHeapObj<mtCode> {
private:
@ -137,11 +165,6 @@ class StubRoutines: AllStatic {
static address _call_stub_entry;
static address _forward_exception_entry;
static address _catch_exception_entry;
static address _throw_AbstractMethodError_entry;
static address _throw_IncompatibleClassChangeError_entry;
static address _throw_NullPointerException_at_call_entry;
static address _throw_StackOverflowError_entry;
static address _throw_delayed_StackOverflowError_entry;
static address _atomic_xchg_entry;
static address _atomic_cmpxchg_entry;
@ -269,11 +292,6 @@ class StubRoutines: AllStatic {
static address _cont_returnBarrier;
static address _cont_returnBarrierExc;
JFR_ONLY(static RuntimeStub* _jfr_write_checkpoint_stub;)
JFR_ONLY(static address _jfr_write_checkpoint;)
JFR_ONLY(static RuntimeStub* _jfr_return_lease_stub;)
JFR_ONLY(static address _jfr_return_lease;)
// Vector Math Routines
static address _vector_f_math[VectorSupport::NUM_VEC_SIZES][VectorSupport::NUM_SVML_OP];
static address _vector_d_math[VectorSupport::NUM_VEC_SIZES][VectorSupport::NUM_SVML_OP];
@ -329,12 +347,6 @@ class StubRoutines: AllStatic {
// Exceptions
static address forward_exception_entry() { return _forward_exception_entry; }
// Implicit exceptions
static address throw_AbstractMethodError_entry() { return _throw_AbstractMethodError_entry; }
static address throw_IncompatibleClassChangeError_entry(){ return _throw_IncompatibleClassChangeError_entry; }
static address throw_NullPointerException_at_call_entry(){ return _throw_NullPointerException_at_call_entry; }
static address throw_StackOverflowError_entry() { return _throw_StackOverflowError_entry; }
static address throw_delayed_StackOverflowError_entry() { return _throw_delayed_StackOverflowError_entry; }
static address atomic_xchg_entry() { return _atomic_xchg_entry; }
static address atomic_cmpxchg_entry() { return _atomic_cmpxchg_entry; }
@ -487,9 +499,6 @@ class StubRoutines: AllStatic {
static address cont_returnBarrier() { return _cont_returnBarrier; }
static address cont_returnBarrierExc(){return _cont_returnBarrierExc; }
JFR_ONLY(static address jfr_write_checkpoint() { return _jfr_write_checkpoint; })
JFR_ONLY(static address jfr_return_lease() { return _jfr_return_lease; })
static address upcall_stub_exception_handler() {
assert(_upcall_stub_exception_handler != nullptr, "not implemented");
return _upcall_stub_exception_handler;