8295110: RISC-V: Mark out relocations as incompressible
Reviewed-by: fyang, yadongwang
commit 9005af3b90 (parent 74a51ccc86)
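Background: with the RVC extension enabled, the assembler may emit 2-byte compressed forms of eligible 4-byte instructions. Instruction sequences covered by a relocation, however, are later patched under the assumption of fixed, full-width encodings, so they must never be compressed. This change adds relocate() overloads that take the emitting code as a lambda and run it inside an IncompressibleRegion, so the relocation covers exactly a run of uncompressed instructions. A minimal standalone sketch of the mechanism (toy types and names, not HotSpot code):

#include <cstdio>

// Toy assembler: models only the compressible/incompressible switch.
class ToyAssembler {
  bool _in_compressible_region = true;  // compressible by default
public:
  // RAII guard: instructions emitted while it is live stay full width.
  class IncompressibleRegion {
    ToyAssembler* _masm;
    bool _saved;
  public:
    explicit IncompressibleRegion(ToyAssembler* masm)
      : _masm(masm), _saved(masm->_in_compressible_region) {
      _masm->_in_compressible_region = false;
    }
    ~IncompressibleRegion() { _masm->_in_compressible_region = _saved; }
  };

  // Relocated sequences go through this overload, so the instructions a
  // relocation describes can never be shrunk and later mis-patched.
  template <typename Callback>
  void relocate(const char* rspec, Callback emit_insts) {
    printf("record relocation: %s\n", rspec);
    IncompressibleRegion ir(this);
    emit_insts();
  }

  void emit(const char* insn) {
    printf("  %-28s (%s)\n", insn,
           _in_compressible_region ? "may compress" : "full width");
  }
};

int main() {
  ToyAssembler masm;
  masm.emit("addi x5, x5, 1");  // ordinary code: may become c.addi
  masm.relocate("internal_word", [&] {
    masm.emit("auipc t0, %hi(target)");  // patched later: keep 4 bytes
    masm.emit("addi  t0, t0, %lo(target)");
  });
  masm.emit("ret");
  return 0;
}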
@@ -1842,6 +1842,27 @@ public:
   }
   };
 
+public:
+  // Emit a relocation.
+  void relocate(RelocationHolder const& rspec, int format = 0) {
+    AbstractAssembler::relocate(rspec, format);
+  }
+  void relocate(relocInfo::relocType rtype, int format = 0) {
+    AbstractAssembler::relocate(rtype, format);
+  }
+  template <typename Callback>
+  void relocate(RelocationHolder const& rspec, Callback emit_insts, int format = 0) {
+    AbstractAssembler::relocate(rspec, format);
+    IncompressibleRegion ir(this);  // relocations
+    emit_insts();
+  }
+  template <typename Callback>
+  void relocate(relocInfo::relocType rtype, Callback emit_insts, int format = 0) {
+    AbstractAssembler::relocate(rtype, format);
+    IncompressibleRegion ir(this);  // relocations
+    emit_insts();
+  }
+
   // patch a 16-bit instruction.
   static void c_patch(address a, unsigned msb, unsigned lsb, uint16_t val) {
     assert_cond(a != NULL);
@@ -2723,7 +2744,7 @@ public:
     return uabs(target - branch) < branch_range;
   }
 
-  Assembler(CodeBuffer* code) : AbstractAssembler(code), _in_compressible_region(false) {}
+  Assembler(CodeBuffer* code) : AbstractAssembler(code), _in_compressible_region(true) {}
 
   virtual ~Assembler() {}
 };
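The two assembler_riscv.hpp hunks above define the new API and flip the assembler's default to compressible (the constructor now starts with _in_compressible_region(true)): emitting RVC forms becomes the norm, and relocated sequences opt out through the lambda overloads. Every call-site hunk below is then the same mechanical rewrite; schematically (names as they appear in the hunks, sketch only):

// Before: the relocation and the instructions it covers are only
// adjacent by convention; nothing stops them being compressed.
__ relocate(target.rspec());
__ la_patchable(t0, target, offset);
__ jalr(x1, t0, offset);

// After: the lambda scopes an IncompressibleRegion over exactly the
// instructions that the relocation describes.
__ relocate(target.rspec(), [&] {
  int32_t offset;
  __ la_patchable(t0, target, offset);
  __ jalr(x1, t0, offset);
});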
@@ -42,8 +42,9 @@
 void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   InternalAddress safepoint_pc(__ pc() - __ offset() + safepoint_offset());
-  __ relocate(safepoint_pc.rspec());
-  __ la(t0, safepoint_pc.target());
+  __ relocate(safepoint_pc.rspec(), [&] {
+    __ la(t0, safepoint_pc.target());
+  });
   __ sd(t0, Address(xthread, JavaThread::saved_exception_pc_offset()));
 
   assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
@@ -101,9 +102,12 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
     __ mv(t1, _array->as_pointer_register());
     stub_id = Runtime1::throw_range_check_failed_id;
   }
-  int32_t off = 0;
-  __ la_patchable(ra, RuntimeAddress(Runtime1::entry_for(stub_id)), off);
-  __ jalr(ra, ra, off);
+  RuntimeAddress target(Runtime1::entry_for(stub_id));
+  __ relocate(target.rspec(), [&] {
+    int32_t offset;
+    __ la_patchable(ra, target, offset);
+    __ jalr(ra, ra, offset);
+  });
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
   debug_only(__ should_not_reach_here());
@@ -1409,9 +1409,11 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit
   }
   int pc_for_athrow_offset = __ offset();
   InternalAddress pc_for_athrow(__ pc());
-  int32_t off = 0;
-  __ la_patchable(exceptionPC->as_register(), pc_for_athrow, off);
-  __ addi(exceptionPC->as_register(), exceptionPC->as_register(), off);
+  __ relocate(pc_for_athrow.rspec(), [&] {
+    int32_t offset;
+    __ la_patchable(exceptionPC->as_register(), pc_for_athrow, offset);
+    __ addi(exceptionPC->as_register(), exceptionPC->as_register(), offset);
+  });
   add_call_info(pc_for_athrow_offset, info); // for exception handler
 
   __ verify_not_null_oop(x10);
@@ -1839,9 +1841,12 @@ void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* arg
   if (cb != NULL) {
     __ far_call(RuntimeAddress(dest));
   } else {
-    int32_t offset = 0;
-    __ la_patchable(t0, RuntimeAddress(dest), offset);
-    __ jalr(x1, t0, offset);
+    RuntimeAddress target(dest);
+    __ relocate(target.rspec(), [&] {
+      int32_t offset;
+      __ la_patchable(t0, target, offset);
+      __ jalr(x1, t0, offset);
+    });
   }
 
   if (info != NULL) {
@@ -67,9 +67,12 @@ int StubAssembler::call_RT(Register oop_result, Register metadata_result, addres
   set_last_Java_frame(sp, fp, retaddr, t0);
 
   // do the call
-  int32_t off = 0;
-  la_patchable(t0, RuntimeAddress(entry), off);
-  jalr(x1, t0, off);
+  RuntimeAddress target(entry);
+  relocate(target.rspec(), [&] {
+    int32_t offset;
+    la_patchable(t0, target, offset);
+    jalr(x1, t0, offset);
+  });
   bind(retaddr);
   int call_offset = offset();
   // verify callee-saved register
@@ -567,9 +570,12 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
   Label retaddr;
   __ set_last_Java_frame(sp, fp, retaddr, t0);
   // do the call
-  int32_t off = 0;
-  __ la_patchable(t0, RuntimeAddress(target), off);
-  __ jalr(x1, t0, off);
+  RuntimeAddress addr(target);
+  __ relocate(addr.rspec(), [&] {
+    int32_t offset;
+    __ la_patchable(t0, addr, offset);
+    __ jalr(x1, t0, offset);
+  });
   __ bind(retaddr);
   OopMapSet* oop_maps = new OopMapSet();
   assert_cond(oop_maps != NULL);
@@ -38,9 +38,10 @@ void C2SafepointPollStubTable::emit_stub_impl(MacroAssembler& masm, C2SafepointP
   RuntimeAddress callback_addr(stub);
 
   __ bind(entry->_stub_label);
-  InternalAddress safepoint_pc(masm.pc() - masm.offset() + entry->_safepoint_offset);
-  masm.relocate(safepoint_pc.rspec());
-  __ la(t0, safepoint_pc.target());
+  InternalAddress safepoint_pc(__ pc() - __ offset() + entry->_safepoint_offset);
+  __ relocate(safepoint_pc.rspec(), [&] {
+    __ la(t0, safepoint_pc.target());
+  });
   __ sd(t0, Address(xthread, JavaThread::saved_exception_pc_offset()));
   __ far_jump(callback_addr);
 }
@@ -338,9 +338,13 @@ void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, Z
   {
     ZSaveLiveRegisters save_live_registers(masm, stub);
     ZSetupArguments setup_arguments(masm, stub);
-    int32_t offset = 0;
-    __ la_patchable(t0, stub->slow_path(), offset);
-    __ jalr(x1, t0, offset);
+
+    Address target(stub->slow_path());
+    __ relocate(target.rspec(), [&] {
+      int32_t offset;
+      __ la_patchable(t0, target, offset);
+      __ jalr(x1, t0, offset);
+    });
   }
 
   // Stub exit
@@ -182,9 +182,12 @@ void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(Register reg, i
 }
 
 void InterpreterMacroAssembler::get_dispatch() {
-  int32_t offset = 0;
-  la_patchable(xdispatch, ExternalAddress((address)Interpreter::dispatch_table()), offset);
-  addi(xdispatch, xdispatch, offset);
+  ExternalAddress target((address)Interpreter::dispatch_table());
+  relocate(target.rspec(), [&] {
+    int32_t offset;
+    la_patchable(xdispatch, target, offset);
+    addi(xdispatch, xdispatch, offset);
+  });
 }
 
 void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
@@ -73,11 +73,14 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
   MacroAssembler* masm = new MacroAssembler(&cbuf);
   address fast_entry = __ pc();
 
-  Label slow;
-  int32_t offset = 0;
-  __ la_patchable(rcounter_addr, SafepointSynchronize::safepoint_counter_addr(), offset);
-  __ addi(rcounter_addr, rcounter_addr, offset);
+  Address target(SafepointSynchronize::safepoint_counter_addr());
+  __ relocate(target.rspec(), [&] {
+    int32_t offset;
+    __ la_patchable(rcounter_addr, target, offset);
+    __ addi(rcounter_addr, rcounter_addr, offset);
+  });
 
+  Label slow;
   Address safepoint_counter_addr(rcounter_addr, 0);
   __ lwu(rcounter, safepoint_counter_addr);
   // An even value means there are no ongoing safepoint operations
@@ -90,11 +93,12 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
 
     // Check to see if a field access watch has been set before we
    // take the fast path.
-    int32_t offset2;
-    __ la_patchable(result,
-                    ExternalAddress((address) JvmtiExport::get_field_access_count_addr()),
-                    offset2);
-    __ lwu(result, Address(result, offset2));
+    ExternalAddress target((address) JvmtiExport::get_field_access_count_addr());
+    __ relocate(target.rspec(), [&] {
+      int32_t offset;
+      __ la_patchable(result, target, offset);
+      __ lwu(result, Address(result, offset));
+    });
     __ bnez(result, slow);
 
     __ mv(robj, c_rarg1);
@@ -169,9 +173,12 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
 
   {
     __ enter();
-    int32_t tmp_offset = 0;
-    __ la_patchable(t0, ExternalAddress(slow_case_addr), tmp_offset);
-    __ jalr(x1, t0, tmp_offset);
+    ExternalAddress target(slow_case_addr);
+    __ relocate(target.rspec(), [&] {
+      int32_t offset;
+      __ la_patchable(t0, target, offset);
+      __ jalr(x1, t0, offset);
+    });
     __ leave();
     __ ret();
   }
@@ -307,9 +307,12 @@ void MacroAssembler::call_VM_base(Register oop_result,
     ld(t0, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
     Label ok;
     beqz(t0, ok);
-    int32_t offset = 0;
-    la_patchable(t0, RuntimeAddress(StubRoutines::forward_exception_entry()), offset);
-    jalr(x0, t0, offset);
+    RuntimeAddress target(StubRoutines::forward_exception_entry());
+    relocate(target.rspec(), [&] {
+      int32_t offset;
+      la_patchable(t0, target, offset);
+      jalr(x0, t0, offset);
+    });
     bind(ok);
   }
 
@@ -382,9 +385,12 @@ void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file,
   movptr(t0, (address)b);
 
   // call indirectly to solve generation ordering problem
-  int32_t offset = 0;
-  la_patchable(t1, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()), offset);
-  ld(t1, Address(t1, offset));
+  ExternalAddress target(StubRoutines::verify_oop_subroutine_entry_address());
+  relocate(target.rspec(), [&] {
+    int32_t offset;
+    la_patchable(t1, target, offset);
+    ld(t1, Address(t1, offset));
+  });
   jalr(t1);
 
   pop_reg(RegSet::of(ra, t0, t1, c_rarg0), sp);
@@ -421,9 +427,12 @@ void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* f
   movptr(t0, (address)b);
 
   // call indirectly to solve generation ordering problem
-  int32_t offset = 0;
-  la_patchable(t1, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()), offset);
-  ld(t1, Address(t1, offset));
+  ExternalAddress target(StubRoutines::verify_oop_subroutine_entry_address());
+  relocate(target.rspec(), [&] {
+    int32_t offset;
+    la_patchable(t1, target, offset);
+    ld(t1, Address(t1, offset));
+  });
   jalr(t1);
 
   pop_reg(RegSet::of(ra, t0, t1, c_rarg0), sp);
@@ -763,8 +772,9 @@ void MacroAssembler::la(Register Rd, const Address &adr) {
       if (rtype == relocInfo::none) {
        mv(Rd, (intptr_t)(adr.target()));
      } else {
-        relocate(adr.rspec());
-        movptr(Rd, adr.target());
+        relocate(adr.rspec(), [&] {
+          movptr(Rd, adr.target());
+        });
      }
      break;
    }
@@ -884,8 +894,9 @@ void MacroAssembler::li(Register Rd, int64_t imm) {
   void MacroAssembler::NAME(const Address &adr, Register temp) { \
     switch (adr.getMode()) { \
       case Address::literal: { \
-        relocate(adr.rspec()); \
-        NAME(adr.target(), temp); \
+        relocate(adr.rspec(), [&] { \
+          NAME(adr.target(), temp); \
+        }); \
         break; \
       } \
       case Address::base_plus_offset: { \
@@ -1584,17 +1595,21 @@ void MacroAssembler::reinit_heapbase() {
     if (Universe::is_fully_initialized()) {
       mv(xheapbase, CompressedOops::ptrs_base());
     } else {
-      int32_t offset = 0;
-      la_patchable(xheapbase, ExternalAddress(CompressedOops::ptrs_base_addr()), offset);
-      ld(xheapbase, Address(xheapbase, offset));
+      ExternalAddress target(CompressedOops::ptrs_base_addr());
+      relocate(target.rspec(), [&] {
+        int32_t offset;
+        la_patchable(xheapbase, target, offset);
+        ld(xheapbase, Address(xheapbase, offset));
+      });
     }
   }
 }
 
 void MacroAssembler::mv(Register Rd, Address dest) {
   assert(dest.getMode() == Address::literal, "Address mode should be Address::literal");
-  relocate(dest.rspec());
-  movptr(Rd, dest.target());
+  relocate(dest.rspec(), [&] {
+    movptr(Rd, dest.target());
+  });
 }
 
 void MacroAssembler::mv(Register Rd, RegisterOrConstant src) {
@@ -2019,8 +2034,12 @@ void MacroAssembler::bang_stack_size(Register size, Register tmp) {
 SkipIfEqual::SkipIfEqual(MacroAssembler* masm, const bool* flag_addr, bool value) {
-  int32_t offset = 0;
   _masm = masm;
-  _masm->la_patchable(t0, ExternalAddress((address)flag_addr), offset);
-  _masm->lbu(t0, Address(t0, offset));
+  ExternalAddress target((address)flag_addr);
+  _masm->relocate(target.rspec(), [&] {
+    int32_t offset;
+    _masm->la_patchable(t0, target, offset);
+    _masm->lbu(t0, Address(t0, offset));
+  });
   _masm->beqz(t0, _label);
 }
 
@@ -2764,12 +2783,14 @@ void MacroAssembler::far_jump(Address entry, Register tmp) {
          || entry.rspec().type() == relocInfo::runtime_call_type
          || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
   IncompressibleRegion ir(this);  // Fixed length: see MacroAssembler::far_branch_size()
-  int32_t offset = 0;
   if (far_branches()) {
     // We can use auipc + jalr here because we know that the total size of
     // the code cache cannot exceed 2Gb.
-    la_patchable(tmp, entry, offset);
-    jalr(x0, tmp, offset);
+    relocate(entry.rspec(), [&] {
+      int32_t offset;
+      la_patchable(tmp, entry, offset);
+      jalr(x0, tmp, offset);
+    });
   } else {
     j(entry);
   }
@@ -2783,12 +2804,14 @@ void MacroAssembler::far_call(Address entry, Register tmp) {
          || entry.rspec().type() == relocInfo::runtime_call_type
          || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
   IncompressibleRegion ir(this);  // Fixed length: see MacroAssembler::far_branch_size()
-  int32_t offset = 0;
   if (far_branches()) {
     // We can use auipc + jalr here because we know that the total size of
     // the code cache cannot exceed 2Gb.
-    la_patchable(tmp, entry, offset);
-    jalr(x1, tmp, offset); // link
+    relocate(entry.rspec(), [&] {
+      int32_t offset;
+      la_patchable(tmp, entry, offset);
+      jalr(x1, tmp, offset); // link
+    });
   } else {
     jal(entry); // link
   }
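far_jump and far_call keep their pre-existing IncompressibleRegion even though relocate() now opens one as well: the whole auipc+jalr pair must occupy exactly far_branch_size() bytes, because callers reserve and later patch that fixed amount. A toy check of the arithmetic (instruction widths per the RISC-V spec; far_branch_size is modeled here, not the HotSpot function):

#include <cassert>

int main() {
  const int insn = 4;    // base RISC-V instruction width
  const int c_insn = 2;  // RVC compressed width

  int far_branch_size = 2 * insn;  // auipc + jalr, both full width
  assert(far_branch_size == 8);

  // If either instruction were silently compressed, the sequence would
  // shrink to 6 bytes and code that patches a reserved 8-byte slot
  // would overwrite the following instruction.
  assert(insn + c_insn == 6);
  return 0;
}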
@@ -3024,7 +3047,6 @@ void MacroAssembler::la_patchable(Register reg1, const Address &dest, int32_t &o
   assert(is_valid_riscv64_address(dest.target()), "bad address");
   assert(dest.getMode() == Address::literal, "la_patchable must be applied to a literal address");
 
-  relocate(dest.rspec());
   // RISC-V doesn't compute a page-aligned address, in order to partially
   // compensate for the use of *signed* offsets in its base+disp12
   // addressing mode (RISC-V's PC-relative reach remains asymmetric
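After this hunk la_patchable itself no longer records a relocation; each caller wraps it in relocate(..., [&] {...}), so the relocation and the incompressible scope cover the auipc-based pair it expands to. The context comment refers to the usual RISC-V hi/lo split, in which the 12-bit low part is signed; a sketch of that arithmetic (plain C++, my own illustration of the standard technique, not HotSpot code):

#include <cassert>
#include <cstdint>
#include <initializer_list>

// Split a PC-relative distance into the auipc immediate (hi20) and the
// signed 12-bit low part consumed by addi/ld/jalr. Adding 0x800 first
// rounds hi20 so that lo12 lands in [-2048, 2047].
void split_offset(int64_t distance, int32_t& hi20, int32_t& lo12) {
  hi20 = (int32_t)((distance + 0x800) >> 12);
  lo12 = (int32_t)(distance - ((int64_t)hi20 << 12));
}

int main() {
  for (int64_t d : {0L, 2047L, 2048L, -2049L, 123456789L, -123456789L}) {
    int32_t hi, lo;
    split_offset(d, hi, lo);
    assert(lo >= -2048 && lo <= 2047);          // lo fits a signed disp12
    assert(((int64_t)hi << 12) + lo == d);      // split reconstructs d
  }
  return 0;
}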
@@ -3064,17 +3086,23 @@ void MacroAssembler::reserved_stack_check() {
 
   enter();   // RA and FP are live.
   mv(c_rarg0, xthread);
-  int32_t offset = 0;
-  la_patchable(t0, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone)), offset);
-  jalr(x1, t0, offset);
+  RuntimeAddress target(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone));
+  relocate(target.rspec(), [&] {
+    int32_t offset;
+    la_patchable(t0, target, offset);
+    jalr(x1, t0, offset);
+  });
   leave();
 
   // We have already removed our own frame.
   // throw_delayed_StackOverflowError will think that it's been
   // called by our caller.
-  offset = 0;
-  la_patchable(t0, RuntimeAddress(StubRoutines::throw_delayed_StackOverflowError_entry()), offset);
-  jalr(x0, t0, offset);
+  target = RuntimeAddress(StubRoutines::throw_delayed_StackOverflowError_entry());
+  relocate(target.rspec(), [&] {
+    int32_t offset;
+    la_patchable(t0, target, offset);
+    jalr(x0, t0, offset);
+  });
   should_not_reach_here();
 
   bind(no_reserved_zone_enabling);
@@ -3088,8 +3116,9 @@ void MacroAssembler::get_polling_page(Register dest, relocInfo::relocType rtype)
 // Read the polling page. The address of the polling page must
 // already be in r.
 void MacroAssembler::read_polling_page(Register r, int32_t offset, relocInfo::relocType rtype) {
-  relocate(rtype);
-  lwu(zr, Address(r, offset));
+  relocate(rtype, [&] {
+    lwu(zr, Address(r, offset));
+  });
 }
 
 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
@@ -3103,8 +3132,9 @@ void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
   }
 #endif
   int oop_index = oop_recorder()->find_index(obj);
-  relocate(oop_Relocation::spec(oop_index));
-  li32(dst, 0xDEADBEEF);
+  relocate(oop_Relocation::spec(oop_index), [&] {
+    li32(dst, 0xDEADBEEF);
+  });
   zero_extend(dst, dst, 32);
 }
 
@@ -3115,8 +3145,9 @@ void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
   assert(!Universe::heap()->is_in(k), "should not be an oop");
 
   narrowKlass nk = CompressedKlassPointers::encode(k);
-  relocate(metadata_Relocation::spec(index));
-  li32(dst, nk);
+  relocate(metadata_Relocation::spec(index), [&] {
+    li32(dst, nk);
+  });
   zero_extend(dst, dst, 32);
 }
 
@@ -3163,8 +3194,9 @@ address MacroAssembler::trampoline_call(Address entry) {
     assert_alignment(call_pc);
   }
 #endif
-  relocate(entry.rspec());
-  jal(target);
+  relocate(entry.rspec(), [&] {
+    jal(target);
+  });
 
   postcond(pc() != badAddress);
   return call_pc;
@@ -3172,6 +3204,7 @@ address MacroAssembler::trampoline_call(Address entry) {
 
 address MacroAssembler::ic_call(address entry, jint method_index) {
   RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
+  IncompressibleRegion ir(this);  // relocations
   movptr(t1, (address)Universe::non_oop_word());
   assert_cond(entry != NULL);
   return trampoline_call(Address(entry, rh));
@@ -3203,21 +3236,22 @@ address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
   // Make sure the address of destination 8-byte aligned after 3 instructions.
   align(wordSize, NativeCallTrampolineStub::data_offset);
 
-  relocate(trampoline_stub_Relocation::spec(code()->insts()->start() +
-                                            insts_call_instruction_offset));
+  RelocationHolder rh = trampoline_stub_Relocation::spec(code()->insts()->start() +
+                                                         insts_call_instruction_offset);
   const int stub_start_offset = offset();
 
-  // Now, create the trampoline stub's code:
-  // - load the call
-  // - call
-  Label target;
-  ld(t0, target);  // auipc + ld
-  jr(t0);          // jalr
-  bind(target);
-  assert(offset() - stub_start_offset == NativeCallTrampolineStub::data_offset,
-         "should be");
-  assert(offset() % wordSize == 0, "bad alignment");
-  emit_int64((intptr_t)dest);
+  relocate(rh, [&] {
+    // Now, create the trampoline stub's code:
+    // - load the call
+    // - call
+    Label target;
+    ld(t0, target);  // auipc + ld
+    jr(t0);          // jalr
+    bind(target);
+    assert(offset() - stub_start_offset == NativeCallTrampolineStub::data_offset,
+           "should be");
+    assert(offset() % wordSize == 0, "bad alignment");
+    emit_int64((int64_t)dest);
+  });
 
   const address stub_start_addr = addr_at(stub_start_offset);
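The trampoline stub hunk above is the one place where a relocation covers data as well as code: the auipc+ld pair, the jalr, and the 8-byte destination word all sit inside a single relocate(rh, [&] {...}) scope. A sketch of the layout it protects (offsets are illustrative; the authoritative values live in NativeCallTrampolineStub):

// Trampoline stub shape implied by the hunk above:
//
//   auipc t0, %hi(data)      // ld(t0, target) expands to auipc + ld
//   ld    t0, %lo(data)(t0)
//   jalr  x0, t0, 0          // jr(t0)
//   .8byte dest              // word at data_offset, patched at runtime
//
// The asserts in the hunk verify that code generation really ends at
// data_offset with word alignment; a single compressed instruction
// anywhere in the sequence would shift the data word and trip them.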
@@ -3285,9 +3319,11 @@ void MacroAssembler::decrementw(const Address dst, int32_t value) {
 
 void MacroAssembler::cmpptr(Register src1, Address src2, Label& equal) {
   assert_different_registers(src1, t0);
-  int32_t offset;
-  la_patchable(t0, src2, offset);
-  ld(t0, Address(t0, offset));
+  relocate(src2.rspec(), [&] {
+    int32_t offset;
+    la_patchable(t0, src2, offset);
+    ld(t0, Address(t0, offset));
+  });
   beq(src1, t0, equal);
 }
 
@@ -4472,11 +4508,14 @@ void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) {
 
 void MacroAssembler::rt_call(address dest, Register tmp) {
   CodeBlob *cb = CodeCache::find_blob(dest);
+  RuntimeAddress target(dest);
   if (cb) {
-    far_call(RuntimeAddress(dest));
+    far_call(target);
   } else {
-    int32_t offset = 0;
-    la_patchable(tmp, RuntimeAddress(dest), offset);
-    jalr(x1, tmp, offset);
+    relocate(target.rspec(), [&] {
+      int32_t offset;
+      la_patchable(tmp, target, offset);
+      jalr(x1, tmp, offset);
+    });
   }
 }
@@ -494,7 +494,8 @@ class MacroAssembler: public Assembler {
   result_type header { \
     guarantee(rtype == relocInfo::internal_word_type, \
               "only internal_word_type relocs make sense here"); \
-    relocate(InternalAddress(dest).rspec());
+    relocate(InternalAddress(dest).rspec()); \
+    IncompressibleRegion ir(this);  /* relocations */
 
 #define INSN(NAME) \
   void NAME(Register Rs1, Register Rs2, const address dest) { \
@@ -665,7 +666,8 @@ public:
   result_type header { \
     guarantee(rtype == relocInfo::internal_word_type, \
               "only internal_word_type relocs make sense here"); \
-    relocate(InternalAddress(dest).rspec());
+    relocate(InternalAddress(dest).rspec()); \
+    IncompressibleRegion ir(this);  /* relocations */
 
 #define INSN(NAME) \
   void NAME(Register Rd, address dest) { \
@@ -686,8 +688,9 @@ public:
   void NAME(Register Rd, const Address &adr, Register temp = t0) { \
     switch (adr.getMode()) { \
       case Address::literal: { \
-        relocate(adr.rspec()); \
-        NAME(Rd, adr.target()); \
+        relocate(adr.rspec(), [&] { \
+          NAME(Rd, adr.target()); \
+        }); \
         break; \
       } \
       case Address::base_plus_offset: { \
@@ -743,8 +746,9 @@ public:
   void NAME(FloatRegister Rd, const Address &adr, Register temp = t0) { \
     switch (adr.getMode()) { \
      case Address::literal: { \
-        relocate(adr.rspec()); \
-        NAME(Rd, adr.target(), temp); \
+        relocate(adr.rspec(), [&] { \
+          NAME(Rd, adr.target(), temp); \
+        }); \
        break; \
      } \
      case Address::base_plus_offset: { \
@@ -800,8 +804,9 @@ public:
     switch (adr.getMode()) { \
       case Address::literal: { \
         assert_different_registers(Rs, temp); \
-        relocate(adr.rspec()); \
-        NAME(Rs, adr.target(), temp); \
+        relocate(adr.rspec(), [&] { \
+          NAME(Rs, adr.target(), temp); \
+        }); \
        break; \
      } \
      case Address::base_plus_offset: { \
@@ -843,8 +848,9 @@ public:
   void NAME(FloatRegister Rs, const Address &adr, Register temp = t0) { \
     switch (adr.getMode()) { \
      case Address::literal: { \
-        relocate(adr.rspec()); \
-        NAME(Rs, adr.target(), temp); \
+        relocate(adr.rspec(), [&] { \
+          NAME(Rs, adr.target(), temp); \
+        }); \
        break; \
      } \
      case Address::base_plus_offset: { \
@@ -1242,9 +1248,12 @@ private:
     if (NearCpool) {
       ld(dest, const_addr);
     } else {
-      int32_t offset = 0;
-      la_patchable(dest, InternalAddress(const_addr.target()), offset);
-      ld(dest, Address(dest, offset));
+      InternalAddress target(const_addr.target());
+      relocate(target.rspec(), [&] {
+        int32_t offset;
+        la_patchable(dest, target, offset);
+        ld(dest, Address(dest, offset));
+      });
     }
   }
 
@@ -339,9 +339,12 @@ static void patch_callers_callsite(MacroAssembler *masm) {
 
   __ mv(c_rarg0, xmethod);
   __ mv(c_rarg1, ra);
-  int32_t offset = 0;
-  __ la_patchable(t0, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)), offset);
-  __ jalr(x1, t0, offset);
+  RuntimeAddress target(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
+  __ relocate(target.rspec(), [&] {
+    int32_t offset;
+    __ la_patchable(t0, target, offset);
+    __ jalr(x1, t0, offset);
+  });
 
   __ pop_CPU_state();
   // restore sp
@@ -1287,9 +1290,12 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
 
   Label dtrace_method_entry, dtrace_method_entry_done;
   {
-    int32_t offset = 0;
-    __ la_patchable(t0, ExternalAddress((address)&DTraceMethodProbes), offset);
-    __ lbu(t0, Address(t0, offset));
+    ExternalAddress target((address)&DTraceMethodProbes);
+    __ relocate(target.rspec(), [&] {
+      int32_t offset;
+      __ la_patchable(t0, target, offset);
+      __ lbu(t0, Address(t0, offset));
+    });
     __ addw(t0, t0, zr);
     __ bnez(t0, dtrace_method_entry);
     __ bind(dtrace_method_entry_done);
@@ -1490,9 +1496,12 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
 
   Label dtrace_method_exit, dtrace_method_exit_done;
   {
-    int32_t offset = 0;
-    __ la_patchable(t0, ExternalAddress((address)&DTraceMethodProbes), offset);
-    __ lbu(t0, Address(t0, offset));
+    ExternalAddress target((address)&DTraceMethodProbes);
+    __ relocate(target.rspec(), [&] {
+      int32_t offset;
+      __ la_patchable(t0, target, offset);
+      __ lbu(t0, Address(t0, offset));
+    });
     __ bnez(t0, dtrace_method_exit);
     __ bind(dtrace_method_exit_done);
   }
@@ -1623,9 +1632,12 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
 #ifndef PRODUCT
   assert(frame::arg_reg_save_area_bytes == 0, "not expecting frame reg save area");
 #endif
-  int32_t offset = 0;
-  __ la_patchable(t0, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)), offset);
-  __ jalr(x1, t0, offset);
+  RuntimeAddress target(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
+  __ relocate(target.rspec(), [&] {
+    int32_t offset;
+    __ la_patchable(t0, target, offset);
+    __ jalr(x1, t0, offset);
+  });
 
   // Restore any method result value
   restore_native_result(masm, ret_type, stack_slots);
@@ -1797,9 +1809,12 @@ void SharedRuntime::generate_deopt_blob() {
   __ mvw(xcpool, (int32_t)Deoptimization::Unpack_reexecute);
   __ mv(c_rarg0, xthread);
   __ orrw(c_rarg2, zr, xcpool); // exec mode
-  int32_t offset = 0;
-  __ la_patchable(t0, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)), offset);
-  __ jalr(x1, t0, offset);
+  RuntimeAddress target(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap));
+  __ relocate(target.rspec(), [&] {
+    int32_t offset;
+    __ la_patchable(t0, target, offset);
+    __ jalr(x1, t0, offset);
+  });
   __ bind(retaddr);
   oop_maps->add_gc_map( __ pc()-start, map->deep_copy());
 
@@ -1891,9 +1906,12 @@ void SharedRuntime::generate_deopt_blob() {
 #endif // ASSERT
   __ mv(c_rarg0, xthread);
   __ mv(c_rarg1, xcpool);
-  int32_t offset = 0;
-  __ la_patchable(t0, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)), offset);
-  __ jalr(x1, t0, offset);
+  RuntimeAddress target(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info));
+  __ relocate(target.rspec(), [&] {
+    int32_t offset;
+    __ la_patchable(t0, target, offset);
+    __ jalr(x1, t0, offset);
+  });
   __ bind(retaddr);
 
   // Need to have an oopmap that tells fetch_unroll_info where to
@@ -2035,9 +2053,12 @@ void SharedRuntime::generate_deopt_blob() {
 
   __ mv(c_rarg0, xthread);
   __ mv(c_rarg1, xcpool); // second arg: exec_mode
-  offset = 0;
-  __ la_patchable(t0, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)), offset);
-  __ jalr(x1, t0, offset);
+  target = RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames));
+  __ relocate(target.rspec(), [&] {
+    int32_t offset;
+    __ la_patchable(t0, target, offset);
+    __ jalr(x1, t0, offset);
+  });
 
   // Set an oopmap for the call site
   // Use the same PC we used for the last java frame
@@ -2127,11 +2148,12 @@ void SharedRuntime::generate_uncommon_trap_blob() {
 
   __ mv(c_rarg0, xthread);
   __ mvw(c_rarg2, (unsigned)Deoptimization::Unpack_uncommon_trap);
-  int32_t offset = 0;
-  __ la_patchable(t0,
-                  RuntimeAddress(CAST_FROM_FN_PTR(address,
-                                 Deoptimization::uncommon_trap)), offset);
-  __ jalr(x1, t0, offset);
+  RuntimeAddress target(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap));
+  __ relocate(target.rspec(), [&] {
+    int32_t offset;
+    __ la_patchable(t0, target, offset);
+    __ jalr(x1, t0, offset);
+  });
   __ bind(retaddr);
 
   // Set an oopmap for the call site
@@ -2253,9 +2275,12 @@ void SharedRuntime::generate_uncommon_trap_blob() {
   // sp should already be aligned
   __ mv(c_rarg0, xthread);
   __ mvw(c_rarg1, (unsigned)Deoptimization::Unpack_uncommon_trap);
-  offset = 0;
-  __ la_patchable(t0, RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)), offset);
-  __ jalr(x1, t0, offset);
+  target = RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames));
+  __ relocate(target.rspec(), [&] {
+    int32_t offset;
+    __ la_patchable(t0, target, offset);
+    __ jalr(x1, t0, offset);
+  });
 
   // Set an oopmap for the call site
   // Use the same PC we used for the last java frame
@@ -2324,9 +2349,12 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
 
   // Do the call
   __ mv(c_rarg0, xthread);
-  int32_t offset = 0;
-  __ la_patchable(t0, RuntimeAddress(call_ptr), offset);
-  __ jalr(x1, t0, offset);
+  RuntimeAddress target(call_ptr);
+  __ relocate(target.rspec(), [&] {
+    int32_t offset;
+    __ la_patchable(t0, target, offset);
+    __ jalr(x1, t0, offset);
+  });
   __ bind(retaddr);
 
   // Set an oopmap for the call site. This oopmap will map all
@@ -2434,9 +2462,12 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
   __ set_last_Java_frame(sp, noreg, retaddr, t0);
 
   __ mv(c_rarg0, xthread);
-  int32_t offset = 0;
-  __ la_patchable(t0, RuntimeAddress(destination), offset);
-  __ jalr(x1, t0, offset);
+  RuntimeAddress target(destination);
+  __ relocate(target.rspec(), [&] {
+    int32_t offset;
+    __ la_patchable(t0, target, offset);
+    __ jalr(x1, t0, offset);
+  });
   __ bind(retaddr);
 }
 
@@ -2565,9 +2596,12 @@ void OptoRuntime::generate_exception_blob() {
   address the_pc = __ pc();
   __ set_last_Java_frame(sp, noreg, the_pc, t0);
   __ mv(c_rarg0, xthread);
-  int32_t offset = 0;
-  __ la_patchable(t0, RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)), offset);
-  __ jalr(x1, t0, offset);
+  RuntimeAddress target(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C));
+  __ relocate(target.rspec(), [&] {
+    int32_t offset;
+    __ la_patchable(t0, target, offset);
+    __ jalr(x1, t0, offset);
+  });
 
 
   // handle_exception_C is a special VM call which does not require an explicit
@@ -2263,9 +2263,12 @@ void TemplateTable::jvmti_post_field_access(Register cache, Register index,
     // take the time to call into the VM.
     Label L1;
     assert_different_registers(cache, index, x10);
-    int32_t offset = 0;
-    __ la_patchable(t0, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()), offset);
-    __ lwu(x10, Address(t0, offset));
+    ExternalAddress target((address) JvmtiExport::get_field_access_count_addr());
+    __ relocate(target.rspec(), [&] {
+      int32_t offset;
+      __ la_patchable(t0, target, offset);
+      __ lwu(x10, Address(t0, offset));
+    });
 
     __ beqz(x10, L1);
 
@@ -2479,9 +2482,12 @@ void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is
     // we take the time to call into the VM.
     Label L1;
     assert_different_registers(cache, index, x10);
-    int32_t offset = 0;
-    __ la_patchable(t0, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()), offset);
-    __ lwu(x10, Address(t0, offset));
+    ExternalAddress target((address)JvmtiExport::get_field_modification_count_addr());
+    __ relocate(target.rspec(), [&] {
+      int32_t offset;
+      __ la_patchable(t0, target, offset);
+      __ lwu(x10, Address(t0, offset));
+    });
     __ beqz(x10, L1);
 
     __ get_cache_and_index_at_bcp(c_rarg2, t0, 1);
@@ -2778,9 +2784,12 @@ void TemplateTable::jvmti_post_fast_field_mod() {
     // Check to see if a field modification watch has been set before
     // we take the time to call into the VM.
     Label L2;
-    int32_t offset = 0;
-    __ la_patchable(t0, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()), offset);
-    __ lwu(c_rarg3, Address(t0, offset));
+    ExternalAddress target((address)JvmtiExport::get_field_modification_count_addr());
+    __ relocate(target.rspec(), [&] {
+      int32_t offset;
+      __ la_patchable(t0, target, offset);
+      __ lwu(c_rarg3, Address(t0, offset));
+    });
     __ beqz(c_rarg3, L2);
     __ pop_ptr(x9);  // copy the object pointer from tos
     __ verify_oop(x9);
@@ -2914,9 +2923,12 @@ void TemplateTable::fast_accessfield(TosState state) {
     // Check to see if a field access watch has been set before we
     // take the time to call into the VM.
     Label L1;
-    int32_t offset = 0;
-    __ la_patchable(t0, ExternalAddress((address)JvmtiExport::get_field_access_count_addr()), offset);
-    __ lwu(x12, Address(t0, offset));
+    ExternalAddress target((address)JvmtiExport::get_field_access_count_addr());
+    __ relocate(target.rspec(), [&] {
+      int32_t offset;
+      __ la_patchable(t0, target, offset);
+      __ lwu(x12, Address(t0, offset));
+    });
     __ beqz(x12, L1);
     // access constant pool cache entry
     __ get_cache_entry_pointer_at_bcp(c_rarg2, t1, 1);