8351151: Clean up x86 template interpreter after 32-bit x86 removal
Reviewed-by: coleenp, fparain, vlivanov

This commit is contained in: parent b9907801af, commit e2cd70aab6
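A note before the diff: most of the churn below is mechanical removal of HotSpot's word-size selection macros. LP64_ONLY(code) expands to its argument on 64-bit builds and to nothing on 32-bit builds, and NOT_LP64(code) is the inverse; they live in share/utilities/globalDefinitions.hpp along the lines of this simplified sketch:

#ifdef _LP64
#define LP64_ONLY(code) code   // kept on 64-bit builds
#define NOT_LP64(code)         // dropped on 64-bit builds
#else
#define LP64_ONLY(code)
#define NOT_LP64(code) code
#endif

With the 32-bit x86 port gone, an expression such as LP64_ONLY(r15_thread) NOT_LP64(rcx) always expands to r15_thread, so the diff collapses each such use to its 64-bit form and deletes the #ifndef _LP64 blocks outright.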
@@ -1401,9 +1401,6 @@ void InterpreterMacroAssembler::_interp_verify_oop(Register reg, TosState state,
}
}

void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { ; }

void InterpreterMacroAssembler::notify_method_entry() {
// Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
// track stack depth. If it is possible to enter interp_only_mode we add

@@ -303,8 +303,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
// only if +VerifyOops && state == atos
#define interp_verify_oop(reg, state) _interp_verify_oop(reg, state, __FILE__, __LINE__);
void _interp_verify_oop(Register reg, TosState state, const char* file, int line);
// only if +VerifyFPU && (state == ftos || state == dtos)
void verify_FPU(int stack_depth, TosState state = ftos);

typedef enum { NotifyJVMTI, SkipNotifyJVMTI } NotifyMethodExitMode;

@@ -198,10 +198,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
// Debugging
void interp_verify_oop(Register reg, TosState state, const char* file, int line); // only if +VerifyOops && state == atos

void verify_FPU(int stack_depth, TosState state = ftos) {
// No VFP state verification is required for ARM
}

// Object locking
void lock_object (Register lock_reg);
void unlock_object(Register lock_reg);

@@ -265,7 +265,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
// Debugging
void verify_oop(Register reg, TosState state = atos); // only if +VerifyOops && state == atos
void verify_oop_or_return_address(Register reg, Register rtmp); // for astore
void verify_FPU(int stack_depth, TosState state = ftos);

typedef enum { NotifyJVMTI, SkipNotifyJVMTI } NotifyMethodExitMode;

@@ -2381,12 +2381,6 @@ static bool verify_return_address(Method* m, int bci) {
return false;
}

void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
if (VerifyFPU) {
unimplemented("verfiyFPU");
}
}

void InterpreterMacroAssembler::verify_oop_or_return_address(Register reg, Register Rtmp) {
if (!VerifyOops) return;

@@ -1431,8 +1431,6 @@ void InterpreterMacroAssembler::profile_switch_case(Register index,
}
}

void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { ; }

void InterpreterMacroAssembler::notify_method_entry() {
// Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
// track stack depth. If it is possible to enter interp_only_mode we add

@@ -284,10 +284,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
void profile_return_type(Register mdp, Register ret, Register tmp);
void profile_parameters_type(Register mdp, Register tmp1, Register tmp2, Register tmp3);

// Debugging
// only if +VerifyFPU && (state == ftos || state == dtos)
void verify_FPU(int stack_depth, TosState state = ftos);

typedef enum { NotifyJVMTI, SkipNotifyJVMTI } NotifyMethodExitMode;

// support for jvmti/dtrace

@@ -93,8 +93,6 @@ void InterpreterMacroAssembler::dispatch_next(TosState state, int bcp_incr, bool
// Dispatch value in Lbyte_code and increment Lbcp.

void InterpreterMacroAssembler::dispatch_base(TosState state, address* table, bool generate_poll) {
verify_FPU(1, state);

#ifdef ASSERT
address reentry = nullptr;
{ Label OK;

@@ -2189,9 +2187,3 @@ void InterpreterMacroAssembler::pop_interpreter_frame(Register return_pc, Regist
z_stg(Z_ARG3, _z_parent_ijava_frame_abi(return_pc), Z_SP);
#endif
}

void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
if (VerifyFPU) {
unimplemented("verifyFPU");
}
}

@@ -313,7 +313,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
// Debugging
void verify_oop(Register reg, TosState state = atos); // Only if +VerifyOops && state == atos.
void verify_oop_or_return_address(Register reg, Register rtmp); // for astore
void verify_FPU(int stack_depth, TosState state = ftos);

// JVMTI helpers
void skip_if_jvmti_mode(Label &Lskip, Register Rscratch = Z_R0);
@@ -53,11 +53,7 @@ void InterpreterMacroAssembler::jump_to_entry(address entry) {
void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
Label update, next, none;

#ifdef _LP64
assert_different_registers(obj, rscratch1, mdo_addr.base(), mdo_addr.index());
#else
assert_different_registers(obj, mdo_addr.base(), mdo_addr.index());
#endif

interp_verify_oop(obj, atos);

@@ -72,9 +68,7 @@ void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& md

bind(update);
load_klass(obj, obj, rscratch1);
#ifdef _LP64
mov(rscratch1, obj);
#endif

xorptr(obj, mdo_addr);
testptr(obj, TypeEntries::type_klass_mask);

@@ -89,7 +83,7 @@ void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& md
jccb(Assembler::equal, none);
cmpptr(mdo_addr, TypeEntries::null_seen);
jccb(Assembler::equal, none);
#ifdef _LP64

// There is a chance that the checks above (re-reading profiling
// data from memory) fail if another thread has just set the
// profiling to this obj's klass

@@ -97,7 +91,6 @@ void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& md
xorptr(obj, mdo_addr);
testptr(obj, TypeEntries::type_klass_mask);
jccb(Assembler::zero, next);
#endif

// different than before. Cannot keep accurate profile.
orptr(mdo_addr, TypeEntries::type_unknown);

@@ -314,7 +307,6 @@ void InterpreterMacroAssembler::call_VM_base(Register oop_result,
// really make a difference for these runtime calls, since they are
// slow anyway. Btw., bcp must be saved/restored since it may change
// due to GC.
NOT_LP64(assert(java_thread == noreg , "not expecting a precomputed java thread");)
save_bcp();
#ifdef ASSERT
{

@@ -335,7 +327,6 @@ void InterpreterMacroAssembler::call_VM_base(Register oop_result,
restore_locals();
}

#ifdef _LP64
void InterpreterMacroAssembler::call_VM_preemptable(Register oop_result,
address entry_point,
Register arg_1) {

@@ -387,13 +378,6 @@ void InterpreterMacroAssembler::restore_after_resume(bool is_native) {
push(ltos);
}
}
#else
void InterpreterMacroAssembler::call_VM_preemptable(Register oop_result,
address entry_point,
Register arg_1) {
MacroAssembler::call_VM(oop_result, entry_point, arg_1);
}
#endif // _LP64

void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) {
if (JvmtiExport::can_pop_frame()) {

@@ -404,8 +388,7 @@ void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread)
// don't want to reenter.
// This method is only called just after the call into the vm in
// call_VM_base, so the arg registers are available.
Register pop_cond = NOT_LP64(java_thread) // Not clear if any other register is available on 32 bit
LP64_ONLY(c_rarg0);
Register pop_cond = c_rarg0;
movl(pop_cond, Address(java_thread, JavaThread::popframe_condition_offset()));
testl(pop_cond, JavaThread::popframe_pending_bit);
jcc(Assembler::zero, L);

@@ -416,18 +399,15 @@ void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread)
call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));
jmp(rax);
bind(L);
NOT_LP64(get_thread(java_thread);)
}
}

void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
Register thread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
NOT_LP64(get_thread(thread);)
movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
movptr(rcx, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));
const Address tos_addr(rcx, JvmtiThreadState::earlyret_tos_offset());
const Address oop_addr(rcx, JvmtiThreadState::earlyret_oop_offset());
const Address val_addr(rcx, JvmtiThreadState::earlyret_value_offset());
#ifdef _LP64

switch (state) {
case atos: movptr(rax, oop_addr);
movptr(oop_addr, NULL_WORD);
@@ -443,41 +423,18 @@ void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
case vtos: /* nothing to do */ break;
default : ShouldNotReachHere();
}
// Clean up tos value in the thread object
movl(tos_addr, ilgl);
movl(val_addr, NULL_WORD);
#else
const Address val_addr1(rcx, JvmtiThreadState::earlyret_value_offset()
+ in_ByteSize(wordSize));
switch (state) {
case atos: movptr(rax, oop_addr);
movptr(oop_addr, NULL_WORD);
interp_verify_oop(rax, state); break;
case ltos:
movl(rdx, val_addr1); // fall through
case btos: // fall through
case ztos: // fall through
case ctos: // fall through
case stos: // fall through
case itos: movl(rax, val_addr); break;
case ftos: load_float(val_addr); break;
case dtos: load_double(val_addr); break;
case vtos: /* nothing to do */ break;
default : ShouldNotReachHere();
}
#endif // _LP64

// Clean up tos value in the thread object
movl(tos_addr, ilgl);
movptr(val_addr, NULL_WORD);
NOT_LP64(movptr(val_addr1, NULL_WORD);)
}
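A note on the removed #else branch above: on 32-bit x86 a Java long occupied two 32-bit halves (rdx:rax in registers, val_addr and val_addr1 in the JvmtiThreadState), which is why the old ltos case needed the extra movl(rdx, val_addr1) and why the cleanup also drops the NOT_LP64(movptr(val_addr1, NULL_WORD)) store. A minimal C++ illustration of the split, with hypothetical helper names:

#include <cstdint>
// low word went to rax / val_addr, high word to rdx / val_addr1
uint32_t long_lo(uint64_t v) { return static_cast<uint32_t>(v); }
uint32_t long_hi(uint64_t v) { return static_cast<uint32_t>(v >> 32); }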
void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread) {
if (JvmtiExport::can_force_early_return()) {
Label L;
Register tmp = LP64_ONLY(c_rarg0) NOT_LP64(java_thread);
Register rthread = LP64_ONLY(r15_thread) NOT_LP64(java_thread);
Register tmp = c_rarg0;
Register rthread = r15_thread;

movptr(tmp, Address(rthread, JavaThread::jvmti_thread_state_offset()));
testptr(tmp, tmp);

@@ -492,18 +449,11 @@ void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread)

// Call Interpreter::remove_activation_early_entry() to get the address of the
// same-named entrypoint in the generated interpreter code.
NOT_LP64(get_thread(java_thread);)
movptr(tmp, Address(rthread, JavaThread::jvmti_thread_state_offset()));
#ifdef _LP64
movl(tmp, Address(tmp, JvmtiThreadState::earlyret_tos_offset()));
call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), tmp);
#else
pushl(Address(tmp, JvmtiThreadState::earlyret_tos_offset()));
call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), 1);
#endif // _LP64
jmp(rax);
bind(L);
NOT_LP64(get_thread(java_thread);)
}
}

@@ -582,23 +532,6 @@ void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
}

#ifndef _LP64
void InterpreterMacroAssembler::f2ieee() {
if (IEEEPrecision) {
fstp_s(Address(rsp, 0));
fld_s(Address(rsp, 0));
}
}

void InterpreterMacroAssembler::d2ieee() {
if (IEEEPrecision) {
fstp_d(Address(rsp, 0));
fld_d(Address(rsp, 0));
}
}
#endif // _LP64

// Java Expression Stack

void InterpreterMacroAssembler::pop_ptr(Register r) {
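The deleted f2ieee()/d2ieee() helpers existed because the x87 FPU computes in 80-bit extended precision; with +IEEEPrecision the 32-bit interpreter forced results back to IEEE single/double format by storing to memory and reloading (fstp_s/fld_s). On x86_64 all float/double arithmetic goes through SSE, which rounds to IEEE format natively, so no equivalent is needed. A sketch of the same store-and-reload rounding in C++ (the volatile store stands in for the fstp/fld pair; the function name is hypothetical):

#include <cstdio>
float round_to_single(double wide) {
  volatile float f = static_cast<float>(wide); // fstp_s + fld_s equivalent
  return f;
}
int main() { std::printf("%.9g\n", round_to_single(1.0 + 1e-10)); }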
@@ -637,7 +570,6 @@ void InterpreterMacroAssembler::pop_d(XMMRegister r) {
addptr(rsp, 2 * Interpreter::stackElementSize);
}

#ifdef _LP64
void InterpreterMacroAssembler::pop_i(Register r) {
// XXX can't use pop currently, upper half non clean
movl(r, Address(rsp, 0));

@@ -688,105 +620,6 @@ void InterpreterMacroAssembler::push(TosState state) {
default : ShouldNotReachHere();
}
}
#else
void InterpreterMacroAssembler::pop_i(Register r) {
pop(r);
}

void InterpreterMacroAssembler::pop_l(Register lo, Register hi) {
pop(lo);
pop(hi);
}

void InterpreterMacroAssembler::pop_f() {
fld_s(Address(rsp, 0));
addptr(rsp, 1 * wordSize);
}

void InterpreterMacroAssembler::pop_d() {
fld_d(Address(rsp, 0));
addptr(rsp, 2 * wordSize);
}

void InterpreterMacroAssembler::pop(TosState state) {
switch (state) {
case atos: pop_ptr(rax); break;
case btos: // fall through
case ztos: // fall through
case ctos: // fall through
case stos: // fall through
case itos: pop_i(rax); break;
case ltos: pop_l(rax, rdx); break;
case ftos:
if (UseSSE >= 1) {
pop_f(xmm0);
} else {
pop_f();
}
break;
case dtos:
if (UseSSE >= 2) {
pop_d(xmm0);
} else {
pop_d();
}
break;
case vtos: /* nothing to do */ break;
default : ShouldNotReachHere();
}
interp_verify_oop(rax, state);
}

void InterpreterMacroAssembler::push_l(Register lo, Register hi) {
push(hi);
push(lo);
}

void InterpreterMacroAssembler::push_f() {
// Do not schedule for no AGI! Never write beyond rsp!
subptr(rsp, 1 * wordSize);
fstp_s(Address(rsp, 0));
}

void InterpreterMacroAssembler::push_d() {
// Do not schedule for no AGI! Never write beyond rsp!
subptr(rsp, 2 * wordSize);
fstp_d(Address(rsp, 0));
}

void InterpreterMacroAssembler::push(TosState state) {
interp_verify_oop(rax, state);
switch (state) {
case atos: push_ptr(rax); break;
case btos: // fall through
case ztos: // fall through
case ctos: // fall through
case stos: // fall through
case itos: push_i(rax); break;
case ltos: push_l(rax, rdx); break;
case ftos:
if (UseSSE >= 1) {
push_f(xmm0);
} else {
push_f();
}
break;
case dtos:
if (UseSSE >= 2) {
push_d(xmm0);
} else {
push_d();
}
break;
case vtos: /* nothing to do */ break;
default : ShouldNotReachHere();
}
}
#endif // _LP64

// Helpers for swap and dup
void InterpreterMacroAssembler::load_ptr(int n, Register val) {
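The whole #else block removed above is the x87 flavor of the expression-stack helpers: without SSE, a float could only travel through the FPU register stack, so pop_f()/push_f() had to fld/fstp through memory at rsp, while the surviving 64-bit versions simply move xmm0 to and from the stack slot. A rough C++ model of the memory traffic (hypothetical type, for illustration only):

#include <cstdint>
#include <cstring>
struct ExprStack {
  uint8_t* sp;
  void push_f(float f) {            // subptr(rsp, wordSize); fstp_s(Address(rsp, 0))
    sp -= sizeof(intptr_t);
    std::memcpy(sp, &f, sizeof(f));
  }
  float pop_f() {                   // fld_s(Address(rsp, 0)); addptr(rsp, wordSize)
    float f;
    std::memcpy(&f, sp, sizeof(f));
    sp += sizeof(intptr_t);
    return f;
  }
};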
@@ -821,9 +654,7 @@ void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register
// interp_only_mode if these events CAN be enabled.
// interp_only is an int, on little endian it is sufficient to test the byte only
// Is a cmpl faster?
LP64_ONLY(temp = r15_thread;)
NOT_LP64(get_thread(temp);)
cmpb(Address(temp, JavaThread::interp_only_mode_offset()), 0);
cmpb(Address(r15_thread, JavaThread::interp_only_mode_offset()), 0);
jccb(Assembler::zero, run_compiled_code);
jmp(Address(method, Method::interpreter_entry_offset()));
bind(run_compiled_code);

@@ -846,7 +677,6 @@ void InterpreterMacroAssembler::dispatch_base(TosState state,
address* table,
bool verifyoop,
bool generate_poll) {
verify_FPU(1, state);
if (VerifyActivationFrameSize) {
Label L;
mov(rcx, rbp);

@@ -864,7 +694,6 @@ void InterpreterMacroAssembler::dispatch_base(TosState state,
}

address* const safepoint_table = Interpreter::safept_table(state);
#ifdef _LP64
Label no_safepoint, dispatch;
if (table != safepoint_table && generate_poll) {
NOT_PRODUCT(block_comment("Thread-local Safepoint poll"));

@@ -879,27 +708,6 @@ void InterpreterMacroAssembler::dispatch_base(TosState state,
lea(rscratch1, ExternalAddress((address)table));
bind(dispatch);
jmp(Address(rscratch1, rbx, Address::times_8));

#else
Address index(noreg, rbx, Address::times_ptr);
if (table != safepoint_table && generate_poll) {
NOT_PRODUCT(block_comment("Thread-local Safepoint poll"));
Label no_safepoint;
const Register thread = rcx;
get_thread(thread);
testb(Address(thread, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit());

jccb(Assembler::zero, no_safepoint);
ArrayAddress dispatch_addr(ExternalAddress((address)safepoint_table), index);
jump(dispatch_addr, noreg);
bind(no_safepoint);
}

{
ArrayAddress dispatch_addr(ExternalAddress((address)table), index);
jump(dispatch_addr, noreg);
}
#endif // _LP64
}

void InterpreterMacroAssembler::dispatch_only(TosState state, bool generate_poll) {

@@ -951,24 +759,19 @@ void InterpreterMacroAssembler::narrow(Register result) {
bind(notBool);
cmpl(rcx, T_BYTE);
jcc(Assembler::notEqual, notByte);
LP64_ONLY(movsbl(result, result);)
NOT_LP64(shll(result, 24);) // truncate upper 24 bits
NOT_LP64(sarl(result, 24);) // and sign-extend byte
movsbl(result, result);
jmp(done);

bind(notByte);
cmpl(rcx, T_CHAR);
jcc(Assembler::notEqual, notChar);
LP64_ONLY(movzwl(result, result);)
NOT_LP64(andl(result, 0xFFFF);) // truncate upper 16 bits
movzwl(result, result);
jmp(done);

bind(notChar);
// cmpl(rcx, T_SHORT); // all that's left
// jcc(Assembler::notEqual, done);
LP64_ONLY(movswl(result, result);)
NOT_LP64(shll(result, 16);) // truncate upper 16 bits
NOT_LP64(sarl(result, 16);) // and sign-extend short
movswl(result, result);

// Nothing to do for T_INT
bind(done);
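In narrow() above, the removed NOT_LP64 shift pairs and the surviving movsbl/movswl are equivalent sign extensions: shifting left and then arithmetically right by 24 (or 16) bits truncates and sign-extends in two instructions, which movsbl/movswl do in one. Checkable in C++ (function names are illustrative; assumes the usual arithmetic right shift on signed values):

#include <cassert>
#include <cstdint>
int32_t sext_byte_shifts(int32_t x) {   // shll 24 ; sarl 24
  return static_cast<int32_t>(static_cast<uint32_t>(x) << 24) >> 24;
}
int32_t sext_byte_movsbl(int32_t x) {   // movsbl
  return static_cast<int8_t>(x);
}
int main() {
  for (int v = -70000; v <= 70000; v++) assert(sext_byte_shifts(v) == sext_byte_movsbl(v));
}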
@@ -998,12 +801,9 @@ void InterpreterMacroAssembler::remove_activation(
// result check if synchronized method
Label unlocked, unlock, no_unlock;

const Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
const Register robj = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
const Register rmon = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
// monitor pointers need different register
// because rdx may have the result in it
NOT_LP64(get_thread(rthread);)
const Register rthread = r15_thread;
const Register robj = c_rarg1;
const Register rmon = c_rarg1;

// The below poll is for the stack watermark barrier. It allows fixing up frames lazily,
// that would normally not be safe to use. Such bad returns into unsafe territory of

@@ -1016,7 +816,6 @@ void InterpreterMacroAssembler::remove_activation(
push(state);
set_last_Java_frame(rthread, noreg, rbp, (address)pc(), rscratch1);
super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::at_unwind), rthread);
NOT_LP64(get_thread(rthread);) // call_VM clobbered it, restore
reset_last_Java_frame(rthread, true);
pop(state);
bind(fast_path);

@@ -1057,7 +856,6 @@ void InterpreterMacroAssembler::remove_activation(
pop(state);
if (throw_monitor_exception) {
// Entry already unlocked, need to throw exception
NOT_LP64(empty_FPU_stack();) // remove possible return value from FPU-stack, otherwise stack could overflow
call_VM(noreg, CAST_FROM_FN_PTR(address,
InterpreterRuntime::throw_illegal_monitor_state_exception));
should_not_reach_here();

@@ -1066,7 +864,6 @@ void InterpreterMacroAssembler::remove_activation(
// install an illegal_monitor_state_exception. Continue with
// stack unrolling.
if (install_monitor_exception) {
NOT_LP64(empty_FPU_stack();)
call_VM(noreg, CAST_FROM_FN_PTR(address,
InterpreterRuntime::new_illegal_monitor_state_exception));
}

@@ -1108,7 +905,6 @@ void InterpreterMacroAssembler::remove_activation(

if (throw_monitor_exception) {
// Throw exception
NOT_LP64(empty_FPU_stack();)
MacroAssembler::call_VM(noreg,
CAST_FROM_FN_PTR(address, InterpreterRuntime::
throw_illegal_monitor_state_exception));

@@ -1124,7 +920,6 @@ void InterpreterMacroAssembler::remove_activation(
pop(state);

if (install_monitor_exception) {
NOT_LP64(empty_FPU_stack();)
call_VM(noreg, CAST_FROM_FN_PTR(address,
InterpreterRuntime::
new_illegal_monitor_state_exception));

@@ -1159,11 +954,9 @@ void InterpreterMacroAssembler::remove_activation(
Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
if (StackReservedPages > 0) {
// testing if reserved zone needs to be re-enabled
Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
Register rthread = r15_thread;
Label no_reserved_zone_enabling;

NOT_LP64(get_thread(rthread);)

// check if already enabled - if so no re-enabling needed
assert(sizeof(StackOverflow::StackGuardState) == 4, "unexpected size");
cmpl(Address(rthread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_enabled);

@@ -1209,8 +1002,7 @@ void InterpreterMacroAssembler::get_method_counters(Register method,
// Kills:
// rax, rbx
void InterpreterMacroAssembler::lock_object(Register lock_reg) {
assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx),
"The argument is only for looks. It must be c_rarg1");
assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");

if (LockingMode == LM_MONITOR) {
call_VM_preemptable(noreg,

@@ -1221,7 +1013,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {

const Register swap_reg = rax; // Must use rax for cmpxchg instruction
const Register tmp_reg = rbx;
const Register obj_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // Will contain the oop
const Register obj_reg = c_rarg3; // Will contain the oop
const Register rklass_decode_tmp = rscratch1;

const int obj_offset = in_bytes(BasicObjectLock::obj_offset());

@@ -1239,13 +1031,8 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
}

if (LockingMode == LM_LIGHTWEIGHT) {
#ifdef _LP64
const Register thread = r15_thread;
lightweight_lock(lock_reg, obj_reg, swap_reg, thread, tmp_reg, slow_case);
#else
// Lacking registers and thread on x86_32. Always take slow path.
jmp(slow_case);
#endif
} else if (LockingMode == LM_LEGACY) {
// Load immediate 1 into swap_reg %rax
movl(swap_reg, 1);

@@ -1263,7 +1050,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
jcc(Assembler::zero, count_locking);

const int zero_bits = LP64_ONLY(7) NOT_LP64(3);
const int zero_bits = 7;

// Fast check for recursive lock.
//
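zero_bits feeds the LM_LEGACY recursive-lock fast check that follows this hunk: after a failed cmpxchg, swap_reg holds the mark word, and the code tests whether it points into the current thread's stack (within one page of rsp, ignoring word-alignment bits). 7 masks 8-byte alignment on 64-bit; the removed NOT_LP64(3) masked 4-byte alignment on 32-bit. A rough C++ model of that test (simplified; the function name is hypothetical and the page size is assumed to be 4096):

#include <cstdint>
bool looks_like_recursive_enter(uintptr_t mark, uintptr_t sp) {
  const uintptr_t zero_bits = 7;  // was 3 on the removed 32-bit port
  const uintptr_t page = 4096;    // stands in for os::vm_page_size()
  // zero iff mark lies within one page above sp with alignment bits clear
  return ((mark - sp) & (zero_bits - page)) == 0;
}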
@@ -1328,8 +1115,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
// rscratch1 (scratch reg)
// rax, rbx, rcx, rdx
void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
assert(lock_reg == LP64_ONLY(c_rarg1) NOT_LP64(rdx),
"The argument is only for looks. It must be c_rarg1");
assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");

if (LockingMode == LM_MONITOR) {
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);

@@ -1337,8 +1123,8 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
Label count_locking, done, slow_case;

const Register swap_reg = rax; // Must use rax for cmpxchg instruction
const Register header_reg = LP64_ONLY(c_rarg2) NOT_LP64(rbx); // Will contain the old oopMark
const Register obj_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // Will contain the oop
const Register header_reg = c_rarg2; // Will contain the old oopMark
const Register obj_reg = c_rarg3; // Will contain the oop

save_bcp(); // Save in case of exception

@@ -1355,12 +1141,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
movptr(Address(lock_reg, BasicObjectLock::obj_offset()), NULL_WORD);

if (LockingMode == LM_LIGHTWEIGHT) {
#ifdef _LP64
lightweight_unlock(obj_reg, swap_reg, r15_thread, header_reg, slow_case);
#else
// Lacking registers and thread on x86_32. Always take slow path.
jmp(slow_case);
#endif
} else if (LockingMode == LM_LEGACY) {
// Load the old header from BasicLock structure
movptr(header_reg, Address(swap_reg,

@@ -1436,8 +1217,8 @@ void InterpreterMacroAssembler::verify_method_data_pointer() {
Label verify_continue;
push(rax);
push(rbx);
Register arg3_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
Register arg2_reg = LP64_ONLY(c_rarg2) NOT_LP64(rdx);
Register arg3_reg = c_rarg3;
Register arg2_reg = c_rarg2;
push(arg3_reg);
push(arg2_reg);
test_method_data_pointer(arg3_reg, verify_continue); // If mdp is zero, continue

@@ -1895,8 +1676,6 @@ void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass,

// Record the object type.
record_klass_in_profile(klass, mdp, reg2, false);
NOT_LP64(assert(reg2 == rdi, "we know how to fix this blown reg");)
NOT_LP64(restore_locals();) // Restore EDI
}
update_mdp_by_constant(mdp, mdp_delta);

@@ -1964,14 +1743,6 @@ void InterpreterMacroAssembler::_interp_verify_oop(Register reg, TosState state,
}
}

void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
#ifndef _LP64
if ((state == ftos && UseSSE < 1) ||
(state == dtos && UseSSE < 2)) {
MacroAssembler::verify_FPU(stack_depth);
}
#endif
}

// Jump if ((*counter_addr += increment) & mask) == 0
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr, Address mask,
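Why verify_FPU could be deleted here: its x86 body sat entirely inside #ifndef _LP64, because only the 32-bit port could keep ftos/dtos values on the x87 stack (when UseSSE < 1 or < 2). x86_64 guarantees SSE2, so floats and doubles always travel in XMM registers, the function was already a no-op on 64-bit, and the call sites and header declarations go away with it.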
@@ -1992,11 +1763,10 @@ void InterpreterMacroAssembler::notify_method_entry() {
// Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
// track stack depth. If it is possible to enter interp_only_mode we add
// the code to check if the event should be sent.
Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rbx);
Register rthread = r15_thread;
Register rarg = c_rarg1;
if (JvmtiExport::can_post_interpreter_events()) {
Label L;
NOT_LP64(get_thread(rthread);)
movl(rdx, Address(rthread, JavaThread::interp_only_mode_offset()));
testl(rdx, rdx);
jcc(Assembler::zero, L);

@@ -2006,7 +1776,6 @@ void InterpreterMacroAssembler::notify_method_entry() {
}

if (DTraceMethodProbes) {
NOT_LP64(get_thread(rthread);)
get_method(rarg);
call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
rthread, rarg);

@@ -2014,7 +1783,6 @@ void InterpreterMacroAssembler::notify_method_entry() {

// RedefineClasses() tracing support for obsolete method entry
if (log_is_enabled(Trace, redefine, class, obsolete)) {
NOT_LP64(get_thread(rthread);)
get_method(rarg);
call_VM_leaf(
CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),

@@ -2028,8 +1796,8 @@ void InterpreterMacroAssembler::notify_method_exit(
// Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
// track stack depth. If it is possible to enter interp_only_mode we add
// the code to check if the event should be sent.
Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rbx);
Register rthread = r15_thread;
Register rarg = c_rarg1;
if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
Label L;
// Note: frame::interpreter_frame_result has a dependency on how the

@@ -2039,7 +1807,6 @@ void InterpreterMacroAssembler::notify_method_exit(

// template interpreter will leave the result on the top of the stack.
push(state);
NOT_LP64(get_thread(rthread);)
movl(rdx, Address(rthread, JavaThread::interp_only_mode_offset()));
testl(rdx, rdx);
jcc(Assembler::zero, L);

@@ -2051,7 +1818,6 @@ void InterpreterMacroAssembler::notify_method_exit(

if (DTraceMethodProbes) {
push(state);
NOT_LP64(get_thread(rthread);)
get_method(rarg);
call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
rthread, rarg);

@@ -53,8 +53,8 @@ class InterpreterMacroAssembler: public MacroAssembler {

public:
InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code),
_locals_register(LP64_ONLY(r14) NOT_LP64(rdi)),
_bcp_register(LP64_ONLY(r13) NOT_LP64(rsi)) {}
_locals_register(r14),
_bcp_register(r13) {}

void jump_to_entry(address entry);

@@ -121,9 +121,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
Register cpool, // the constant pool (corrupted on return)
Register index); // the constant pool index (corrupted on return)

NOT_LP64(void f2ieee();) // truncate ftos to 32bits
NOT_LP64(void d2ieee();) // truncate dtos to 64bits

// Expression stack
void pop_ptr(Register r = rax);
void pop_i(Register r = rax);

@@ -143,18 +140,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
void pop_f(XMMRegister r);
void pop_d(XMMRegister r);
void push_d(XMMRegister r);
#ifdef _LP64
void pop_l(Register r = rax);
void push_l(Register r = rax);
#else
void pop_l(Register lo = rax, Register hi = rdx);
void pop_f();
void pop_d();

void push_l(Register lo = rax, Register hi = rdx);
void push_d();
void push_f();
#endif // _LP64

void pop(Register r) { ((MacroAssembler*)this)->pop(r); }
void push(Register r) { ((MacroAssembler*)this)->push(r); }

@@ -168,7 +155,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
lea(rsp, Address(rbp, rcx, Address::times_ptr));
// null last_sp until next java call
movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
NOT_LP64(empty_FPU_stack());
}

// Helpers for swap and dup

@@ -273,8 +259,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
// only if +VerifyOops && state == atos
#define interp_verify_oop(reg, state) _interp_verify_oop(reg, state, __FILE__, __LINE__);
void _interp_verify_oop(Register reg, TosState state, const char* file, int line);
// only if +VerifyFPU && (state == ftos || state == dtos)
void verify_FPU(int stack_depth, TosState state = ftos);

typedef enum { NotifyJVMTI, SkipNotifyJVMTI } NotifyMethodExitMode;

@@ -33,7 +33,6 @@
class SignatureHandlerGenerator: public NativeSignatureIterator {
private:
MacroAssembler* _masm;
#ifdef AMD64
#ifdef _WIN64
unsigned int _num_args;
#else

@@ -41,17 +40,11 @@ class SignatureHandlerGenerator: public NativeSignatureIterator {
unsigned int _num_int_args;
#endif // _WIN64
int _stack_offset;
#else
void move(int from_offset, int to_offset);
void box(int from_offset, int to_offset);
#endif // AMD64

void pass_int();
void pass_long();
void pass_float();
#ifdef AMD64
void pass_double();
#endif // AMD64
void pass_object();

public:
@@ -63,15 +63,11 @@
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
#ifdef AMD64
int TemplateInterpreter::InterpreterCodeSize = JVMCI_ONLY(268) NOT_JVMCI(256) * 1024;
#else
int TemplateInterpreter::InterpreterCodeSize = 224 * 1024;
#endif // AMD64

// Global Register Names
static const Register rbcp = LP64_ONLY(r13) NOT_LP64(rsi);
static const Register rlocals = LP64_ONLY(r14) NOT_LP64(rdi);
static const Register rbcp = r13;
static const Register rlocals = r14;

const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bcp_offset = frame::interpreter_frame_bcp_offset * wordSize;

@@ -120,12 +116,11 @@ address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler() {
// Setup parameters.
// ??? convention: expect aberrant index in register ebx/rbx.
// Pass array to create more detailed exceptions.
Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
__ call_VM(noreg,
CAST_FROM_FN_PTR(address,
InterpreterRuntime::
throw_ArrayIndexOutOfBoundsException),
rarg, rbx);
c_rarg1, rbx);
return entry;
}

@@ -133,8 +128,7 @@ address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
address entry = __ pc();

// object is at TOS
Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
__ pop(rarg);
__ pop(c_rarg1);

// expression stack must be empty before entering the VM if an
// exception happened

@@ -144,7 +138,7 @@ address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
CAST_FROM_FN_PTR(address,
InterpreterRuntime::
throw_ClassCastException),
rarg);
c_rarg1);
return entry;
}

@@ -153,28 +147,25 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(
assert(!pass_oop || message == nullptr, "either oop or message but not both");
address entry = __ pc();

Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
Register rarg2 = NOT_LP64(rbx) LP64_ONLY(c_rarg2);

if (pass_oop) {
// object is at TOS
__ pop(rarg2);
__ pop(c_rarg2);
}
// expression stack must be empty before entering the VM if an
// exception happened
__ empty_expression_stack();
// setup parameters
__ lea(rarg, ExternalAddress((address)name));
__ lea(c_rarg1, ExternalAddress((address)name));
if (pass_oop) {
__ call_VM(rax, CAST_FROM_FN_PTR(address,
InterpreterRuntime::
create_klass_exception),
rarg, rarg2);
c_rarg1, c_rarg2);
} else {
__ lea(rarg2, ExternalAddress((address)message));
__ lea(c_rarg2, ExternalAddress((address)message));
__ call_VM(rax,
CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
rarg, rarg2);
c_rarg1, c_rarg2);
}
// throw exception
__ jump(RuntimeAddress(Interpreter::throw_exception_entry()));

@@ -184,30 +175,6 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
address entry = __ pc();

#ifndef _LP64
#ifdef COMPILER2
// The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
for (int i = 1; i < 8; i++) {
__ ffree(i);
}
} else if (UseSSE < 2) {
__ empty_FPU_stack();
}
#endif // COMPILER2
if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
__ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
} else {
__ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
}

if (state == ftos) {
__ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_return_entry_for in interpreter");
} else if (state == dtos) {
__ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_return_entry_for in interpreter");
}
#endif // _LP64

// Restore stack bottom in case i2c adjusted stack
__ movptr(rcx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
__ lea(rsp, Address(rbp, rcx, Address::times_ptr));

@@ -236,14 +203,11 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ lea(rsp, Address(rsp, cache, Interpreter::stackElementScale()));
}

const Register java_thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
if (JvmtiExport::can_pop_frame()) {
NOT_LP64(__ get_thread(java_thread));
__ check_and_handle_popframe(java_thread);
__ check_and_handle_popframe(r15_thread);
}
if (JvmtiExport::can_force_early_return()) {
NOT_LP64(__ get_thread(java_thread));
__ check_and_handle_earlyret(java_thread);
__ check_and_handle_earlyret(r15_thread);
}

__ dispatch_next(state, step);

@@ -255,20 +219,11 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step, address continuation) {
address entry = __ pc();

#ifndef _LP64
if (state == ftos) {
__ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_deopt_entry_for in interpreter");
} else if (state == dtos) {
__ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_deopt_entry_for in interpreter");
}
#endif // _LP64

// null last_sp until next java call
__ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
__ restore_bcp();
__ restore_locals();
const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
NOT_LP64(__ get_thread(thread));
const Register thread = r15_thread;
#if INCLUDE_JVMCI
// Check if we need to take lock at entry of synchronized method. This can
// only occur on method entry so emit it only for vtos with step 0.

@@ -319,46 +274,14 @@ address TemplateInterpreterGenerator::generate_result_handler_for(
address entry = __ pc();
switch (type) {
case T_BOOLEAN: __ c2bool(rax); break;
#ifndef _LP64
case T_CHAR : __ andptr(rax, 0xFFFF); break;
#else
case T_CHAR : __ movzwl(rax, rax); break;
#endif // _LP64
case T_BYTE : __ sign_extend_byte(rax); break;
case T_SHORT : __ sign_extend_short(rax); break;
case T_INT : /* nothing to do */ break;
case T_LONG : /* nothing to do */ break;
case T_VOID : /* nothing to do */ break;
#ifndef _LP64
case T_DOUBLE :
case T_FLOAT :
{ const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
__ pop(t); // remove return address first
// Must return a result for interpreter or compiler. In SSE
// mode, results are returned in xmm0 and the FPU stack must
// be empty.
if (type == T_FLOAT && UseSSE >= 1) {
// Load ST0
__ fld_d(Address(rsp, 0));
// Store as float and empty fpu stack
__ fstp_s(Address(rsp, 0));
// and reload
__ movflt(xmm0, Address(rsp, 0));
} else if (type == T_DOUBLE && UseSSE >= 2 ) {
__ movdbl(xmm0, Address(rsp, 0));
} else {
// restore ST0
__ fld_d(Address(rsp, 0));
}
// and pop the temp
__ addptr(rsp, 2 * wordSize);
__ push(t); // restore return address
}
break;
#else
case T_FLOAT : /* nothing to do */ break;
case T_DOUBLE : /* nothing to do */ break;
#endif // _LP64

case T_OBJECT :
// retrieve result from frame
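The #ifndef _LP64 block removed from generate_result_handler_for handled the 32-bit C calling convention, in which float and double results come back from native code in the x87 register ST(0): the code spilled ST(0) to the stack and, when UseSSE was enabled, reloaded it into xmm0. The x86_64 ABI returns floating-point values in xmm0 directly, which is why T_FLOAT and T_DOUBLE are "nothing to do" cases in the surviving code.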
@@ -467,12 +390,11 @@ void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue)
// of the verified entry point for the method or null if the
// compilation did not complete (either went background or bailed
// out).
Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
__ movl(rarg, 0);
__ movl(c_rarg1, 0);
__ call_VM(noreg,
CAST_FROM_FN_PTR(address,
InterpreterRuntime::frequency_counter_overflow),
rarg);
c_rarg1);

__ movptr(rbx, Address(rbp, method_offset)); // restore Method*
// Preserve invariant that r13/r14 contain bcp/locals of sender frame

@@ -523,13 +445,8 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
// the stack before the red zone

Label after_frame_check_pop;
const Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
#ifndef _LP64
__ push(thread);
__ get_thread(thread);
#endif

const Address stack_limit(thread, JavaThread::stack_overflow_limit_offset());
const Address stack_limit(r15_thread, JavaThread::stack_overflow_limit_offset());

// locals + overhead, in bytes
__ mov(rax, rdx);

@@ -552,7 +469,6 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
__ cmpptr(rsp, rax);

__ jcc(Assembler::above, after_frame_check_pop);
NOT_LP64(__ pop(rsi)); // get saved bcp

// Restore sender's sp as SP. This is necessary if the sender's
// frame is an extended compiled frame (see gen_c2i_adapter())

@@ -568,7 +484,6 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
__ jump(RuntimeAddress(SharedRuntime::throw_StackOverflowError_entry()));
// all done with frame size check
__ bind(after_frame_check_pop);
NOT_LP64(__ pop(rsi));

// all done with frame size check
__ bind(after_frame_check);

@@ -631,9 +546,8 @@ void TemplateInterpreterGenerator::lock_method() {
__ subptr(monitor_block_top, entry_size / wordSize); // set new monitor block top
// store object
__ movptr(Address(rsp, BasicObjectLock::obj_offset()), rax);
const Register lockreg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
__ movptr(lockreg, rsp); // object address
__ lock_object(lockreg);
__ movptr(c_rarg1, rsp); // object address
__ lock_object(c_rarg1);
}

// Generate a fixed interpreter frame. This is identical setup for

@@ -728,19 +642,13 @@ address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
// rdx: scratch
// rdi: scratch

// Preserve the sender sp in case the load barrier
// calls the runtime
NOT_LP64(__ push(rsi));

// Load the value of the referent field.
const Address field_address(rax, referent_offset);
__ load_heap_oop(rax, field_address, /*tmp1*/ rbx, /*tmp_thread*/ rdx, ON_WEAK_OOP_REF);

// _areturn
const Register sender_sp = NOT_LP64(rsi) LP64_ONLY(r13);
NOT_LP64(__ pop(rsi)); // get sender sp
__ pop(rdi); // get return address
__ mov(rsp, sender_sp); // set sp to sender sp
__ mov(rsp, r13); // set sp to sender sp
__ jmp(rdi);
__ ret(0);

@@ -764,11 +672,7 @@ void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
const int page_size = (int)os::vm_page_size();
const int n_shadow_pages = shadow_zone_size / page_size;

const Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
#ifndef _LP64
__ push(thread);
__ get_thread(thread);
#endif
const Register thread = r15_thread;

#ifdef ASSERT
Label L_good_limit;

@@ -800,10 +704,6 @@ void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
__ movptr(Address(thread, JavaThread::shadow_zone_growth_watermark()), rsp);

__ bind(L_done);

#ifndef _LP64
__ pop(thread);
#endif
}

// Interpreter stub for calling a native method. (asm interpreter)

@@ -877,9 +777,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
// _do_not_unlock_if_synchronized to true. The remove_activation will
// check this flag.

const Register thread1 = NOT_LP64(rax) LP64_ONLY(r15_thread);
NOT_LP64(__ get_thread(thread1));
const Address do_not_unlock_if_synchronized(thread1,
const Address do_not_unlock_if_synchronized(r15_thread,
in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
__ movbool(do_not_unlock_if_synchronized, true);

@@ -895,7 +793,6 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
bang_stack_shadow_pages(true);

// reset the _do_not_unlock_if_synchronized flag
NOT_LP64(__ get_thread(thread1));
__ movbool(do_not_unlock_if_synchronized, false);

// check for synchronized methods

@@ -937,26 +834,19 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {

// work registers
const Register method = rbx;
const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
const Register t = NOT_LP64(rcx) LP64_ONLY(r11);
const Register thread = r15_thread;
const Register t = r11;

// allocate space for parameters
__ get_method(method);
__ movptr(t, Address(method, Method::const_offset()));
__ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));

#ifndef _LP64
__ shlptr(t, Interpreter::logStackElementSize); // Convert parameter count to bytes.
__ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror
__ subptr(rsp, t);
__ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
#else
__ shll(t, Interpreter::logStackElementSize);

__ subptr(rsp, t);
__ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
__ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
#endif // _LP64

// get signature handler
{

@@ -978,7 +868,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
"adjust this code");
assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp,
"adjust this code");
assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == NOT_LP64(t) LP64_ONLY(rscratch1),
assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == rscratch1,
"adjust this code");

// The generated handlers do not touch RBX (the method).

@@ -1007,13 +897,8 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
__ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
t);
// pass handle to mirror
#ifndef _LP64
__ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
__ movptr(Address(rsp, wordSize), t);
#else
__ lea(c_rarg1,
Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
#endif // _LP64
__ bind(L);
}

@@ -1034,16 +919,6 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
}

// pass JNIEnv
#ifndef _LP64
__ get_thread(thread);
__ lea(t, Address(thread, JavaThread::jni_environment_offset()));
__ movptr(Address(rsp, 0), t);

// set_last_Java_frame_before_call
// It is enough that the pc()
// points into the right code segment. It does not have to be the correct return pc.
__ set_last_Java_frame(thread, noreg, rbp, __ pc(), noreg);
#else
__ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));

// It is enough that the pc() points into the right code

@@ -1052,7 +927,6 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
// case of preemption on Object.wait.
Label native_return;
__ set_last_Java_frame(rsp, rbp, native_return, rscratch1);
#endif // _LP64

// change thread state
#ifdef ASSERT
@ -1088,39 +962,10 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
// pushes change or anything else is added to the stack then the code in
|
||||
// interpreter_frame_result must also change.
|
||||
|
||||
#ifndef _LP64
|
||||
// save potential result in ST(0) & rdx:rax
|
||||
// (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -
|
||||
// the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers)
|
||||
// It is safe to do this push because state is _thread_in_native and return address will be found
|
||||
// via _last_native_pc and not via _last_jave_sp
|
||||
|
||||
// NOTE: the order of these push(es) is known to frame::interpreter_frame_result.
|
||||
// If the order changes or anything else is added to the stack the code in
|
||||
// interpreter_frame_result will have to be changed.
|
||||
|
||||
{ Label L;
|
||||
Label push_double;
|
||||
ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
|
||||
ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
|
||||
__ cmpptr(Address(rbp, (frame::interpreter_frame_result_handler_offset)*wordSize),
|
||||
float_handler.addr(), noreg);
|
||||
__ jcc(Assembler::equal, push_double);
|
||||
__ cmpptr(Address(rbp, (frame::interpreter_frame_result_handler_offset)*wordSize),
|
||||
double_handler.addr(), noreg);
|
||||
__ jcc(Assembler::notEqual, L);
|
||||
__ bind(push_double);
|
||||
__ push_d(); // FP values are returned using the FPU, so push FPU contents (even if UseSSE > 0).
|
||||
__ bind(L);
|
||||
}
|
||||
#else
|
||||
__ push(dtos);
|
||||
#endif // _LP64
|
||||
|
||||
__ push(ltos);
|
||||
|
||||
// change thread state
|
||||
NOT_LP64(__ get_thread(thread));
|
||||
__ movl(Address(thread, JavaThread::thread_state_offset()),
|
||||
_thread_in_native_trans);
|
||||
|
||||
@ -1130,12 +975,6 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
Assembler::LoadLoad | Assembler::LoadStore |
|
||||
Assembler::StoreLoad | Assembler::StoreStore));
|
||||
}
|
||||
#ifndef _LP64
|
||||
if (AlwaysRestoreFPU) {
|
||||
// Make sure the control word is correct.
|
||||
__ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std()));
|
||||
}
|
||||
#endif // _LP64
|
||||
|
||||
// check for safepoint operation in progress and/or pending suspend requests
|
||||
{
|
||||
@ -1155,13 +994,6 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
// preserved and correspond to the bcp/locals pointers. So we do a
|
||||
// runtime call by hand.
|
||||
//
|
||||
#ifndef _LP64
|
||||
__ push(thread);
|
||||
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
|
||||
JavaThread::check_special_condition_for_native_trans)));
|
||||
__ increment(rsp, wordSize);
|
||||
__ get_thread(thread);
|
||||
#else
|
||||
__ mov(c_rarg0, r15_thread);
|
||||
__ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
|
||||
__ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
|
||||
@ -1169,14 +1001,12 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
|
||||
__ mov(rsp, r12); // restore sp
|
||||
__ reinit_heapbase();
|
||||
#endif // _LP64
|
||||
__ bind(Continue);
|
||||
}
|
||||
|
||||
// change thread state
|
||||
__ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);
|
||||
|
||||
#ifdef _LP64
|
||||
if (LockingMode != LM_LEGACY) {
|
||||
// Check preemption for Object.wait()
|
||||
Label not_preempted;
|
||||
@ -1192,7 +1022,6 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
// any pc will do so just use this one for LM_LEGACY to keep code together.
|
||||
__ bind(native_return);
|
||||
}
|
||||
#endif // _LP64
|
||||
|
||||
// reset_last_Java_frame
|
||||
__ reset_last_Java_frame(thread, true);
|
||||
@ -1234,10 +1063,6 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
__ jcc(Assembler::notEqual, no_reguard);
|
||||
|
||||
__ pusha(); // XXX only save smashed registers
|
||||
#ifndef _LP64
|
||||
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
|
||||
__ popa();
|
||||
#else
|
||||
__ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
|
||||
__ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
|
||||
__ andptr(rsp, -16); // align stack as required by ABI
|
||||
@ -1245,7 +1070,6 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
__ mov(rsp, r12); // restore sp
|
||||
__ popa(); // XXX only restore smashed registers
|
||||
__ reinit_heapbase();
|
||||
#endif // _LP64
|
||||
|
||||
__ bind(no_reguard);
|
||||
}
|
||||
@ -1293,7 +1117,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
(intptr_t)(frame::interpreter_frame_initial_sp_offset *
|
||||
wordSize - (int)sizeof(BasicObjectLock)));
|
||||
|
||||
const Register regmon = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
|
||||
const Register regmon = c_rarg1;
|
||||
|
||||
// monitor expect in c_rarg1 for slow unlock path
|
||||
__ lea(regmon, monitor); // address of first monitor
|
||||
@ -1325,7 +1149,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
// restore potential result in ST0 & handle result
|
||||
|
||||
__ pop(ltos);
|
||||
LP64_ONLY( __ pop(dtos));
|
||||
__ pop(dtos);
|
||||
|
||||
__ movptr(t, Address(rbp,
|
||||
(frame::interpreter_frame_result_handler_offset) * wordSize));
|
||||
@ -1454,9 +1278,7 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
|
||||
// _do_not_unlock_if_synchronized to true. The remove_activation
|
||||
// will check this flag.
|
||||
|
||||
const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
|
||||
NOT_LP64(__ get_thread(thread));
|
||||
const Address do_not_unlock_if_synchronized(thread,
|
||||
const Address do_not_unlock_if_synchronized(r15_thread,
|
||||
in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
|
||||
__ movbool(do_not_unlock_if_synchronized, true);
|
||||
|
||||
@ -1474,7 +1296,6 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
|
||||
bang_stack_shadow_pages(false);
|
||||
|
||||
// reset the _do_not_unlock_if_synchronized flag
|
||||
NOT_LP64(__ get_thread(thread));
|
||||
__ movbool(do_not_unlock_if_synchronized, false);
|
||||
|
||||
// check for synchronized methods
|
||||
@ -1541,15 +1362,14 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// rdx: return address/pc that threw exception
__ restore_bcp(); // r13/rsi points to call/send
__ restore_locals();
LP64_ONLY(__ reinit_heapbase()); // restore r12 as heapbase.
__ reinit_heapbase(); // restore r12 as heapbase.
// Entry point for exceptions thrown within interpreter code
Interpreter::_throw_exception_entry = __ pc();
// expression stack is undefined here
// rax: exception
// r13/rsi: exception bcp
__ verify_oop(rax);
Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
LP64_ONLY(__ mov(c_rarg1, rax));
__ mov(c_rarg1, rax);

// expression stack must be empty before entering the VM in case of
// an exception
@ -1558,7 +1378,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
__ call_VM(rdx,
CAST_FROM_FN_PTR(address,
InterpreterRuntime::exception_handler_for_exception),
rarg);
c_rarg1);
// rax: exception handler entry point
// rdx: preserved exception oop
// r13/rsi: bcp for exception handler
@ -1588,8 +1408,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// indicating that we are currently handling popframe, so that
// call_VMs that may happen later do not trigger new popframe
// handling cycles.
const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
NOT_LP64(__ get_thread(thread));
const Register thread = r15_thread;
__ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
__ orl(rdx, JavaThread::popframe_processing_bit);
__ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx);
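
The three instructions above are a read-modify-write of the JVMTI popframe condition word. The same bit manipulation in C++ (bit values are placeholders, not HotSpot's constants):

    #include <cstdint>

    enum : uint32_t {
      popframe_inactive       = 0,
      popframe_pending_bit    = 1u << 0,  // placeholder values
      popframe_processing_bit = 1u << 1,
    };

    // Equivalent of the movl / orl / movl triple above.
    inline void mark_popframe_processing(uint32_t& condition) {
      condition |= popframe_processing_bit;
    }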
@ -1606,10 +1425,9 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// deoptimization blob's unpack entry because of the presence of
// adapter frames in C2.
Label caller_not_deoptimized;
Register rarg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
__ movptr(rarg, Address(rbp, frame::return_addr_offset * wordSize));
__ movptr(c_rarg1, Address(rbp, frame::return_addr_offset * wordSize));
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
InterpreterRuntime::interpreter_contains), rarg);
InterpreterRuntime::interpreter_contains), c_rarg1);
__ testl(rax, rax);
__ jcc(Assembler::notZero, caller_not_deoptimized);

@ -1624,7 +1442,6 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
__ subptr(rlocals, rax);
__ addptr(rlocals, wordSize);
// Save these arguments
NOT_LP64(__ get_thread(thread));
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
Deoptimization::
popframe_preserve_args),
@ -1637,7 +1454,6 @@ void TemplateInterpreterGenerator::generate_throw_exception() {

// Inform deoptimization that it is responsible for restoring
// these arguments
NOT_LP64(__ get_thread(thread));
__ movl(Address(thread, JavaThread::popframe_condition_offset()),
JavaThread::popframe_force_deopt_reexecution_bit);

@ -1663,23 +1479,12 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// maintain this kind of invariant all the time we call a small
// fixup routine to move the mutated arguments onto the top of our
// expression stack if necessary.
#ifndef _LP64
__ mov(rax, rsp);
__ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
__ lea(rbx, Address(rbp, rbx, Address::times_ptr));
__ get_thread(thread);
// PC must point into interpreter here
__ set_last_Java_frame(thread, noreg, rbp, __ pc(), noreg);
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
__ get_thread(thread);
#else
__ mov(c_rarg1, rsp);
__ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
__ lea(c_rarg2, Address(rbp, c_rarg2, Address::times_ptr));
// PC must point into interpreter here
__ set_last_Java_frame(noreg, rbp, __ pc(), rscratch1);
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
#endif
__ reset_last_Java_frame(thread, true);

// Restore the last_sp and null it out
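
The super_call_VM_leaf above hands the runtime the current rsp and the frame's last_sp so it can copy mutated outgoing arguments back onto the top of the expression stack. Roughly, that amounts to a bounded copy; a sketch under that assumption (the real InterpreterRuntime::popframe_move_outgoing_args operates on raw frame words):

    #include <cstddef>
    #include <cstring>

    // dst: top of the expression stack; src: mutated argument area below
    // last_sp; nbytes: size of the outgoing-argument block.
    void move_outgoing_args(void* dst, const void* src, size_t nbytes) {
      std::memmove(dst, src, nbytes);  // regions may overlap within one frame
    }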
@ -1696,7 +1501,6 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
}

// Clear the popframe condition flag
NOT_LP64(__ get_thread(thread));
__ movl(Address(thread, JavaThread::popframe_condition_offset()),
JavaThread::popframe_inactive);

@ -1730,12 +1534,10 @@ void TemplateInterpreterGenerator::generate_throw_exception() {

// preserve exception over this code sequence
__ pop_ptr(rax);
NOT_LP64(__ get_thread(thread));
__ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
// remove the activation (without doing throws on illegalMonitorExceptions)
__ remove_activation(vtos, rdx, false, true, false);
// restore exception
NOT_LP64(__ get_thread(thread));
__ get_vm_result(rax, thread);

// In between activations - previous activation type unknown yet
@ -1771,9 +1573,7 @@ address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state
__ empty_expression_stack();
__ load_earlyret_value(state); // 32 bits returns value in rdx, so don't reuse

const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
NOT_LP64(__ get_thread(thread));
__ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
__ movptr(rcx, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));
Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());

// Clear the earlyret state
@ -1804,21 +1604,12 @@ void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
address& vep) {
assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
Label L;
#ifndef _LP64
fep = __ pc(); // ftos entry point
__ push(ftos);
__ jmpb(L);
dep = __ pc(); // dtos entry point
__ push(dtos);
__ jmpb(L);
#else
fep = __ pc(); // ftos entry point
__ push_f(xmm0);
__ jmpb(L);
dep = __ pc(); // dtos entry point
__ push_d(xmm0);
__ jmpb(L);
#endif // _LP64
lep = __ pc(); // ltos entry point
__ push_l();
__ jmpb(L);
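
set_vtos_entry_points emits one tiny stub per tos state that spills the top-of-stack cache to the expression stack and jumps to the shared vtos body; after this cleanup only the 64-bit XMM forms (push_f/push_d) remain. The control flow, modeled in C++ (the real generator emits machine-code stubs):

    enum TosState { ftos, dtos, ltos, atos, itos, vtos };

    // One stub per incoming tos state: normalize the cached top-of-stack
    // value onto the expression stack, then fall into the common vtos body.
    void push_tos(TosState s) {
      switch (s) {
        case ftos: /* push_f(xmm0) */ break;  // spill the float cache
        case dtos: /* push_d(xmm0) */ break;  // spill the double cache
        case ltos: /* push_l()     */ break;  // spill the long cache
        default:   /* push(rax)    */ break;  // integral/reference caches
      }
    }

    void dispatch_as_vtos(TosState incoming) {
      push_tos(incoming);  // what the fep/dep/lep/... stubs do before 'L'
      /* common vtos template body runs here */
    }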
@ -1837,19 +1628,6 @@ void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
address entry = __ pc();

#ifndef _LP64
// prepare expression stack
__ pop(rcx); // pop return address so expression stack is 'pure'
__ push(state); // save tosca

// pass tosca registers as arguments & call tracer
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), rcx, rax, rdx);
__ mov(rcx, rax); // make sure return address is not destroyed by pop(state)
__ pop(state); // restore tosca

// return
__ jmp(rcx);
#else
__ push(state);
__ push(c_rarg0);
__ push(c_rarg1);
@ -1868,17 +1646,12 @@ address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
__ pop(c_rarg0);
__ pop(state);
__ ret(0); // return from result handler
#endif // _LP64

return entry;
}

void TemplateInterpreterGenerator::count_bytecode() {
#ifndef _LP64
__ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value), rscratch1);
#else
__ incrementq(ExternalAddress((address) &BytecodeCounter::_counter_value), rscratch1);
#endif
}
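
With the 32-bit path gone, count_bytecode is a single 64-bit increment of BytecodeCounter::_counter_value. Its effect, as a C++ sketch (assumes C++17 inline variables):

    #include <cstdint>

    struct BytecodeCounterModel {
      static inline uint64_t _counter_value = 0;  // C++17 inline variable

      // Equivalent of the generated incrementq on the counter's address.
      static void count_bytecode() { ++_counter_value; }
    };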
void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
@ -1904,28 +1677,18 @@ void TemplateInterpreterGenerator::trace_bytecode(Template* t) {

assert(Interpreter::trace_code(t->tos_in()) != nullptr,
"entry must have been generated");
#ifndef _LP64
__ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
#else
__ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
__ andptr(rsp, -16); // align stack as required by ABI
__ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
__ mov(rsp, r12); // restore sp
__ reinit_heapbase();
#endif // _LP64
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
Label L;
#ifndef _LP64
__ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
StopInterpreterAt,
rscratch1);
#else
__ mov64(rscratch1, StopInterpreterAt);
__ cmp64(rscratch1, ExternalAddress((address) &BytecodeCounter::_counter_value), rscratch2);
#endif
__ jcc(Assembler::notEqual, L);
__ int3();
__ bind(L);
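
stop_interpreter_at compares the global bytecode counter against the StopInterpreterAt flag and executes int3 on a match, so a native debugger halts exactly at the Nth executed bytecode. A C++ analogue (sketch; assumes POSIX SIGTRAP in place of a raw int3):

    #include <csignal>
    #include <cstdint>

    extern uint64_t bytecode_counter;      // stands in for BytecodeCounter::_counter_value
    extern uint64_t stop_interpreter_at;   // stands in for the StopInterpreterAt flag

    inline void maybe_stop() {
      if (bytecode_counter == stop_interpreter_at) {
        raise(SIGTRAP);                    // the generated stub executes int3 directly
      }
    }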
File diff suppressed because it is too large
@ -371,7 +371,6 @@ void TemplateInterpreterGenerator::generate_and_dispatch(Template* t, TosState t
if (PrintBytecodePairHistogram) histogram_bytecode_pair(t);
if (TraceBytecodes) trace_bytecode(t);
if (StopInterpreterAt > 0) stop_interpreter_at();
__ verify_FPU(1, t->tos_in());
#endif // !PRODUCT
int step = 0;
if (!t->does_dispatch()) {