jdk/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp
Robbin Ehn 5c612c230b 8332689: RISC-V: Use load instead of trampolines
Reviewed-by: fyang, mli, luhenry
2024-07-11 10:24:00 +00:00

/*
 * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.hpp"
#include "nativeInst_riscv.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_riscv.inline.hpp"

#define __ ce->masm()->
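
// Slow path for a safepoint poll: record the pc of the poll in
// JavaThread::saved_exception_pc, then jump to the shared polling-page
// return handler blob, which brings this thread to the safepoint.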
void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  InternalAddress safepoint_pc(__ pc() - __ offset() + safepoint_offset());
  __ relocate(safepoint_pc.rspec(), [&] {
    int32_t offset;
    __ la(t0, safepoint_pc.target(), offset);
    __ addi(t0, t0, offset);
  });
  __ sd(t0, Address(xthread, JavaThread::saved_exception_pc_offset()));

  assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
         "polling page return stub not created yet");
  address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
  __ far_jump(RuntimeAddress(stub));
}
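
// Invocation/backedge counter overflow: pass the method and bci to the
// counter_overflow runtime stub (which may trigger recompilation), then
// resume at _continuation.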
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  __ mov_metadata(t0, m);
  ce->store_parameter(t0, 1);
  ce->store_parameter(_bci, 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ j(_continuation);
}
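
// Out-of-bounds array access: either deoptimize, or throw the appropriate
// IndexOutOfBoundsException via a runtime stub, passing the index in t0
// (and, for range checks, the array in t1).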
void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    __ far_call(RuntimeAddress(a));
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  if (_index->is_cpu_register()) {
    __ mv(t0, _index->as_register());
  } else {
    __ mv(t0, _index->as_jint());
  }
  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    assert(_array != LIR_Opr::nullOpr(), "sanity");
    __ mv(t1, _array->as_pointer_register());
    stub_id = Runtime1::throw_range_check_failed_id;
  }
  // t0 and t1 are used as args in generate_exception_throw,
  // so use ra as the tmp register for rt_call.
  __ rt_call(Runtime1::entry_for(stub_id), ra);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}
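
// Loop predicate failed: deoptimize via the predicate_failed_trap stub.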
PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  __ far_call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}
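
// Division by zero: throw ArithmeticException via the throw_div0_exception
// runtime stub; the faulting instruction offset, if any, is recorded in the
// implicit exception table.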
void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}
// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id ||
         stub_id == Runtime1::fast_new_instance_id ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}

void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ mv(x13, _klass_reg->as_register());
  __ far_call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == x10, "result must be in x10");
  __ j(_continuation);
}
// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == x9, "length must be in x9");
  assert(_klass_reg->as_register() == x13, "klass_reg must be in x13");
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == x10, "result must be in x10");
  __ j(_continuation);
}
// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}

void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == x9, "length must be in x9");
  assert(_klass_reg->as_register() == x13, "klass_reg must be in x13");
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == x10, "result must be in x10");
  __ j(_continuation);
}
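
// Slow-path monitor enter: pass the object and the lock address to the
// monitorenter runtime stub; the _nofpu variant skips saving FPU state
// when the method contains no FPU code.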
void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_obj_reg->as_register(), 1);
  ce->store_parameter(_lock_reg->as_register(), 0);
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ j(_continuation);
}
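
// Slow-path monitor exit: a non-blocking leaf call, so no call info is
// recorded; ra is pointed at _continuation so the stub returns there
// directly.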
void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  __ la(ra, _continuation);
  __ far_jump(RuntimeAddress(Runtime1::entry_for(exit_id)));
}
// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace the original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(false, "RISCV should not use C1 runtime patching");
}
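
// Unconditional deoptimization: pass the trap request to the deoptimize
// runtime stub; control does not return here.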
void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_trap_request, 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}
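
// Implicit null check: the faulting access is recorded in the implicit
// exception table; either deoptimize or throw NullPointerException.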
void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a = nullptr;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ far_call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}
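
// Throw the exception identified by _stub (e.g. a class cast or array
// store failure), with the offending object, if any, passed in t0.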
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  // pass the object in a tmp register because all other registers
  // must be preserved
  if (_obj->is_cpu_register()) {
    __ mv(t0, _obj->as_register());
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(_stub)), t1);
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}
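
// Slow case for arraycopy: marshal the five arguments according to the
// Java calling convention and emit a static call that is resolved at
// runtime, as with an ordinary static Java call.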
void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  // ---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go
  // This should really convert the IntrinsicID to the Method* and signature
  // but I don't know how to do that.
  const int args_num = 5;
  VMRegPair args[args_num];
  BasicType signature[args_num] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT };
  SharedRuntime::java_calling_convention(signature, args, args_num);

  // push parameters
  Register r[args_num];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // next registers will get stored on the stack
  for (int j = 0; j < args_num; j++) {
    VMReg r_1 = args[j].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ sd(r[j], Address(sp, st_off));
    } else {
      assert(r[j] == args[j].first()->as_Register(), "Wrong register for arg");
    }
  }

  ce->align_call(lir_static_call);
  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  Address resolve(SharedRuntime::get_resolve_static_call_stub(),
                  relocInfo::static_call_type);
  address call = __ reloc_call(resolve);
  if (call == nullptr) {
    ce->bailout("trampoline stub overflow");
    return;
  }
  ce->add_call_info_here(info());

#ifndef PRODUCT
  if (PrintC1Statistics) {
    __ la(t1, ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
    __ incrementw(Address(t1));
  }
#endif

  __ j(_continuation);
}

#undef __