8254231: Implementation of Foreign Linker API (Incubator)

Reviewed-by: coleenp, ihse, dholmes, vlivanov
This commit is contained in:
Maurizio Cimadamore 2020-11-23 11:00:38 +00:00
parent 53f38353e0
commit 0fb31dbf3a
212 changed files with 67390 additions and 179 deletions

View File

@ -1770,6 +1770,11 @@ int MachCallRuntimeNode::ret_addr_offset() {
}
}
// Native-call nodes are not emitted on this platform (see
// SharedRuntime::make_native_invoker returning NULL), so this
// should never be queried for a return-address offset.
int MachCallNativeNode::ret_addr_offset() {
  ShouldNotReachHere();
  return -1; // unreachable; keeps the compiler happy
}
// Indicate if the safepoint node needs the polling page as an input
// the shared code plants the oop data at the start of the generated

View File

@ -0,0 +1,86 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, Arm Limited. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "prims/foreign_globals.hpp"
#include "prims/foreign_globals.inline.hpp"
// An integer register is volatile (caller-saved) under this ABI if it is
// either one of the argument registers or listed as an additional
// volatile register.
bool ABIDescriptor::is_volatile_reg(Register reg) const {
  if (_integer_argument_registers.contains(reg)) {
    return true;
  }
  return _integer_additional_volatile_registers.contains(reg);
}
// A FP/SIMD register is volatile (caller-saved) under this ABI if it is
// either one of the vector argument registers or listed as an additional
// volatile register.
bool ABIDescriptor::is_volatile_reg(FloatRegister reg) const {
  if (_vector_argument_registers.contains(reg)) {
    return true;
  }
  return _vector_additional_volatile_registers.contains(reg);
}
#define INTEGER_TYPE 0
#define VECTOR_TYPE 1
// Deserialize the Java-side ABIDescriptor object referenced by 'jabi' into
// the native ABIDescriptor mirror. The ABI.*_offset field offsets are
// resolved elsewhere — presumably during ForeignGlobals init; TODO confirm.
const ABIDescriptor ForeignGlobals::parse_abi_descriptor_impl(jobject jabi) const {
  oop abi_oop = JNIHandles::resolve_non_null(jabi);
  ABIDescriptor abi;

  // inputStorage[INTEGER_TYPE] / [VECTOR_TYPE] hold the argument registers.
  objArrayOop inputStorage = cast<objArrayOop>(abi_oop->obj_field(ABI.inputStorage_offset));
  loadArray(inputStorage, INTEGER_TYPE, abi._integer_argument_registers, as_Register);
  loadArray(inputStorage, VECTOR_TYPE, abi._vector_argument_registers, as_FloatRegister);

  // outputStorage holds the return-value registers.
  objArrayOop outputStorage = cast<objArrayOop>(abi_oop->obj_field(ABI.outputStorage_offset));
  loadArray(outputStorage, INTEGER_TYPE, abi._integer_return_registers, as_Register);
  loadArray(outputStorage, VECTOR_TYPE, abi._vector_return_registers, as_FloatRegister);

  // volatileStorage lists caller-saved registers beyond the argument set.
  objArrayOop volatileStorage = cast<objArrayOop>(abi_oop->obj_field(ABI.volatileStorage_offset));
  loadArray(volatileStorage, INTEGER_TYPE, abi._integer_additional_volatile_registers, as_Register);
  loadArray(volatileStorage, VECTOR_TYPE, abi._vector_additional_volatile_registers, as_FloatRegister);

  abi._stack_alignment_bytes = abi_oop->int_field(ABI.stackAlignment_offset);
  abi._shadow_space_bytes = abi_oop->int_field(ABI.shadowSpace_offset);

  return abi;
}
// Deserialize the Java-side BufferLayout object into the native mirror.
// All offsets are byte offsets into the shared argument/return buffer.
const BufferLayout ForeignGlobals::parse_buffer_layout_impl(jobject jlayout) const {
  oop layout_oop = JNIHandles::resolve_non_null(jlayout);
  BufferLayout layout;

  layout.stack_args_bytes = layout_oop->long_field(BL.stack_args_bytes_offset);
  layout.stack_args = layout_oop->long_field(BL.stack_args_offset);
  layout.arguments_next_pc = layout_oop->long_field(BL.arguments_next_pc_offset);

  // Per-type offsets of the argument area, indexed by storage type.
  typeArrayOop input_offsets = cast<typeArrayOop>(layout_oop->obj_field(BL.input_type_offsets_offset));
  layout.arguments_integer = (size_t) input_offsets->long_at(INTEGER_TYPE);
  layout.arguments_vector = (size_t) input_offsets->long_at(VECTOR_TYPE);

  // Per-type offsets of the return-value area.
  typeArrayOop output_offsets = cast<typeArrayOop>(layout_oop->obj_field(BL.output_type_offsets_offset));
  layout.returns_integer = (size_t) output_offsets->long_at(INTEGER_TYPE);
  layout.returns_vector = (size_t) output_offsets->long_at(VECTOR_TYPE);

  layout.buffer_size = layout_oop->long_field(BL.size_offset);

  return layout;
}

View File

@ -0,0 +1,60 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, Arm Limited. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef CPU_AARCH64_VM_FOREIGN_GLOBALS_AARCH64_HPP
#define CPU_AARCH64_VM_FOREIGN_GLOBALS_AARCH64_HPP
#include "asm/macroAssembler.hpp"
#include "utilities/growableArray.hpp"
constexpr size_t float_reg_size = 16; // bytes
// Native mirror of the Java-side ABIDescriptor: which registers carry
// arguments and return values, which extra registers are volatile, and
// the stack discipline of the foreign ABI being invoked.
struct ABIDescriptor {
  GrowableArray<Register> _integer_argument_registers;        // GP argument registers, in ABI order
  GrowableArray<Register> _integer_return_registers;          // GP return-value registers
  GrowableArray<FloatRegister> _vector_argument_registers;    // FP/SIMD argument registers
  GrowableArray<FloatRegister> _vector_return_registers;      // FP/SIMD return-value registers

  GrowableArray<Register> _integer_additional_volatile_registers;      // caller-saved GP regs beyond the argument set
  GrowableArray<FloatRegister> _vector_additional_volatile_registers;  // caller-saved FP/SIMD regs beyond the argument set

  int32_t _stack_alignment_bytes;  // required outgoing SP alignment (asserted to be a multiple of 16)
  int32_t _shadow_space_bytes;     // callee "shadow" area; must be 0 on AArch64 (see invoker assert)

  bool is_volatile_reg(Register reg) const;
  bool is_volatile_reg(FloatRegister reg) const;
};
// Native mirror of the Java-side BufferLayout: byte offsets into the
// single buffer used to pass arguments to, and receive results from,
// the generated invoker/upcall stubs.
struct BufferLayout {
  size_t stack_args_bytes;    // total size of the stack-argument area
  size_t stack_args;          // offset of the stack-argument data (invoker) / saved args base (upcall)
  size_t arguments_vector;    // offset of FP/SIMD argument values
  size_t arguments_integer;   // offset of GP argument values
  size_t arguments_next_pc;   // offset of the target function address
  size_t returns_vector;      // offset of FP/SIMD return values
  size_t returns_integer;     // offset of GP return values
  size_t buffer_size;         // total buffer size in bytes
};
#endif // CPU_AARCH64_VM_FOREIGN_GLOBALS_AARCH64_HPP

View File

@ -178,6 +178,13 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
return NULL;
}
// No need in interpreter entry for linkToNative for now.
// Interpreter calls compiled entry through i2c.
if (iid == vmIntrinsics::_linkToNative) {
__ hlt(0);
return NULL;
}
// r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
// rmethod: Method*
// r3: argument locator (parameter slot count, added to rsp)
@ -272,7 +279,10 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
assert_different_registers(temp1, temp2, temp3, receiver_reg);
assert_different_registers(temp1, temp2, temp3, member_reg);
if (iid == vmIntrinsics::_invokeBasic) {
if (iid == vmIntrinsics::_invokeBasic || iid == vmIntrinsics::_linkToNative) {
if (iid == vmIntrinsics::_linkToNative) {
assert(for_compiler_entry, "only compiler entry is supported");
}
// indirect through MH.form.vmentry.vmtarget
jump_to_lambda_form(_masm, receiver_reg, rmethod, temp1, for_compiler_entry);

View File

@ -1195,7 +1195,7 @@ static void gen_special_dispatch(MacroAssembler* masm,
member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument
member_reg = r19; // known to be free at this point
has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
} else if (iid == vmIntrinsics::_invokeBasic) {
} else if (iid == vmIntrinsics::_invokeBasic || iid == vmIntrinsics::_linkToNative) {
has_receiver = true;
} else {
fatal("unexpected intrinsic id %d", iid);
@ -3069,3 +3069,10 @@ void OptoRuntime::generate_exception_blob() {
_exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
}
#endif // COMPILER2
// Optimized native invoker generation is not implemented on this platform
// yet; returning NULL signals callers that no specialized invoker blob is
// available — presumably forcing the generic path. TODO confirm caller
// handling of NULL.
BufferBlob* SharedRuntime::make_native_invoker(address call_target,
                                               int shadow_space_bytes,
                                               const GrowableArray<VMReg>& input_registers,
                                               const GrowableArray<VMReg>& output_registers) {
  return NULL;
}

View File

@ -0,0 +1,127 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, Arm Limited. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "code/codeBlob.hpp"
#include "memory/resourceArea.hpp"
#include "prims/universalNativeInvoker.hpp"
#define __ _masm->
// Emit the downcall trampoline: read the context buffer (laid out per
// _layout), copy stack arguments, load argument registers, call the
// target, and store the return registers back into the buffer.
void ProgrammableInvoker::Generator::generate() {
  __ enter();

  // Name registers used in the stub code. These are all caller-save so
  // may be clobbered by the call to the native function. Avoid using
  // rscratch1 here as it's r8 which is the indirect result register in
  // the standard ABI.
  Register Rctx = r10, Rstack_size = r11;
  Register Rwords = r12, Rtmp = r13;
  Register Rsrc_ptr = r14, Rdst_ptr = r15;

  assert_different_registers(Rctx, Rstack_size, rscratch1, rscratch2);

  // TODO: if the callee is not using the standard C ABI then we need to
  //       preserve more registers here.

  __ block_comment("init_and_alloc_stack");

  // Stash the context pointer just below the frame so it survives the
  // native call (the callee may clobber Rctx).
  __ mov(Rctx, c_rarg0);
  __ str(Rctx, Address(__ pre(sp, -2 * wordSize)));

  assert(_abi->_stack_alignment_bytes % 16 == 0, "stack must be 16 byte aligned");

  __ block_comment("allocate_stack");
  // Round the stack-argument area up to the ABI's stack alignment.
  __ ldr(Rstack_size, Address(Rctx, (int) _layout->stack_args_bytes));
  __ add(rscratch2, Rstack_size, _abi->_stack_alignment_bytes - 1);
  __ andr(rscratch2, rscratch2, -_abi->_stack_alignment_bytes);
  __ sub(sp, sp, rscratch2);

  __ block_comment("load_arguments");

  // Word-by-word copy of outgoing stack arguments from the buffer to the
  // newly allocated stack area.
  __ ldr(Rsrc_ptr, Address(Rctx, (int) _layout->stack_args));
  __ lsr(Rwords, Rstack_size, LogBytesPerWord);
  __ mov(Rdst_ptr, sp);

  Label Ldone, Lnext;

  __ bind(Lnext);
  __ cbz(Rwords, Ldone);
  __ ldr(Rtmp, __ post(Rsrc_ptr, wordSize));
  __ str(Rtmp, __ post(Rdst_ptr, wordSize));
  __ sub(Rwords, Rwords, 1);
  __ b(Lnext);
  __ bind(Ldone);

  // Load FP/SIMD then GP argument registers from the buffer.
  for (int i = 0; i < _abi->_vector_argument_registers.length(); i++) {
    ssize_t offs = _layout->arguments_vector + i * float_reg_size;
    __ ldrq(_abi->_vector_argument_registers.at(i), Address(Rctx, offs));
  }

  for (int i = 0; i < _abi->_integer_argument_registers.length(); i++) {
    ssize_t offs = _layout->arguments_integer + i * sizeof(uintptr_t);
    __ ldr(_abi->_integer_argument_registers.at(i), Address(Rctx, offs));
  }

  assert(_abi->_shadow_space_bytes == 0, "shadow space not supported on AArch64");

  // call target function
  __ block_comment("call target function");
  __ ldr(rscratch2, Address(Rctx, (int) _layout->arguments_next_pc));
  __ blr(rscratch2);

  // Reload the context pointer from its slot above the frame.
  __ ldr(Rctx, Address(rfp, -2 * wordSize));   // Might have clobbered Rctx

  __ block_comment("store_registers");
  // Store GP then FP/SIMD return registers back into the buffer.
  for (int i = 0; i < _abi->_integer_return_registers.length(); i++) {
    ssize_t offs = _layout->returns_integer + i * sizeof(uintptr_t);
    __ str(_abi->_integer_return_registers.at(i), Address(Rctx, offs));
  }

  for (int i = 0; i < _abi->_vector_return_registers.length(); i++) {
    ssize_t offs = _layout->returns_vector + i * float_reg_size;
    __ strq(_abi->_vector_return_registers.at(i), Address(Rctx, offs));
  }

  __ leave();
  __ ret(lr);

  __ flush();
}
// Parse the Java-provided ABI and buffer-layout descriptors and generate
// a fresh invoker stub into a newly allocated BufferBlob. Returns the
// stub's entry point.
address ProgrammableInvoker::generate_adapter(jobject jabi, jobject jlayout) {
  ResourceMark rm;
  const ABIDescriptor abi = ForeignGlobals::parse_abi_descriptor(jabi);
  const BufferLayout layout = ForeignGlobals::parse_buffer_layout(jlayout);

  BufferBlob* blob = BufferBlob::create("invoke_native_blob", native_invoker_size);

  CodeBuffer code(blob);
  ProgrammableInvoker::Generator generator(&code, &abi, &layout);
  generator.generate();
  code.log_section_sizes("InvokeNativeBlob");

  return blob->code_begin();
}

View File

@ -0,0 +1,101 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, Arm Limited. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "prims/universalUpcallHandler.hpp"
#define __ _masm->
// 1. Create buffer according to layout
// 2. Load registers & stack args into buffer
// 3. Call upcall helper with upcall handler instance & buffer pointer (C++ ABI)
// 4. Load return value from buffer into foreign ABI registers
// 5. Return
address ProgrammableUpcallHandler::generate_upcall_stub(jobject rec, jobject jabi, jobject jlayout) {
  ResourceMark rm;
  const ABIDescriptor abi = ForeignGlobals::parse_abi_descriptor(jabi);
  const BufferLayout layout = ForeignGlobals::parse_buffer_layout(jlayout);

  CodeBuffer buffer("upcall_stub", 1024, upcall_stub_size);

  MacroAssembler* _masm = new MacroAssembler(&buffer);

  // stub code
  __ enter();

  // save pointer to JNI receiver handle into constant segment
  Address rec_adr = InternalAddress(__ address_constant((address)rec));

  assert(abi._stack_alignment_bytes % 16 == 0, "stack must be 16 byte aligned");

  // Allocate the argument/return buffer on the stack, rounded up to the
  // ABI's stack alignment.
  __ sub(sp, sp, (int) align_up(layout.buffer_size, abi._stack_alignment_bytes));

  // TODO: This stub only uses registers which are caller-save in the
  //       standard C ABI. If this is called from a different ABI then
  //       we need to save registers here according to abi.is_volatile_reg.

  // Spill the incoming GP and FP/SIMD argument registers into the buffer.
  for (int i = 0; i < abi._integer_argument_registers.length(); i++) {
    Register reg = abi._integer_argument_registers.at(i);
    ssize_t offset = layout.arguments_integer + i * sizeof(uintptr_t);
    __ str(reg, Address(sp, offset));
  }

  for (int i = 0; i < abi._vector_argument_registers.length(); i++) {
    FloatRegister reg = abi._vector_argument_registers.at(i);
    ssize_t offset = layout.arguments_vector + i * float_reg_size;
    __ strq(reg, Address(sp, offset));
  }

  // Capture prev stack pointer (stack arguments base)
  __ add(rscratch1, rfp, 16);   // Skip saved FP and LR
  __ str(rscratch1, Address(sp, layout.stack_args));

  // Call upcall helper
  __ ldr(c_rarg0, rec_adr);
  __ mov(c_rarg1, sp);
  __ movptr(rscratch1, CAST_FROM_FN_PTR(uint64_t, ProgrammableUpcallHandler::attach_thread_and_do_upcall));
  __ blr(rscratch1);

  // Reload return values from the buffer into the foreign-ABI registers.
  for (int i = 0; i < abi._integer_return_registers.length(); i++) {
    ssize_t offs = layout.returns_integer + i * sizeof(uintptr_t);
    __ ldr(abi._integer_return_registers.at(i), Address(sp, offs));
  }

  for (int i = 0; i < abi._vector_return_registers.length(); i++) {
    FloatRegister reg = abi._vector_return_registers.at(i);
    ssize_t offs = layout.returns_vector + i * float_reg_size;
    __ ldrq(reg, Address(sp, offs));
  }

  __ leave();
  __ ret(lr);

  __ flush();

  // Copy the generated code into a standalone blob whose entry point is
  // handed back as the upcall stub address.
  BufferBlob* blob = BufferBlob::create("upcall_stub", &buffer);

  return blob->code_begin();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2006, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -26,7 +26,7 @@
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "code/vmreg.hpp"
#include "vmreg_aarch64.inline.hpp"
void VMRegImpl::set_regName() {
@ -51,3 +51,16 @@ void VMRegImpl::set_regName() {
regName[i] = "NON-GPR-FPR";
}
}
#define INTEGER_TYPE 0
#define VECTOR_TYPE 1
#define STACK_TYPE 3
// Map a Java-side storage classification (type, index) onto a VMReg.
// Unknown types yield VMRegImpl::Bad().
VMReg VMRegImpl::vmStorageToVMReg(int type, int index) {
  switch (type) {
    case INTEGER_TYPE:
      return ::as_Register(index)->as_VMReg();
    case VECTOR_TYPE:
      return ::as_FloatRegister(index)->as_VMReg();
    case STACK_TYPE:
      // VMReg stack slots are 32-bit; a 64-bit VM uses two slots per word.
      return VMRegImpl::stack2reg(index LP64_ONLY(* 2));
    default:
      return VMRegImpl::Bad();
  }
}

View File

@ -8099,13 +8099,6 @@ void Assembler::decl(Register dst) {
// 64bit doesn't use the x87
void Assembler::emit_operand32(Register reg, Address adr) {
assert(reg->encoding() < 8, "no extended registers");
assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
adr._rspec);
}
void Assembler::emit_farith(int b1, int b2, int i) {
assert(isByte(b1) && isByte(b2), "wrong opcode");
assert(0 <= i && i < 8, "illegal stack offset");
@ -8290,12 +8283,6 @@ void Assembler::fld_s(int index) {
emit_farith(0xD9, 0xC0, index);
}
void Assembler::fld_x(Address adr) {
InstructionMark im(this);
emit_int8((unsigned char)0xDB);
emit_operand32(rbp, adr);
}
void Assembler::fldcw(Address src) {
InstructionMark im(this);
emit_int8((unsigned char)0xD9);
@ -8422,12 +8409,6 @@ void Assembler::fstp_s(Address adr) {
emit_operand32(rbx, adr);
}
void Assembler::fstp_x(Address adr) {
InstructionMark im(this);
emit_int8((unsigned char)0xDB);
emit_operand32(rdi, adr);
}
void Assembler::fsub(int i) {
emit_farith(0xD8, 0xE0, i);
}
@ -9895,6 +9876,25 @@ void Assembler::decq(Address dst) {
emit_operand(rcx, dst);
}
// Load an extended-precision (80-bit) float onto the x87 stack:
// FLD m80fp = 0xDB /5 (rbp encodes reg field 5).
void Assembler::fld_x(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rbp, adr);
}
// Store the x87 top-of-stack as extended-precision (80-bit) and pop:
// FSTP m80fp = 0xDB /7 (rdi encodes reg field 7).
void Assembler::fstp_x(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rdi, adr);
}
// Emit a ModRM/SIB operand restricted to the original 32-bit register
// file: no REX prefix is emitted here, so extended (r8-r15) registers
// are not allowed in either the reg field or the address.
void Assembler::emit_operand32(Register reg, Address adr) {
  assert(reg->encoding() < 8, "no extended registers");
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec);
}
void Assembler::fxrstor(Address src) {
emit_int24(get_prefixq(src), 0x0F, (unsigned char)0xAE);
emit_operand(as_Register(1), src);

View File

@ -41,9 +41,13 @@ class Argument {
#ifdef _WIN64
n_int_register_parameters_c = 4, // rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
n_float_register_parameters_c = 4, // xmm0 - xmm3 (c_farg0, c_farg1, ... )
n_int_register_returns_c = 1, // rax
n_float_register_returns_c = 1, // xmm0
#else
n_int_register_parameters_c = 6, // rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
n_float_register_parameters_c = 8, // xmm0 - xmm7 (c_farg0, c_farg1, ... )
n_int_register_returns_c = 2, // rax, rdx
n_float_register_returns_c = 2, // xmm0, xmm1
#endif // _WIN64
n_int_register_parameters_j = 6, // j_rarg0, j_rarg1, ...
n_float_register_parameters_j = 8 // j_farg0, j_farg1, ...
@ -1201,8 +1205,6 @@ private:
#ifndef _LP64
private:
// operands that only take the original 32bit registers
void emit_operand32(Register reg, Address adr);
void emit_farith(int b1, int b2, int i);
@ -1268,7 +1270,6 @@ private:
void fld_d(Address adr);
void fld_s(Address adr);
void fld_s(int index);
void fld_x(Address adr); // extended-precision (80-bit) format
void fldcw(Address src);
@ -1313,7 +1314,6 @@ private:
void fstp_d(Address adr);
void fstp_d(int index);
void fstp_s(Address adr);
void fstp_x(Address adr); // extended-precision (80-bit) format
void fsub(int i);
void fsub_d(Address src);
@ -1348,6 +1348,11 @@ private:
void fldl2e();
#endif // !_LP64
// operands that only take the original 32bit registers
void emit_operand32(Register reg, Address adr);
void fld_x(Address adr); // extended-precision (80-bit) format
void fstp_x(Address adr); // extended-precision (80-bit) format
void fxrstor(Address src);
void xrstor(Address src);

View File

@ -0,0 +1,89 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "prims/foreign_globals.hpp"
#include "prims/foreign_globals.inline.hpp"
// An integer register is volatile (caller-saved) under this ABI if it is
// either one of the argument registers or listed as an additional
// volatile register.
bool ABIDescriptor::is_volatile_reg(Register reg) const {
  if (_integer_argument_registers.contains(reg)) {
    return true;
  }
  return _integer_additional_volatile_registers.contains(reg);
}
// An XMM register is volatile (caller-saved) under this ABI if it is
// either one of the vector argument registers or listed as an additional
// volatile register.
bool ABIDescriptor::is_volatile_reg(XMMRegister reg) const {
  if (_vector_argument_registers.contains(reg)) {
    return true;
  }
  return _vector_additional_volatile_registers.contains(reg);
}
#define INTEGER_TYPE 0
#define VECTOR_TYPE 1
#define X87_TYPE 2
// Deserialize the Java-side ABIDescriptor object referenced by 'jabi' into
// the native ABIDescriptor mirror. The ABI.*_offset field offsets are
// resolved elsewhere — presumably during ForeignGlobals init; TODO confirm.
const ABIDescriptor ForeignGlobals::parse_abi_descriptor_impl(jobject jabi) const {
  oop abi_oop = JNIHandles::resolve_non_null(jabi);
  ABIDescriptor abi;

  // inputStorage[INTEGER_TYPE] / [VECTOR_TYPE] hold the argument registers.
  objArrayOop inputStorage = cast<objArrayOop>(abi_oop->obj_field(ABI.inputStorage_offset));
  loadArray(inputStorage, INTEGER_TYPE, abi._integer_argument_registers, as_Register);
  loadArray(inputStorage, VECTOR_TYPE, abi._vector_argument_registers, as_XMMRegister);

  // outputStorage holds the return-value registers; for x87 returns only
  // the count of ST(i) slots is recorded, not a register list.
  objArrayOop outputStorage = cast<objArrayOop>(abi_oop->obj_field(ABI.outputStorage_offset));
  loadArray(outputStorage, INTEGER_TYPE, abi._integer_return_registers, as_Register);
  loadArray(outputStorage, VECTOR_TYPE, abi._vector_return_registers, as_XMMRegister);
  objArrayOop subarray = cast<objArrayOop>(outputStorage->obj_at(X87_TYPE));
  abi._X87_return_registers_noof = subarray->length();

  // volatileStorage lists caller-saved registers beyond the argument set.
  objArrayOop volatileStorage = cast<objArrayOop>(abi_oop->obj_field(ABI.volatileStorage_offset));
  loadArray(volatileStorage, INTEGER_TYPE, abi._integer_additional_volatile_registers, as_Register);
  loadArray(volatileStorage, VECTOR_TYPE, abi._vector_additional_volatile_registers, as_XMMRegister);

  abi._stack_alignment_bytes = abi_oop->int_field(ABI.stackAlignment_offset);
  abi._shadow_space_bytes = abi_oop->int_field(ABI.shadowSpace_offset);

  return abi;
}
// Deserialize the Java-side BufferLayout object into the native mirror.
// All offsets are byte offsets into the shared argument/return buffer.
const BufferLayout ForeignGlobals::parse_buffer_layout_impl(jobject jlayout) const {
  oop layout_oop = JNIHandles::resolve_non_null(jlayout);
  BufferLayout layout;

  layout.stack_args_bytes = layout_oop->long_field(BL.stack_args_bytes_offset);
  layout.stack_args = layout_oop->long_field(BL.stack_args_offset);
  layout.arguments_next_pc = layout_oop->long_field(BL.arguments_next_pc_offset);

  // Per-type offsets of the argument area, indexed by storage type.
  typeArrayOop input_offsets = cast<typeArrayOop>(layout_oop->obj_field(BL.input_type_offsets_offset));
  layout.arguments_integer = (size_t) input_offsets->long_at(INTEGER_TYPE);
  layout.arguments_vector = (size_t) input_offsets->long_at(VECTOR_TYPE);

  // Per-type offsets of the return-value area (x86 also has x87 returns).
  typeArrayOop output_offsets = cast<typeArrayOop>(layout_oop->obj_field(BL.output_type_offsets_offset));
  layout.returns_integer = (size_t) output_offsets->long_at(INTEGER_TYPE);
  layout.returns_vector = (size_t) output_offsets->long_at(VECTOR_TYPE);
  layout.returns_x87 = (size_t) output_offsets->long_at(X87_TYPE);

  layout.buffer_size = layout_oop->long_field(BL.size_offset);

  return layout;
}

View File

@ -0,0 +1,61 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef CPU_X86_VM_FOREIGN_GLOBALS_X86_HPP
#define CPU_X86_VM_FOREIGN_GLOBALS_X86_HPP
#include "asm/macroAssembler.hpp"
#include "utilities/growableArray.hpp"
constexpr size_t xmm_reg_size = 16; // size of XMM reg
// Native mirror of the Java-side ABIDescriptor: which registers carry
// arguments and return values, which extra registers are volatile, and
// the stack discipline of the foreign ABI being invoked.
struct ABIDescriptor {
  GrowableArray<Register> _integer_argument_registers;     // GP argument registers, in ABI order
  GrowableArray<Register> _integer_return_registers;       // GP return-value registers
  GrowableArray<XMMRegister> _vector_argument_registers;   // XMM argument registers
  GrowableArray<XMMRegister> _vector_return_registers;     // XMM return-value registers
  size_t _X87_return_registers_noof;                       // number of x87 ST(i) return slots (count only)

  GrowableArray<Register> _integer_additional_volatile_registers;     // caller-saved GP regs beyond the argument set
  GrowableArray<XMMRegister> _vector_additional_volatile_registers;   // caller-saved XMM regs beyond the argument set

  int32_t _stack_alignment_bytes;  // required outgoing SP alignment
  int32_t _shadow_space_bytes;     // callee "shadow" (home) area size, e.g. on Windows x64

  bool is_volatile_reg(Register reg) const;
  bool is_volatile_reg(XMMRegister reg) const;
};
// Native mirror of the Java-side BufferLayout: byte offsets into the
// single buffer used to pass arguments to, and receive results from,
// the generated invoker/upcall stubs.
struct BufferLayout {
  size_t stack_args_bytes;    // total size of the stack-argument area
  size_t stack_args;          // offset of the stack-argument data
  size_t arguments_vector;    // offset of XMM argument values
  size_t arguments_integer;   // offset of GP argument values
  size_t arguments_next_pc;   // offset of the target function address
  size_t returns_vector;      // offset of XMM return values
  size_t returns_integer;     // offset of GP return values
  size_t returns_x87;         // offset of x87 ST(i) return values
  size_t buffer_size;         // total buffer size in bytes
};
#endif // CPU_X86_VM_FOREIGN_GLOBALS_X86_HPP

View File

@ -344,6 +344,11 @@ frame frame::sender_for_entry_frame(RegisterMap* map) const {
assert(map->include_argument_oops(), "should be set by clear");
vmassert(jfa->last_Java_pc() != NULL, "not walkable");
frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());
if (jfa->saved_rbp_address()) {
update_map_with_saved_link(map, jfa->saved_rbp_address());
}
return fr;
}

View File

@ -30,6 +30,9 @@ private:
// FP value associated with _last_Java_sp:
intptr_t* volatile _last_Java_fp; // pointer is volatile not what it points to
// (Optional) location of saved RBP register, which GCs want to inspect
intptr_t** volatile _saved_rbp_address;
public:
// Each arch must define reset, save, restore
// These are used by objects that only care about:
@ -43,6 +46,7 @@ public:
// fence?
_last_Java_fp = NULL;
_last_Java_pc = NULL;
_saved_rbp_address = NULL;
}
void copy(JavaFrameAnchor* src) {
@ -60,6 +64,8 @@ public:
_last_Java_pc = src->_last_Java_pc;
// Must be last so profiler will always see valid frame if has_last_frame() is true
_last_Java_sp = src->_last_Java_sp;
_saved_rbp_address = src->_saved_rbp_address;
}
bool walkable(void) { return _last_Java_sp != NULL && _last_Java_pc != NULL; }
@ -70,9 +76,12 @@ public:
address last_Java_pc(void) { return _last_Java_pc; }
intptr_t** saved_rbp_address(void) const { return _saved_rbp_address; }
private:
static ByteSize last_Java_fp_offset() { return byte_offset_of(JavaFrameAnchor, _last_Java_fp); }
static ByteSize saved_rbp_address_offset() { return byte_offset_of(JavaFrameAnchor, _saved_rbp_address); }
public:

View File

@ -745,20 +745,6 @@ void MacroAssembler::pushptr(AddressLiteral src) {
}
}
void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
// we must set sp to zero to clear frame
movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
// must clear fp, so that compiled frames are not confused; it is
// possible that we need it only for debugging
if (clear_fp) {
movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
}
// Always clear the pc because it could have been set by make_walkable()
movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
vzeroupper();
}
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
Register last_java_fp,
address last_java_pc) {
@ -2042,10 +2028,6 @@ void MacroAssembler::fld_s(AddressLiteral src) {
fld_s(as_Address(src));
}
void MacroAssembler::fld_x(AddressLiteral src) {
Assembler::fld_x(as_Address(src));
}
void MacroAssembler::fldcw(AddressLiteral src) {
Assembler::fldcw(as_Address(src));
}
@ -2253,6 +2235,10 @@ void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst) {
}
}
void MacroAssembler::fld_x(AddressLiteral src) {
Assembler::fld_x(as_Address(src));
}
void MacroAssembler::ldmxcsr(AddressLiteral src) {
if (reachable(src)) {
Assembler::ldmxcsr(as_Address(src));
@ -2727,20 +2713,25 @@ void MacroAssembler::push_IU_state() {
pusha();
}
// Convenience overload: on x86_64 the current JavaThread lives in r15.
void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  reset_last_Java_frame(r15_thread, clear_fp);
}
// Clear the JavaThread's last-Java-frame anchor fields (sp, optionally fp,
// pc, and the saved-RBP slot) so the frame is no longer considered
// walkable. If 'java_thread' is invalid, the thread is loaded into rdi
// (32-bit get_thread path).
//
// NOTE: the stores must use movptr, not movslq: this file is shared with
// the 32-bit VM, where movslq does not exist, and movptr emits the correct
// word-sized store of NULL_WORD on both variants.
void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rdi;
    get_thread(java_thread);
  }
  // we must set sp to zero to clear frame
  movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
  }
  // Always clear the pc because it could have been set by make_walkable()
  movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
  // Clear the saved-RBP slot so GC stack walks don't see a stale link
  movptr(Address(java_thread, JavaThread::saved_rbp_address_offset()), NULL_WORD);
  vzeroupper();
}

View File

@ -816,6 +816,7 @@ class MacroAssembler: public Assembler {
void call(Label& L, relocInfo::relocType rtype);
void call(Register entry);
void call(Address addr) { Assembler::call(addr); }
// NOTE: this call transfers to the effective address of entry NOT
// the address contained by entry. This is because this is more natural
@ -870,13 +871,13 @@ class MacroAssembler: public Assembler {
void fld_d(Address src) { Assembler::fld_d(src); }
void fld_d(AddressLiteral src);
void fld_x(Address src) { Assembler::fld_x(src); }
void fld_x(AddressLiteral src);
void fmul_s(Address src) { Assembler::fmul_s(src); }
void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); }
#endif // _LP64
void fld_x(Address src) { Assembler::fld_x(src); }
void fld_x(AddressLiteral src);
void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
void ldmxcsr(AddressLiteral src);

View File

@ -216,6 +216,13 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
return NULL;
}
// No need in interpreter entry for linkToNative for now.
// Interpreter calls compiled entry through i2c.
if (iid == vmIntrinsics::_linkToNative) {
__ hlt();
return NULL;
}
// rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
// rbx: Method*
// rdx: argument locator (parameter slot count, added to rsp)
@ -327,7 +334,10 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
assert_different_registers(temp1, temp2, temp3, receiver_reg);
assert_different_registers(temp1, temp2, temp3, member_reg);
if (iid == vmIntrinsics::_invokeBasic) {
if (iid == vmIntrinsics::_invokeBasic || iid == vmIntrinsics::_linkToNative) {
if (iid == vmIntrinsics::_linkToNative) {
assert(for_compiler_entry, "only compiler entry is supported");
}
// indirect through MH.form.vmentry.vmtarget
jump_to_lambda_form(_masm, receiver_reg, rbx_method, temp1, for_compiler_entry);

View File

@ -1643,7 +1643,7 @@ static void gen_special_dispatch(MacroAssembler* masm,
member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument
member_reg = rbx; // known to be free at this point
has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
} else if (iid == vmIntrinsics::_invokeBasic) {
} else if (iid == vmIntrinsics::_invokeBasic || iid == vmIntrinsics::_linkToNative) {
has_receiver = true;
} else {
fatal("unexpected intrinsic id %d", iid);
@ -3417,6 +3417,216 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
}
// Code-size budget for a generated native-invoker stub; reuses the method
// handle adapter budget, which is comfortably larger than what the stub
// below emits.
static const int native_invoker_code_size = MethodHandles::adapter_code_size;

// Emits the per-NativeEntryPoint invoker stub for foreign-linker downcalls:
// it records the last Java frame in the thread anchor, transitions the
// thread to _thread_in_native, calls the target native function, and
// handles the safepoint-poll and stack-reguard slow paths on the way back
// to Java (see generate() below).
class NativeInvokerGenerator : public StubCodeGenerator {
  address _call_target;       // entry point of the native function to call
  int _shadow_space_bytes;    // callee shadow space to reserve around the call (0 if none)
  const GrowableArray<VMReg>& _input_registers;   // registers carrying the call's arguments
  const GrowableArray<VMReg>& _output_registers;  // register(s) carrying the return value
public:
  NativeInvokerGenerator(CodeBuffer* buffer,
                         address call_target,
                         int shadow_space_bytes,
                         const GrowableArray<VMReg>& input_registers,
                         const GrowableArray<VMReg>& output_registers)
   : StubCodeGenerator(buffer, PrintMethodHandleStubs),
     _call_target(call_target),
     _shadow_space_bytes(shadow_space_bytes),
     _input_registers(input_registers),
     _output_registers(output_registers) {}

  // Emits the stub into the CodeBuffer; defined out of line below.
  void generate();

  // Saves 'reg' on the stack so a VM runtime call on a slow path cannot
  // clobber the native call's return value. The XMM spill size depends on
  // the widest available vector extension (16/32/64 bytes).
  void spill_register(VMReg reg) {
    assert(reg->is_reg(), "must be a register");
    MacroAssembler* masm = _masm;
    if (reg->is_Register()) {
      __ push(reg->as_Register());
    } else if (reg->is_XMMRegister()) {
      if (UseAVX >= 3) {
        __ subptr(rsp, 64); // bytes
        __ evmovdqul(Address(rsp, 0), reg->as_XMMRegister(), Assembler::AVX_512bit);
      } else if (UseAVX >= 1) {
        __ subptr(rsp, 32);
        __ vmovdqu(Address(rsp, 0), reg->as_XMMRegister());
      } else {
        __ subptr(rsp, 16);
        __ movdqu(Address(rsp, 0), reg->as_XMMRegister());
      }
    } else {
      ShouldNotReachHere();
    }
  }

  // Restores 'reg' from the stack; must mirror spill_register exactly
  // (same sizes, reverse effect on rsp).
  void fill_register(VMReg reg) {
    assert(reg->is_reg(), "must be a register");
    MacroAssembler* masm = _masm;
    if (reg->is_Register()) {
      __ pop(reg->as_Register());
    } else if (reg->is_XMMRegister()) {
      if (UseAVX >= 3) {
        __ evmovdqul(reg->as_XMMRegister(), Address(rsp, 0), Assembler::AVX_512bit);
        __ addptr(rsp, 64); // bytes
      } else if (UseAVX >= 1) {
        __ vmovdqu(reg->as_XMMRegister(), Address(rsp, 0));
        __ addptr(rsp, 32);
      } else {
        __ movdqu(reg->as_XMMRegister(), Address(rsp, 0));
        __ addptr(rsp, 16);
      }
    } else {
      ShouldNotReachHere();
    }
  }

private:
#ifdef ASSERT
  // True if the target call passes or returns a value in 'reg'; used by
  // generate() to assert that the stub's internal registers don't overlap
  // the native calling convention.
  bool target_uses_register(VMReg reg) {
    return _input_registers.contains(reg) || _output_registers.contains(reg);
  }
#endif
};
// Allocates a BufferBlob and emits the native-invoker stub for the given
// call target and register assignment into it. Returns NULL if the blob
// could not be allocated.
BufferBlob* SharedRuntime::make_native_invoker(address call_target,
                                               int shadow_space_bytes,
                                               const GrowableArray<VMReg>& input_registers,
                                               const GrowableArray<VMReg>& output_registers) {
  BufferBlob* stub = BufferBlob::create("nep_invoker_blob", native_invoker_code_size);
  if (stub == NULL) {
    return NULL; // allocation failure
  }

  CodeBuffer code(stub);
  NativeInvokerGenerator generator(&code, call_target, shadow_space_bytes, input_registers, output_registers);
  generator.generate();
  code.log_section_sizes("nep_invoker_blob");

  return stub;
}
// Emits the native-invoker stub body. After enter(), the return address is
// at [rsp + 8] and rsp points at the saved RBP; the stub fills in the
// JavaFrameAnchor fields from those values before transitioning to native.
void NativeInvokerGenerator::generate() {
  // The stub itself uses r15_thread and rscratch1, so the native calling
  // convention must not assign either of them.
  assert(!(target_uses_register(r15_thread->as_VMReg()) || target_uses_register(rscratch1->as_VMReg())), "Register conflict");

  MacroAssembler* masm = _masm;
  __ enter();

  // Record the last Java frame so stack walkers can traverse past the
  // native call.
  Address java_pc(r15_thread, JavaThread::last_Java_pc_offset());
  __ movptr(rscratch1, Address(rsp, 8)); // read return address from stack
  __ movptr(java_pc, rscratch1);

  __ movptr(rscratch1, rsp);
  __ addptr(rscratch1, 16); // skip return and frame
  __ movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), rscratch1);
  __ movptr(Address(r15_thread, JavaThread::saved_rbp_address_offset()), rsp); // rsp points at saved RBP

  // State transition
  __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);

  if (_shadow_space_bytes != 0) {
    // needed here for correct stack args offset on Windows
    __ subptr(rsp, _shadow_space_bytes);
  }

  __ call(RuntimeAddress(_call_target));

  if (_shadow_space_bytes != 0) {
    // needed here for correct stack args offset on Windows
    __ addptr(rsp, _shadow_space_bytes);
  }

  // At most one valid return register; a second, invalid entry may be
  // present (the assert below tolerates it as a pair placeholder).
  assert(_output_registers.length() <= 1
    || (_output_registers.length() == 2 && !_output_registers.at(1)->is_valid()), "no multi-reg returns");
  bool need_spills = _output_registers.length() != 0;
  VMReg ret_reg = need_spills ? _output_registers.at(0) : VMRegImpl::Bad();

  __ restore_cpu_control_state_after_jni();

  // Back-transition: native -> native_trans, then poll for a pending
  // safepoint or suspend request before re-entering Java.
  __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans);

  // Force this write out before the read below
  __ membar(Assembler::Membar_mask_bits(
              Assembler::LoadLoad | Assembler::LoadStore |
              Assembler::StoreLoad | Assembler::StoreStore));

  Label L_after_safepoint_poll;
  Label L_safepoint_poll_slow_path;

  __ safepoint_poll(L_safepoint_poll_slow_path, r15_thread, true /* at_return */, false /* in_nmethod */);
  __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
  __ jcc(Assembler::notEqual, L_safepoint_poll_slow_path);

  __ bind(L_after_safepoint_poll);

  // change thread state
  __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);

  __ block_comment("reguard stack check");
  Label L_reguard;
  Label L_after_reguard;
  __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_yellow_reserved_disabled);
  __ jcc(Assembler::equal, L_reguard);
  __ bind(L_after_reguard);

  __ reset_last_Java_frame(r15_thread, true);

  __ leave(); // required for proper stackwalking of RuntimeStub frame
  __ ret(0);

  //////////////////////////////////////////////////////////////////////////////

  // Slow path: a safepoint/suspend is pending. The native return register
  // is live here, so it is spilled around the runtime call.
  __ block_comment("{ L_safepoint_poll_slow_path");
  __ bind(L_safepoint_poll_slow_path);
  __ vzeroupper();

  if (need_spills) {
    spill_register(ret_reg);
  }

  __ mov(c_rarg0, r15_thread);
  __ mov(r12, rsp); // remember sp
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
  __ mov(rsp, r12); // restore sp
  __ reinit_heapbase();

  if (need_spills) {
    fill_register(ret_reg);
  }

  __ jmp(L_after_safepoint_poll);
  __ block_comment("} L_safepoint_poll_slow_path");

  //////////////////////////////////////////////////////////////////////////////

  // Slow path: the yellow guard zone was disabled by a stack overflow;
  // re-enable (reguard) it before returning to Java.
  __ block_comment("{ L_reguard");
  __ bind(L_reguard);
  __ vzeroupper();

  if (need_spills) {
    spill_register(ret_reg);
  }

  __ mov(r12, rsp); // remember sp
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
  __ mov(rsp, r12); // restore sp
  __ reinit_heapbase();

  if (need_spills) {
    fill_register(ret_reg);
  }

  __ jmp(L_after_reguard);
  __ block_comment("} L_reguard");

  //////////////////////////////////////////////////////////////////////////////

  __ flush();
}
//------------------------------Montgomery multiplication------------------------
//

View File

@ -0,0 +1,147 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "code/codeBlob.hpp"
#include "memory/resourceArea.hpp"
#include "prims/universalNativeInvoker.hpp"
#define __ _masm->
// Emits the buffer-based invoker: all arguments are read from a context
// buffer (described by _layout) whose address arrives in c_rarg0, the
// target pc stored in the buffer is called, and every possible return
// location is written back into the buffer afterwards.
void ProgrammableInvoker::Generator::generate() {
  __ enter();

  // Put the context pointer in ebx/rbx - it's going to be heavily used below both before and after the call
  Register ctxt_reg = rbx;
  Register used_regs[] = { ctxt_reg, rcx, rsi, rdi };
  GrowableArray<Register> preserved_regs;

  // Only the scratch registers that are callee-saved under the target ABI
  // need to be preserved across the stub.
  for (size_t i = 0; i < sizeof(used_regs)/sizeof(Register); i++) {
    Register used_reg = used_regs[i];
    if (!_abi->is_volatile_reg(used_reg)) {
      preserved_regs.push(used_reg);
    }
  }

  __ block_comment("init_and_alloc_stack");

  for (int i = 0; i < preserved_regs.length(); i++) {
    __ push(preserved_regs.at(i));
  }

  __ movptr(ctxt_reg, c_rarg0); // FIXME c args? or java?

  // Reserve (aligned) stack space for the call's stack-passed arguments,
  // sized from the buffer's stack_args_bytes field.
  __ block_comment("allocate_stack");
  __ movptr(rcx, Address(ctxt_reg, (int) _layout->stack_args_bytes));
  __ subptr(rsp, rcx);
  __ andptr(rsp, -_abi->_stack_alignment_bytes);

  // Note: rcx is used below!

  // Copy stack arguments from the buffer to the reserved area via rep_mov
  // (rcx = word count, rsi = source, rdi = destination).
  __ block_comment("load_arguments");

  __ shrptr(rcx, LogBytesPerWord); // bytes -> words
  __ movptr(rsi, Address(ctxt_reg, (int) _layout->stack_args));
  __ movptr(rdi, rsp);
  __ rep_mov();

  for (int i = 0; i < _abi->_vector_argument_registers.length(); i++) {
    // [1] -> 64 bit -> xmm
    // [2] -> 128 bit -> xmm
    // [4] -> 256 bit -> ymm
    // [8] -> 512 bit -> zmm

    XMMRegister reg = _abi->_vector_argument_registers.at(i);
    size_t offs = _layout->arguments_vector + i * xmm_reg_size;
    __ movdqu(reg, Address(ctxt_reg, (int)offs));
  }

  // Integer argument registers are loaded last: rcx/rsi/rdi were scratch
  // for the stack copy above and may themselves be argument registers.
  for (int i = 0; i < _abi->_integer_argument_registers.length(); i++) {
    size_t offs = _layout->arguments_integer + i * sizeof(uintptr_t);
    __ movptr(_abi->_integer_argument_registers.at(i), Address(ctxt_reg, (int)offs));
  }

  if (_abi->_shadow_space_bytes != 0) {
    __ block_comment("allocate shadow space for argument register spill");
    __ subptr(rsp, _abi->_shadow_space_bytes);
  }

  // call target function
  __ block_comment("call target function");
  __ call(Address(ctxt_reg, (int) _layout->arguments_next_pc));

  if (_abi->_shadow_space_bytes != 0) {
    __ block_comment("pop shadow space");
    __ addptr(rsp, _abi->_shadow_space_bytes);
  }

  // Store every possible return location into the buffer; the caller
  // knows which ones are meaningful for the actual return type.
  __ block_comment("store_registers");
  for (int i = 0; i < _abi->_integer_return_registers.length(); i++) {
    ssize_t offs = _layout->returns_integer + i * sizeof(uintptr_t);
    __ movptr(Address(ctxt_reg, offs), _abi->_integer_return_registers.at(i));
  }

  for (int i = 0; i < _abi->_vector_return_registers.length(); i++) {
    // [1] -> 64 bit -> xmm
    // [2] -> 128 bit -> xmm (SSE)
    // [4] -> 256 bit -> ymm (AVX)
    // [8] -> 512 bit -> zmm (AVX-512, aka AVX3)

    XMMRegister reg = _abi->_vector_return_registers.at(i);
    size_t offs = _layout->returns_vector + i * xmm_reg_size;
    __ movdqu(Address(ctxt_reg, (int)offs), reg);
  }

  // x87 returns: fstp_x pops ST(0), so values are written in stack order.
  for (size_t i = 0; i < _abi->_X87_return_registers_noof; i++) {
    size_t offs = _layout->returns_x87 + i * (sizeof(long double));
    __ fstp_x(Address(ctxt_reg, (int)offs)); //pop ST(0)
  }

  // Restore backed up preserved register
  // (pushed right after enter(), so they sit just below the saved rbp).
  for (int i = 0; i < preserved_regs.length(); i++) {
    __ movptr(preserved_regs.at(i), Address(rbp, -(int)(sizeof(uintptr_t) * (i + 1))));
  }

  __ leave();
  __ ret(0);

  __ flush();
}
// Builds the buffer-based invoker blob for the given ABI descriptor and
// buffer layout, returning the entry address of the generated code, or
// NULL if the code blob could not be allocated.
//
// Fix: BufferBlob::create can return NULL (SharedRuntime::make_native_invoker
// checks for this); the result was previously dereferenced unconditionally.
// Also renamed the local from the member-style '_invoke_native_blob'.
address ProgrammableInvoker::generate_adapter(jobject jabi, jobject jlayout) {
  ResourceMark rm;
  const ABIDescriptor abi = ForeignGlobals::parse_abi_descriptor(jabi);
  const BufferLayout layout = ForeignGlobals::parse_buffer_layout(jlayout);

  BufferBlob* invoke_native_blob = BufferBlob::create("invoke_native_blob", native_invoker_size);
  if (invoke_native_blob == NULL) {
    return NULL; // allocation failure
  }

  CodeBuffer code2(invoke_native_blob);
  ProgrammableInvoker::Generator g2(&code2, &abi, &layout);
  g2.generate();
  code2.log_section_sizes("InvokeNativeBlob");

  return invoke_native_blob->code_begin();
}

View File

@ -0,0 +1,142 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "prims/universalUpcallHandler.hpp"
#define __ _masm->
// 1. Create buffer according to layout
// 2. Load registers & stack args into buffer
// 3. Call upcall helper with upcall handler instance & buffer pointer (C++ ABI)
// 4. Load return value from buffer into foreign ABI registers
// 5. Return
//
// 'rec' is the JNI handle for the Java upcall handler instance; its value
// is baked into the stub's constant area.
address ProgrammableUpcallHandler::generate_upcall_stub(jobject rec, jobject jabi, jobject jlayout) {
  ResourceMark rm;
  const ABIDescriptor abi = ForeignGlobals::parse_abi_descriptor(jabi);
  const BufferLayout layout = ForeignGlobals::parse_buffer_layout(jlayout);

  CodeBuffer buffer("upcall_stub", 1024, upcall_stub_size);

  MacroAssembler* _masm = new MacroAssembler(&buffer);
  int stack_alignment_C = 16; // bytes
  int register_size = sizeof(uintptr_t);
  int buffer_alignment = xmm_reg_size;

  // stub code
  __ enter();

  // save pointer to JNI receiver handle into constant segment
  Address rec_adr = __ as_Address(InternalAddress(__ address_constant((address)rec)));

  // Stack layout below rbp: [argument buffer][preserved registers], with
  // the buffer at 'buffer_offset' from the final rsp.
  __ subptr(rsp, (int) align_up(layout.buffer_size, buffer_alignment));

  Register used[] = { c_rarg0, c_rarg1, rax, rbx, rdi, rsi, r12, r13, r14, r15 };
  GrowableArray<Register> preserved;
  // TODO need to preserve anything killed by the upcall that is non-volatile, needs XMM regs as well, probably
  for (size_t i = 0; i < sizeof(used)/sizeof(Register); i++) {
    Register reg = used[i];
    if (!abi.is_volatile_reg(reg)) {
      preserved.push(reg);
    }
  }

  int preserved_size = align_up(preserved.length() * register_size, stack_alignment_C); // includes register alignment
  int buffer_offset = preserved_size; // offset from rsp

  __ subptr(rsp, preserved_size);
  for (int i = 0; i < preserved.length(); i++) {
    __ movptr(Address(rsp, i * register_size), preserved.at(i));
  }

  // Spill the incoming foreign-ABI argument registers into the buffer.
  for (int i = 0; i < abi._integer_argument_registers.length(); i++) {
    size_t offs = buffer_offset + layout.arguments_integer + i * sizeof(uintptr_t);
    __ movptr(Address(rsp, (int)offs), abi._integer_argument_registers.at(i));
  }

  for (int i = 0; i < abi._vector_argument_registers.length(); i++) {
    XMMRegister reg = abi._vector_argument_registers.at(i);
    size_t offs = buffer_offset + layout.arguments_vector + i * xmm_reg_size;
    __ movdqu(Address(rsp, (int)offs), reg);
  }

  // Capture prev stack pointer (stack arguments base)
#ifndef _WIN64
  __ lea(rax, Address(rbp, 16)); // skip frame+return address
#else
  __ lea(rax, Address(rbp, 16 + 32)); // also skip shadow space
#endif
  __ movptr(Address(rsp, buffer_offset + (int) layout.stack_args), rax);
#ifndef PRODUCT
  __ movptr(Address(rsp, buffer_offset + (int) layout.stack_args_bytes), -1); // unknown
#endif

  // Call upcall helper
  __ movptr(c_rarg0, rec_adr);
  __ lea(c_rarg1, Address(rsp, buffer_offset));
#ifdef _WIN64
  __ block_comment("allocate shadow space for argument register spill");
  __ subptr(rsp, 32);
#endif
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, ProgrammableUpcallHandler::attach_thread_and_do_upcall)));
#ifdef _WIN64
  __ block_comment("pop shadow space");
  __ addptr(rsp, 32);
#endif

  // Reload the return values the helper wrote into the buffer back into
  // the foreign-ABI return registers.
  for (int i = 0; i < abi._integer_return_registers.length(); i++) {
    size_t offs = buffer_offset + layout.returns_integer + i * sizeof(uintptr_t);
    __ movptr(abi._integer_return_registers.at(i), Address(rsp, (int)offs));
  }

  for (int i = 0; i < abi._vector_return_registers.length(); i++) {
    XMMRegister reg = abi._vector_return_registers.at(i);
    size_t offs = buffer_offset + layout.returns_vector + i * xmm_reg_size;
    __ movdqu(reg, Address(rsp, (int)offs));
  }

  // Reload x87 returns in reverse so the first value ends up in ST(0).
  for (size_t i = abi._X87_return_registers_noof; i > 0 ; i--) {
    ssize_t offs = buffer_offset + layout.returns_x87 + (i - 1) * (sizeof(long double));
    __ fld_x (Address(rsp, (int)offs));
  }

  // Restore preserved registers
  for (int i = 0; i < preserved.length(); i++) {
    __ movptr(preserved.at(i), Address(rsp, i * register_size));
  }

  __ leave();
  __ ret(0);

  _masm->flush();

  // NOTE(review): BufferBlob::create result is not NULL-checked here,
  // unlike SharedRuntime::make_native_invoker — confirm allocation failure
  // cannot occur on this path or add a check.
  BufferBlob* blob = BufferBlob::create("upcall_stub", &buffer);

  return blob->code_begin();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2006, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "code/vmreg.hpp"
#include "vmreg_x86.inline.hpp"
void VMRegImpl::set_regName() {
@ -66,3 +66,17 @@ void VMRegImpl::set_regName() {
regName[i] = "NON-GPR-FPR-XMM-KREG";
}
}
// Storage-class tags used when decoding a VMStorage (type, index) pair;
// presumably these mirror constants used by the Java-side binding code —
// verify when changing.
#define INTEGER_TYPE 0
#define VECTOR_TYPE 1
#define X87_TYPE 2
#define STACK_TYPE 3

// Decodes a (type, index) pair into a VMReg. X87_TYPE has no case and
// falls through to Bad() — presumably x87 stack registers are not
// representable as VMRegs; confirm against callers.
VMReg VMRegImpl::vmStorageToVMReg(int type, int index) {
  switch(type) {
  case INTEGER_TYPE: return ::as_Register(index)->as_VMReg();
  case VECTOR_TYPE: return ::as_XMMRegister(index)->as_VMReg();
  case STACK_TYPE: return VMRegImpl::stack2reg(index LP64_ONLY(* 2)); // numbering on x64 goes per 64-bits
  }
  return VMRegImpl::Bad();
}

View File

@ -457,6 +457,11 @@ int MachCallRuntimeNode::ret_addr_offset() {
return offset;
}
// Distance from the start of the emitted call sequence to the return
// address: the 13-byte "movq r10,#addr; callq (r10)" pattern plus
// whatever clear_avx emits in front of it.
int MachCallNativeNode::ret_addr_offset() {
  const int call_sequence_bytes = 13; // movq r10,#addr; callq (r10)
  return call_sequence_bytes + clear_avx_size();
}
//
// Compute padding required for nodes which need alignment
//
@ -12400,6 +12405,18 @@ instruct CallLeafDirect(method meth)
ins_pipe(pipe_slow);
%}
//
// Direct call to a native entry point (CallNative ideal node); the target
// address comes from the method operand via the Java_To_Runtime encoding.
instruct CallNativeDirect(method meth)
%{
  match(CallNative);
  effect(USE meth);

  ins_cost(300);
  format %{ "call_native " %}
  // clear_avx may emit vzeroupper before the call; see
  // MachCallNativeNode::ret_addr_offset, which accounts for both parts.
  ins_encode(clear_avx, Java_To_Runtime(meth));
  ins_pipe(pipe_slow);
%}
// Call runtime without safepoint
instruct CallLeafNoFPDirect(method meth)
%{

View File

@ -37,6 +37,10 @@
return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_fp_offset();
}
  // Byte offset (from the JavaThread base) of the frame anchor's
  // saved-RBP-address slot, written by generated native-invoker stubs.
  static ByteSize saved_rbp_address_offset() {
    return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::saved_rbp_address_offset();
  }
bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
bool isInJava);

View File

@ -37,6 +37,10 @@
return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_fp_offset();
}
  // Byte offset (from the JavaThread base) of the frame anchor's
  // saved-RBP-address slot, written by generated native-invoker stubs.
  static ByteSize saved_rbp_address_offset() {
    return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::saved_rbp_address_offset();
  }
bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
bool isInJava);

View File

@ -44,6 +44,10 @@
return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_fp_offset();
}
  // Byte offset (from the JavaThread base) of the frame anchor's
  // saved-RBP-address slot, written by generated native-invoker stubs.
  static ByteSize saved_rbp_address_offset() {
    return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::saved_rbp_address_offset();
  }
bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
bool isInJava);

View File

@ -419,6 +419,8 @@ Form::CallType InstructForm::is_ideal_call() const {
idx = 0;
if(_matrule->find_type("CallLeafNoFP",idx)) return Form::JAVA_LEAF;
idx = 0;
if(_matrule->find_type("CallNative",idx)) return Form::JAVA_NATIVE;
idx = 0;
return Form::invalid_type;
}
@ -1131,6 +1133,9 @@ const char *InstructForm::mach_base_class(FormDict &globals) const {
else if( is_ideal_call() == Form::JAVA_LEAF ) {
return "MachCallLeafNode";
}
else if( is_ideal_call() == Form::JAVA_NATIVE ) {
return "MachCallNativeNode";
}
else if (is_ideal_return()) {
return "MachReturnNode";
}

View File

@ -4129,6 +4129,9 @@ bool GraphBuilder::try_method_handle_inline(ciMethod* callee, bool ignore_return
}
break;
case vmIntrinsics::_linkToNative:
break; // TODO: NYI
default:
fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
break;

View File

@ -244,10 +244,11 @@ class IRScopeDebugInfo: public CompilationResourceObj {
bool reexecute = topmost ? should_reexecute() : false;
bool return_oop = false; // This flag will be ignored since it used only for C2 with escape analysis.
bool rethrow_exception = false;
bool is_opt_native = false;
bool has_ea_local_in_scope = false;
bool arg_escape = false;
recorder->describe_scope(pc_offset, methodHandle(), scope()->method(), bci(),
reexecute, rethrow_exception, is_method_handle_invoke, return_oop,
reexecute, rethrow_exception, is_method_handle_invoke, is_opt_native, return_oop,
has_ea_local_in_scope, arg_escape, locvals, expvals, monvals);
}
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -48,6 +48,7 @@ class ciNullObject;
class ciInstance;
class ciCallSite;
class ciMemberName;
class ciNativeEntryPoint;
class ciMethodHandle;
class ciMethodType;
class ciArray;
@ -97,6 +98,7 @@ friend class ciObject; \
friend class ciNullObject; \
friend class ciInstance; \
friend class ciMemberName; \
friend class ciNativeEntryPoint; \
friend class ciMethod; \
friend class ciMethodData; \
friend class ciMethodHandle; \

View File

@ -952,7 +952,8 @@ void ciEnv::register_method(ciMethod* target,
AbstractCompiler* compiler,
bool has_unsafe_access,
bool has_wide_vectors,
RTMState rtm_state) {
RTMState rtm_state,
const GrowableArrayView<BufferBlob*>& native_invokers) {
VM_ENTRY_MARK;
nmethod* nm = NULL;
{
@ -1041,7 +1042,8 @@ void ciEnv::register_method(ciMethod* target,
debug_info(), dependencies(), code_buffer,
frame_words, oop_map_set,
handler_table, inc_table,
compiler, task()->comp_level());
compiler, task()->comp_level(),
native_invokers);
// Free codeBlobs
code_buffer->free_blob();

View File

@ -378,7 +378,8 @@ public:
AbstractCompiler* compiler,
bool has_unsafe_access,
bool has_wide_vectors,
RTMState rtm_state = NoRTM);
RTMState rtm_state = NoRTM,
const GrowableArrayView<BufferBlob*>& native_invokers = GrowableArrayView<BufferBlob*>::EMPTY);
// Access to certain well known ciObjects.

View File

@ -0,0 +1,93 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "ci/ciClassList.hpp"
#include "ci/ciNativeEntryPoint.hpp"
#include "ci/ciUtilities.inline.hpp"
#include "ci/ciArray.hpp"
#include "classfile/javaClasses.hpp"
#include "oops/oop.inline.hpp"
#include "memory/allocation.hpp"
// Decodes a Java long[] (wrapped in a ciArray) of encoded VMReg values
// into a VMReg array allocated in the current compilation's arena.
VMReg* getVMRegArray(ciArray* array) {
  assert(array->element_basic_type() == T_LONG, "Unexpected type");

  const int count = array->length();
  VMReg* regs = NEW_ARENA_ARRAY(CURRENT_ENV->arena(), VMReg, count);
  for (int idx = 0; idx < count; idx++) {
    ciConstant encoded = array->element_value(idx);
    regs[idx] = VMRegImpl::as_VMReg(encoded.as_long());
  }
  return regs;
}
// Constructs the ci mirror of a NativeEntryPoint instance, eagerly copying
// the name string and the argument/return VMReg move arrays out of the
// Java object into the compilation arena so they can be read later without
// touching oops.
ciNativeEntryPoint::ciNativeEntryPoint(instanceHandle h_i) : ciInstance(h_i), _name(NULL) {
  // Copy name
  oop name_str = jdk_internal_invoke_NativeEntryPoint::name(get_oop());
  if (name_str != NULL) {
    char* temp_name = java_lang_String::as_quoted_ascii(name_str);
    size_t len = strlen(temp_name) + 1;
    char* name = (char*)CURRENT_ENV->arena()->Amalloc(len);
    strncpy(name, temp_name, len);  // len includes the NUL terminator
    _name = name;
  }
  _arg_moves = getVMRegArray(CURRENT_ENV->get_object(jdk_internal_invoke_NativeEntryPoint::argMoves(get_oop()))->as_array());
  _ret_moves = getVMRegArray(CURRENT_ENV->get_object(jdk_internal_invoke_NativeEntryPoint::returnMoves(get_oop()))->as_array());
}
// Native function address from the NEP's 'addr' field.
address ciNativeEntryPoint::entry_point() const {
  VM_ENTRY_MARK;
  return jdk_internal_invoke_NativeEntryPoint::addr(get_oop());
}

// Value of the NEP's 'shadowSpace' field (bytes).
jint ciNativeEntryPoint::shadow_space() const {
  VM_ENTRY_MARK;
  return jdk_internal_invoke_NativeEntryPoint::shadow_space(get_oop());
}

// Argument VMReg moves; arena-copied by the constructor, so no VM
// transition is needed here.
VMReg* ciNativeEntryPoint::argMoves() const {
  return _arg_moves;
}

// Return-value VMReg moves; also arena-copied at construction time.
VMReg* ciNativeEntryPoint::returnMoves() const {
  return _ret_moves;
}

// Value of the NEP's 'needTransition' field.
jboolean ciNativeEntryPoint::need_transition() const {
  VM_ENTRY_MARK;
  return jdk_internal_invoke_NativeEntryPoint::need_transition(get_oop());
}

// ci mirror of the NEP's 'methodType' field.
ciMethodType* ciNativeEntryPoint::method_type() const {
  VM_ENTRY_MARK;
  return CURRENT_ENV->get_object(jdk_internal_invoke_NativeEntryPoint::method_type(get_oop()))->as_method_type();
}

// Name copied out of the NEP by the constructor; may be NULL.
const char* ciNativeEntryPoint::name() {
  return _name;
}

View File

@ -0,0 +1,56 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_CI_CINATIVEENTRYPOINT_HPP
#define SHARE_VM_CI_CINATIVEENTRYPOINT_HPP
#include "ci/ciInstance.hpp"
#include "ci/ciMethodType.hpp"
#include "code/vmreg.hpp"
// ciNativeEntryPoint
//
// The class represents a java.lang.invoke.NativeEntryPoint object.
class ciNativeEntryPoint : public ciInstance {
private:
  const char* _name;    // name copied into the ci arena; may be NULL
  VMReg* _arg_moves;    // VMReg array decoded from the NEP's 'argMoves' field
  VMReg* _ret_moves;    // VMReg array decoded from the NEP's 'returnMoves' field
public:
  ciNativeEntryPoint(instanceHandle h_i);

  // What kind of ciObject is this?
  bool is_native_entry_point() const { return true; }

  // Accessors mirroring the NativeEntryPoint object's fields.
  address entry_point() const;
  jint shadow_space() const;
  VMReg* argMoves() const;
  VMReg* returnMoves() const;
  jboolean need_transition() const;
  ciMethodType* method_type() const;
  const char* name();
};
#endif // SHARE_VM_CI_CINATIVEENTRYPOINT_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -36,7 +36,7 @@
ciObject* ciObjArray::obj_at(int index) {
VM_ENTRY_MARK;
objArrayOop array = get_objArrayOop();
if (index < 0 || index >= array->length()) return NULL;
assert(index >= 0 && index < array->length(), "OOB access");
oop o = array->obj_at(index);
if (o == NULL) {
return ciNullObject::make();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -107,6 +107,7 @@ public:
virtual bool is_array() { return false; }
virtual bool is_obj_array() { return false; }
virtual bool is_type_array() { return false; }
virtual bool is_native_entry_point()const { return false; }
// Is this a type or value which has no associated class?
// It is true of primitive types and null objects.
@ -160,6 +161,10 @@ public:
assert(is_type_array(), "bad cast");
return (ciTypeArray*)this;
}
ciNativeEntryPoint* as_native_entry_point() {
assert(is_native_entry_point(), "bad cast");
return (ciNativeEntryPoint*)this;
}
// Print debugging output about this ciObject.
void print(outputStream* st);

View File

@ -27,6 +27,7 @@
#include "ci/ciInstance.hpp"
#include "ci/ciInstanceKlass.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciNativeEntryPoint.hpp"
#include "ci/ciMethod.hpp"
#include "ci/ciMethodData.hpp"
#include "ci/ciMethodHandle.hpp"
@ -342,6 +343,8 @@ ciObject* ciObjectFactory::create_new_object(oop o) {
return new (arena()) ciCallSite(h_i);
else if (java_lang_invoke_MemberName::is_instance(o))
return new (arena()) ciMemberName(h_i);
else if (jdk_internal_invoke_NativeEntryPoint::is_instance(o))
return new (arena()) ciNativeEntryPoint(h_i);
else if (java_lang_invoke_MethodHandle::is_instance(o))
return new (arena()) ciMethodHandle(h_i);
else if (java_lang_invoke_MethodType::is_instance(o))

View File

@ -3815,6 +3815,65 @@ bool java_lang_invoke_LambdaForm::is_instance(oop obj) {
return obj != NULL && is_subclass(obj->klass());
}
int jdk_internal_invoke_NativeEntryPoint::_addr_offset;
int jdk_internal_invoke_NativeEntryPoint::_shadow_space_offset;
int jdk_internal_invoke_NativeEntryPoint::_argMoves_offset;
int jdk_internal_invoke_NativeEntryPoint::_returnMoves_offset;
int jdk_internal_invoke_NativeEntryPoint::_need_transition_offset;
int jdk_internal_invoke_NativeEntryPoint::_method_type_offset;
int jdk_internal_invoke_NativeEntryPoint::_name_offset;
#define NEP_FIELDS_DO(macro) \
macro(_addr_offset, k, "addr", long_signature, false); \
macro(_shadow_space_offset, k, "shadowSpace", int_signature, false); \
macro(_argMoves_offset, k, "argMoves", long_array_signature, false); \
macro(_returnMoves_offset, k, "returnMoves", long_array_signature, false); \
macro(_need_transition_offset, k, "needTransition", bool_signature, false); \
macro(_method_type_offset, k, "methodType", java_lang_invoke_MethodType_signature, false); \
macro(_name_offset, k, "name", string_signature, false);
// True if obj is a non-NULL instance of (a subclass of) jdk.internal.invoke.NativeEntryPoint.
bool jdk_internal_invoke_NativeEntryPoint::is_instance(oop obj) {
  return obj != NULL && is_subclass(obj->klass());
}
// Resolve the field offsets listed in NEP_FIELDS_DO against the loaded
// NativeEntryPoint klass (called during VM/class initialization).
void jdk_internal_invoke_NativeEntryPoint::compute_offsets() {
  InstanceKlass* k = SystemDictionary::NativeEntryPoint_klass();
  NEP_FIELDS_DO(FIELD_COMPUTE_OFFSET);
}
#if INCLUDE_CDS
// Read/write the computed field offsets to the CDS archive stream.
void jdk_internal_invoke_NativeEntryPoint::serialize_offsets(SerializeClosure* f) {
  NEP_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
}
#endif
// Target address of the native function, stored in the Java "addr" long field.
address jdk_internal_invoke_NativeEntryPoint::addr(oop entry) {
  return (address)entry->long_field(_addr_offset);
}
// Value of the Java "shadowSpace" int field (stack bytes reserved for the
// callee -- ABI-specific; see uses in make_native_call).
jint jdk_internal_invoke_NativeEntryPoint::shadow_space(oop entry) {
  return entry->int_field(_shadow_space_offset);
}
// The Java "argMoves" field: a long[] (per NEP_FIELDS_DO) describing argument registers.
oop jdk_internal_invoke_NativeEntryPoint::argMoves(oop entry) {
  return entry->obj_field(_argMoves_offset);
}
// The Java "returnMoves" field: a long[] (per NEP_FIELDS_DO) describing return registers.
oop jdk_internal_invoke_NativeEntryPoint::returnMoves(oop entry) {
  return entry->obj_field(_returnMoves_offset);
}
// Value of the Java "needTransition" boolean field: whether the call
// requires a thread-state transition (see CallNativeNode::_need_transition).
jboolean jdk_internal_invoke_NativeEntryPoint::need_transition(oop entry) {
  return entry->bool_field(_need_transition_offset);
}
// The Java "methodType" field: a java.lang.invoke.MethodType oop (unerased signature).
oop jdk_internal_invoke_NativeEntryPoint::method_type(oop entry) {
  return entry->obj_field(_method_type_offset);
}
// The Java "name" field: a String oop naming the entry point.
oop jdk_internal_invoke_NativeEntryPoint::name(oop entry) {
  return entry->obj_field(_name_offset);
}
oop java_lang_invoke_MethodHandle::type(oop mh) {
return mh->obj_field(_type_offset);

View File

@ -75,6 +75,7 @@ class RecordComponent;
f(java_lang_StackFrameInfo) \
f(java_lang_LiveStackFrameInfo) \
f(java_util_concurrent_locks_AbstractOwnableSynchronizer) \
f(jdk_internal_invoke_NativeEntryPoint) \
f(jdk_internal_misc_UnsafeConstants) \
f(java_lang_boxing_object) \
f(vector_VectorPayload) \
@ -1008,6 +1009,51 @@ class java_lang_invoke_LambdaForm: AllStatic {
static int vmentry_offset() { CHECK_INIT(_vmentry_offset); }
};
// Interface to java.lang.invoke.NativeEntryPoint objects
// (These are a private interface for managing adapter code generation.)
class jdk_internal_invoke_NativeEntryPoint: AllStatic {
  friend class JavaClasses;

 private:
  // Offsets of the Java-side fields (types per NEP_FIELDS_DO in javaClasses.cpp).
  static int _addr_offset;            // type is jlong
  static int _shadow_space_offset;    // int "shadowSpace"
  static int _argMoves_offset;        // long[] "argMoves"
  static int _returnMoves_offset;     // long[] "returnMoves"
  static int _need_transition_offset; // boolean "needTransition"
  static int _method_type_offset;     // MethodType "methodType"
  static int _name_offset;            // String "name"

  static void compute_offsets();

 public:
  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;

  // Accessors
  static address addr(oop entry);
  static jint shadow_space(oop entry);
  static oop argMoves(oop entry);
  static oop returnMoves(oop entry);
  static jboolean need_transition(oop entry);
  static oop method_type(oop entry);
  static oop name(oop entry);

  // Testers
  static bool is_subclass(Klass* klass) {
    return SystemDictionary::NativeEntryPoint_klass() != NULL &&
      klass->is_subclass_of(SystemDictionary::NativeEntryPoint_klass());
  }
  static bool is_instance(oop obj);

  // Accessors for code generation:
  static int addr_offset_in_bytes()            { return _addr_offset;            }
  static int shadow_space_offset_in_bytes()    { return _shadow_space_offset;    }
  static int argMoves_offset_in_bytes()        { return _argMoves_offset;        }
  static int returnMoves_offset_in_bytes()     { return _returnMoves_offset;     }
  static int need_transition_offset_in_bytes() { return _need_transition_offset; }
  static int method_type_offset_in_bytes()     { return _method_type_offset;     }
  static int name_offset_in_bytes()            { return _name_offset;            }
};
// Interface to java.lang.invoke.MemberName objects
// (These are a private interface for Java code to query the class hierarchy.)

View File

@ -170,6 +170,7 @@ class TableStatistics;
do_klass(MethodType_klass, java_lang_invoke_MethodType ) \
do_klass(BootstrapMethodError_klass, java_lang_BootstrapMethodError ) \
do_klass(CallSite_klass, java_lang_invoke_CallSite ) \
do_klass(NativeEntryPoint_klass, jdk_internal_invoke_NativeEntryPoint ) \
do_klass(Context_klass, java_lang_invoke_MethodHandleNatives_CallSiteContext ) \
do_klass(ConstantCallSite_klass, java_lang_invoke_ConstantCallSite ) \
do_klass(MutableCallSite_klass, java_lang_invoke_MutableCallSite ) \

View File

@ -964,6 +964,7 @@ class methodHandle;
do_intrinsic(_linkToStatic, java_lang_invoke_MethodHandle, linkToStatic_name, star_name, F_SN) \
do_intrinsic(_linkToSpecial, java_lang_invoke_MethodHandle, linkToSpecial_name, star_name, F_SN) \
do_intrinsic(_linkToInterface, java_lang_invoke_MethodHandle, linkToInterface_name, star_name, F_SN) \
do_intrinsic(_linkToNative, java_lang_invoke_MethodHandle, linkToNative_name, star_name, F_SN) \
/* special marker for bytecode generated for the JVM from a LambdaForm: */ \
do_intrinsic(_compiledLambdaForm, java_lang_invoke_MethodHandle, compiledLambdaForm_name, star_name, F_RN) \
\
@ -1039,7 +1040,7 @@ class vmIntrinsics : AllStatic {
LAST_COMPILER_INLINE = _VectorScatterOp,
FIRST_MH_SIG_POLY = _invokeGeneric,
FIRST_MH_STATIC = _linkToVirtual,
LAST_MH_SIG_POLY = _linkToInterface,
LAST_MH_SIG_POLY = _linkToNative,
FIRST_ID = _none + 1
};

View File

@ -273,6 +273,7 @@
template(linkToStatic_name, "linkToStatic") \
template(linkToSpecial_name, "linkToSpecial") \
template(linkToInterface_name, "linkToInterface") \
template(linkToNative_name, "linkToNative") \
template(compiledLambdaForm_name, "<compiledLambdaForm>") /*fake name*/ \
template(star_name, "*") /*not really a name*/ \
template(invoke_name, "invoke") \
@ -347,8 +348,11 @@
template(DEFAULT_CONTEXT_name, "DEFAULT_CONTEXT") \
NOT_LP64( do_alias(intptr_signature, int_signature) ) \
LP64_ONLY( do_alias(intptr_signature, long_signature) ) \
\
/* Support for JVMCI */ \
/* Panama Support */ \
template(jdk_internal_invoke_NativeEntryPoint, "jdk/internal/invoke/NativeEntryPoint") \
template(jdk_internal_invoke_NativeEntryPoint_signature, "Ljdk/internal/invoke/NativeEntryPoint;") \
\
/* Support for JVMCI */ \
JVMCI_VM_SYMBOLS_DO(template, do_alias) \
\
template(java_lang_StackWalker, "java/lang/StackWalker") \
@ -520,6 +524,7 @@
template(byte_array_signature, "[B") \
template(char_array_signature, "[C") \
template(int_array_signature, "[I") \
template(long_array_signature, "[J") \
template(object_void_signature, "(Ljava/lang/Object;)V") \
template(object_int_signature, "(Ljava/lang/Object;)I") \
template(long_object_long_signature, "(JLjava/lang/Object;)J") \

View File

@ -559,10 +559,12 @@ void CodeCache::free(CodeBlob* cb) {
CodeHeap* heap = get_code_heap(cb);
print_trace("free", cb);
if (cb->is_nmethod()) {
nmethod* ptr = (nmethod *)cb;
heap->set_nmethod_count(heap->nmethod_count() - 1);
if (((nmethod *)cb)->has_dependencies()) {
if (ptr->has_dependencies()) {
_number_of_nmethods_with_dependencies--;
}
ptr->free_native_invokers();
}
if (cb->is_adapter_blob()) {
heap->set_adapter_count(heap->adapter_count() - 1);

View File

@ -359,6 +359,7 @@ void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *
if (method() != NULL && !method()->is_native()) {
address pc = fr.pc();
SimpleScopeDesc ssd(this, pc);
if (ssd.is_optimized_linkToNative()) return; // call was replaced
Bytecode_invoke call(methodHandle(Thread::current(), ssd.method()), ssd.bci());
bool has_receiver = call.has_receiver();
bool has_appendix = call.has_appendix();

View File

@ -287,6 +287,7 @@ void DebugInformationRecorder::describe_scope(int pc_offset,
bool reexecute,
bool rethrow_exception,
bool is_method_handle_invoke,
bool is_optimized_linkToNative,
bool return_oop,
bool has_ea_local_in_scope,
bool arg_escape,
@ -305,6 +306,7 @@ void DebugInformationRecorder::describe_scope(int pc_offset,
last_pd->set_should_reexecute(reexecute);
last_pd->set_rethrow_exception(rethrow_exception);
last_pd->set_is_method_handle_invoke(is_method_handle_invoke);
last_pd->set_is_optimized_linkToNative(is_optimized_linkToNative);
last_pd->set_return_oop(return_oop);
last_pd->set_has_ea_local_in_scope(has_ea_local_in_scope);
last_pd->set_arg_escape(arg_escape);

View File

@ -104,6 +104,7 @@ class DebugInformationRecorder: public ResourceObj {
bool reexecute,
bool rethrow_exception = false,
bool is_method_handle_invoke = false,
bool is_optimized_linkToNative = false,
bool return_oop = false,
bool has_ea_local_in_scope = false,
bool arg_escape = false,

View File

@ -65,8 +65,10 @@
#include "runtime/sweeper.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/resourceHash.hpp"
#include "utilities/xmlstream.hpp"
#if INCLUDE_JVMCI
@ -496,7 +498,8 @@ nmethod* nmethod::new_nmethod(const methodHandle& method,
ExceptionHandlerTable* handler_table,
ImplicitExceptionTable* nul_chk_table,
AbstractCompiler* compiler,
int comp_level
int comp_level,
const GrowableArrayView<BufferBlob*>& native_invokers
#if INCLUDE_JVMCI
, char* speculations,
int speculations_len,
@ -518,6 +521,7 @@ nmethod* nmethod::new_nmethod(const methodHandle& method,
CodeBlob::allocation_size(code_buffer, sizeof(nmethod))
+ adjust_pcs_size(debug_info->pcs_size())
+ align_up((int)dependencies->size_in_bytes(), oopSize)
+ align_up(checked_cast<int>(native_invokers.data_size_in_bytes()), oopSize)
+ align_up(handler_table->size_in_bytes() , oopSize)
+ align_up(nul_chk_table->size_in_bytes() , oopSize)
#if INCLUDE_JVMCI
@ -533,7 +537,8 @@ nmethod* nmethod::new_nmethod(const methodHandle& method,
handler_table,
nul_chk_table,
compiler,
comp_level
comp_level,
native_invokers
#if INCLUDE_JVMCI
, speculations,
speculations_len,
@ -621,7 +626,8 @@ nmethod::nmethod(
scopes_data_offset = _metadata_offset + align_up(code_buffer->total_metadata_size(), wordSize);
_scopes_pcs_offset = scopes_data_offset;
_dependencies_offset = _scopes_pcs_offset;
_handler_table_offset = _dependencies_offset;
_native_invokers_offset = _dependencies_offset;
_handler_table_offset = _native_invokers_offset;
_nul_chk_table_offset = _handler_table_offset;
#if INCLUDE_JVMCI
_speculations_offset = _nul_chk_table_offset;
@ -717,7 +723,8 @@ nmethod::nmethod(
ExceptionHandlerTable* handler_table,
ImplicitExceptionTable* nul_chk_table,
AbstractCompiler* compiler,
int comp_level
int comp_level,
const GrowableArrayView<BufferBlob*>& native_invokers
#if INCLUDE_JVMCI
, char* speculations,
int speculations_len,
@ -794,7 +801,8 @@ nmethod::nmethod(
_scopes_pcs_offset = scopes_data_offset + align_up(debug_info->data_size (), oopSize);
_dependencies_offset = _scopes_pcs_offset + adjust_pcs_size(debug_info->pcs_size());
_handler_table_offset = _dependencies_offset + align_up((int)dependencies->size_in_bytes (), oopSize);
_native_invokers_offset = _dependencies_offset + align_up((int)dependencies->size_in_bytes(), oopSize);
_handler_table_offset = _native_invokers_offset + align_up(checked_cast<int>(native_invokers.data_size_in_bytes()), oopSize);
_nul_chk_table_offset = _handler_table_offset + align_up(handler_table->size_in_bytes(), oopSize);
#if INCLUDE_JVMCI
_speculations_offset = _nul_chk_table_offset + align_up(nul_chk_table->size_in_bytes(), oopSize);
@ -816,6 +824,10 @@ nmethod::nmethod(
code_buffer->copy_values_to(this);
debug_info->copy_to(this);
dependencies->copy_to(this);
if (native_invokers.is_nonempty()) { // can not get address of zero-length array
// Copy native stubs
memcpy(native_invokers_begin(), native_invokers.adr_at(0), native_invokers.data_size_in_bytes());
}
clear_unloading_state();
Universe::heap()->register_nmethod(this);
@ -978,6 +990,10 @@ void nmethod::print_nmethod(bool printmethod) {
print_dependencies();
tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
}
if (printmethod && native_invokers_begin() < native_invokers_end()) {
print_native_invokers();
tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
}
if (printmethod || PrintExceptionHandlers) {
print_handler_table();
tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
@ -1038,6 +1054,12 @@ void nmethod::copy_values(GrowableArray<Metadata*>* array) {
}
}
void nmethod::free_native_invokers() {
for (BufferBlob** it = native_invokers_begin(); it < native_invokers_end(); it++) {
CodeCache::free(*it);
}
}
void nmethod::fix_oop_relocations(address begin, address end, bool initialize_immediates) {
// re-patch all oop-bearing instructions, just in case some oops moved
RelocIterator iter(this, begin, end);
@ -2670,6 +2692,14 @@ void nmethod::print_pcs_on(outputStream* st) {
}
}
void nmethod::print_native_invokers() {
ResourceMark m; // in case methods get printed via debugger
tty->print_cr("Native invokers:");
for (BufferBlob** itt = native_invokers_begin(); itt < native_invokers_end(); itt++) {
(*itt)->print_on(tty);
}
}
void nmethod::print_handler_table() {
ExceptionHandlerTable(this).print();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -207,6 +207,7 @@ class nmethod : public CompiledMethod {
int _scopes_data_offset;
int _scopes_pcs_offset;
int _dependencies_offset;
int _native_invokers_offset;
int _handler_table_offset;
int _nul_chk_table_offset;
#if INCLUDE_JVMCI
@ -312,7 +313,8 @@ class nmethod : public CompiledMethod {
ExceptionHandlerTable* handler_table,
ImplicitExceptionTable* nul_chk_table,
AbstractCompiler* compiler,
int comp_level
int comp_level,
const GrowableArrayView<BufferBlob*>& native_invokers
#if INCLUDE_JVMCI
, char* speculations,
int speculations_len,
@ -360,7 +362,8 @@ class nmethod : public CompiledMethod {
ExceptionHandlerTable* handler_table,
ImplicitExceptionTable* nul_chk_table,
AbstractCompiler* compiler,
int comp_level
int comp_level,
const GrowableArrayView<BufferBlob*>& native_invokers = GrowableArrayView<BufferBlob*>::EMPTY
#if INCLUDE_JVMCI
, char* speculations = NULL,
int speculations_len = 0,
@ -409,7 +412,9 @@ class nmethod : public CompiledMethod {
PcDesc* scopes_pcs_begin () const { return (PcDesc*)(header_begin() + _scopes_pcs_offset ); }
PcDesc* scopes_pcs_end () const { return (PcDesc*)(header_begin() + _dependencies_offset) ; }
address dependencies_begin () const { return header_begin() + _dependencies_offset ; }
address dependencies_end () const { return header_begin() + _handler_table_offset ; }
address dependencies_end () const { return header_begin() + _native_invokers_offset ; }
BufferBlob** native_invokers_begin() const { return (BufferBlob**)(header_begin() + _native_invokers_offset) ; }
BufferBlob** native_invokers_end () const { return (BufferBlob**)(header_begin() + _handler_table_offset); }
address handler_table_begin () const { return header_begin() + _handler_table_offset ; }
address handler_table_end () const { return header_begin() + _nul_chk_table_offset ; }
address nul_chk_table_begin () const { return header_begin() + _nul_chk_table_offset ; }
@ -525,6 +530,8 @@ class nmethod : public CompiledMethod {
void copy_values(GrowableArray<jobject>* oops);
void copy_values(GrowableArray<Metadata*>* metadata);
void free_native_invokers();
// Relocation support
private:
void fix_oop_relocations(address begin, address end, bool initialize_immediates);
@ -661,6 +668,7 @@ public:
void print_scopes() { print_scopes_on(tty); }
void print_scopes_on(outputStream* st) PRODUCT_RETURN;
void print_value_on(outputStream* st) const;
void print_native_invokers();
void print_handler_table();
void print_nul_chk_table();
void print_recorded_oops();

View File

@ -39,12 +39,13 @@ class PcDesc {
int _obj_decode_offset;
enum {
PCDESC_reexecute = 1 << 0,
PCDESC_is_method_handle_invoke = 1 << 1,
PCDESC_return_oop = 1 << 2,
PCDESC_rethrow_exception = 1 << 3,
PCDESC_has_ea_local_in_scope = 1 << 4,
PCDESC_arg_escape = 1 << 5
PCDESC_reexecute = 1 << 0,
PCDESC_is_method_handle_invoke = 1 << 1,
PCDESC_return_oop = 1 << 2,
PCDESC_rethrow_exception = 1 << 3,
PCDESC_has_ea_local_in_scope = 1 << 4,
PCDESC_arg_escape = 1 << 5,
PCDESC_is_optimized_linkToNative = 1 << 6
};
int _flags;
@ -88,6 +89,9 @@ class PcDesc {
bool is_method_handle_invoke() const { return (_flags & PCDESC_is_method_handle_invoke) != 0; }
void set_is_method_handle_invoke(bool z) { set_flag(PCDESC_is_method_handle_invoke, z); }
bool is_optimized_linkToNative() const { return (_flags & PCDESC_is_optimized_linkToNative) != 0; }
void set_is_optimized_linkToNative(bool z) { set_flag(PCDESC_is_optimized_linkToNative, z); }
bool return_oop() const { return (_flags & PCDESC_return_oop) != 0; }
void set_return_oop(bool z) { set_flag(PCDESC_return_oop, z); }

View File

@ -39,11 +39,14 @@ class SimpleScopeDesc : public StackObj {
private:
Method* _method;
int _bci;
bool _is_optimized_linkToNative;
public:
SimpleScopeDesc(CompiledMethod* code, address pc) {
PcDesc* pc_desc = code->pc_desc_at(pc);
assert(pc_desc != NULL, "Must be able to find matching PcDesc");
// save this here so we only have to look up the PcDesc once
_is_optimized_linkToNative = pc_desc->is_optimized_linkToNative();
DebugInfoReadStream buffer(code, pc_desc->scope_decode_offset());
int ignore_sender = buffer.read_int();
_method = buffer.read_method();
@ -52,6 +55,7 @@ class SimpleScopeDesc : public StackObj {
Method* method() { return _method; }
int bci() { return _bci; }
bool is_optimized_linkToNative() { return _is_optimized_linkToNative; }
};
// ScopeDescs contain the information that makes source-level debugging of

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -140,6 +140,8 @@ public:
static void set_regName();
static VMReg vmStorageToVMReg(int type, int index);
#include CPU_HEADER(vmreg)
};

View File

@ -1190,9 +1190,10 @@ void CodeInstaller::record_scope(jint pc_offset, JVMCIObject position, ScopeMode
}
// has_ea_local_in_scope and arg_escape should be added to JVMCI
const bool is_opt_native = false;
const bool has_ea_local_in_scope = false;
const bool arg_escape = false;
_debug_recorder->describe_scope(pc_offset, method, NULL, bci, reexecute, throw_exception, is_mh_invoke, return_oop,
_debug_recorder->describe_scope(pc_offset, method, NULL, bci, reexecute, throw_exception, is_mh_invoke, is_opt_native, return_oop,
has_ea_local_in_scope, arg_escape,
locals_token, expressions_token, monitors_token);
}

View File

@ -1612,7 +1612,7 @@ JVMCI::CodeInstallResult JVMCIRuntime::register_method(JVMCIEnv* JVMCIENV,
debug_info, dependencies, code_buffer,
frame_words, oop_map_set,
handler_table, implicit_exception_table,
compiler, comp_level,
compiler, comp_level, GrowableArrayView<BufferBlob*>::EMPTY,
speculations, speculations_len,
nmethod_mirror_index, nmethod_mirror_name, failed_speculations);

View File

@ -1490,6 +1490,9 @@ methodHandle Method::make_method_handle_intrinsic(vmIntrinsics::ID iid,
m->set_vtable_index(Method::nonvirtual_vtable_index);
m->link_method(m, CHECK_(empty));
if (iid == vmIntrinsics::_linkToNative) {
m->set_interpreter_entry(m->adapter()->get_i2c_entry());
}
if (log_is_enabled(Info, methodhandles) && (Verbose || WizardMode)) {
LogTarget(Info, methodhandles) lt;
LogStream ls(lt);

View File

@ -40,6 +40,8 @@
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/sharedRuntime.hpp"
#include "ci/ciNativeEntryPoint.hpp"
#include "utilities/debug.hpp"
// Utility function.
const TypeFunc* CallGenerator::tf() const {
@ -867,6 +869,31 @@ CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* c
}
}
// Call generator for the linkToNative intrinsic: replaces the method-handle
// fallback path with a direct native call described by a compile-time-constant
// NativeEntryPoint (see CallGenerator::for_method_handle_inline).
class NativeCallGenerator : public CallGenerator {
private:
  ciNativeEntryPoint* _nep; // constant NativeEntryPoint describing the target
public:
  NativeCallGenerator(ciMethod* m, ciNativeEntryPoint* nep)
   : CallGenerator(m), _nep(nep) {}

  virtual JVMState* generate(JVMState* jvms);
};
// Emit the CallNative subgraph for this call site. Returns NULL (so the
// caller falls back) when the native call cannot be generated, e.g. when
// the native invoker stub is not implemented on this platform.
JVMState* NativeCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);

  // arg_size() still counts the fallback handle and the NativeEntryPoint
  // argument; make_native_call strips those two off.
  Node* call = kit.make_native_call(tf(), method()->arg_size(), _nep); // -fallback, - nep
  if (call == NULL) return NULL;

  kit.C->print_inlining_update(this);
  address addr = _nep->entry_point();
  if (kit.C->log() != NULL) {
    kit.C->log()->elem("l2n_intrinsification_success bci='%d' entry_point='" INTPTR_FORMAT "'", jvms->bci(), p2i(addr));
  }

  return kit.transfer_exceptions_into_jvms();
}
CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const) {
GraphKit kit(jvms);
PhaseGVN& gvn = kit.gvn();
@ -989,6 +1016,20 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod*
}
break;
case vmIntrinsics::_linkToNative:
{
Node* nep = kit.argument(callee->arg_size() - 1);
if (nep->Opcode() == Op_ConP) {
const TypeOopPtr* oop_ptr = nep->bottom_type()->is_oopptr();
ciNativeEntryPoint* nep = oop_ptr->const_oop()->as_native_entry_point();
return new NativeCallGenerator(callee, nep);
} else {
print_inlining_failure(C, callee, jvms->depth() - 1, jvms->bci(),
"NativeEntryPoint not constant");
}
}
break;
default:
fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
break;

View File

@ -44,6 +44,7 @@
#include "opto/runtime.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/powerOfTwo.hpp"
#include "code/vmreg.hpp"
// Portions of code courtesy of Clifford Click
@ -1131,11 +1132,110 @@ void CallRuntimeNode::dump_spec(outputStream *st) const {
}
#endif
//=============================================================================
uint CallNativeNode::size_of() const { return sizeof(*this); } // for node cloning/comparison
// Value-equality for GVN: two CallNative nodes are equal only if the base
// CallNode state, the name, and both ABI register assignments all match.
bool CallNativeNode::cmp( const Node &n ) const {
  CallNativeNode &call = (CallNativeNode&)n;
  return CallNode::cmp(call) && !strcmp(_name,call._name)
    && _arg_regs == call._arg_regs && _ret_regs == call._ret_regs;
}
// Match projections of this call into machine projections. Result registers
// come from the foreign-ABI assignment in _ret_regs rather than from the
// normal Java calling convention.
Node* CallNativeNode::match(const ProjNode *proj, const Matcher *matcher) {
  switch (proj->_con) {
    case TypeFunc::Control:
    case TypeFunc::I_O:
    case TypeFunc::Memory:
      return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);

    case TypeFunc::ReturnAdr:
    case TypeFunc::FramePtr:
      ShouldNotReachHere(); // these projections are never requested for a native call

    case TypeFunc::Parms: {
      // First (or only) result value.
      const Type* field_at_con = tf()->range()->field_at(proj->_con);
      const BasicType bt = field_at_con->basic_type();
      OptoReg::Name optoreg = OptoReg::as_OptoReg(_ret_regs.at(proj->_con - TypeFunc::Parms));
      OptoRegPair regs;
      if (bt == T_DOUBLE || bt == T_LONG) {
        regs.set2(optoreg); // wide value occupies a register pair
      } else {
        regs.set1(optoreg);
      }
      RegMask rm = RegMask(regs.first());
      if(OptoReg::is_valid(regs.second()))
        rm.Insert(regs.second());
      return new MachProjNode(this, proj->_con, rm, field_at_con->ideal_reg());
    }
    case TypeFunc::Parms + 1: {
      assert(tf()->range()->field_at(proj->_con) == Type::HALF, "Expected HALF");
      assert(_ret_regs.at(proj->_con - TypeFunc::Parms) == VMRegImpl::Bad(), "Unexpected register for Type::HALF");
      // 2nd half of doubles and longs
      return new MachProjNode(this, proj->_con, RegMask::Empty, (uint) OptoReg::Bad);
    }
    default:
      ShouldNotReachHere();
  }
  return NULL;
}
#ifndef PRODUCT
// Render a register list as "{ r0, r1, ... } " on the given stream.
void CallNativeNode::print_regs(const GrowableArray<VMReg>& regs, outputStream* st) {
  st->print("{ ");
  const int count = regs.length();
  for (int idx = 0; idx < count; idx++) {
    if (idx > 0) {
      st->print(", "); // separator goes before every element but the first
    }
    regs.at(idx)->print_on(st);
  }
  st->print(" } ");
}
// Debug printing: name plus the ABI register assignments, then the
// common CallNode state.
void CallNativeNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s ", _name);
  st->print("_arg_regs: ");
  print_regs(_arg_regs, st);
  st->print("_ret_regs: ");
  print_regs(_ret_regs, st);
  CallNode::dump_spec(st);
}
#endif
//------------------------------calling_convention-----------------------------
// Runtime calls use the platform C calling convention.
void CallRuntimeNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  SharedRuntime::c_calling_convention(sig_bt, parm_regs, /*regs2=*/nullptr, argcnt);
}
// Fill parm_regs from the pre-assigned foreign-ABI registers in _arg_regs
// instead of computing a Java/C calling convention. sig_bt must mirror the
// call's domain types (checked below); halves of longs/doubles carry
// VMRegImpl::Bad() in _arg_regs.
void CallNativeNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
  assert((tf()->domain()->cnt() - TypeFunc::Parms) == argcnt, "arg counts must match!");
#ifdef ASSERT
  for (uint i = 0; i < argcnt; i++) {
    assert(tf()->domain()->field_at(TypeFunc::Parms + i)->basic_type() == sig_bt[i], "types must match!");
  }
#endif
  for (uint i = 0; i < argcnt; i++) {
    switch (sig_bt[i]) {
      case T_BOOLEAN:
      case T_CHAR:
      case T_BYTE:
      case T_SHORT:
      case T_INT:
      case T_FLOAT:
        parm_regs[i].set1(_arg_regs.at(i)); // single-slot value
        break;
      case T_LONG:
      case T_DOUBLE:
        assert((i + 1) < argcnt && sig_bt[i + 1] == T_VOID, "expecting half");
        parm_regs[i].set2(_arg_regs.at(i)); // two-slot value; next signature slot is its half
        break;
      case T_VOID: // Halves of longs and doubles
        assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
        assert(_arg_regs.at(i) == VMRegImpl::Bad(), "expecting bad reg");
        parm_regs[i].set_bad();
        break;
      default:
        ShouldNotReachHere();
        break;
    }
  }
}
//=============================================================================
//------------------------------calling_convention-----------------------------

View File

@ -32,6 +32,7 @@
#include "opto/phaseX.hpp"
#include "opto/replacednodes.hpp"
#include "opto/type.hpp"
#include "utilities/growableArray.hpp"
// Portions of code courtesy of Clifford Click
@ -48,6 +49,7 @@ class CallDynamicJavaNode;
class CallRuntimeNode;
class CallLeafNode;
class CallLeafNoFPNode;
class CallNativeNode;
class AllocateNode;
class AllocateArrayNode;
class BoxLockNode;
@ -807,6 +809,42 @@ public:
#endif
};
//------------------------------CallNativeNode-----------------------------------
// Make a direct call into a foreign function with an arbitrary ABI
// safepoints
class CallNativeNode : public CallNode {
  friend class MachCallNativeNode;
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const;
  static void print_regs(const GrowableArray<VMReg>& regs, outputStream* st);
public:
  GrowableArray<VMReg> _arg_regs;  // ABI-assigned register per outgoing argument (Bad() for halves)
  GrowableArray<VMReg> _ret_regs;  // ABI-assigned register(s) for the return value
  const int _shadow_space_bytes;   // callee shadow space -- ABI-specific, from NativeEntryPoint
  const bool _need_transition;     // whether the call needs a thread-state transition

  CallNativeNode(const TypeFunc* tf, address addr, const char* name,
                 const TypePtr* adr_type,
                 const GrowableArray<VMReg>& arg_regs,
                 const GrowableArray<VMReg>& ret_regs,
                 int shadow_space_bytes,
                 bool need_transition)
    : CallNode(tf, addr, adr_type), _arg_regs(arg_regs),
      _ret_regs(ret_regs), _shadow_space_bytes(shadow_space_bytes),
      _need_transition(need_transition)
  {
    init_class_id(Class_CallNative);
    _name = name;
  }
  virtual int Opcode() const;
  // Only calls that transition to native state act as safepoints.
  virtual bool guaranteed_safepoint()  { return _need_transition; }
  virtual Node* match(const ProjNode *proj, const Matcher *m);
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};
//------------------------------CallLeafNoFPNode-------------------------------
// CallLeafNode, not using floating point or using it in the same manner as
// the generated code

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -59,6 +59,7 @@ macro(CallJava)
macro(CallLeaf)
macro(CallLeafNoFP)
macro(CallRuntime)
macro(CallNative)
macro(CallStaticJava)
macro(CastII)
macro(CastLL)

View File

@ -536,6 +536,7 @@ Compile::Compile( ciEnv* ci_env, ciMethod* target, int osr_bci,
_vector_reboxing_late_inlines(comp_arena(), 2, 0, NULL),
_late_inlines_pos(0),
_number_of_mh_late_inlines(0),
_native_invokers(comp_arena(), 1, 0, NULL),
_print_inlining_stream(NULL),
_print_inlining_list(NULL),
_print_inlining_idx(0),
@ -832,6 +833,7 @@ Compile::Compile( ciEnv* ci_env,
_for_igvn(NULL),
_warm_calls(NULL),
_number_of_mh_late_inlines(0),
_native_invokers(),
_print_inlining_stream(NULL),
_print_inlining_list(NULL),
_print_inlining_idx(0),
@ -2901,6 +2903,7 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f
frc.inc_java_call_count(); // Count java call site;
case Op_CallRuntime:
case Op_CallLeaf:
case Op_CallNative:
case Op_CallLeafNoFP: {
assert (n->is_Call(), "");
CallNode *call = n->as_Call();
@ -4696,3 +4699,6 @@ void Compile::igv_print_method_to_network(const char* phase_name) {
}
#endif
// Record a generated native invoker stub so it is attached to (and later
// freed with) the nmethod produced by this compilation.
void Compile::add_native_invoker(BufferBlob* stub) {
  _native_invokers.append(stub);
}

View File

@ -385,6 +385,7 @@ class Compile : public Phase {
int _late_inlines_pos; // Where in the queue should the next late inlining candidate go (emulate depth first inlining)
uint _number_of_mh_late_inlines; // number of method handle late inlining still pending
GrowableArray<BufferBlob*> _native_invokers;
// Inlining may not happen in parse order which would make
// PrintInlining output confusing. Keep track of PrintInlining
@ -936,6 +937,10 @@ class Compile : public Phase {
_vector_reboxing_late_inlines.push(cg);
}
void add_native_invoker(BufferBlob* stub);
const GrowableArray<BufferBlob*>& native_invokers() const { return _native_invokers; }
void remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List &useful);
void remove_useless_nodes (GrowableArray<Node*>& node_list, Unique_Node_List &useful);

View File

@ -25,6 +25,9 @@
#include "precompiled.hpp"
#include "ci/ciUtilities.hpp"
#include "classfile/javaClasses.hpp"
#include "ci/ciNativeEntryPoint.hpp"
#include "ci/ciObjArray.hpp"
#include "asm/register.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
@ -47,6 +50,7 @@
#include "runtime/sharedRuntime.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/powerOfTwo.hpp"
#include "utilities/growableArray.hpp"
//----------------------------GraphKit-----------------------------------------
// Main utility constructor.
@ -2560,6 +2564,128 @@ Node* GraphKit::make_runtime_call(int flags,
}
// i2b: sign-extend the low 8 bits of an int value (shift up then
// arithmetic shift back down by 24).
Node* GraphKit::sign_extend_byte(Node* in) {
  Node* shifted_up = _gvn.transform(new LShiftINode(in, _gvn.intcon(24)));
  Node* extended   = _gvn.transform(new RShiftINode(shifted_up, _gvn.intcon(24)));
  return extended;
}
// i2s: sign-extend the low 16 bits of an int value (shift up then
// arithmetic shift back down by 16).
Node* GraphKit::sign_extend_short(Node* in) {
  Node* shifted_up = _gvn.transform(new LShiftINode(in, _gvn.intcon(16)));
  Node* extended   = _gvn.transform(new RShiftINode(shifted_up, _gvn.intcon(16)));
  return extended;
}
//-----------------------------make_native_call-------------------------------
// Build the ideal-graph subtree for a direct native call described by a
// constant NativeEntryPoint. The incoming call_type/nargs still include the
// fallback MethodHandle (first argument) and the NativeEntryPoint (last
// argument); both are stripped here. Returns the CallNativeNode, or NULL if
// a required native invoker stub cannot be generated on this platform (in
// which case the compilation is marked failed).
Node* GraphKit::make_native_call(const TypeFunc* call_type, uint nargs, ciNativeEntryPoint* nep) {
  uint n_filtered_args = nargs - 2; // -fallback, -nep;
  ResourceMark rm;
  Node** argument_nodes = NEW_RESOURCE_ARRAY(Node*, n_filtered_args);
  const Type** arg_types = TypeTuple::fields(n_filtered_args);
  GrowableArray<VMReg> arg_regs(C->comp_arena(), n_filtered_args, n_filtered_args, VMRegImpl::Bad());

  // Collect argument nodes, their types, and their ABI registers. Halves of
  // longs/doubles consume a signature slot but no register move, so the
  // register array (argMoves) is read at its own, slower, cursor.
  VMReg* argRegs = nep->argMoves();
  {
    for (uint vm_arg_pos = 0, java_arg_read_pos = 0;
        vm_arg_pos < n_filtered_args; vm_arg_pos++) {
      uint vm_unfiltered_arg_pos = vm_arg_pos + 1; // +1 to skip fallback handle argument
      Node* node = argument(vm_unfiltered_arg_pos);
      const Type* type = call_type->domain()->field_at(TypeFunc::Parms + vm_unfiltered_arg_pos);
      VMReg reg = type == Type::HALF
        ? VMRegImpl::Bad()
        : argRegs[java_arg_read_pos++];

      argument_nodes[vm_arg_pos] = node;
      arg_types[TypeFunc::Parms + vm_arg_pos] = type;
      arg_regs.at_put(vm_arg_pos, reg);
    }
  }

  // Same scheme for the return value(s).
  uint n_returns = call_type->range()->cnt() - TypeFunc::Parms;
  GrowableArray<VMReg> ret_regs(C->comp_arena(), n_returns, n_returns, VMRegImpl::Bad());
  const Type** ret_types = TypeTuple::fields(n_returns);

  VMReg* retRegs = nep->returnMoves();
  {
    for (uint vm_ret_pos = 0, java_ret_read_pos = 0;
        vm_ret_pos < n_returns; vm_ret_pos++) { // 0 or 1
      const Type* type = call_type->range()->field_at(TypeFunc::Parms + vm_ret_pos);
      VMReg reg = type == Type::HALF
        ? VMRegImpl::Bad()
        : retRegs[java_ret_read_pos++];

      ret_regs.at_put(vm_ret_pos, reg);
      ret_types[TypeFunc::Parms + vm_ret_pos] = type;
    }
  }

  // Re-derive the call signature without the two stripped arguments.
  const TypeFunc* new_call_type = TypeFunc::make(
    TypeTuple::make(TypeFunc::Parms + n_filtered_args, arg_types),
    TypeTuple::make(TypeFunc::Parms + n_returns, ret_types)
  );

  // When a thread-state transition is needed, route the call through a
  // generated invoker stub and call that stub instead of the raw target.
  address call_addr = nep->entry_point();
  if (nep->need_transition()) {
    BufferBlob* invoker = SharedRuntime::make_native_invoker(call_addr,
                                                             nep->shadow_space(),
                                                             arg_regs, ret_regs);
    if (invoker == NULL) {
      C->record_failure("native invoker not implemented on this platform");
      return NULL;
    }
    C->add_native_invoker(invoker); // owned by the nmethod; freed with it
    call_addr = invoker->code_begin();
  }
  assert(call_addr != NULL, "sanity");

  CallNativeNode* call = new CallNativeNode(new_call_type, call_addr, nep->name(), TypePtr::BOTTOM,
                                            arg_regs,
                                            ret_regs,
                                            nep->shadow_space(),
                                            nep->need_transition());

  // Only transitioning calls are safepoints (see guaranteed_safepoint()).
  if (call->_need_transition) {
    add_safepoint_edges(call);
  }

  set_predefined_input_for_runtime_call(call);

  for (uint i = 0; i < n_filtered_args; i++) {
    call->init_req(i + TypeFunc::Parms, argument_nodes[i]);
  }

  Node* c = gvn().transform(call);
  assert(c == call, "cannot disappear");

  set_predefined_output_for_runtime_call(call);

  Node* ret;
  if (method() == NULL || method()->return_type()->basic_type() == T_VOID) {
    ret = top();
  } else {
    ret = gvn().transform(new ProjNode(call, TypeFunc::Parms));
    // Unpack native results if needed
    // Need this method type since it's unerased
    switch (nep->method_type()->rtype()->basic_type()) {
      case T_CHAR:
        ret = _gvn.transform(new AndINode(ret, _gvn.intcon(0xFFFF))); // zero-extend char
        break;
      case T_BYTE:
        ret = sign_extend_byte(ret);
        break;
      case T_SHORT:
        ret = sign_extend_short(ret);
        break;
      default: // do nothing
        break;
    }
  }

  // NOTE(review): method() is NULL-checked above but dereferenced here --
  // presumably method() is always non-NULL on this path; confirm.
  push_node(method()->return_type()->basic_type(), ret);

  return call;
}
//------------------------------merge_memory-----------------------------------
// Merge memory from one path into the current memory state.
void GraphKit::merge_memory(Node* new_mem, Node* region, int new_path) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -798,6 +798,12 @@ class GraphKit : public Phase {
Node* parm2 = NULL, Node* parm3 = NULL,
Node* parm4 = NULL, Node* parm5 = NULL,
Node* parm6 = NULL, Node* parm7 = NULL);
Node* sign_extend_byte(Node* in);
Node* sign_extend_short(Node* in);
Node* make_native_call(const TypeFunc* call_type, uint nargs, ciNativeEntryPoint* nep);
enum { // flag values for make_runtime_call
RC_NO_FP = 1, // CallLeafNoFPNode
RC_NO_IO = 2, // do not hook IO edges

View File

@ -862,6 +862,12 @@ uint PhaseCFG::sched_call(Block* block, uint node_cnt, Node_List& worklist, Grow
// Calling Java code so use Java calling convention
save_policy = _matcher._register_save_policy;
break;
case Op_CallNative:
// We use the c reg save policy here since Panama
// only supports the C ABI currently.
// TODO compute actual save policy based on nep->abi
save_policy = _matcher._c_reg_save_policy;
break;
default:
ShouldNotReachHere();
@ -875,7 +881,14 @@ uint PhaseCFG::sched_call(Block* block, uint node_cnt, Node_List& worklist, Grow
// done for oops since idealreg2debugmask takes care of debug info
// references but there no way to handle oops differently than other
// pointers as far as the kill mask goes.
bool exclude_soe = op == Op_CallRuntime;
//
// Also, native callees can not save oops, so we kill the SOE registers
// here in case a native call has a safepoint. This doesn't work for
// RBP though, which seems to be special-cased elsewhere to always be
// treated as alive, so we instead manually save the location of RBP
// before doing the native call (see NativeInvokerGenerator::generate).
bool exclude_soe = op == Op_CallRuntime
|| (op == Op_CallNative && mcall->guaranteed_safepoint());
// If the call is a MethodHandle invoke, we need to exclude the
// register which is used to save the SP value over MH invokes from

View File

@ -818,6 +818,23 @@ void MachCallRuntimeNode::dump_spec(outputStream *st) const {
}
#endif
//=============================================================================
// Byte size of this node; used by C2's node cloning machinery.
uint MachCallNativeNode::size_of() const { return sizeof(MachCallNativeNode); }
bool MachCallNativeNode::cmp( const Node &n ) const {
MachCallNativeNode &call = (MachCallNativeNode&)n;
return MachCallNode::cmp(call) && !strcmp(_name,call._name)
&& _arg_regs == call._arg_regs && _ret_regs == call._ret_regs;
}
#ifndef PRODUCT
// Debug printing (non-product builds): callee name, then the argument and
// return register-move lists, then the common MachCallNode info.
void MachCallNativeNode::dump_spec(outputStream *st) const {
  st->print("%s ",_name);
  st->print("_arg_regs: ");
  CallNativeNode::print_regs(_arg_regs, st);
  st->print("_ret_regs: ");
  CallNativeNode::print_regs(_ret_regs, st);
  MachCallNode::dump_spec(st);
}
#endif
//=============================================================================
// A shared JVMState for all HaltNodes. Indicates the start of debug info
// is at TypeFunc::Parms. Only required for SOE register spill handling -
// to indicate where the stack-slot-only debug info inputs begin.

View File

@ -31,6 +31,7 @@
#include "opto/multnode.hpp"
#include "opto/node.hpp"
#include "opto/regmask.hpp"
#include "utilities/growableArray.hpp"
class BiasedLockingCounters;
class BufferBlob;
@ -39,6 +40,7 @@ class JVMState;
class MachCallDynamicJavaNode;
class MachCallJavaNode;
class MachCallLeafNode;
class MachCallNativeNode;
class MachCallNode;
class MachCallRuntimeNode;
class MachCallStaticJavaNode;
@ -880,14 +882,16 @@ public:
const TypeFunc *_tf; // Function type
address _entry_point; // Address of the method being called
float _cnt; // Estimate of number of times called
bool _guaranteed_safepoint; // Do we need to observe safepoint?
const TypeFunc* tf() const { return _tf; }
const address entry_point() const { return _entry_point; }
const float cnt() const { return _cnt; }
void set_tf(const TypeFunc* tf) { _tf = tf; }
void set_entry_point(address p) { _entry_point = p; }
void set_cnt(float c) { _cnt = c; }
void set_tf(const TypeFunc* tf) { _tf = tf; }
void set_entry_point(address p) { _entry_point = p; }
void set_cnt(float c) { _cnt = c; }
void set_guaranteed_safepoint(bool b) { _guaranteed_safepoint = b; }
MachCallNode() : MachSafePointNode() {
init_class_id(Class_MachCall);
@ -905,6 +909,8 @@ public:
// Similar to cousin class CallNode::returns_pointer
bool returns_pointer() const;
bool guaranteed_safepoint() const { return _guaranteed_safepoint; }
#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const;
#endif
@ -1004,6 +1010,25 @@ public:
}
};
//------------------------------MachCallNativeNode----------------------------
// Machine-level node for a Panama native downcall (matched from
// CallNativeNode). Carries the callee name and the per-argument/per-return
// register moves computed by the Java-side ABI classifier.
class MachCallNativeNode: public MachCallNode {
  virtual bool cmp( const Node &n ) const;
  virtual uint size_of() const;
  void print_regs(const GrowableArray<VMReg>& regs, outputStream* st) const;
public:
  const char *_name;                 // name of the native callee (for debug output / cmp)
  GrowableArray<VMReg> _arg_regs;    // target register/stack slot per argument (Bad() for HALF slots)
  GrowableArray<VMReg> _ret_regs;    // source register per return value (Bad() for HALF slots)

  MachCallNativeNode() : MachCallNode() {
    init_class_id(Class_MachCallNative);
  }

  virtual int ret_addr_offset();
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
//------------------------------MachHaltNode-----------------------------------
// Machine-specific versions of halt nodes
class MachHaltNode : public MachReturnNode {

View File

@ -1236,9 +1236,10 @@ MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) {
// Copy data from the Ideal SafePoint to the machine version
mcall = m->as_MachCall();
mcall->set_tf( call->tf());
mcall->set_entry_point(call->entry_point());
mcall->set_cnt( call->cnt());
mcall->set_tf( call->tf());
mcall->set_entry_point( call->entry_point());
mcall->set_cnt( call->cnt());
mcall->set_guaranteed_safepoint(call->guaranteed_safepoint());
if( mcall->is_MachCallJava() ) {
MachCallJavaNode *mcall_java = mcall->as_MachCallJava();
@ -1265,6 +1266,13 @@ MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) {
else if( mcall->is_MachCallRuntime() ) {
mcall->as_MachCallRuntime()->_name = call->as_CallRuntime()->_name;
}
else if( mcall->is_MachCallNative() ) {
MachCallNativeNode* mach_call_native = mcall->as_MachCallNative();
CallNativeNode* call_native = call->as_CallNative();
mach_call_native->_name = call_native->_name;
mach_call_native->_arg_regs = call_native->_arg_regs;
mach_call_native->_ret_regs = call_native->_ret_regs;
}
msfpt = mcall;
}
// This is a non-call safepoint
@ -1299,6 +1307,8 @@ MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) {
// These are usually backing store for register arguments for varargs.
if( call != NULL && call->is_CallRuntime() )
out_arg_limit_per_call = OptoReg::add(out_arg_limit_per_call,C->varargs_C_out_slots_killed());
if( call != NULL && call->is_CallNative() )
out_arg_limit_per_call = OptoReg::add(out_arg_limit_per_call, call->as_CallNative()->_shadow_space_bytes);
// Do the normal argument list (parameters) register masks

View File

@ -51,6 +51,7 @@ class CallJavaNode;
class CallLeafNode;
class CallNode;
class CallRuntimeNode;
class CallNativeNode;
class CallStaticJavaNode;
class CastIINode;
class CastLLNode;
@ -93,6 +94,7 @@ class MachCallDynamicJavaNode;
class MachCallJavaNode;
class MachCallLeafNode;
class MachCallNode;
class MachCallNativeNode;
class MachCallRuntimeNode;
class MachCallStaticJavaNode;
class MachConstantBaseNode;
@ -629,6 +631,7 @@ public:
DEFINE_CLASS_ID(Lock, AbstractLock, 0)
DEFINE_CLASS_ID(Unlock, AbstractLock, 1)
DEFINE_CLASS_ID(ArrayCopy, Call, 4)
DEFINE_CLASS_ID(CallNative, Call, 5)
DEFINE_CLASS_ID(MultiBranch, Multi, 1)
DEFINE_CLASS_ID(PCTable, MultiBranch, 0)
DEFINE_CLASS_ID(Catch, PCTable, 0)
@ -652,6 +655,7 @@ public:
DEFINE_CLASS_ID(MachCallDynamicJava, MachCallJava, 1)
DEFINE_CLASS_ID(MachCallRuntime, MachCall, 1)
DEFINE_CLASS_ID(MachCallLeaf, MachCallRuntime, 0)
DEFINE_CLASS_ID(MachCallNative, MachCall, 2)
DEFINE_CLASS_ID(MachBranch, Mach, 1)
DEFINE_CLASS_ID(MachIf, MachBranch, 0)
DEFINE_CLASS_ID(MachGoto, MachBranch, 1)
@ -809,6 +813,7 @@ public:
DEFINE_CLASS_QUERY(Bool)
DEFINE_CLASS_QUERY(BoxLock)
DEFINE_CLASS_QUERY(Call)
DEFINE_CLASS_QUERY(CallNative)
DEFINE_CLASS_QUERY(CallDynamicJava)
DEFINE_CLASS_QUERY(CallJava)
DEFINE_CLASS_QUERY(CallLeaf)
@ -850,6 +855,7 @@ public:
DEFINE_CLASS_QUERY(Mach)
DEFINE_CLASS_QUERY(MachBranch)
DEFINE_CLASS_QUERY(MachCall)
DEFINE_CLASS_QUERY(MachCallNative)
DEFINE_CLASS_QUERY(MachCallDynamicJava)
DEFINE_CLASS_QUERY(MachCallJava)
DEFINE_CLASS_QUERY(MachCallLeaf)

View File

@ -35,6 +35,7 @@
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/allocation.hpp"
#include "opto/ad.hpp"
#include "opto/block.hpp"
#include "opto/c2compiler.hpp"
@ -1002,6 +1003,7 @@ void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
int safepoint_pc_offset = current_offset;
bool is_method_handle_invoke = false;
bool is_opt_native = false;
bool return_oop = false;
bool has_ea_local_in_scope = sfn->_has_ea_local_in_scope;
bool arg_escape = false;
@ -1020,6 +1022,8 @@ void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
is_method_handle_invoke = true;
}
arg_escape = mcall->as_MachCallJava()->_arg_escape;
} else if (mcall->is_MachCallNative()) {
is_opt_native = true;
}
// Check if a call returns an object.
@ -1140,10 +1144,22 @@ void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
// Now we can describe the scope.
methodHandle null_mh;
bool rethrow_exception = false;
C->debug_info()->describe_scope(safepoint_pc_offset, null_mh, scope_method, jvms->bci(),
jvms->should_reexecute(), rethrow_exception, is_method_handle_invoke,
return_oop, has_ea_local_in_scope, arg_escape,
locvals, expvals, monvals);
C->debug_info()->describe_scope(
safepoint_pc_offset,
null_mh,
scope_method,
jvms->bci(),
jvms->should_reexecute(),
rethrow_exception,
is_method_handle_invoke,
is_opt_native,
return_oop,
has_ea_local_in_scope,
arg_escape,
locvals,
expvals,
monvals
);
} // End jvms loop
// Mark the end of the scope set.
@ -1519,6 +1535,7 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
current_offset = cb->insts_size();
}
bool observe_safepoint = is_sfn;
// Remember the start of the last call in a basic block
if (is_mcall) {
MachCallNode *mcall = mach->as_MachCall();
@ -1529,15 +1546,11 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
// Save the return address
call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
if (mcall->is_MachCallLeaf()) {
is_mcall = false;
is_sfn = false;
}
observe_safepoint = mcall->guaranteed_safepoint();
}
// sfn will be valid whenever mcall is valid now because of inheritance
if (is_sfn || is_mcall) {
if (observe_safepoint) {
// Handle special safepoint nodes for synchronization
if (!is_mcall) {
MachSafePointNode *sfn = mach->as_MachSafePoint();
@ -1683,6 +1696,8 @@ void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
n->emit(*cb, C->regalloc());
current_offset = cb->insts_size();
assert(!is_mcall || (call_returns[block->_pre_order] <= (uint) current_offset), "ret_addr_offset() not within emitted code");
// Above we only verified that there is enough space in the instruction section.
// However, the instruction may emit stubs that cause code buffer expansion.
// Bail out here if expansion failed due to a lack of code cache space.
@ -3390,7 +3405,8 @@ void PhaseOutput::install_code(ciMethod* target,
compiler,
has_unsafe_access,
SharedRuntime::is_wide_vector(C->max_vector_size()),
C->rtm_state());
C->rtm_state(),
C->native_invokers());
if (C->log() != NULL) { // Print code cache state into compiler log
C->log()->code_cache_state();

View File

@ -0,0 +1,87 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "foreign_globals.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/fieldDescriptor.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#define FOREIGN_ABI "jdk/internal/foreign/abi/"
// Return the byte offset of the named instance field (with the given
// signature) within 'cls'. Asserts in debug builds that the field exists.
static int field_offset(InstanceKlass* cls, const char* fieldname, Symbol* sigsym) {
  TempNewSymbol name_sym = SymbolTable::new_symbol(fieldname, (int)strlen(fieldname));
  fieldDescriptor fd;
  bool found = cls->find_field(name_sym, sigsym, false, &fd);
  assert(found, "Field not found");
  return fd.offset();
}
// Resolve a class by its external name and return it as an InstanceKlass.
// Asserts in debug builds that resolution succeeded.
static InstanceKlass* find_InstanceKlass(const char* name, TRAPS) {
  TempNewSymbol class_sym = SymbolTable::new_symbol(name, (int)strlen(name));
  Klass* klass = SystemDictionary::resolve_or_null(class_sym, Handle(), Handle(), THREAD);
  assert(klass != nullptr, "Can not find class: %s", name);
  return InstanceKlass::cast(klass);
}
// Singleton accessor: the constructor resolves all the Java-side field
// offsets exactly once, the first time any parse_* entry point is used.
const ForeignGlobals& ForeignGlobals::instance() {
  static ForeignGlobals globals; // thread-safe lazy init-once (since C++11)
  return globals;
}
// Decode a jdk.internal.foreign.abi.ABIDescriptor Java object into the
// CPU-specific native ABIDescriptor struct.
const ABIDescriptor ForeignGlobals::parse_abi_descriptor(jobject jabi) {
  return instance().parse_abi_descriptor_impl(jabi);
}
// Decode a jdk.internal.foreign.abi.BufferLayout Java object into the
// CPU-specific native BufferLayout struct.
const BufferLayout ForeignGlobals::parse_buffer_layout(jobject jlayout) {
  return instance().parse_buffer_layout_impl(jlayout);
}
// Resolve and cache the field offsets of the Java-side ABI description
// classes (ABIDescriptor, VMStorage, BufferLayout) so the parse_* methods
// can read their instances without repeated reflective lookups.
// NOTE(review): field names and signatures here must stay in sync with the
// Java class definitions under jdk/internal/foreign/abi.
ForeignGlobals::ForeignGlobals() {
  Thread* current_thread = Thread::current();
  ResourceMark rm(current_thread);

  // ABIDescriptor
  InstanceKlass* k_ABI = find_InstanceKlass(FOREIGN_ABI "ABIDescriptor", current_thread);
  const char* strVMSArray = "[[L" FOREIGN_ABI "VMStorage;"; // signature of a VMStorage[][] field
  Symbol* symVMSArray = SymbolTable::new_symbol(strVMSArray, (int)strlen(strVMSArray));
  ABI.inputStorage_offset = field_offset(k_ABI, "inputStorage", symVMSArray);
  ABI.outputStorage_offset = field_offset(k_ABI, "outputStorage", symVMSArray);
  ABI.volatileStorage_offset = field_offset(k_ABI, "volatileStorage", symVMSArray);
  ABI.stackAlignment_offset = field_offset(k_ABI, "stackAlignment", vmSymbols::int_signature());
  ABI.shadowSpace_offset = field_offset(k_ABI, "shadowSpace", vmSymbols::int_signature());

  // VMStorage
  InstanceKlass* k_VMS = find_InstanceKlass(FOREIGN_ABI "VMStorage", current_thread);
  VMS.index_offset = field_offset(k_VMS, "index", vmSymbols::int_signature());

  // BufferLayout
  InstanceKlass* k_BL = find_InstanceKlass(FOREIGN_ABI "BufferLayout", current_thread);
  BL.size_offset = field_offset(k_BL, "size", vmSymbols::long_signature());
  BL.arguments_next_pc_offset = field_offset(k_BL, "arguments_next_pc", vmSymbols::long_signature());
  BL.stack_args_bytes_offset = field_offset(k_BL, "stack_args_bytes", vmSymbols::long_signature());
  BL.stack_args_offset = field_offset(k_BL, "stack_args", vmSymbols::long_signature());
  BL.input_type_offsets_offset = field_offset(k_BL, "input_type_offsets", vmSymbols::long_array_signature());
  BL.output_type_offsets_offset = field_offset(k_BL, "output_type_offsets", vmSymbols::long_array_signature());
}

View File

@ -0,0 +1,71 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_PRIMS_FOREIGN_GLOBALS
#define SHARE_PRIMS_FOREIGN_GLOBALS
#include "utilities/macros.hpp"
#include CPU_HEADER(foreign_globals)
// Bridge between the Java-side foreign ABI description objects
// (jdk.internal.foreign.abi.*) and their native counterparts
// (ABIDescriptor / BufferLayout, declared in the CPU-specific
// foreign_globals header). Field offsets are resolved once in the
// constructor and cached in the ABI/VMS/BL structs below.
class ForeignGlobals {
private:
  // Cached field offsets of jdk.internal.foreign.abi.ABIDescriptor.
  struct {
    int inputStorage_offset;
    int outputStorage_offset;
    int volatileStorage_offset;
    int stackAlignment_offset;
    int shadowSpace_offset;
  } ABI;

  // Cached field offsets of jdk.internal.foreign.abi.VMStorage.
  struct {
    int index_offset;
  } VMS;

  // Cached field offsets of jdk.internal.foreign.abi.BufferLayout.
  struct {
    int size_offset;
    int arguments_next_pc_offset;
    int stack_args_bytes_offset;
    int stack_args_offset;
    int input_type_offsets_offset;
    int output_type_offsets_offset;
  } BL;

  ForeignGlobals();

  // Lazily-initialized singleton (see .cpp).
  static const ForeignGlobals& instance();

  // Checked downcast of an oop to a concrete array oop type.
  template<typename R>
  static R cast(oop theOop);

  // Copy one VMStorage[] row of a 2-D Java array into a native array,
  // converting each element's 'index' field via 'converter'.
  template<typename T, typename Func>
  void loadArray(objArrayOop jarray, int type_index, GrowableArray<T>& array, Func converter) const;

  const ABIDescriptor parse_abi_descriptor_impl(jobject jabi) const;
  const BufferLayout parse_buffer_layout_impl(jobject jlayout) const;
public:
  static const ABIDescriptor parse_abi_descriptor(jobject jabi);
  static const BufferLayout parse_buffer_layout(jobject jlayout);
};
#endif // SHARE_PRIMS_FOREIGN_GLOBALS

View File

@ -0,0 +1,58 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_PRIMS_FOREIGN_GLOBALS_INLINE_HPP
#define SHARE_PRIMS_FOREIGN_GLOBALS_INLINE_HPP
#include "prims/foreign_globals.hpp"
#include "oops/oopsHierarchy.hpp"
#include "oops/objArrayOop.hpp"
// Runtime type check used by ForeignGlobals::cast. The primary template is
// never instantiable: the sizeof(T)==0 static_assert fires at compile time
// for any type without an explicit specialization below.
template<typename T>
static bool check_type(oop theOop) {
  static_assert(sizeof(T) == 0, "No check_type specialization found for this type");
  return false;
}
// Object arrays.
template<>
inline bool check_type<objArrayOop>(oop theOop) { return theOop->is_objArray(); }
// Primitive (type) arrays.
template<>
inline bool check_type<typeArrayOop>(oop theOop) { return theOop->is_typeArray(); }
// Checked downcast of an oop to R (an array oop type): asserts in debug
// builds that the oop's actual kind matches R, then casts.
template<typename R>
R ForeignGlobals::cast(oop theOop) {
  assert(check_type<R>(theOop), "Invalid cast");
  return (R) theOop;
}
// Copy the VMStorage[] row at 'type_index' of the 2-D Java array 'jarray'
// into 'array', converting each element's 'index' field via 'converter'.
template<typename T, typename Func>
void ForeignGlobals::loadArray(objArrayOop jarray, int type_index, GrowableArray<T>& array, Func converter) const {
  objArrayOop storages = cast<objArrayOop>(jarray->obj_at(type_index));
  const int count = storages->length();
  for (int idx = 0; idx < count; idx++) {
    // Each element is a VMStorage; read its 'index' field and convert it
    // to the native representation before appending.
    jint vms_index = storages->obj_at(idx)->int_field(VMS.index_offset);
    array.push(converter(vms_index));
  }
}
#endif // SHARE_PRIMS_FOREIGN_GLOBALS_INLINE_HPP

View File

@ -426,6 +426,7 @@ Symbol* MethodHandles::signature_polymorphic_intrinsic_name(vmIntrinsics::ID iid
case vmIntrinsics::_linkToStatic: return vmSymbols::linkToStatic_name();
case vmIntrinsics::_linkToSpecial: return vmSymbols::linkToSpecial_name();
case vmIntrinsics::_linkToInterface: return vmSymbols::linkToInterface_name();
case vmIntrinsics::_linkToNative: return vmSymbols::linkToNative_name();
default:
fatal("unexpected intrinsic id: %d %s", iid, vmIntrinsics::name_at(iid));
return 0;
@ -448,6 +449,7 @@ Bytecodes::Code MethodHandles::signature_polymorphic_intrinsic_bytecode(vmIntrin
int MethodHandles::signature_polymorphic_intrinsic_ref_kind(vmIntrinsics::ID iid) {
switch (iid) {
case vmIntrinsics::_invokeBasic: return 0;
case vmIntrinsics::_linkToNative: return 0;
case vmIntrinsics::_linkToVirtual: return JVM_REF_invokeVirtual;
case vmIntrinsics::_linkToStatic: return JVM_REF_invokeStatic;
case vmIntrinsics::_linkToSpecial: return JVM_REF_invokeSpecial;
@ -471,6 +473,7 @@ vmIntrinsics::ID MethodHandles::signature_polymorphic_name_id(Symbol* name) {
case VM_SYMBOL_ENUM_NAME(linkToStatic_name): return vmIntrinsics::_linkToStatic;
case VM_SYMBOL_ENUM_NAME(linkToSpecial_name): return vmIntrinsics::_linkToSpecial;
case VM_SYMBOL_ENUM_NAME(linkToInterface_name): return vmIntrinsics::_linkToInterface;
case VM_SYMBOL_ENUM_NAME(linkToNative_name): return vmIntrinsics::_linkToNative;
default: break;
}

View File

@ -118,7 +118,7 @@ class MethodHandles: AllStatic {
static bool has_member_arg(vmIntrinsics::ID iid) {
assert(is_signature_polymorphic(iid), "");
return (iid >= vmIntrinsics::_linkToVirtual &&
iid <= vmIntrinsics::_linkToInterface);
iid <= vmIntrinsics::_linkToNative);
}
static bool has_member_arg(Symbol* klass, Symbol* name) {
if ((klass == vmSymbols::java_lang_invoke_MethodHandle() ||

View File

@ -0,0 +1,44 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "code/vmreg.hpp"
// Native method NativeEntryPoint.vmStorageToVMReg(type, index): translate a
// Java-side VMStorage (type, index) pair into the raw value of the
// corresponding VMReg.
JNI_LEAF(jlong, NEP_vmStorageToVMReg(JNIEnv* env, jclass _unused, jint type, jint index))
  return VMRegImpl::vmStorageToVMReg(type, index)->value();
JNI_END
#define CC (char*) /*cast a literal from (const char*)*/
#define FN_PTR(f) CAST_FROM_FN_PTR(void*, &f)
// Native method table for jdk.internal.invoke.NativeEntryPoint.
static JNINativeMethod NEP_methods[] = {
  {CC "vmStorageToVMReg", CC "(II)J", FN_PTR(NEP_vmStorageToVMReg)},
};
// Called from NativeEntryPoint.registerNatives (see nativeLookup.cpp) to
// install the NEP_methods table on the NativeEntryPoint class.
JNI_LEAF(void, JVM_RegisterNativeEntryPointMethods(JNIEnv *env, jclass NEP_class))
  int status = env->RegisterNatives(NEP_class, NEP_methods, sizeof(NEP_methods)/sizeof(JNINativeMethod));
  guarantee(status == JNI_OK && !env->ExceptionOccurred(),
            "register jdk.internal.invoke.NativeEntryPoint natives");
JNI_END

View File

@ -216,6 +216,11 @@ char* NativeLookup::long_jni_name(const methodHandle& method) {
extern "C" {
void JNICALL JVM_RegisterMethodHandleMethods(JNIEnv *env, jclass unsafecls);
void JNICALL JVM_RegisterReferencesMethods(JNIEnv *env, jclass unsafecls);
void JNICALL JVM_RegisterUpcallHandlerMethods(JNIEnv *env, jclass unsafecls);
void JNICALL JVM_RegisterProgrammableUpcallHandlerMethods(JNIEnv *env, jclass unsafecls);
void JNICALL JVM_RegisterProgrammableInvokerMethods(JNIEnv *env, jclass unsafecls);
void JNICALL JVM_RegisterNativeEntryPointMethods(JNIEnv *env, jclass unsafecls);
void JNICALL JVM_RegisterPerfMethods(JNIEnv *env, jclass perfclass);
void JNICALL JVM_RegisterWhiteBoxMethods(JNIEnv *env, jclass wbclass);
void JNICALL JVM_RegisterVectorSupportMethods(JNIEnv *env, jclass vsclass);
@ -231,6 +236,10 @@ extern "C" {
static JNINativeMethod lookup_special_native_methods[] = {
{ CC"Java_jdk_internal_misc_Unsafe_registerNatives", NULL, FN_PTR(JVM_RegisterJDKInternalMiscUnsafeMethods) },
{ CC"Java_java_lang_invoke_MethodHandleNatives_registerNatives", NULL, FN_PTR(JVM_RegisterMethodHandleMethods) },
{ CC"Java_jdk_internal_foreign_abi_UpcallStubs_registerNatives", NULL, FN_PTR(JVM_RegisterUpcallHandlerMethods) },
{ CC"Java_jdk_internal_foreign_abi_ProgrammableUpcallHandler_registerNatives", NULL, FN_PTR(JVM_RegisterProgrammableUpcallHandlerMethods) },
{ CC"Java_jdk_internal_foreign_abi_ProgrammableInvoker_registerNatives", NULL, FN_PTR(JVM_RegisterProgrammableInvokerMethods) },
{ CC"Java_jdk_internal_invoke_NativeEntryPoint_registerNatives", NULL, FN_PTR(JVM_RegisterNativeEntryPointMethods) },
{ CC"Java_jdk_internal_perf_Perf_registerNatives", NULL, FN_PTR(JVM_RegisterPerfMethods) },
{ CC"Java_sun_hotspot_WhiteBox_registerNatives", NULL, FN_PTR(JVM_RegisterWhiteBoxMethods) },
{ CC"Java_jdk_internal_vm_vector_VectorSupport_registerNatives", NULL, FN_PTR(JVM_RegisterVectorSupportMethods)},

View File

@ -0,0 +1,62 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "prims/universalNativeInvoker.hpp"
#include "runtime/interfaceSupport.inline.hpp"
// Stub generator for a ProgrammableInvoker adapter: holds the ABI
// description and buffer layout that the CPU-specific generate() uses.
ProgrammableInvoker::Generator::Generator(CodeBuffer* code, const ABIDescriptor* abi, const BufferLayout* layout)
  : StubCodeGenerator(code),
    _abi(abi),
    _layout(layout) {}
// Invoke a generated adapter stub on the argument buffer, transitioning the
// thread from _thread_in_vm to native for the duration of the call (the
// ThreadToNativeFromVM destructor transitions back).
void ProgrammableInvoker::invoke_native(Stub stub, address buff, JavaThread* thread) {
  ThreadToNativeFromVM ttnfvm(thread);
  stub(buff);
}
// Native method ProgrammableInvoker.invokeNative(adapterStub, buff):
// run the generated adapter stub against the argument buffer.
JNI_ENTRY(void, PI_invokeNative(JNIEnv* env, jclass _unused, jlong adapter_stub, jlong buff))
  // JNI_ENTRY has already transitioned us into the VM.
  assert(thread->thread_state() == _thread_in_vm, "thread state is: %d", thread->thread_state());
  ProgrammableInvoker::invoke_native((ProgrammableInvoker::Stub) adapter_stub, (address) buff, thread);
JNI_END
// Native method ProgrammableInvoker.generateAdapter(abi, layout): generate
// an invoker stub for the given ABI/buffer layout and return its address.
JNI_ENTRY(jlong, PI_generateAdapter(JNIEnv* env, jclass _unused, jobject abi, jobject layout))
  return (jlong) ProgrammableInvoker::generate_adapter(abi, layout);
JNI_END
#define CC (char*) /*cast a literal from (const char*)*/
#define FN_PTR(f) CAST_FROM_FN_PTR(void*, &f)
#define FOREIGN_ABI "Ljdk/internal/foreign/abi"
// Native method table for jdk.internal.foreign.abi.ProgrammableInvoker.
static JNINativeMethod PI_methods[] = {
  {CC "invokeNative", CC "(JJ)V", FN_PTR(PI_invokeNative) },
  {CC "generateAdapter", CC "(" FOREIGN_ABI "/ABIDescriptor;" FOREIGN_ABI "/BufferLayout;" ")J", FN_PTR(PI_generateAdapter)}
};
// Called from ProgrammableInvoker.registerNatives (see nativeLookup.cpp) to
// install the PI_methods table on the ProgrammableInvoker class.
JNI_LEAF(void, JVM_RegisterProgrammableInvokerMethods(JNIEnv *env, jclass PI_class))
  int status = env->RegisterNatives(PI_class, PI_methods, sizeof(PI_methods)/sizeof(JNINativeMethod));
  // Fixed the failure message: the registered class is
  // jdk.internal.foreign.abi.ProgrammableInvoker (there is no
  // ...abi.programmable subpackage — see the lookup table in nativeLookup.cpp).
  guarantee(status == JNI_OK && !env->ExceptionOccurred(),
            "register jdk.internal.foreign.abi.ProgrammableInvoker natives");
JNI_END

View File

@ -0,0 +1,50 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_VM_PRIMS_UNIVERSALNATIVEINVOKER_HPP
#define SHARE_VM_PRIMS_UNIVERSALNATIVEINVOKER_HPP
#include "runtime/stubCodeGenerator.hpp"
#include "prims/foreign_globals.hpp"
// Native side of jdk.internal.foreign.abi.ProgrammableInvoker: generates
// and runs per-ABI adapter stubs that move arguments from a Java-filled
// buffer into registers/stack for a native downcall.
class ProgrammableInvoker: AllStatic {
private:
  // Code buffer size reserved for one generated invoker stub.
  static constexpr CodeBuffer::csize_t native_invoker_size = 1024;

  // Stub generator holding the ABI description and buffer layout used by
  // the CPU-specific generate() implementation.
  class Generator : StubCodeGenerator {
  private:
    const ABIDescriptor* _abi;
    const BufferLayout* _layout;
  public:
    Generator(CodeBuffer* code, const ABIDescriptor* abi, const BufferLayout* layout);

    void generate();
  };
public:
  // A generated adapter stub: takes the address of the argument buffer.
  using Stub = void(*)(address);

  static void invoke_native(Stub stub, address buff, JavaThread* thread);

  static address generate_adapter(jobject abi, jobject layout);
};
#endif // SHARE_VM_PRIMS_UNIVERSALNATIVEINVOKER_HPP

View File

@ -0,0 +1,112 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "memory/resourceArea.hpp"
#include "prims/universalUpcallHandler.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jniHandles.inline.hpp"
#define FOREIGN_ABI "jdk/internal/foreign/abi/"
extern struct JavaVM_ main_vm;
// Dispatches one upcall into Java: calls the static upcall method cached in
// the singleton (resolved in the constructor) with the receiver and the raw
// address of the argument buffer.
JNI_ENTRY(void, ProgrammableUpcallHandler::upcall_helper(JNIEnv* env, jobject rec, address buff))
const UpcallMethod& upcall_method = instance().upcall_method;
ResourceMark rm(thread);
JavaValue result(T_VOID);
JavaCallArguments args(2); // long = 2 slots
args.push_jobject(rec);
args.push_long((jlong) buff);
JavaCalls::call_static(&result, upcall_method.klass, upcall_method.name, upcall_method.sig, &args, thread);
JNI_END
// Runs upcall_helper, first attaching the current thread to the VM if it is
// not already attached (a foreign/native thread), and detaching it afterwards.
void ProgrammableUpcallHandler::attach_thread_and_do_upcall(jobject rec, address buff) {
Thread* thread = Thread::current_or_null();
bool should_detach = false;
JNIEnv* p_env = nullptr;
if (thread == nullptr) {
// Unknown thread: attach it for the duration of this upcall.
JavaVM_ *vm = (JavaVM *)(&main_vm);
jint result = vm->functions->AttachCurrentThread(vm, (void**) &p_env, nullptr);
guarantee(result == JNI_OK, "Could not attach thread for upcall. JNI error code: %d", result);
should_detach = true;
thread = Thread::current();
} else {
// Already attached; reuse its JNI environment.
p_env = thread->as_Java_thread()->jni_environment();
}
upcall_helper(p_env, rec, buff);
if (should_detach) {
JavaVM_ *vm = (JavaVM *)(&main_vm);
vm->functions->DetachCurrentThread(vm);
}
}
// Lazily-constructed singleton holding the resolved Java upcall target.
const ProgrammableUpcallHandler& ProgrammableUpcallHandler::instance() {
static ProgrammableUpcallHandler handler;
return handler;
}
// Resolves and caches the Java-side upcall target:
// jdk.internal.foreign.abi.ProgrammableUpcallHandler
//     .invoke(ProgrammableUpcallHandler, long)
ProgrammableUpcallHandler::ProgrammableUpcallHandler() {
Thread* THREAD = Thread::current();
ResourceMark rm(THREAD);
Symbol* sym = SymbolTable::new_symbol(FOREIGN_ABI "ProgrammableUpcallHandler");
Klass* k = SystemDictionary::resolve_or_null(sym, Handle(), Handle(), CATCH);
k->initialize(CATCH);
upcall_method.klass = k;
upcall_method.name = SymbolTable::new_symbol("invoke");
upcall_method.sig = SymbolTable::new_symbol("(L" FOREIGN_ABI "ProgrammableUpcallHandler;J)V");
// The method must exist; upcall stubs dispatch to it unconditionally.
assert(upcall_method.klass->lookup_method(upcall_method.name, upcall_method.sig) != nullptr,
"Could not find upcall method: %s.%s%s", upcall_method.klass->external_name(),
upcall_method.name->as_C_string(), upcall_method.sig->as_C_string());
}
// JNI bridge for allocateUpcallStub: pins the receiver with a global JNI
// handle (later released via destroy_global when the stub is freed) and
// generates the upcall stub for the given ABI/layout.
JNI_ENTRY(jlong, PUH_AllocateUpcallStub(JNIEnv *env, jobject rec, jobject abi, jobject buffer_layout))
Handle receiver(THREAD, JNIHandles::resolve(rec));
jobject global_rec = JNIHandles::make_global(receiver);
return (jlong) ProgrammableUpcallHandler::generate_upcall_stub(global_rec, abi, buffer_layout);
JNI_END
#define CC (char*) /*cast a literal from (const char*)*/
#define FN_PTR(f) CAST_FROM_FN_PTR(void*, &f)
// Native method table for jdk.internal.foreign.abi.ProgrammableUpcallHandler.
static JNINativeMethod PUH_methods[] = {
{CC "allocateUpcallStub", CC "(L" FOREIGN_ABI "ABIDescriptor;L" FOREIGN_ABI "BufferLayout;" ")J", FN_PTR(PUH_AllocateUpcallStub)},
};
/**
 * This one function is exported, used by NativeLookup.
 */
JNI_LEAF(void, JVM_RegisterProgrammableUpcallHandlerMethods(JNIEnv *env, jclass PUH_class))
int status = env->RegisterNatives(PUH_class, PUH_methods, sizeof(PUH_methods)/sizeof(JNINativeMethod));
guarantee(status == JNI_OK && !env->ExceptionOccurred(),
"register jdk.internal.foreign.abi.ProgrammableUpcallHandler natives");
JNI_END

View File

@ -0,0 +1,49 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_VM_PRIMS_UNIVERSALUPCALLHANDLER_HPP
#define SHARE_VM_PRIMS_UNIVERSALUPCALLHANDLER_HPP
#include "prims/foreign_globals.hpp"
// Handles native-to-Java "upcalls" for the Foreign Linker API: generates a
// per-upcall stub and routes stub invocations to a single static Java method.
class ProgrammableUpcallHandler {
private:
// Fixed code-buffer size reserved for one generated upcall stub.
static constexpr CodeBuffer::csize_t upcall_stub_size = 1024;
// Cached identity of the Java method every upcall is dispatched to.
struct UpcallMethod {
Klass* klass;
Symbol* name;
Symbol* sig;
} upcall_method;
ProgrammableUpcallHandler();
// Singleton accessor; construction resolves upcall_method (see the .cpp).
static const ProgrammableUpcallHandler& instance();
// Calls the Java upcall method with the receiver and argument-buffer address.
static void upcall_helper(JNIEnv* env, jobject rec, address buff);
// Like upcall_helper, but attaches the calling thread to the VM if needed.
static void attach_thread_and_do_upcall(jobject rec, address buff);
public:
static address generate_upcall_stub(jobject rec, jobject abi, jobject buffer_layout);
};
#endif // SHARE_VM_PRIMS_UNIVERSALUPCALLHANDLER_HPP

View File

@ -0,0 +1,62 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "code/codeCache.hpp"
// Frees a previously-allocated upcall stub: destroys the global JNI handle
// stored at the blob's content start, then releases the code blob itself.
// Returns false if 'addr' does not point into a known code blob.
JVM_ENTRY(static jboolean, UH_FreeUpcallStub0(JNIEnv *env, jobject _unused, jlong addr))
//acquire code cache lock
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
//find code blob
CodeBlob* cb = CodeCache::find_blob((char*)addr);
if (cb == NULL) {
return false;
}
//free global JNI handle
jobject* rec_ptr = (jobject*)(void*)cb -> content_begin();
JNIHandles::destroy_global(*rec_ptr);
//free code blob
CodeCache::free(cb);
return true;
JVM_END
#define CC (char*) /*cast a literal from (const char*)*/
#define FN_PTR(f) CAST_FROM_FN_PTR(void*, &f)
#define LANG "Ljava/lang/"
// These are the native methods on jdk.internal.foreign.NativeInvoker.
// NOTE(review): the guarantee below registers against UpcallStubs — the
// comment above looks stale; confirm which class actually owns these natives.
static JNINativeMethod UH_methods[] = {
{CC "freeUpcallStub0", CC "(J)Z", FN_PTR(UH_FreeUpcallStub0)}
};
/**
 * This one function is exported, used by NativeLookup.
 */
JVM_LEAF(void, JVM_RegisterUpcallHandlerMethods(JNIEnv *env, jclass UH_class))
int status = env->RegisterNatives(UH_class, UH_methods, sizeof(UH_methods)/sizeof(JNINativeMethod));
guarantee(status == JNI_OK && !env->ExceptionOccurred(),
"register jdk.internal.foreign.abi.UpcallStubs natives");
JVM_END

View File

@ -2255,6 +2255,25 @@ WB_ENTRY(void, WB_CheckThreadObjOfTerminatingThread(JNIEnv* env, jobject wb, job
}
WB_END
// Whitebox test hook: walks every frame on the current thread's stack and
// verifies each one; when 'log' is set, prints the frames while holding the
// tty lock so output from other threads cannot interleave.
WB_ENTRY(void, WB_VerifyFrames(JNIEnv* env, jobject wb, jboolean log))
intx tty_token = -1;
if (log) {
tty_token = ttyLocker::hold_tty();
tty->print_cr("[WhiteBox::VerifyFrames] Walking Frames");
}
for (StackFrameStream fst(JavaThread::current(), true, true); !fst.is_done(); fst.next()) {
frame* current_frame = fst.current();
if (log) {
current_frame->print_value();
}
current_frame->verify(fst.register_map());
}
if (log) {
tty->print_cr("[WhiteBox::VerifyFrames] Done");
ttyLocker::release_tty(tty_token);
}
WB_END
// Whitebox query: reports whether this VM build includes JVMTI support.
WB_ENTRY(jboolean, WB_IsJVMTIIncluded(JNIEnv* env, jobject wb))
return INCLUDE_JVMTI ? JNI_TRUE : JNI_FALSE;
WB_END
@ -2490,6 +2509,7 @@ static JNINativeMethod methods[] = {
{CC"handshakeWalkStack", CC"(Ljava/lang/Thread;Z)I", (void*)&WB_HandshakeWalkStack },
{CC"asyncHandshakeWalkStack", CC"(Ljava/lang/Thread;)V", (void*)&WB_AsyncHandshakeWalkStack },
{CC"checkThreadObjOfTerminatingThread", CC"(Ljava/lang/Thread;)V", (void*)&WB_CheckThreadObjOfTerminatingThread },
{CC"verifyFrames", CC"(Z)V", (void*)&WB_VerifyFrames },
{CC"addCompilerDirective", CC"(Ljava/lang/String;)I",
(void*)&WB_AddCompilerDirective },
{CC"removeCompilerDirective", CC"(I)V", (void*)&WB_RemoveCompilerDirective },

View File

@ -941,6 +941,7 @@ class CompiledArgumentOopFinder: public SignatureIterator {
// In LP64-land, the high-order bits are valid but unhelpful.
VMReg reg = _regs[_offset].first();
oop *loc = _fr.oopmapreg_to_location(reg, _reg_map);
assert(loc != NULL, "missing register map entry");
_f->do_oop(loc);
}

View File

@ -36,6 +36,8 @@
#include "memory/universe.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/methodHandles.hpp"
#include "prims/universalNativeInvoker.hpp"
#include "runtime/globals.hpp"
#include "runtime/atomic.hpp"
#include "runtime/flags/jvmFlag.hpp"
#include "runtime/handles.inline.hpp"

View File

@ -516,6 +516,11 @@ class SharedRuntime: AllStatic {
static address handle_unsafe_access(JavaThread* thread, address next_pc);
static BufferBlob* make_native_invoker(address call_target,
int shadow_space_bytes,
const GrowableArray<VMReg>& input_registers,
const GrowableArray<VMReg>& output_registers);
#ifndef PRODUCT
// Collect and print inline cache miss statistics

View File

@ -1522,6 +1522,7 @@ typedef HashtableEntry<InstanceKlass*, mtClass> KlassHashtableEntry;
declare_c2_type(CallDynamicJavaNode, CallJavaNode) \
declare_c2_type(CallRuntimeNode, CallNode) \
declare_c2_type(CallLeafNode, CallRuntimeNode) \
declare_c2_type(CallNativeNode, CallNode) \
declare_c2_type(CallLeafNoFPNode, CallLeafNode) \
declare_c2_type(AllocateNode, CallNode) \
declare_c2_type(AllocateArrayNode, AllocateNode) \
@ -1638,6 +1639,7 @@ typedef HashtableEntry<InstanceKlass*, mtClass> KlassHashtableEntry;
declare_c2_type(MachCallStaticJavaNode, MachCallJavaNode) \
declare_c2_type(MachCallDynamicJavaNode, MachCallJavaNode) \
declare_c2_type(MachCallRuntimeNode, MachCallNode) \
declare_c2_type(MachCallNativeNode, MachCallNode) \
declare_c2_type(MachHaltNode, MachReturnNode) \
declare_c2_type(MachTempNode, MachNode) \
declare_c2_type(MemNode, Node) \
@ -2553,6 +2555,7 @@ typedef HashtableEntry<InstanceKlass*, mtClass> KlassHashtableEntry;
declare_constant(vmIntrinsics::_linkToStatic) \
declare_constant(vmIntrinsics::_linkToSpecial) \
declare_constant(vmIntrinsics::_linkToInterface) \
declare_constant(vmIntrinsics::_linkToNative) \
\
/********************************/ \
/* Calling convention constants */ \

View File

@ -124,6 +124,23 @@ protected:
~GrowableArrayView() {}
public:
const static GrowableArrayView EMPTY;
// Element-wise equality: true iff both views have the same length and no
// pair of corresponding elements differs (compared via E's operator!=).
bool operator==(const GrowableArrayView<E>& rhs) const {
  if (_len != rhs._len) {
    return false;
  }
  bool equal = true;
  for (int idx = 0; equal && idx < _len; idx++) {
    equal = !(at(idx) != rhs.at(idx));
  }
  return equal;
}
// Inequality, defined as the negation of operator==.
bool operator!=(const GrowableArrayView<E>& rhs) const {
return !(*this == rhs);
}
E& at(int i) {
assert(0 <= i && i < _len, "illegal index");
return _data[i];
@ -295,7 +312,11 @@ public:
return min;
}
void print() {
// Size in bytes of the live portion of the backing array (_len elements,
// not the allocated capacity).
size_t data_size_in_bytes() const {
return _len * sizeof(E);
}
void print() const {
tty->print("Growable Array " INTPTR_FORMAT, p2i(this));
tty->print(": length %d (_max %d) { ", _len, _max);
for (int i = 0; i < _len; i++) {
@ -305,6 +326,9 @@ public:
}
};
template<typename E>
const GrowableArrayView<E> GrowableArrayView<E>::EMPTY(nullptr, 0, 0);
// GrowableArrayWithAllocator extends the "view" with
// the capability to grow and deallocate the data array.
//

View File

@ -605,6 +605,10 @@ public abstract class MethodHandle implements Constable {
/*non-public*/
static native @PolymorphicSignature Object linkToInterface(Object... args) throws Throwable;
/** TODO */
@IntrinsicCandidate
/*non-public*/ static native @PolymorphicSignature Object linkToNative(Object... args) throws Throwable;
/**
* Performs a variable arity invocation, passing the arguments in the given array
* to the method handle, as if via an inexact {@link #invoke invoke} from a call site

View File

@ -27,6 +27,7 @@ package java.lang.invoke;
import jdk.internal.access.JavaLangInvokeAccess;
import jdk.internal.access.SharedSecrets;
import jdk.internal.invoke.NativeEntryPoint;
import jdk.internal.org.objectweb.asm.ClassWriter;
import jdk.internal.org.objectweb.asm.MethodVisitor;
import jdk.internal.reflect.CallerSensitive;
@ -1774,6 +1775,11 @@ abstract class MethodHandleImpl {
return VarHandles.makeMemoryAddressViewHandle(carrier, skipAlignmentMaskCheck, alignmentMask, order);
}
// JavaLangInvokeAccess bridge: delegates to NativeMethodHandle.make so code
// outside java.lang.invoke can create native method handles.
@Override
public MethodHandle nativeMethodHandle(NativeEntryPoint nep, MethodHandle fallback) {
return NativeMethodHandle.make(nep, fallback);
}
@Override
public VarHandle filterValue(VarHandle target, MethodHandle filterToTarget, MethodHandle filterFromTarget) {
return VarHandles.filterValue(target, filterToTarget, filterFromTarget);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -86,7 +86,8 @@ final class MethodTypeForm {
LF_TF = 18, // tryFinally
LF_LOOP = 19, // loop
LF_INVSPECIAL_IFC = 20, // DMH invokeSpecial of (private) interface method
LF_LIMIT = 21;
LF_INVNATIVE = 21, // NMH invokeNative
LF_LIMIT = 22;
/** Return the type corresponding uniquely (1-1) to this MT-form.
* It might have any primitive returns or arguments, but will have no references except Object.

View File

@ -0,0 +1,174 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package java.lang.invoke;
import jdk.internal.vm.annotation.ForceInline;
import jdk.internal.invoke.NativeEntryPoint;
import static java.lang.invoke.LambdaForm.*;
import static java.lang.invoke.MethodHandleNatives.Constants.LM_TRUSTED;
import static java.lang.invoke.MethodHandleNatives.Constants.REF_invokeStatic;
import static java.lang.invoke.MethodHandleStatics.newInternalError;
/**
* This class models a method handle to a native function. A native method handle is made up of a {@link NativeEntryPoint},
* which is used to capture the characteristics of the native call (such as calling convention to be used,
* or whether a native transition is required) and a <em>fallback</em> method handle, which can be used
* when intrinsification of this method handle is not possible.
*/
/*non-public*/ class NativeMethodHandle extends MethodHandle {
// Describes the native call (entry point, calling convention, transition).
final NativeEntryPoint nep;
// Invoked instead of the native call when this handle is not intrinsified.
final MethodHandle fallback;
private NativeMethodHandle(MethodType type, LambdaForm form, MethodHandle fallback, NativeEntryPoint nep) {
super(type, form);
this.fallback = fallback;
this.nep = nep;
}
/**
 * Creates a new native method handle with given {@link NativeEntryPoint} and <em>fallback</em> method handle.
 *
 * @throws IllegalArgumentException if the type contains reference types,
 *         or if the fallback's type does not match the entry point's type
 */
public static MethodHandle make(NativeEntryPoint nep, MethodHandle fallback) {
MethodType type = nep.type();
if (!allTypesPrimitive(type))
throw new IllegalArgumentException("Type must only contain primitives: " + type);
if (type != fallback.type())
throw new IllegalArgumentException("Type of fallback must match");
LambdaForm lform = preparedLambdaForm(type);
return new NativeMethodHandle(type, lform, fallback, nep);
}
// True iff the return type and every parameter type are primitive.
private static boolean allTypesPrimitive(MethodType type) {
if (!type.returnType().isPrimitive())
return false;
for (Class<?> pType : type.parameterArray()) {
if (!pType.isPrimitive())
return false;
}
return true;
}
private static final MemberName.Factory IMPL_NAMES = MemberName.getFactory();
// Returns the LambdaForm for the given type, caching one form per erased
// (basic) type so all handles of that shape share it.
private static LambdaForm preparedLambdaForm(MethodType mtype) {
int id = MethodTypeForm.LF_INVNATIVE;
mtype = mtype.basicType();
LambdaForm lform = mtype.form().cachedLambdaForm(id);
if (lform != null) return lform;
lform = makePreparedLambdaForm(mtype);
return mtype.form().setCachedLambdaForm(id, lform);
}
// Builds a form equivalent to:
//   (nmh, args...) -> linkToNative(nmh.fallback, args..., nmh.nep)
private static LambdaForm makePreparedLambdaForm(MethodType mtype) {
// linkToNative's type: fallback handle prepended, entry point appended.
MethodType linkerType = mtype.insertParameterTypes(0, MethodHandle.class)
.appendParameterTypes(Object.class);
MemberName linker = new MemberName(MethodHandle.class, "linkToNative", linkerType, REF_invokeStatic);
try {
linker = IMPL_NAMES.resolveOrFail(REF_invokeStatic, linker, null, LM_TRUSTED, NoSuchMethodException.class);
} catch (ReflectiveOperationException ex) {
throw newInternalError(ex);
}
// Name-table layout: receiver, user args, then the three derived values.
final int NMH_THIS = 0;
final int ARG_BASE = 1;
final int ARG_LIMIT = ARG_BASE + mtype.parameterCount();
int nameCursor = ARG_LIMIT;
final int GET_FALLBACK = nameCursor++;
final int GET_NEP = nameCursor++;
final int LINKER_CALL = nameCursor++;
LambdaForm.Name[] names = arguments(nameCursor - ARG_LIMIT, mtype.invokerType());
assert (names.length == nameCursor);
names[GET_FALLBACK] = new LambdaForm.Name(Lazy.NF_internalFallback, names[NMH_THIS]);
names[GET_NEP] = new LambdaForm.Name(Lazy.NF_internalNativeEntryPoint, names[NMH_THIS]);
Object[] outArgs = new Object[linkerType.parameterCount()];
// Need to pass fallback here so we can call it without destroying the receiver register!!
outArgs[0] = names[GET_FALLBACK];
System.arraycopy(names, ARG_BASE, outArgs, 1, mtype.parameterCount());
outArgs[outArgs.length - 1] = names[GET_NEP];
names[LINKER_CALL] = new LambdaForm.Name(linker, outArgs);
LambdaForm lform = new LambdaForm(ARG_LIMIT, names, LAST_RESULT);
// This is a tricky bit of code. Don't send it through the LF interpreter.
lform.compileToBytecode();
return lform;
}
final
@Override
MethodHandle copyWith(MethodType mt, LambdaForm lf) {
assert (this.getClass() == NativeMethodHandle.class); // must override in subclasses
return new NativeMethodHandle(mt, lf, fallback, nep);
}
@Override
BoundMethodHandle rebind() {
return BoundMethodHandle.makeReinvoker(this);
}
// LambdaForm intrinsics: extract the entry point / fallback from a receiver.
@ForceInline
static Object internalNativeEntryPoint(Object mh) {
return ((NativeMethodHandle)mh).nep;
}
@ForceInline
static MethodHandle internalFallback(Object mh) {
return ((NativeMethodHandle)mh).fallback;
}
/**
 * Pre-initialized NamedFunctions for bootstrapping purposes.
 * Factored in an inner class to delay initialization until first usage.
 */
private static class Lazy {
static final NamedFunction
NF_internalNativeEntryPoint;
static final NamedFunction
NF_internalFallback;
static {
try {
Class<NativeMethodHandle> THIS_CLASS = NativeMethodHandle.class;
NamedFunction[] nfs = new NamedFunction[]{
NF_internalNativeEntryPoint = new NamedFunction(
THIS_CLASS.getDeclaredMethod("internalNativeEntryPoint", Object.class)),
NF_internalFallback = new NamedFunction(
THIS_CLASS.getDeclaredMethod("internalFallback", Object.class))
};
for (NamedFunction nf : nfs) {
// Each nf must be statically invocable or we get tied up in our bootstraps.
assert (InvokerBytecodeGenerator.isStaticallyInvocable(nf.member)) : nf;
nf.resolve();
}
} catch (ReflectiveOperationException ex) {
throw newInternalError(ex);
}
}
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,8 @@
package jdk.internal.access;
import jdk.internal.invoke.NativeEntryPoint;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodType;
import java.lang.invoke.VarHandle;
@ -119,4 +121,15 @@ public interface JavaLangInvokeAccess {
* Used by {@code jdk.incubator.foreign.MemoryHandles}.
*/
VarHandle insertCoordinates(VarHandle target, int pos, Object... values);
/**
* Returns a native method handle with given arguments as fallback and steering info.
*
* Will allow JIT to intrinsify.
*
* @param nep the native entry point
* @param fallback the fallback handle
* @return the native method handle
*/
MethodHandle nativeMethodHandle(NativeEntryPoint nep, MethodHandle fallback);
}

View File

@ -0,0 +1,31 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package jdk.internal.access.foreign;
/**
 * Proxy for symbol lookup in a native library.
 */
public interface NativeLibraryProxy {
// Returns the address of the named symbol.
long lookup(String name) throws NoSuchMethodException;
}

View File

@ -0,0 +1,29 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.internal.invoke;
/**
 * Proxy exposing the one ABI detail needed here: the shadow space size,
 * in bytes (see NativeEntryPoint.make).
 */
public interface ABIDescriptorProxy {
int shadowSpaceBytes();
}

View File

@ -0,0 +1,88 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.internal.invoke;
import java.lang.invoke.MethodType;
import java.util.Objects;
/**
* This class describes a native call, including arguments/return shuffle moves, PC entry point and
* various other info which are relevant when the call will be intrinsified by C2.
*/
public class NativeEntryPoint {
static {
registerNatives();
}
// Entry address (PC) of the native function to call.
private final long addr;
// Shadow space to reserve for the call, in bytes (from the ABI descriptor).
private final int shadowSpace;
// encoded as VMRegImpl*
private final long[] argMoves;
private final long[] returnMoves;
// Whether a thread-state (native) transition is required for the call.
private final boolean needTransition;
private final MethodType methodType; // C2 sees erased version (byte -> int), so need this explicitly
private final String name;
private NativeEntryPoint(long addr, int shadowSpace, long[] argMoves, long[] returnMoves,
boolean needTransition, MethodType methodType, String name) {
this.addr = addr;
this.shadowSpace = shadowSpace;
this.argMoves = Objects.requireNonNull(argMoves);
this.returnMoves = Objects.requireNonNull(returnMoves);
this.needTransition = needTransition;
this.methodType = methodType;
this.name = name;
}
/**
 * Factory: encodes the argument/return storage descriptions into the VMReg
 * form the VM expects.
 *
 * @throws IllegalArgumentException if more than one return move is given
 */
public static NativeEntryPoint make(long addr, String name, ABIDescriptorProxy abi, VMStorageProxy[] argMoves, VMStorageProxy[] returnMoves,
boolean needTransition, MethodType methodType) {
if (returnMoves.length > 1) {
throw new IllegalArgumentException("Multiple register return not supported");
}
return new NativeEntryPoint(
addr, abi.shadowSpaceBytes(), encodeVMStorages(argMoves), encodeVMStorages(returnMoves), needTransition, methodType, name);
}
// Translates each (type, index) storage pair into its VMReg encoding.
private static long[] encodeVMStorages(VMStorageProxy[] moves) {
long[] out = new long[moves.length];
for (int i = 0; i < moves.length; i++) {
out[i] = vmStorageToVMReg(moves[i].type(), moves[i].index());
}
return out;
}
private static native long vmStorageToVMReg(int type, int index);
public MethodType type() {
return methodType;
}
private static native void registerNatives();
}

View File

@ -0,0 +1,30 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.internal.invoke;
/**
 * Proxy describing a single VM storage location as a (type, index) pair.
 * NOTE(review): presumably 'type' selects a storage class and 'index' a slot
 * within it — confirm against the VM-side VMStorage/VMReg encoding.
 */
public interface VMStorageProxy {
int type();
int index();
}

View File

@ -384,6 +384,20 @@ public final class NativeLibraries {
}
}
// Pseudo-library representing symbols already visible in the current process:
// it can be searched (process-wide lookup) but never loaded or reloaded.
public static final NativeLibrary defaultLibrary = new NativeLibraryImpl(Object.class, "<default>", true, true) {
@Override
boolean open() {
// The "default library" is the process itself; there is nothing to load.
throw new UnsupportedOperationException("Cannot load default library");
}
@Override
public long find(String name) {
// Delegate to the native process-wide symbol search.
return NativeLibraries.findEntryInProcess(name);
}
};
/*
* The run() method will be invoked when this class loader becomes
* phantom reachable to unload the native library.
@ -464,4 +478,5 @@ public final class NativeLibraries {
private static native void unload(String name, boolean isBuiltin, boolean isJNI, long handle);
private static native String findBuiltinLib(String name);
private static native long findEntry0(NativeLibraryImpl lib, String name);
private static native long findEntryInProcess(String name);
}

View File

@ -164,7 +164,8 @@ module java.base {
exports jdk.internal.loader to
java.instrument,
java.logging,
java.naming;
java.naming,
jdk.incubator.foreign;
exports jdk.internal.jmod to
jdk.compiler,
jdk.jlink;
@ -351,6 +352,8 @@ module java.base {
java.prefs;
exports sun.util.resources to
jdk.localedata;
exports jdk.internal.invoke to
jdk.incubator.foreign;
// the service types defined by the APIs in this module

View File

@ -246,6 +246,29 @@ Java_jdk_internal_loader_NativeLibraries_findEntry0
return res;
}
/*
* Class: jdk_internal_loader_NativeLibraries
* Method: findEntryInProcess
* Signature: (Ljava/lang/String;)J
*/
JNIEXPORT jlong JNICALL
Java_jdk_internal_loader_NativeLibraries_findEntryInProcess
(JNIEnv *env, jclass cls, jstring name)
{
const char *cname;
jlong res;
if (!initIDs(env))
return jlong_zero;
/* GetStringUTFChars returns 0 on failure (an exception is then pending) */
cname = (*env)->GetStringUTFChars(env, name, 0);
if (cname == 0)
return jlong_zero;
/* search all symbols visible to this process; 0 if the name is not found */
res = ptr_to_jlong(findEntryInProcess(cname));
(*env)->ReleaseStringUTFChars(env, name, cname);
return res;
}
/*
* Class: jdk_internal_loader_NativeLibraries
* Method: findBuiltinLib

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -333,6 +333,8 @@ JNIEXPORT void InitializeEncoding(JNIEnv *env, const char *name);
void* getProcessHandle();
void* findEntryInProcess(const char* name);
void buildJniFunctionName(const char *sym, const char *cname,
char *jniEntryName);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,6 +26,7 @@
#include <errno.h>
#include <string.h>
#include "jvm.h"
#include "jni.h"
#include "jni_util.h"
#include "dlfcn.h"
@ -50,6 +51,10 @@ void* getProcessHandle() {
return procHandle;
}
/*
 * Process-wide symbol lookup: dlsym(RTLD_DEFAULT, name) semantics, routed
 * through the JVM wrapper.
 */
void* findEntryInProcess(const char* name) {
return JVM_FindLibraryEntry(RTLD_DEFAULT, name);
}
void buildJniFunctionName(const char *sym, const char *cname,
char *jniEntryName) {
strcpy(jniEntryName, sym);

View File

@ -26,6 +26,7 @@
#include <stdlib.h>
#include <string.h>
#include <windows.h>
#include <psapi.h>
#include <locale.h>
#include "jni.h"
@ -35,6 +36,31 @@ void* getProcessHandle() {
return (void*)GetModuleHandle(NULL);
}
/*
* Windows doesn't have an RTLD_DEFAULT equivalent, so in stead we have to
* iterate over all the modules loaded by the process to implement the
* default library behaviour.
*/
void* findEntryInProcess(const char* name) {
HANDLE hProcess = GetCurrentProcess();
HMODULE hMods[1024];
DWORD cbNeeded; // array size in bytes
// first come, first served
if (EnumProcessModules(hProcess, hMods, sizeof(hMods), &cbNeeded)) {
for (int i = 0; i < (cbNeeded / sizeof(HMODULE)); i++) {
HMODULE mod = hMods[i];
FARPROC proc = GetProcAddress(mod, name);
if(proc != NULL) {
return proc;
}
}
}
return NULL;
}
/*
* Windows symbols can be simple like JNI_OnLoad or __stdcall format
* like _JNI_OnLoad@8. We need to handle both.

Some files were not shown because too many files have changed in this diff Show More