8042309: Some bugfixes for the ppc64 port
Reviewed-by: kvn
commit 9d479b16de
parent 142ed91465
@@ -1,3 +1,4 @@
 /*
  * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2012, 2014 SAP AG. All rights reserved.
@@ -403,7 +404,7 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(Label& stack_ov
   BLOCK_COMMENT("compute_interpreter_state {");

   // access_flags = method->access_flags();
-  // TODO: PPC port: assert(4 == methodOopDesc::sz_access_flags(), "unexpected field size");
+  // TODO: PPC port: assert(4 == sizeof(AccessFlags), "unexpected field size");
   __ lwa(access_flags, method_(access_flags));

   // parameter_count = method->constMethod->size_of_parameters();
@@ -1053,7 +1054,7 @@ address CppInterpreterGenerator::generate_native_entry(void) {
   assert(access_flags->is_nonvolatile(),
          "access_flags must be in a non-volatile register");
   // Type check.
-  // TODO: PPC port: assert(4 == methodOopDesc::sz_access_flags(), "unexpected field size");
+  // TODO: PPC port: assert(4 == sizeof(AccessFlags), "unexpected field size");
   __ lwz(access_flags, method_(access_flags));

   // We don't want to reload R19_method and access_flags after calls
@@ -1836,7 +1837,7 @@ address CppInterpreterGenerator::generate_normal_entry(void) {
   // Interpreter state fields.
   const Register msg = R24_tmp4;

-  // MethodOop fields.
+  // Method fields.
   const Register parameter_count = R25_tmp5;
   const Register result_index    = R26_tmp6;

@@ -2021,7 +2022,7 @@ address CppInterpreterGenerator::generate_normal_entry(void) {
   __ add(R17_tos, R17_tos, parameter_count);

   // Result stub address array index
-  // TODO: PPC port: assert(4 == methodOopDesc::sz_result_index(), "unexpected field size");
+  // TODO: PPC port: assert(4 == sizeof(AccessFlags), "unexpected field size");
   __ lwa(result_index, method_(result_index));

   __ li(msg, BytecodeInterpreter::method_resume);
@@ -2707,7 +2708,7 @@ address CppInterpreterGenerator::generate_normal_entry(void) {
   __ ld(R3_ARG1, state_(_result._osr._osr_buf));
   __ mtctr(R12_scratch2);

-  // Load method oop, gc may move it during execution of osr'd method.
+  // Load method, gc may move it during execution of osr'd method.
   __ ld(R22_tmp2, state_(_method));
   // Load message 'call_method'.
   __ li(R23_tmp3, BytecodeInterpreter::call_method);
@@ -26,6 +26,8 @@
 #ifndef CPU_PPC_VM_FRAME_PPC_INLINE_HPP
 #define CPU_PPC_VM_FRAME_PPC_INLINE_HPP

+#include "code/codeCache.hpp"
+
 // Inline functions for ppc64 frames:

 // Find codeblob and set deopt_state.
@@ -26,7 +26,7 @@
 #ifndef CPU_PPC_VM_INTERP_MASM_PPC_64_HPP
 #define CPU_PPC_VM_INTERP_MASM_PPC_64_HPP

-#include "assembler_ppc.inline.hpp"
+#include "asm/macroAssembler.hpp"
 #include "interpreter/invocationCounter.hpp"

 // This file specializes the assembler with interpreter-specific macros.
@@ -24,6 +24,7 @@
  */

 #include "precompiled.hpp"
+#include "asm/assembler.inline.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/interpreterRuntime.hpp"
 #include "memory/allocation.inline.hpp"
@@ -139,32 +139,16 @@ address AbstractInterpreterGenerator::generate_slow_signature_handler() {
   // Signature is in R3_RET. Signature is callee saved.
   __ mr(signature, R3_RET);

-  // Reload method, it may have moved.
-#ifdef CC_INTERP
-  __ ld(R19_method, state_(_method));
-#else
-  __ ld(R19_method, 0, target_sp);
-  __ ld(R19_method, _ijava_state_neg(method), R19_method);
-#endif
-
-  // Get the result handler.
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::get_result_handler), R16_thread, R19_method);
-
-  // Reload method, it may have moved.
-#ifdef CC_INTERP
-  __ ld(R19_method, state_(_method));
-#else
-  __ ld(R19_method, 0, target_sp);
-  __ ld(R19_method, _ijava_state_neg(method), R19_method);
-#endif
-
   {
     Label L;
     // test if static
     // _access_flags._flags must be at offset 0.
     // TODO PPC port: requires change in shared code.
     //assert(in_bytes(AccessFlags::flags_offset()) == 0,
-    //       "MethodOopDesc._access_flags == MethodOopDesc._access_flags._flags");
+    //       "MethodDesc._access_flags == MethodDesc._access_flags._flags");
     // _access_flags must be a 32 bit value.
     assert(sizeof(AccessFlags) == 4, "wrong size");
     __ lwa(R11_scratch1/*access_flags*/, method_(access_flags));
@@ -32,7 +32,7 @@


 address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
-  // we don't have fast jni accessors.
+  // We don't have fast jni accessors.
   return (address) -1;
 }

@@ -57,12 +57,12 @@ address JNI_FastGetField::generate_fast_get_int_field() {
 }

 address JNI_FastGetField::generate_fast_get_long_field() {
-  // we don't have fast jni accessors.
+  // We don't have fast jni accessors.
   return (address) -1;
 }

 address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
-  // e don't have fast jni accessors.
+  // We don't have fast jni accessors.
   return (address) -1;
 }

@@ -898,7 +898,7 @@ source_hpp %{
// To keep related declarations/definitions/uses close together,
// we switch between source %{ }% and source_hpp %{ }% freely as needed.

-// Returns true if Node n is followed by a MemBar node that
+// Returns true if Node n is followed by a MemBar node that
// will do an acquire. If so, this node must not do the acquire
// operation.
bool followed_by_acquire(const Node *n);
@@ -908,7 +908,7 @@ source %{

// Optimize load-acquire.
//
-// Check if acquire is unnecessary due to following operation that does
+// Check if acquire is unnecessary due to following operation that does
// acquire anyways.
// Walk the pattern:
//
@@ -919,12 +919,12 @@ source %{
//     Proj(ctrl)  Proj(mem)
//       |         |
//   MemBarRelease/Volatile
-//
+//
bool followed_by_acquire(const Node *load) {
  assert(load->is_Load(), "So far implemented only for loads.");

  // Find MemBarAcquire.
-  const Node *mba = NULL;
+  const Node *mba = NULL;
  for (DUIterator_Fast imax, i = load->fast_outs(imax); i < imax; i++) {
    const Node *out = load->fast_out(i);
    if (out->Opcode() == Op_MemBarAcquire) {
@@ -937,7 +937,7 @@ bool followed_by_acquire(const Node *load) {

  // Find following MemBar node.
  //
-  // The following node must be reachable by control AND memory
+  // The following node must be reachable by control AND memory
  // edge to assure no other operations are in between the two nodes.
  //
  // So first get the Proj node, mem_proj, to use it to iterate forward.
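The comments in the two hunks above describe the optimization in C2 IR terms. As a rough portable analogy — not the matcher code itself, and using std::atomic rather than HotSpot types — a relaxed load followed directly by an acquire fence gives the same ordering as a load-acquire, which is why a load that is immediately followed by a MemBarAcquire need not emit its own barrier:

```cpp
#include <atomic>

std::atomic<int> flag{0};

// A load that carries its own acquire semantics.
int load_acquire() {
  return flag.load(std::memory_order_acquire);
}

// The same ordering guarantee as plain-load + acquire fence.
// This is the shape followed_by_acquire() looks for in the ideal
// graph: if a MemBarAcquire directly follows the load, the load
// itself can be emitted without a trailing barrier instruction.
int load_then_fence() {
  int v = flag.load(std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_acquire);
  return v;
}
```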
@@ -1135,6 +1135,7 @@ class CallStubImpl {

 public:

+  // Emit call stub, compiled java to interpreter.
   static void emit_trampoline_stub(MacroAssembler &_masm, int destination_toc_offset, int insts_call_instruction_offset);

   // Size of call trampoline stub.
@@ -2755,7 +2756,7 @@ encode %{
    // inputs for new nodes
    m1->add_req(NULL, n_toc);
    m2->add_req(NULL, m1);
-
+
    // operands for new nodes
    m1->_opnds[0] = new (C) iRegPdstOper(); // dst
    m1->_opnds[1] = op_src;                 // src
@@ -2763,29 +2764,29 @@ encode %{
    m2->_opnds[0] = new (C) iRegPdstOper(); // dst
    m2->_opnds[1] = op_src;                 // src
    m2->_opnds[2] = new (C) iRegLdstOper(); // base
-
+
    // Initialize ins_attrib TOC fields.
    m1->_const_toc_offset         = -1;
    m2->_const_toc_offset_hi_node = m1;
-
+
    // Register allocation for new nodes.
    ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
    ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
-
+
    nodes->push(m1);
    nodes->push(m2);
    assert(m2->bottom_type()->isa_ptr(), "must be ptr");
  } else {
    loadConPNode *m2 = new (C) loadConPNode();
-
+
    // inputs for new nodes
    m2->add_req(NULL, n_toc);
-
+
    // operands for new nodes
    m2->_opnds[0] = new (C) iRegPdstOper(); // dst
    m2->_opnds[1] = op_src;                 // src
    m2->_opnds[2] = new (C) iRegPdstOper(); // toc
-
+
    // Register allocation for new nodes.
    ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));

@@ -2977,17 +2978,17 @@ encode %{
    n_sub_base->_opnds[1] = op_crx;
    n_sub_base->_opnds[2] = op_src;
    n_sub_base->_bottom_type = _bottom_type;
-
+
    n_shift->add_req(n_region, n_sub_base);
    n_shift->_opnds[0] = op_dst;
    n_shift->_opnds[1] = op_dst;
    n_shift->_bottom_type = _bottom_type;
-
+
    ra_->set_pair(n_shift->_idx,    ra_->get_reg_second(this), ra_->get_reg_first(this));
    ra_->set_pair(n_compare->_idx,  ra_->get_reg_second(n_crx), ra_->get_reg_first(n_crx));
    ra_->set_pair(n_sub_base->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
    ra_->set_pair(n_move->_idx,     ra_->get_reg_second(this), ra_->get_reg_first(this));
-
+
    nodes->push(n_move);
    nodes->push(n_compare);
    nodes->push(n_sub_base);
@@ -3064,20 +3065,20 @@ encode %{
  } else {
    // before Power 7
    cond_add_baseNode *n_add_base = new (C) cond_add_baseNode();
-
+
    n_add_base->add_req(n_region, n_compare, n_shift);
    n_add_base->_opnds[0] = op_dst;
    n_add_base->_opnds[1] = op_crx;
    n_add_base->_opnds[2] = op_dst;
    n_add_base->_bottom_type = _bottom_type;
-
+
    assert(ra_->is_oop(this) == true, "A decodeN node must produce an oop!");
    ra_->set_oop(n_add_base, true);
-
+
    ra_->set_pair(n_shift->_idx,    ra_->get_reg_second(this), ra_->get_reg_first(this));
    ra_->set_pair(n_compare->_idx,  ra_->get_reg_second(n_crx), ra_->get_reg_first(n_crx));
    ra_->set_pair(n_add_base->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
-
+
    nodes->push(n_compare);
    nodes->push(n_shift);
    nodes->push(n_add_base);
@@ -3634,11 +3635,11 @@ encode %{
    // Req...
    for (uint i = 0; i < req(); ++i) {
      // The expanded node does not need toc any more.
-      // Add the inline cache constant here instead. This expresses the
+      // Add the inline cache constant here instead. This expresses the
      // register of the inline cache must be live at the call.
      // Else we would have to adapt JVMState by -1.
      if (i == mach_constant_base_node_input()) {
-        call->add_req(loadConLNodes_IC._last);
+        call->add_req(loadConLNodes_IC._last);
      } else {
        call->add_req(in(i));
      }
@@ -3666,6 +3667,8 @@ encode %{
  %}

  // Compound version of call dynamic
+  // Toc is only passed so that it can be used in ins_encode statement.
+  // In the code we have to use $constanttablebase.
  enc_class enc_java_dynamic_call(method meth, iRegLdst toc) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
    MacroAssembler _masm(&cbuf);
@@ -3673,14 +3676,17 @@ encode %{

+    Register Rtoc = (ra_) ? $constanttablebase : R2_TOC;
+#if 0
    int vtable_index = this->_vtable_index;
    if (_vtable_index < 0) {
      // Must be invalid_vtable_index, not nonvirtual_vtable_index.
      assert(_vtable_index == Method::invalid_vtable_index, "correct sentinel value");
      Register ic_reg = as_Register(Matcher::inline_cache_reg_encode());
-      AddressLiteral meta = __ allocate_metadata_address((Metadata *)Universe::non_oop_word());

      // Virtual call relocation will point to ic load.
      address virtual_call_meta_addr = __ pc();
-      __ load_const_from_method_toc(ic_reg, meta, Rtoc);
+      // Load a clear inline cache.
+      AddressLiteral empty_ic((address) Universe::non_oop_word());
+      __ load_const_from_method_toc(ic_reg, empty_ic, Rtoc);
      // CALL to fixup routine. Fixup routine uses ScopeDesc info
      // to determine who we intended to call.
      __ relocate(virtual_call_Relocation::spec(virtual_call_meta_addr));
@@ -3713,7 +3719,6 @@ encode %{
               "Fix constant in ret_addr_offset()");
    }
#endif
    guarantee(0, "Fix handling of toc edge: messes up derived/base pairs.");
    Unimplemented(); // ret_addr_offset not yet fixed. Depends on compressed oops (load klass!).
  %}
-
@@ -5439,7 +5444,7 @@ instruct loadI_ac(iRegIdst dst, memory mem) %{
  ins_pipe(pipe_class_memory);
%}

-// Match loading integer and casting it to unsigned int in
+// Match loading integer and casting it to unsigned int in
// long register.
// LoadI + ConvI2L + AndL 0xffffffff.
instruct loadUI2L(iRegLdst dst, memory mem, immL_32bits mask) %{
@@ -6081,7 +6086,7 @@ instruct loadConNKlass_hi(iRegNdst dst, immNKlass src) %{
  ins_pipe(pipe_class_default);
%}

-// This needs a match rule so that build_oop_map knows this is
+// This needs a match rule so that build_oop_map knows this is
// not a narrow oop.
instruct loadConNKlass_lo(iRegNdst dst, immNKlass_NM src1, iRegNsrc src2) %{
  match(Set dst src1);
@@ -6705,7 +6710,7 @@ instruct cond_set_0_oop(iRegNdst dst, flagsReg crx, iRegPsrc src1) %{
  size(4);
  ins_encode %{
    // This is a Power7 instruction for which no machine description exists.
-    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
    __ isel_0($dst$$Register, $crx$$CondRegister, Assembler::equal, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
@@ -6850,7 +6855,7 @@ instruct cond_set_0_ptr(iRegPdst dst, flagsReg crx, iRegPsrc src1) %{
  size(4);
  ins_encode %{
    // This is a Power7 instruction for which no machine description exists.
-    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
    __ isel_0($dst$$Register, $crx$$CondRegister, Assembler::equal, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
@@ -7067,7 +7072,7 @@ instruct decodeNKlass_notNull_addBase_Ex(iRegPdst dst, iRegLsrc base, iRegNsrc s
    n1->_bottom_type = _bottom_type;

    decodeNKlass_shiftNode *n2 = new (C) decodeNKlass_shiftNode();
-    n2->add_req(n_region, n2);
+    n2->add_req(n_region, n1);
    n2->_opnds[0] = op_dst;
    n2->_opnds[1] = op_dst;
    n2->_bottom_type = _bottom_type;
@@ -7202,7 +7207,7 @@ instruct membar_volatile() %{
// inline_unsafe_load_store).
//
// Add this node again if we found a good solution for inline_unsafe_load_store().
-// Don't forget to look at the implementation of post_store_load_barrier again,
+// Don't forget to look at the implementation of post_store_load_barrier again,
// we did other fixes in that method.
//instruct unnecessary_membar_volatile() %{
//  match(MemBarVolatile);
@@ -7240,7 +7245,7 @@ instruct cmovI_reg_isel(cmpOp cmp, flagsReg crx, iRegIdst dst, iRegIsrc src) %{
    // exists. Anyways, the scheduler should be off on Power7.
    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
    int cc = $cmp$$cmpcode;
-    __ isel($dst$$Register, $crx$$CondRegister,
+    __ isel($dst$$Register, $crx$$CondRegister,
            (Assembler::Condition)(cc & 3), /*invert*/((~cc) & 8), $src$$Register);
  %}
  ins_pipe(pipe_class_default);
@@ -7286,7 +7291,7 @@ instruct cmovL_reg_isel(cmpOp cmp, flagsReg crx, iRegLdst dst, iRegLsrc src) %{
    // exists. Anyways, the scheduler should be off on Power7.
    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
    int cc = $cmp$$cmpcode;
-    __ isel($dst$$Register, $crx$$CondRegister,
+    __ isel($dst$$Register, $crx$$CondRegister,
            (Assembler::Condition)(cc & 3), /*invert*/((~cc) & 8), $src$$Register);
  %}
  ins_pipe(pipe_class_default);
@@ -7332,7 +7337,7 @@ instruct cmovN_reg_isel(cmpOp cmp, flagsReg crx, iRegNdst dst, iRegNsrc src) %{
    // exists. Anyways, the scheduler should be off on Power7.
    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
    int cc = $cmp$$cmpcode;
-    __ isel($dst$$Register, $crx$$CondRegister,
+    __ isel($dst$$Register, $crx$$CondRegister,
            (Assembler::Condition)(cc & 3), /*invert*/((~cc) & 8), $src$$Register);
  %}
  ins_pipe(pipe_class_default);
@@ -7379,7 +7384,7 @@ instruct cmovP_reg_isel(cmpOp cmp, flagsReg crx, iRegPdst dst, iRegPsrc src) %{
    // exists. Anyways, the scheduler should be off on Power7.
    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
    int cc = $cmp$$cmpcode;
-    __ isel($dst$$Register, $crx$$CondRegister,
+    __ isel($dst$$Register, $crx$$CondRegister,
            (Assembler::Condition)(cc & 3), /*invert*/((~cc) & 8), $src$$Register);
  %}
  ins_pipe(pipe_class_default);
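All four cmov*_reg_isel encoders decode `$cmp$$cmpcode` the same way. A hypothetical reading of that convention, sketched outside the ADL context (the struct and names here are illustrative, not HotSpot's):

```cpp
// Illustrative decode of the cmpcode convention used by the four
// isel encodings above: the low two bits select which CR bit isel
// tests, and the encoder passes ((~cc) & 8) as the invert flag,
// i.e. the selection is inverted when bit 3 of cc is clear.
struct IselOperands {
  int  condition;  // value of (cc & 3), cast to Assembler::Condition
  bool invert;     // nonzero result of ((~cc) & 8)
};

inline IselOperands decode_cmpcode(int cc) {
  return IselOperands{ cc & 3, ((~cc) & 8) != 0 };
}
```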
@@ -7525,8 +7530,8 @@ instruct compareAndSwapI_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc
  ins_encode %{
    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgw(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
-                MacroAssembler::MemBarFenceAfter, MacroAssembler::cmpxchgx_hint_atomic_update(),
+    __ cmpxchgw(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
+                MacroAssembler::MemBarFenceAfter, MacroAssembler::cmpxchgx_hint_atomic_update(),
                $res$$Register, true);
  %}
  ins_pipe(pipe_class_default);
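For readers who don't live in PPC assembly: the contract in the comment — set CCR0 from the comparison and leave true/false in the result register, with a full fence after the exchange (MemBarFenceAfter) — corresponds to a sequentially consistent strong compare-and-exchange. A minimal portable sketch of the same semantics:

```cpp
#include <atomic>

// Portable counterpart of what this compareAndSwapI encoding
// provides: strong CAS with a full barrier, result as a boolean
// (the role played by $res$$Register).
bool cas_int(std::atomic<int>& mem, int expected, int desired) {
  return mem.compare_exchange_strong(expected, desired,
                                     std::memory_order_seq_cst);
}
```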
@@ -7932,7 +7937,23 @@ instruct subL_reg_imm16(iRegLdst dst, iRegLsrc src1, immL16 src2) %{

// Turn the sign-bit of a long into a 64-bit mask, 0x0...0 for
// positive longs and 0xF...F for negative ones.
-instruct signmask64I_regI(iRegIdst dst, iRegIsrc src) %{
+instruct signmask64I_regL(iRegIdst dst, iRegLsrc src) %{
  // no match-rule, false predicate
  effect(DEF dst, USE src);
  predicate(false);
+
+  format %{ "SRADI $dst, $src, #63" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_sradi);
+    __ sradi($dst$$Register, $src$$Register, 0x3f);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Turn the sign-bit of a long into a 64-bit mask, 0x0...0 for
+// positive longs and 0xF...F for negative ones.
+instruct signmask64L_regL(iRegLdst dst, iRegLsrc src) %{
+  // no match-rule, false predicate
+  effect(DEF dst, USE src);
+  predicate(false);
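What the signmask64 nodes compute is easiest to see in plain C++: an arithmetic right shift by 63 smears the sign bit across the whole register, which is exactly what the `sradi dst, src, 0x3f` in the encoder above does. A minimal sketch:

```cpp
#include <cstdint>

// 0x0000000000000000 for non-negative inputs,
// 0xFFFFFFFFFFFFFFFF for negative ones.
inline int64_t signmask64(int64_t x) {
  // Arithmetic shift of a negative signed value is the universal
  // behavior on mainstream compilers (and guaranteed since C++20).
  return x >> 63;
}
```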
@@ -8896,7 +8917,7 @@ instruct andI_reg_immIpowerOf2(iRegIdst dst, iRegIsrc src1, immIpowerOf2 src2) %
  size(4);
  ins_encode %{
    // TODO: PPC port $archOpcode(ppc64Opcode_rlwinm);
-    __ rlwinm($dst$$Register, $src1$$Register, 0,
+    __ rlwinm($dst$$Register, $src1$$Register, 0,
              (31-log2_long((jlong) $src2$$constant)) & 0x1f, (31-log2_long((jlong) $src2$$constant)) & 0x1f);
  %}
  ins_pipe(pipe_class_default);
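The rlwinm operands look dense but encode a simple mask: with rotate count 0 and mask-begin equal to mask-end equal to 31 - log2(src2) (PowerPC numbers bits from the most significant end), the instruction keeps exactly the one bit that the power-of-two immediate selects. In plain C++ terms it is just:

```cpp
#include <cstdint>

// Equivalent of the rlwinm encoding above for a power-of-two mask:
// keep the single bit selected by pow2, clear everything else.
inline uint32_t and_power_of_two(uint32_t src, uint32_t pow2) {
  return src & pow2;  // pow2 has exactly one bit set
}
```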
@@ -9622,14 +9643,14 @@ instruct cmpLTMask_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  ins_cost(DEFAULT_COST*4);

  expand %{
-    iRegIdst src1s;
-    iRegIdst src2s;
-    iRegIdst diff;
-    sxtI_reg(src1s, src1); // ensure proper sign extention
-    sxtI_reg(src2s, src2); // ensure proper sign extention
-    subI_reg_reg(diff, src1s, src2s);
+    iRegLdst src1s;
+    iRegLdst src2s;
+    iRegLdst diff;
+    convI2L_reg(src1s, src1); // Ensure proper sign extension.
+    convI2L_reg(src2s, src2); // Ensure proper sign extension.
+    subL_reg_reg(diff, src1s, src2s);
    // Need to consider >=33 bit result, therefore we need signmaskL.
-    signmask64I_regI(dst, diff);
+    signmask64I_regL(dst, diff);
  %}
%}

@@ -10866,7 +10887,7 @@ instruct partialSubtypeCheck(iRegPdst result, iRegP_N2P subklass, iRegP_N2P supe
  format %{ "PartialSubtypeCheck $result = ($subklass instanceOf $superklass) tmp: $tmp_klass, $tmp_arrayptr" %}
  ins_encode %{
    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
-    __ check_klass_subtype_slow_path($subklass$$Register, $superklass$$Register, $tmp_arrayptr$$Register,
+    __ check_klass_subtype_slow_path($subklass$$Register, $superklass$$Register, $tmp_arrayptr$$Register,
                                     $tmp_klass$$Register, NULL, $result$$Register);
  %}
  ins_pipe(pipe_class_default);
@@ -11181,18 +11202,18 @@ instruct minI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  ins_cost(DEFAULT_COST*6);

  expand %{
-    iRegIdst src1s;
-    iRegIdst src2s;
-    iRegIdst diff;
-    iRegIdst sm;
-    iRegIdst doz; // difference or zero
-    sxtI_reg(src1s, src1); // Ensure proper sign extention.
-    sxtI_reg(src2s, src2); // Ensure proper sign extention.
-    subI_reg_reg(diff, src2s, src1s);
+    iRegLdst src1s;
+    iRegLdst src2s;
+    iRegLdst diff;
+    iRegLdst sm;
+    iRegLdst doz; // difference or zero
+    convI2L_reg(src1s, src1); // Ensure proper sign extension.
+    convI2L_reg(src2s, src2); // Ensure proper sign extension.
+    subL_reg_reg(diff, src2s, src1s);
    // Need to consider >=33 bit result, therefore we need signmaskL.
-    signmask64I_regI(sm, diff);
-    andI_reg_reg(doz, diff, sm); // <=0
-    addI_reg_reg(dst, doz, src1s);
+    signmask64L_regL(sm, diff);
+    andL_reg_reg(doz, diff, sm); // <=0
+    addI_regL_regL(dst, doz, src1s);
  %}
%}

@@ -11201,19 +11222,18 @@ instruct maxI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  ins_cost(DEFAULT_COST*6);

  expand %{
-    immI_minus1 m1 %{ -1 %}
-    iRegIdst src1s;
-    iRegIdst src2s;
-    iRegIdst diff;
-    iRegIdst sm;
-    iRegIdst doz; // difference or zero
-    sxtI_reg(src1s, src1); // Ensure proper sign extention.
-    sxtI_reg(src2s, src2); // Ensure proper sign extention.
-    subI_reg_reg(diff, src2s, src1s);
+    iRegLdst src1s;
+    iRegLdst src2s;
+    iRegLdst diff;
+    iRegLdst sm;
+    iRegLdst doz; // difference or zero
+    convI2L_reg(src1s, src1); // Ensure proper sign extension.
+    convI2L_reg(src2s, src2); // Ensure proper sign extension.
+    subL_reg_reg(diff, src2s, src1s);
    // Need to consider >=33 bit result, therefore we need signmaskL.
-    signmask64I_regI(sm, diff);
-    andcI_reg_reg(doz, sm, m1, diff); // >=0
-    addI_reg_reg(dst, doz, src1s);
+    signmask64L_regL(sm, diff);
+    andcL_reg_reg(doz, diff, sm); // >=0
+    addI_regL_regL(dst, doz, src1s);
  %}
%}

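The reworked min/max expansions are the substantive fix here: the difference of two 32-bit ints needs 33 bits (INT_MIN - INT_MAX does not fit in 32), so computing it with 32-bit subI could overflow and yield a sign mask with the wrong polarity. A C++ rendering of the corrected minI expansion, step for step:

```cpp
#include <cstdint>

// Branchless min following the expansion above. Using a 64-bit
// difference is the point of the fix: a 32-bit src2 - src1 can
// overflow (the true result needs 33 bits), flipping the sign
// and thus the mask.
inline int32_t branchless_min(int32_t src1, int32_t src2) {
  int64_t s1   = src1;          // convI2L_reg(src1s, src1)
  int64_t s2   = src2;          // convI2L_reg(src2s, src2)
  int64_t diff = s2 - s1;       // subL_reg_reg(diff, src2s, src1s)
  int64_t sm   = diff >> 63;    // signmask64L_regL(sm, diff)
  int64_t doz  = diff & sm;     // andL_reg_reg: diff if negative, else 0
  return (int32_t)(doz + s1);   // addI_regL_regL(dst, doz, src1s)
}
```

maxI works the same way with the mask complemented: andc keeps the difference only when it is non-negative before adding src1s back.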
@@ -81,24 +81,18 @@ address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(con
#if 0
// Call special ClassCastException constructor taking object to cast
// and target class as arguments.
-address TemplateInterpreterGenerator::generate_ClassCastException_verbose_handler(const char* name) {
+address TemplateInterpreterGenerator::generate_ClassCastException_verbose_handler() {
  address entry = __ pc();

-  // Target class oop is in register R6_ARG4 by convention!

  // Expression stack must be empty before entering the VM if an
  // exception happened.
  __ empty_expression_stack();
-  // Setup parameters.

-  // Thread will be loaded to R3_ARG1.
-  __ load_const_optimized(R4_ARG2, (address) name);
-  __ mr(R5_ARG3, R17_tos);
-  // R6_ARG4 contains specified class.
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException_verbose));
-#ifdef ASSERT
+  // Target class oop is in register R5_ARG3 by convention!
+  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException_verbose, R17_tos, R5_ARG3));
  // Above call must not return here since exception pending.
-  __ should_not_reach_here();
-#endif
+  DEBUG_ONLY(__ should_not_reach_here();)
  return entry;
}
#endif
@@ -1538,14 +1532,32 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
  __ stw(R0, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);

  // Get out of the current method and re-execute the call that called us.
-  __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ return_pc, R11_scratch1, R12_scratch2);
+  __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ noreg, R11_scratch1, R12_scratch2);
  __ restore_interpreter_state(R11_scratch1);
+  __ ld(R12_scratch2, _ijava_state_neg(top_frame_sp), R11_scratch1);
+  __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);
+  __ mtlr(return_pc);
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }
+#if INCLUDE_JVMTI
+  Label L_done;
+
+  __ lbz(R11_scratch1, 0, R14_bcp);
+  __ cmpwi(CCR0, R11_scratch1, Bytecodes::_invokestatic);
+  __ bne(CCR0, L_done);
+
+  // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
+  // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
+  __ ld(R4_ARG2, 0, R18_locals);
+  __ call_VM(R11_scratch1, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null),
+             R4_ARG2, R19_method, R14_bcp);
+
+  __ cmpdi(CCR0, R11_scratch1, 0);
+  __ beq(CCR0, L_done);
+
+  __ std(R11_scratch1, wordSize, R15_esp);
+  __ bind(L_done);
+#endif // INCLUDE_JVMTI
  __ dispatch_next(vtos);
}
// end of JVMTI PopFrame support

@@ -64,7 +64,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
  assert_different_registers(Rtmp1, Rtmp2, Rtmp3, Rval, Rbase);

  switch (barrier) {
-#ifndef SERIALGC
+#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      {
@@ -104,7 +104,7 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
        __ bind(Ldone);
      }
      break;
-#endif // SERIALGC
+#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
@@ -259,17 +259,17 @@ void TemplateTable::fconst(int value) {
  switch (value) {
    default: ShouldNotReachHere();
    case 0: {
-      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0);
+      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 1: {
-      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0);
+      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 2: {
-      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&two, R0);
+      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&two, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
@@ -282,12 +282,12 @@ void TemplateTable::dconst(int value) {
  static double one = 1.0;
  switch (value) {
    case 0: {
-      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0);
+      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
      __ lfd(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 1: {
-      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0);
+      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
      __ lfd(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
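fconst/dconst now pass an extra `true` to load_const_optimized. Assuming that flag asks the macro assembler to materialize only the upper bits of the constant's address and hand back the low 16 bits as the returned simm16_offset (which the following lfs/lfd folds into its displacement), the arithmetic has one subtlety worth spelling out: the displacement is sign-extended, so the high part must absorb a carry. A sketch under that assumption:

```cpp
#include <cstdint>

// Hypothetical split of an address into a register part and a
// sign-extended 16-bit displacement, the arrangement a returned
// simm16_offset enables: hi + sign_extend(lo) == addr.
inline void split_address(uint64_t addr, uint64_t& hi, int16_t& lo) {
  lo = (int16_t)(addr & 0xffff);      // sign-extended by the load's EA calculation
  hi = addr - (uint64_t)(int64_t)lo;  // absorbs the carry when lo < 0
}
```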
@@ -3712,9 +3712,9 @@ void TemplateTable::checkcast() {
  transition(atos, atos);

  Label Ldone, Lis_null, Lquicked, Lresolved;
-  Register Roffset         = R5_ARG3,
+  Register Roffset         = R6_ARG4,
           RobjKlass       = R4_ARG2,
-           RspecifiedKlass = R6_ARG4, // Generate_ClassCastException_verbose_handler will expect this register.
+           RspecifiedKlass = R5_ARG3, // Generate_ClassCastException_verbose_handler will read value from this register.
           Rcpool          = R11_scratch1,
           Rtags           = R12_scratch2;

@@ -53,41 +53,41 @@ inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *

 inline jlong Atomic::load(volatile jlong* src) { return *src; }

-/*
-  machine barrier instructions:
-
-  - sync            two-way memory barrier, aka fence
-  - lwsync          orders  Store|Store,
-                             Load|Store,
-                             Load|Load,
-                    but not Store|Load
-  - eieio           orders memory accesses for device memory (only)
-  - isync           invalidates speculatively executed instructions
-                    From the POWER ISA 2.06 documentation:
-                     "[...] an isync instruction prevents the execution of
-                    instructions following the isync until instructions
-                    preceding the isync have completed, [...]"
-                    From IBM's AIX assembler reference:
-                     "The isync [...] instructions causes the processor to
-                    refetch any instructions that might have been fetched
-                    prior to the isync instruction. The instruction isync
-                    causes the processor to wait for all previous instructions
-                    to complete. Then any instructions already fetched are
-                    discarded and instruction processing continues in the
-                    environment established by the previous instructions."
-
-  semantic barrier instructions:
-  (as defined in orderAccess.hpp)
-
-  - release         orders Store|Store,       (maps to lwsync)
-                            Load|Store
-  - acquire         orders  Load|Store,       (maps to lwsync)
-                            Load|Load
-  - fence           orders Store|Store,       (maps to sync)
-                            Load|Store,
-                            Load|Load,
-                           Store|Load
-*/
+//
+// machine barrier instructions:
+//
+// - sync            two-way memory barrier, aka fence
+// - lwsync          orders  Store|Store,
+//                            Load|Store,
+//                            Load|Load,
+//                   but not Store|Load
+// - eieio           orders memory accesses for device memory (only)
+// - isync           invalidates speculatively executed instructions
+//                   From the POWER ISA 2.06 documentation:
+//                    "[...] an isync instruction prevents the execution of
+//                   instructions following the isync until instructions
+//                   preceding the isync have completed, [...]"
+//                   From IBM's AIX assembler reference:
+//                    "The isync [...] instructions causes the processor to
+//                   refetch any instructions that might have been fetched
+//                   prior to the isync instruction. The instruction isync
+//                   causes the processor to wait for all previous instructions
+//                   to complete. Then any instructions already fetched are
+//                   discarded and instruction processing continues in the
+//                   environment established by the previous instructions."
+//
+// semantic barrier instructions:
+// (as defined in orderAccess.hpp)
+//
+// - release         orders Store|Store,       (maps to lwsync)
+//                           Load|Store
+// - acquire         orders  Load|Store,       (maps to lwsync)
+//                           Load|Load
+// - fence           orders Store|Store,       (maps to sync)
+//                           Load|Store,
+//                           Load|Load,
+//                          Store|Load
+//

 #define strasm_sync                       "\n  sync  \n"
 #define strasm_lwsync                     "\n  lwsync \n"

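The strasm_* string macros under the rewritten comment block are the building blocks for inline-assembly barriers. A minimal sketch (GCC-style inline asm, PPC-only; only the two #defines above come from the diff, the functions are illustrative) of how the semantic barriers map onto them:

```cpp
// release and acquire both map to lwsync, fence maps to sync,
// exactly as the comment block above spells out.
#define strasm_sync   "\n  sync  \n"
#define strasm_lwsync "\n  lwsync \n"

inline void fence_sketch()   { __asm__ __volatile__ (strasm_sync   : : : "memory"); }
inline void release_sketch() { __asm__ __volatile__ (strasm_lwsync : : : "memory"); }
inline void acquire_sketch() { __asm__ __volatile__ (strasm_lwsync : : : "memory"); }
```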