Mirror of https://github.com/openjdk/jdk.git

Commit 9bf565afeb — Merge
@@ -2565,7 +2565,7 @@ void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
     Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
                           mdo_offset_bias);
     __ ld_ptr(receiver_addr, tmp1);
-    __ verify_oop(tmp1);
+    __ verify_klass_ptr(tmp1);
     __ cmp_and_brx_short(recv, tmp1, Assembler::notEqual, Assembler::pt, next_test);
     Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
                       mdo_offset_bias);
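Context, not part of the patch: the receiver rows of a ReceiverTypeData profile record the receiver's class as Klass* metadata rather than as a heap oop, which is presumably why the sanity check on the freshly loaded row value becomes verify_klass_ptr instead of verify_oop. A minimal model of such a row (names invented for illustration, not HotSpot code):

    #include <cassert>
    #include <cstdint>

    struct Klass;                      // opaque class metadata, not a heap object

    struct ReceiverRow {
        Klass*   receiver;             // what verify_klass_ptr would sanity-check
        uint64_t count;                // bumped when the receiver matches
    };

    void profile_hit(ReceiverRow* row, Klass* recv) {
        assert(recv != nullptr && "expected a valid klass pointer");
        if (row->receiver == recv) {
            row->count++;              // matching row: just increment the counter
        }
    }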
@@ -404,7 +404,9 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       if (id == fast_new_instance_init_check_id) {
         // make sure the klass is initialized
         __ ldub(G5_klass, in_bytes(InstanceKlass::init_state_offset()), G3_t1);
-        __ cmp_and_br_short(G3_t1, InstanceKlass::fully_initialized, Assembler::notEqual, Assembler::pn, slow_path);
+        __ cmp(G3_t1, InstanceKlass::fully_initialized);
+        __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
+        __ delayed()->nop();
       }
 #ifdef ASSERT
       // assert object can be fast path allocated
@@ -515,7 +517,9 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

         // check that array length is small enough for fast path
         __ set(C1_MacroAssembler::max_array_allocation_length, G3_t1);
-        __ cmp_and_br_short(G4_length, G3_t1, Assembler::greaterUnsigned, Assembler::pn, slow_path);
+        __ cmp(G4_length, G3_t1);
+        __ br(Assembler::greaterUnsigned, false, Assembler::pn, slow_path);
+        __ delayed()->nop();

         // if we got here then the TLAB allocation failed, so try
         // refilling the TLAB or allocating directly from eden.
@@ -3333,7 +3333,8 @@ void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case

   if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
     // No allocation in the shared eden.
-    ba_short(slow_case);
+    ba(slow_case);
+    delayed()->nop();
   }

   ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), top);
@@ -3358,7 +3359,8 @@ void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case
     add(t2, 1, t2);
     stw(t2, G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()));
   }
-  ba_short(try_eden);
+  ba(try_eden);
+  delayed()->nop();

   bind(discard_tlab);
   if (TLABStats) {
@@ -3420,7 +3422,8 @@ void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case
   sub(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes(), top);
   st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_end_offset()));
   verify_tlab();
-  ba_short(retry);
+  ba(retry);
+  delayed()->nop();
 }

 void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes,
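A reading of the pattern above, not stated in the patch itself: the _short branch macros (ba_short, cmp_and_br_short) bundle the branch with its delay slot and presume a nearby target, while the expanded ba(...) / delayed()->nop() form reaches any displacement at the cost of an explicit nop in the architectural delay slot. A hypothetical emitter making the same trade-off (the limit and mnemonics are illustrative, not the real SPARC encodings):

    #include <cstdio>
    #include <cstdlib>

    const long kShortReach = 1L << 10;   // assumed short-branch budget, in bytes

    void emit_goto(long displacement) {
        if (labs(displacement) < kShortReach) {
            std::printf("ba_short %+ld\n", displacement);  // compact fused form
        } else {
            std::printf("ba %+ld\n", displacement);        // full-range branch...
            std::printf("nop\n");                          // ...delay slot filled explicitly
        }
    }

    int main() {
        emit_goto(128);       // close target: short form suffices
        emit_goto(1L << 20);  // far target: branch plus delay-slot nop
    }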
@@ -1206,6 +1206,10 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
   LIR_Address* addr = src->as_address_ptr();
   Address from_addr = as_Address(addr);

+  if (addr->base()->type() == T_OBJECT) {
+    __ verify_oop(addr->base()->as_pointer_register());
+  }
+
   switch (type) {
     case T_BOOLEAN: // fall through
     case T_BYTE: // fall through
@@ -34,9 +34,9 @@
 // Run with +PrintInterpreter to get the VM to print out the size.
 // Max size with JVMTI
 #ifdef AMD64
-const static int InterpreterCodeSize = 208 * 1024;
+const static int InterpreterCodeSize = 256 * 1024;
 #else
-const static int InterpreterCodeSize = 176 * 1024;
+const static int InterpreterCodeSize = 224 * 1024;
 #endif // AMD64

 #endif // CPU_X86_VM_TEMPLATEINTERPRETER_X86_HPP
@@ -536,12 +536,6 @@ bool InstructForm::rematerialize(FormDict &globals, RegisterForm *registers ) {
   if( data_type != Form::none )
     rematerialize = true;

-  // Ugly: until a better fix is implemented, disable rematerialization for
-  // negD nodes because they are proved to be problematic.
-  if (is_ideal_negD()) {
-    return false;
-  }
-
   // Constants
   if( _components.count() == 1 && _components[0]->is(Component::USE_DEF) )
     rematerialize = true;
@@ -1265,6 +1265,7 @@ void LIRGenerator::do_getClass(Intrinsic* x) {

   LIRItem rcvr(x->argument_at(0), this);
   rcvr.load_item();
+  LIR_Opr temp = new_register(T_METADATA);
   LIR_Opr result = rlock_result(x);

   // need to perform the null check on the rcvr
@@ -1272,8 +1273,11 @@ void LIRGenerator::do_getClass(Intrinsic* x) {
   if (x->needs_null_check()) {
     info = state_for(x);
   }
-  __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), result, info);
-  __ move_wide(new LIR_Address(result, in_bytes(Klass::java_mirror_offset()), T_OBJECT), result);
+
+  // FIXME T_ADDRESS should actually be T_METADATA but it can't because the
+  // meaning of these two is mixed up (see JDK-8026837).
+  __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), temp, info);
+  __ move_wide(new LIR_Address(temp, in_bytes(Klass::java_mirror_offset()), T_OBJECT), result);
 }
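The intrinsic above compiles Object.getClass() as two dependent loads — the klass word out of the object header, then the java_mirror (the java.lang.Class instance) out of the klass — and the fix parks the intermediate metadata pointer in a fresh temp register instead of overwriting result early. A minimal structural sketch, with assumed field names:

    #include <cassert>

    struct Mirror;                     // stands in for a java.lang.Class oop

    struct Klass {
        Mirror* java_mirror;           // load #2: an ordinary object pointer
    };

    struct Oop {
        Klass* klass;                  // load #1: the header's klass word (metadata)
    };

    Mirror* get_class(Oop* obj) {
        assert(obj != nullptr && "mirrors the null check the compiler emits");
        Klass* temp = obj->klass;      // metadata kept in its own register
        return temp->java_mirror;      // only the final value is a heap oop
    }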
@@ -638,7 +638,10 @@
           "Find best control for expensive operations")                    \
                                                                            \
   product(bool, UseMathExactIntrinsics, true,                              \
-          "Enables intrinsification of various java.lang.Math funcitons")
+          "Enables intrinsification of various java.lang.Math functions")  \
+                                                                           \
+  experimental(bool, ReplaceInParentMaps, false,                           \
+          "Propagate type improvements in callers of inlinee if possible")

 C2_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG)
@@ -63,12 +63,12 @@ public:
   }

   virtual bool      is_parse() const           { return true; }
-  virtual JVMState* generate(JVMState* jvms);
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
   int is_osr() { return _is_osr; }

 };

-JVMState* ParseGenerator::generate(JVMState* jvms) {
+JVMState* ParseGenerator::generate(JVMState* jvms, Parse* parent_parser) {
   Compile* C = Compile::current();

   if (is_osr()) {
@@ -80,7 +80,7 @@ JVMState* ParseGenerator::generate(JVMState* jvms) {
     return NULL;  // bailing out of the compile; do not try to parse
   }

-  Parse parser(jvms, method(), _expected_uses);
+  Parse parser(jvms, method(), _expected_uses, parent_parser);
   // Grab signature for matching/allocation
 #ifdef ASSERT
   if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
@@ -119,12 +119,12 @@ class DirectCallGenerator : public CallGenerator {
       _separate_io_proj(separate_io_proj)
   {
   }
-  virtual JVMState* generate(JVMState* jvms);
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);

   CallStaticJavaNode* call_node() const { return _call_node; }
 };

-JVMState* DirectCallGenerator::generate(JVMState* jvms) {
+JVMState* DirectCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
   GraphKit kit(jvms);
   bool is_static = method()->is_static();
   address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
@@ -171,10 +171,10 @@ public:
            vtable_index >= 0, "either invalid or usable");
   }
   virtual bool      is_virtual() const          { return true; }
-  virtual JVMState* generate(JVMState* jvms);
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
 };

-JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
+JVMState* VirtualCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
   GraphKit kit(jvms);
   Node* receiver = kit.argument(0);

@@ -276,7 +276,7 @@ class LateInlineCallGenerator : public DirectCallGenerator {
   // Convert the CallStaticJava into an inline
   virtual void do_late_inline();

-  virtual JVMState* generate(JVMState* jvms) {
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
     Compile *C = Compile::current();
     C->print_inlining_skip(this);

@@ -290,7 +290,7 @@ class LateInlineCallGenerator : public DirectCallGenerator {
     // that the late inlining logic can distinguish between fall
     // through and exceptional uses of the memory and io projections
     // as is done for allocations and macro expansion.
-    return DirectCallGenerator::generate(jvms);
+    return DirectCallGenerator::generate(jvms, parent_parser);
   }

   virtual void print_inlining_late(const char* msg) {
@@ -389,7 +389,7 @@ void LateInlineCallGenerator::do_late_inline() {
   }

   // Now perform the inling using the synthesized JVMState
-  JVMState* new_jvms = _inline_cg->generate(jvms);
+  JVMState* new_jvms = _inline_cg->generate(jvms, NULL);
   if (new_jvms == NULL)  return;  // no change
   if (C->failing())      return;

@@ -429,8 +429,8 @@ class LateInlineMHCallGenerator : public LateInlineCallGenerator {

   virtual bool is_mh_late_inline() const { return true; }

-  virtual JVMState* generate(JVMState* jvms) {
-    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
+    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms, parent_parser);
     if (_input_not_const) {
       // inlining won't be possible so no need to enqueue right now.
       call_node()->set_generator(this);
@@ -477,13 +477,13 @@ class LateInlineStringCallGenerator : public LateInlineCallGenerator {
   LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
     LateInlineCallGenerator(method, inline_cg) {}

-  virtual JVMState* generate(JVMState* jvms) {
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
     Compile *C = Compile::current();
     C->print_inlining_skip(this);

     C->add_string_late_inline(this);

-    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
+    JVMState* new_jvms = DirectCallGenerator::generate(jvms, parent_parser);
     return new_jvms;
   }
 };
@@ -498,13 +498,13 @@ class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {
   LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
     LateInlineCallGenerator(method, inline_cg) {}

-  virtual JVMState* generate(JVMState* jvms) {
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
     Compile *C = Compile::current();
     C->print_inlining_skip(this);

     C->add_boxing_late_inline(this);

-    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
+    JVMState* new_jvms = DirectCallGenerator::generate(jvms, parent_parser);
     return new_jvms;
   }
 };
@@ -540,7 +540,7 @@ public:
   virtual bool      is_virtual() const  { return _is_virtual; }
   virtual bool      is_deferred() const { return true; }

-  virtual JVMState* generate(JVMState* jvms);
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
 };


@@ -550,12 +550,12 @@ CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
   return new WarmCallGenerator(ci, if_cold, if_hot);
 }

-JVMState* WarmCallGenerator::generate(JVMState* jvms) {
+JVMState* WarmCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
   Compile* C = Compile::current();
   if (C->log() != NULL) {
     C->log()->elem("warm_call bci='%d'", jvms->bci());
   }
-  jvms = _if_cold->generate(jvms);
+  jvms = _if_cold->generate(jvms, parent_parser);
   if (jvms != NULL) {
     Node* m = jvms->map()->control();
     if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
@@ -616,7 +616,7 @@ public:
   virtual bool      is_inline()    const    { return _if_hit->is_inline(); }
   virtual bool      is_deferred()  const    { return _if_hit->is_deferred(); }

-  virtual JVMState* generate(JVMState* jvms);
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
 };


@@ -628,7 +628,7 @@ CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
 }


-JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
+JVMState* PredictedCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
   GraphKit kit(jvms);
   PhaseGVN& gvn = kit.gvn();
   // We need an explicit receiver null_check before checking its type.
@@ -656,7 +656,7 @@ JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
   { PreserveJVMState pjvms(&kit);
     kit.set_control(slow_ctl);
     if (!kit.stopped()) {
-      slow_jvms = _if_missed->generate(kit.sync_jvms());
+      slow_jvms = _if_missed->generate(kit.sync_jvms(), parent_parser);
       if (kit.failing())
         return NULL;  // might happen because of NodeCountInliningCutoff
       assert(slow_jvms != NULL, "must be");
@@ -677,12 +677,12 @@ JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
   kit.replace_in_map(receiver, exact_receiver);

   // Make the hot call:
-  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
+  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms(), parent_parser);
   if (new_jvms == NULL) {
     // Inline failed, so make a direct call.
     assert(_if_hit->is_inline(), "must have been a failed inline");
     CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
-    new_jvms = cg->generate(kit.sync_jvms());
+    new_jvms = cg->generate(kit.sync_jvms(), parent_parser);
   }
   kit.add_exception_states_from(new_jvms);
   kit.set_jvms(new_jvms);
@@ -874,7 +874,7 @@ public:
   virtual bool      is_inlined()   const    { return true; }
   virtual bool      is_intrinsic() const    { return true; }

-  virtual JVMState* generate(JVMState* jvms);
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
 };


@@ -884,7 +884,7 @@ CallGenerator* CallGenerator::for_predicted_intrinsic(CallGenerator* intrinsic,
 }


-JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms) {
+JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms, Parse* parent_parser) {
   GraphKit kit(jvms);
   PhaseGVN& gvn = kit.gvn();

@@ -904,7 +904,7 @@ JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms) {
     PreserveJVMState pjvms(&kit);
     kit.set_control(slow_ctl);
     if (!kit.stopped()) {
-      slow_jvms = _cg->generate(kit.sync_jvms());
+      slow_jvms = _cg->generate(kit.sync_jvms(), parent_parser);
       if (kit.failing())
         return NULL;  // might happen because of NodeCountInliningCutoff
       assert(slow_jvms != NULL, "must be");
@@ -922,12 +922,12 @@ JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms) {
   }

   // Generate intrinsic code:
-  JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
+  JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms(), parent_parser);
   if (new_jvms == NULL) {
     // Intrinsic failed, so use slow code or make a direct call.
     if (slow_map == NULL) {
       CallGenerator* cg = CallGenerator::for_direct_call(method());
-      new_jvms = cg->generate(kit.sync_jvms());
+      new_jvms = cg->generate(kit.sync_jvms(), parent_parser);
     } else {
       kit.set_jvms(slow_jvms);
       return kit.transfer_exceptions_into_jvms();
@@ -997,7 +997,7 @@ public:
   virtual bool      is_virtual() const          { ShouldNotReachHere(); return false; }
   virtual bool      is_trap() const             { return true; }

-  virtual JVMState* generate(JVMState* jvms);
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
 };


@@ -1009,7 +1009,7 @@ CallGenerator::for_uncommon_trap(ciMethod* m,
 }


-JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
+JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms, Parse* parent_parser) {
   GraphKit kit(jvms);
   // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
   int nargs = method()->arg_size();
@@ -31,6 +31,8 @@
 #include "opto/type.hpp"
 #include "runtime/deoptimization.hpp"

+class Parse;
+
 //---------------------------CallGenerator-------------------------------------
 // The subclasses of this class handle generation of ideal nodes for
 // call sites and method entry points.
@@ -108,7 +110,7 @@ class CallGenerator : public ResourceObj {
   //
   // If the result is NULL, it means that this CallGenerator was unable
   // to handle the given call, and another CallGenerator should be consulted.
-  virtual JVMState* generate(JVMState* jvms) = 0;
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) = 0;

   // How to generate a call site that is inlined:
   static CallGenerator* for_inline(ciMethod* m, float expected_uses = -1);
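Taken together, the callGenerator changes are one mechanical refactor: the pure-virtual generate() grows a parent_parser argument, every wrapper generator forwards it, compilation roots and late-inline replays pass NULL, and ParseGenerator finally hands it to the Parse constructor. A stripped-down sketch of the same threading pattern — all names below are stand-ins, not HotSpot types:

    struct JVMStateModel {};                 // stands in for JVMState
    struct ParseModel;                       // stands in for Parse

    struct GeneratorModel {
        virtual ~GeneratorModel() = default;
        // The inlining caller's parser rides along; nullptr means "no caller",
        // as at the compilation root.
        virtual JVMStateModel* generate(JVMStateModel* j, ParseModel* parent) = 0;
    };

    struct WrapperModel : GeneratorModel {   // e.g. the warm/predicted wrappers
        GeneratorModel* inner = nullptr;
        JVMStateModel* generate(JVMStateModel* j, ParseModel* parent) override {
            return inner->generate(j, parent);   // forward; never invent a parent
        }
    };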
@@ -655,7 +655,8 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
                   _inlining_progress(false),
                   _inlining_incrementally(false),
                   _print_inlining_list(NULL),
-                  _print_inlining_idx(0) {
+                  _print_inlining_idx(0),
+                  _preserve_jvm_state(0) {
   C = this;

   CompileWrapper cw(this);
@@ -763,7 +764,7 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
     return;
   }
   JVMState* jvms = build_start_state(start(), tf());
-  if ((jvms = cg->generate(jvms)) == NULL) {
+  if ((jvms = cg->generate(jvms, NULL)) == NULL) {
     record_method_not_compilable("method parse failed");
     return;
   }
@@ -940,7 +941,8 @@ Compile::Compile( ciEnv* ci_env,
     _inlining_progress(false),
     _inlining_incrementally(false),
     _print_inlining_list(NULL),
-    _print_inlining_idx(0) {
+    _print_inlining_idx(0),
+    _preserve_jvm_state(0) {
   C = this;

 #ifndef PRODUCT
@@ -425,6 +425,9 @@ class Compile : public Phase {
   // Expensive nodes list already sorted?
   bool expensive_nodes_sorted() const;

+  // Are we within a PreserveJVMState block?
+  int _preserve_jvm_state;
+
  public:

   outputStream* print_inlining_stream() const {
@@ -820,7 +823,9 @@ class Compile : public Phase {

   // Decide how to build a call.
   // The profile factor is a discount to apply to this site's interp. profile.
-  CallGenerator*    call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch, JVMState* jvms, bool allow_inline, float profile_factor, bool allow_intrinsics = true, bool delayed_forbidden = false);
+  CallGenerator*    call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch,
+                                   JVMState* jvms, bool allow_inline, float profile_factor, bool allow_intrinsics = true,
+                                   bool delayed_forbidden = false);
   bool should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
     return should_delay_string_inlining(call_method, jvms) ||
            should_delay_boxing_inlining(call_method, jvms);
@@ -1156,6 +1161,21 @@ class Compile : public Phase {

   // Auxiliary method for randomized fuzzing/stressing
   static bool randomized_select(int count);
+
+  // enter a PreserveJVMState block
+  void inc_preserve_jvm_state() {
+    _preserve_jvm_state++;
+  }
+
+  // exit a PreserveJVMState block
+  void dec_preserve_jvm_state() {
+    _preserve_jvm_state--;
+    assert(_preserve_jvm_state >= 0, "_preserve_jvm_state shouldn't be negative");
+  }
+
+  bool has_preserve_jvm_state() const {
+    return _preserve_jvm_state > 0;
+  }
 };

 #endif // SHARE_VM_OPTO_COMPILE_HPP
@@ -495,7 +495,7 @@ void Parse::do_call() {
   // because exceptions don't return to the call site.)
   profile_call(receiver);

-  JVMState* new_jvms = cg->generate(jvms);
+  JVMState* new_jvms = cg->generate(jvms, this);
   if (new_jvms == NULL) {
     // When inlining attempt fails (e.g., too many arguments),
     // it may contaminate the current compile state, making it
@@ -509,7 +509,7 @@ void Parse::do_call() {
     // intrinsic was expecting to optimize. Should always be possible to
     // get a normal java call that may inline in that case
     cg = C->call_generator(cg->method(), vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), /* allow_intrinsics= */ false);
-    if ((new_jvms = cg->generate(jvms)) == NULL) {
+    if ((new_jvms = cg->generate(jvms, this)) == NULL) {
       guarantee(failing(), "call failed to generate: calls should work");
       return;
     }
@@ -639,6 +639,7 @@ PreserveJVMState::PreserveJVMState(GraphKit* kit, bool clone_map) {
   _map = kit->map();   // preserve the map
   _sp = kit->sp();
   kit->set_map(clone_map ? kit->clone_map() : NULL);
+  Compile::current()->inc_preserve_jvm_state();
 #ifdef ASSERT
   _bci = kit->bci();
   Parse* parser = kit->is_Parse();
@@ -656,6 +657,7 @@ PreserveJVMState::~PreserveJVMState() {
 #endif
   kit->set_map(_map);
   kit->set_sp(_sp);
+  Compile::current()->dec_preserve_jvm_state();
 }
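Because PreserveJVMState is an RAII helper, bracketing its constructor and destructor with inc/dec_preserve_jvm_state() gives the compile a reliable "am I inside such a block?" flag — consulted by replace_in_map() below — without touching any use site. A minimal model of the counting guard, with invented names:

    #include <cassert>

    struct CompileModel {
        int preserve_depth = 0;                     // mirrors _preserve_jvm_state
        bool has_preserve() const { return preserve_depth > 0; }
    };

    class PreserveGuard {                           // mirrors PreserveJVMState
        CompileModel& _c;
    public:
        explicit PreserveGuard(CompileModel& c) : _c(c) { _c.preserve_depth++; }
        ~PreserveGuard() {
            _c.preserve_depth--;
            assert(_c.preserve_depth >= 0 && "unbalanced guard");
        }
    };

    void example(CompileModel& c) {
        PreserveGuard g(c);          // scope entry bumps the counter...
        assert(c.has_preserve());
    }                                // ...scope exit restores it, even on early return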
@@ -1373,17 +1375,70 @@ Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) {

 //--------------------------replace_in_map-------------------------------------
 void GraphKit::replace_in_map(Node* old, Node* neww) {
-  this->map()->replace_edge(old, neww);
+  if (old == neww) {
+    return;
+  }
+
+  map()->replace_edge(old, neww);

   // Note:  This operation potentially replaces any edge
   // on the map.  This includes locals, stack, and monitors
   // of the current (innermost) JVM state.

-  // We can consider replacing in caller maps.
-  // The idea would be that an inlined function's null checks
-  // can be shared with the entire inlining tree.
-  // The expense of doing this is that the PreserveJVMState class
-  // would have to preserve caller states too, with a deep copy.
+  if (!ReplaceInParentMaps) {
+    return;
+  }
+
+  // PreserveJVMState doesn't do a deep copy so we can't modify
+  // parents
+  if (Compile::current()->has_preserve_jvm_state()) {
+    return;
+  }
+
+  Parse* parser = is_Parse();
+  bool progress = true;
+  Node* ctrl = map()->in(0);
+  // Follow the chain of parsers and see whether the update can be
+  // done in the map of callers. We can do the replace for a caller if
+  // the current control post dominates the control of a caller.
+  while (parser != NULL && parser->caller() != NULL && progress) {
+    progress = false;
+    Node* parent_map = parser->caller()->map();
+    assert(parser->exits().map()->jvms()->depth() == parser->caller()->depth(), "map mismatch");
+
+    Node* parent_ctrl = parent_map->in(0);
+
+    while (parent_ctrl->is_Region()) {
+      Node* n = parent_ctrl->as_Region()->is_copy();
+      if (n == NULL) {
+        break;
+      }
+      parent_ctrl = n;
+    }
+
+    for (;;) {
+      if (ctrl == parent_ctrl) {
+        // update the map of the exits which is the one that will be
+        // used when compilation resume after inlining
+        parser->exits().map()->replace_edge(old, neww);
+        progress = true;
+        break;
+      }
+      if (ctrl->is_Proj() && ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) {
+        ctrl = ctrl->in(0)->in(0);
+      } else if (ctrl->is_Region()) {
+        Node* n = ctrl->as_Region()->is_copy();
+        if (n == NULL) {
+          break;
+        }
+        ctrl = n;
+      } else {
+        break;
+      }
+    }
+
+    parser = parser->parent_parser();
+  }
 }
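The walk above asks one question: does the inlinee's current control post-dominate a caller's control once trivial constructs — copy regions and the live path of uncommon-trap ifs — are skipped? Only then may the replacement also be applied to that caller's exit map. A toy version of the skipping walk, over an assumed one-field node model:

    // Toy control node: skip_to is set when the node is trivially skippable
    // (a copy region, or the fall-through of an uncommon-trap if).
    struct Ctl {
        Ctl* skip_to = nullptr;
    };

    // True if 'ctrl' reaches 'parent_ctrl' through skippable nodes only,
    // i.e. no real branching happened in between.
    bool post_dominates_caller(Ctl* ctrl, Ctl* parent_ctrl) {
        for (;;) {
            if (ctrl == parent_ctrl) return true;
            if (ctrl->skip_to == nullptr) return false;  // genuine merge/branch: stop
            ctrl = ctrl->skip_to;
        }
    }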
@@ -1019,7 +1019,7 @@ void IfNode::dominated_by( Node *prev_dom, PhaseIterGVN *igvn ) {
   // be skipped. For example, range check predicate has two checks
   // for lower and upper bounds.
   ProjNode* unc_proj = proj_out(1 - prev_dom->as_Proj()->_con)->as_Proj();
-  if (PhaseIdealLoop::is_uncommon_trap_proj(unc_proj, Deoptimization::Reason_predicate))
+  if (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate))
     prev_dom = idom;

   // Now walk the current IfNode's projections.
@@ -63,7 +63,7 @@ class LibraryIntrinsic : public InlineCallGenerator {
   virtual bool is_virtual()   const { return _is_virtual; }
   virtual bool is_predicted()   const { return _is_predicted; }
   virtual bool does_virtual_dispatch()   const { return _does_virtual_dispatch; }
-  virtual JVMState* generate(JVMState* jvms);
+  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser);
   virtual Node* generate_predicate(JVMState* jvms);
   vmIntrinsics::ID intrinsic_id() const { return _intrinsic_id; }
 };
@@ -556,7 +556,7 @@ void Compile::register_library_intrinsics() {
   // Nothing to do here.
 }

-JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
+JVMState* LibraryIntrinsic::generate(JVMState* jvms, Parse* parent_parser) {
   LibraryCallKit kit(jvms, this);
   Compile* C = kit.C;
   int nodes = C->unique();
@@ -41,63 +41,6 @@
  * checks (such as null checks).
  */

-//-------------------------------is_uncommon_trap_proj----------------------------
-// Return true if proj is the form of "proj->[region->..]call_uct"
-bool PhaseIdealLoop::is_uncommon_trap_proj(ProjNode* proj, Deoptimization::DeoptReason reason) {
-  int path_limit = 10;
-  assert(proj, "invalid argument");
-  Node* out = proj;
-  for (int ct = 0; ct < path_limit; ct++) {
-    out = out->unique_ctrl_out();
-    if (out == NULL)
-      return false;
-    if (out->is_CallStaticJava()) {
-      int req = out->as_CallStaticJava()->uncommon_trap_request();
-      if (req != 0) {
-        Deoptimization::DeoptReason trap_reason = Deoptimization::trap_request_reason(req);
-        if (trap_reason == reason || reason == Deoptimization::Reason_none) {
-          return true;
-        }
-      }
-      return false; // don't do further after call
-    }
-    if (out->Opcode() != Op_Region)
-      return false;
-  }
-  return false;
-}
-
-//-------------------------------is_uncommon_trap_if_pattern-------------------------
-// Return true for "if(test)-> proj -> ...
-//                          |
-//                          V
-//                  other_proj->[region->..]call_uct"
-//
-// "must_reason_predicate" means the uct reason must be Reason_predicate
-bool PhaseIdealLoop::is_uncommon_trap_if_pattern(ProjNode *proj, Deoptimization::DeoptReason reason) {
-  Node *in0 = proj->in(0);
-  if (!in0->is_If()) return false;
-  // Variation of a dead If node.
-  if (in0->outcnt() < 2)  return false;
-  IfNode* iff = in0->as_If();
-
-  // we need "If(Conv2B(Opaque1(...)))" pattern for reason_predicate
-  if (reason != Deoptimization::Reason_none) {
-    if (iff->in(1)->Opcode() != Op_Conv2B ||
-        iff->in(1)->in(1)->Opcode() != Op_Opaque1) {
-      return false;
-    }
-  }
-
-  ProjNode* other_proj = iff->proj_out(1-proj->_con)->as_Proj();
-  if (is_uncommon_trap_proj(other_proj, reason)) {
-    assert(reason == Deoptimization::Reason_none ||
-           Compile::current()->is_predicate_opaq(iff->in(1)->in(1)), "should be on the list");
-    return true;
-  }
-  return false;
-}
-
 //-------------------------------register_control-------------------------
 void PhaseIdealLoop::register_control(Node* n, IdealLoopTree *loop, Node* pred) {
   assert(n->is_CFG(), "must be control node");
@@ -147,7 +90,7 @@ void PhaseIdealLoop::register_control(Node* n, IdealLoopTree *loop, Node* pred)
 // This code is also used to clone predicates to clonned loops.
 ProjNode* PhaseIdealLoop::create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry,
                                                       Deoptimization::DeoptReason reason) {
-  assert(is_uncommon_trap_if_pattern(cont_proj, reason), "must be a uct if pattern!");
+  assert(cont_proj->is_uncommon_trap_if_pattern(reason), "must be a uct if pattern!");
   IfNode* iff = cont_proj->in(0)->as_If();

   ProjNode *uncommon_proj = iff->proj_out(1 - cont_proj->_con);
@@ -235,7 +178,7 @@ ProjNode* PhaseIdealLoop::create_new_if_for_predicate(ProjNode* cont_proj, Node*
 ProjNode* PhaseIterGVN::create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry,
                                                     Deoptimization::DeoptReason reason) {
   assert(new_entry != 0, "only used for clone predicate");
-  assert(PhaseIdealLoop::is_uncommon_trap_if_pattern(cont_proj, reason), "must be a uct if pattern!");
+  assert(cont_proj->is_uncommon_trap_if_pattern(reason), "must be a uct if pattern!");
   IfNode* iff = cont_proj->in(0)->as_If();

   ProjNode *uncommon_proj = iff->proj_out(1 - cont_proj->_con);
@@ -422,7 +365,7 @@ Node* PhaseIdealLoop::skip_loop_predicates(Node* entry) {
 ProjNode* PhaseIdealLoop::find_predicate_insertion_point(Node* start_c, Deoptimization::DeoptReason reason) {
   if (start_c == NULL || !start_c->is_Proj())
     return NULL;
-  if (is_uncommon_trap_if_pattern(start_c->as_Proj(), reason)) {
+  if (start_c->as_Proj()->is_uncommon_trap_if_pattern(reason)) {
     return start_c->as_Proj();
   }
   return NULL;
@@ -773,7 +716,7 @@ bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
     ProjNode* proj = if_proj_list.pop()->as_Proj();
     IfNode* iff = proj->in(0)->as_If();

-    if (!is_uncommon_trap_if_pattern(proj, Deoptimization::Reason_none)) {
+    if (!proj->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) {
       if (loop->is_loop_exit(iff)) {
         // stop processing the remaining projs in the list because the execution of them
         // depends on the condition of "iff" (iff->in(1)).
@@ -167,7 +167,7 @@ Node *PhaseIdealLoop::get_early_ctrl_for_expensive(Node *n, Node* earliest) {
       // expensive nodes will notice the loop and skip over it to try to
       // move the node further up.
       if (ctl->is_CountedLoop() && ctl->in(1) != NULL && ctl->in(1)->in(0) != NULL && ctl->in(1)->in(0)->is_If()) {
-        if (!is_uncommon_trap_if_pattern(ctl->in(1)->as_Proj(), Deoptimization::Reason_none)) {
+        if (!ctl->in(1)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) {
           break;
         }
         next = idom(ctl->in(1)->in(0));
@@ -181,7 +181,7 @@ Node *PhaseIdealLoop::get_early_ctrl_for_expensive(Node *n, Node* earliest) {
       } else if (parent_ctl->is_CountedLoopEnd() && parent_ctl->as_CountedLoopEnd()->loopnode() != NULL) {
         next = parent_ctl->as_CountedLoopEnd()->loopnode()->init_control();
       } else if (parent_ctl->is_If()) {
-        if (!is_uncommon_trap_if_pattern(ctl->as_Proj(), Deoptimization::Reason_none)) {
+        if (!ctl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) {
           break;
         }
         assert(idom(ctl) == parent_ctl, "strange");
@@ -876,13 +876,6 @@ public:
   // Return true if exp is a scaled induction var plus (or minus) constant
   bool is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth = 0);

-  // Return true if proj is for "proj->[region->..]call_uct"
-  static bool is_uncommon_trap_proj(ProjNode* proj, Deoptimization::DeoptReason reason);
-  // Return true for "if(test)-> proj -> ...
-  //                          |
-  //                          V
-  //                  other_proj->[region->..]call_uct"
-  static bool is_uncommon_trap_if_pattern(ProjNode* proj, Deoptimization::DeoptReason reason);
   // Create a new if above the uncommon_trap_if_pattern for the predicate to be promoted
   ProjNode* create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry,
                                         Deoptimization::DeoptReason reason);
@@ -238,7 +238,7 @@ void PhaseIdealLoop::dominated_by( Node *prevdom, Node *iff, bool flip, bool exc
   ProjNode* dp_proj  = dp->as_Proj();
   ProjNode* unc_proj = iff->as_If()->proj_out(1 - dp_proj->_con)->as_Proj();
   if (exclude_loop_predicate &&
-      is_uncommon_trap_proj(unc_proj, Deoptimization::Reason_predicate))
+      unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate))
     return; // Let IGVN transformation change control dependence.

   IdealLoopTree *old_loop = get_loop(dp);
@@ -24,6 +24,7 @@

 #include "precompiled.hpp"
+#include "opto/callnode.hpp"
 #include "opto/cfgnode.hpp"
 #include "opto/matcher.hpp"
 #include "opto/mathexactnode.hpp"
 #include "opto/multnode.hpp"
@@ -150,3 +151,59 @@ const RegMask &ProjNode::out_RegMask() const {
 uint ProjNode::ideal_reg() const {
   return bottom_type()->ideal_reg();
 }
+
+//-------------------------------is_uncommon_trap_proj----------------------------
+// Return true if proj is the form of "proj->[region->..]call_uct"
+bool ProjNode::is_uncommon_trap_proj(Deoptimization::DeoptReason reason) {
+  int path_limit = 10;
+  Node* out = this;
+  for (int ct = 0; ct < path_limit; ct++) {
+    out = out->unique_ctrl_out();
+    if (out == NULL)
+      return false;
+    if (out->is_CallStaticJava()) {
+      int req = out->as_CallStaticJava()->uncommon_trap_request();
+      if (req != 0) {
+        Deoptimization::DeoptReason trap_reason = Deoptimization::trap_request_reason(req);
+        if (trap_reason == reason || reason == Deoptimization::Reason_none) {
+          return true;
+        }
+      }
+      return false; // don't do further after call
+    }
+    if (out->Opcode() != Op_Region)
+      return false;
+  }
+  return false;
+}
+
+//-------------------------------is_uncommon_trap_if_pattern-------------------------
+// Return true for "if(test)-> proj -> ...
+//                          |
+//                          V
+//                  other_proj->[region->..]call_uct"
+//
+// "must_reason_predicate" means the uct reason must be Reason_predicate
+bool ProjNode::is_uncommon_trap_if_pattern(Deoptimization::DeoptReason reason) {
+  Node *in0 = in(0);
+  if (!in0->is_If()) return false;
+  // Variation of a dead If node.
+  if (in0->outcnt() < 2)  return false;
+  IfNode* iff = in0->as_If();
+
+  // we need "If(Conv2B(Opaque1(...)))" pattern for reason_predicate
+  if (reason != Deoptimization::Reason_none) {
+    if (iff->in(1)->Opcode() != Op_Conv2B ||
+        iff->in(1)->in(1)->Opcode() != Op_Opaque1) {
+      return false;
+    }
+  }
+
+  ProjNode* other_proj = iff->proj_out(1-_con)->as_Proj();
+  if (other_proj->is_uncommon_trap_proj(reason)) {
+    assert(reason == Deoptimization::Reason_none ||
+           Compile::current()->is_predicate_opaq(iff->in(1)->in(1)), "should be on the list");
+    return true;
+  }
+  return false;
+}
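The relocated predicate is a bounded walk: follow the unique control successor at most path_limit steps, succeed on a call carrying an uncommon-trap request with a matching reason, and give up at any fan-out or non-Region node. The same shape over a toy node model, names assumed:

    struct CNode {
        CNode* unique_succ = nullptr;   // unique control output, if any
        bool   is_region   = false;    // control merge that may be skipped
        bool   is_uct_call = false;    // CallStaticJava with a trap request
    };

    bool leads_to_uncommon_trap(CNode* proj, int path_limit = 10) {
        CNode* out = proj;
        for (int ct = 0; ct < path_limit; ct++) {
            out = out->unique_succ;
            if (out == nullptr)   return false;  // fan-out: pattern broken
            if (out->is_uct_call) return true;   // found the trap call
            if (!out->is_region)  return false;  // only regions may intervene
        }
        return false;                            // too deep: give up
    }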
@@ -88,6 +88,14 @@ public:
 #ifndef PRODUCT
   virtual void dump_spec(outputStream *st) const;
 #endif
+
+  // Return true if proj is for "proj->[region->..]call_uct"
+  bool is_uncommon_trap_proj(Deoptimization::DeoptReason reason);
+  // Return true for "if(test)-> proj -> ...
+  //                          |
+  //                          V
+  //                  other_proj->[region->..]call_uct"
+  bool is_uncommon_trap_if_pattern(Deoptimization::DeoptReason reason);
 };

 #endif // SHARE_VM_OPTO_MULTNODE_HPP
@@ -349,13 +349,15 @@ class Parse : public GraphKit {
   int _est_switch_depth;        // Debugging SwitchRanges.
 #endif

+  // parser for the caller of the method of this object
+  Parse* const _parent;
+
  public:
   // Constructor
-  Parse(JVMState* caller, ciMethod* parse_method, float expected_uses);
+  Parse(JVMState* caller, ciMethod* parse_method, float expected_uses, Parse* parent);

   virtual Parse* is_Parse() const { return (Parse*)this; }

  public:
   // Accessors.
   JVMState*     caller()        const { return _caller; }
   float         expected_uses() const { return _expected_uses; }
@@ -407,6 +409,8 @@ class Parse : public GraphKit {
     return block()->successor_for_bci(bci);
   }

+  Parse* parent_parser() const { return _parent; }
+
  private:
   // Create a JVMS & map for the initial state of this method.
   SafePointNode* create_entry_map();
@@ -381,8 +381,8 @@ void Parse::load_interpreter_state(Node* osr_buf) {

 //------------------------------Parse------------------------------------------
 // Main parser constructor.
-Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses)
-  : _exits(caller)
+Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses, Parse* parent)
+  : _exits(caller), _parent(parent)
 {
   // Init some variables
   _caller = caller;
@@ -51,15 +51,6 @@

 static const char out_of_nodes[] = "out of nodes during split";

-static bool contains_no_live_range_input(const Node* def) {
-  for (uint i = 1; i < def->req(); ++i) {
-    if (def->in(i) != NULL && def->in_RegMask(i).is_NotEmpty()) {
-      return false;
-    }
-  }
-  return true;
-}
-
 //------------------------------get_spillcopy_wide-----------------------------
 // Get a SpillCopy node with wide-enough masks.  Use the 'wide-mask', the
 // wide ideal-register spill-mask if possible.  If the 'wide-mask' does
@@ -326,12 +317,11 @@ Node *PhaseChaitin::split_Rematerialize( Node *def, Block *b, uint insidx, uint
   if( def->req() > 1 ) {
     for( uint i = 1; i < def->req(); i++ ) {
       Node *in = def->in(i);
-      // Check for single-def (LRG cannot redefined)
       uint lidx = _lrg_map.live_range_id(in);
-      if (lidx >= _lrg_map.max_lrg_id()) {
-        continue; // Value is a recent spill-copy
-      }
-      if (lrgs(lidx).is_singledef()) {
+      // We do not need this for live ranges that are only defined once.
+      // However, this is not true for spill copies that are added in this
+      // Split() pass, since they might get coalesced later on in this pass.
+      if (lidx < _lrg_map.max_lrg_id() && lrgs(lidx).is_singledef()) {
         continue;
       }
@@ -1327,7 +1317,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
       Node *def = Reaches[pidx][slidx];
       assert( def, "must have reaching def" );
       // If input up/down sense and reg-pressure DISagree
-      if (def->rematerialize() && contains_no_live_range_input(def)) {
+      if (def->rematerialize()) {
        // Place the rematerialized node above any MSCs created during
        // phi node splitting.  end_idx points at the insertion point
        // so look at the node before it.
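Background gloss on the hunk above: rematerialization re-executes a cheap definition at its use instead of spilling and reloading it, and the dropped guard had additionally refused definitions that still consume live-range inputs. Schematically, with invented names:

    struct DefModel {
        bool cheap;              // e.g. loads a constant: fine to recompute
        int  live_range_inputs;  // register-resident inputs the def consumes
    };

    // The old policy passed require_no_inputs = true; this change removes the test.
    bool should_rematerialize(const DefModel& d, bool require_no_inputs) {
        if (!d.cheap) {
            return false;                          // spilling is cheaper
        }
        if (require_no_inputs && d.live_range_inputs > 0) {
            return false;                          // the guard this diff drops
        }
        return true;
    }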
hotspot/test/compiler/tiered/CompLevelsTest.java (new file, 75 lines)
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * Abstract class for testing of used compilation levels correctness.
+ *
+ * @author igor.ignatyev@oracle.com
+ */
+public abstract class CompLevelsTest extends CompilerWhiteBoxTest {
+    protected CompLevelsTest(TestCase testCase) {
+        super(testCase);
+        // to prevent inlining of #method
+        WHITE_BOX.testSetDontInlineMethod(method, true);
+    }
+
+    /**
+     * Checks that level is available.
+     * @param compLevel level to check
+     */
+    protected void testAvailableLevel(int compLevel, int bci) {
+        if (IS_VERBOSE) {
+            System.out.printf("testAvailableLevel(level = %d, bci = %d)%n",
+                    compLevel, bci);
+        }
+        WHITE_BOX.enqueueMethodForCompilation(method, compLevel, bci);
+        checkCompiled();
+        checkLevel(compLevel, getCompLevel());
+        deoptimize();
+    }
+
+    /**
+     * Checks that level is unavailable.
+     * @param compLevel level to check
+     */
+    protected void testUnavailableLevel(int compLevel, int bci) {
+        if (IS_VERBOSE) {
+            System.out.printf("testUnavailableLevel(level = %d, bci = %d)%n",
+                    compLevel, bci);
+        }
+        WHITE_BOX.enqueueMethodForCompilation(method, compLevel, bci);
+        checkNotCompiled();
+    }
+
+    /**
+     * Checks validity of compilation level.
+     * @param expected expected level
+     * @param actual actually level
+     */
+    protected void checkLevel(int expected, int actual) {
+        if (expected != actual) {
+            throw new RuntimeException("expected[" + expected + "] != actual["
+                    + actual + "]");
+        }
+    }
+}
hotspot/test/compiler/tiered/NonTieredLevelsTest.java (new file, 96 lines)
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.util.function.IntPredicate;
+
+/**
+ * @test NonTieredLevelsTest
+ * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
+ * @build NonTieredLevelsTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:-TieredCompilation
+ *                   -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
+ *                   -XX:CompileCommand=compileonly,TestCase$Helper::*
+ *                   NonTieredLevelsTest
+ * @summary Verify that only one level can be used
+ * @author igor.ignatyev@oracle.com
+ */
+public class NonTieredLevelsTest extends CompLevelsTest {
+    private static final int AVAILABLE_COMP_LEVEL;
+    private static final IntPredicate IS_AVAILABLE_COMPLEVEL;
+    static {
+        String vmName = System.getProperty("java.vm.name");
+        if (vmName.endsWith(" Server VM")) {
+            AVAILABLE_COMP_LEVEL = COMP_LEVEL_FULL_OPTIMIZATION;
+            IS_AVAILABLE_COMPLEVEL = x -> x == COMP_LEVEL_FULL_OPTIMIZATION;
+        } else if (vmName.endsWith(" Client VM")
+                || vmName.endsWith(" Minimal VM")) {
+            AVAILABLE_COMP_LEVEL = COMP_LEVEL_SIMPLE;
+            IS_AVAILABLE_COMPLEVEL = x -> x >= COMP_LEVEL_SIMPLE
+                    && x <= COMP_LEVEL_FULL_PROFILE;
+        } else {
+            throw new RuntimeException("Unknown VM: " + vmName);
+        }
+
+    }
+    public static void main(String[] args) throws Exception {
+        if (TIERED_COMPILATION) {
+            System.err.println("Test isn't applicable w/ enabled "
+                    + "TieredCompilation. Skip test.");
+            return;
+        }
+        for (TestCase test : TestCase.values()) {
+            new NonTieredLevelsTest(test).runTest();
+        }
+    }
+
+    private NonTieredLevelsTest(TestCase testCase) {
+        super(testCase);
+        // to prevent inlining of #method
+        WHITE_BOX.testSetDontInlineMethod(method, true);
+    }
+
+    @Override
+    protected void test() throws Exception {
+        checkNotCompiled();
+        compile();
+        checkCompiled();
+
+        int compLevel = getCompLevel();
+        checkLevel(AVAILABLE_COMP_LEVEL, compLevel);
+        int bci = WHITE_BOX.getMethodEntryBci(method);
+        deoptimize();
+        if (!testCase.isOsr) {
+            for (int level = 1; level <= COMP_LEVEL_MAX; ++level) {
+                if (IS_AVAILABLE_COMPLEVEL.test(level)) {
+                    testAvailableLevel(level, bci);
+                } else {
+                    testUnavailableLevel(level, bci);
+                }
+            }
+        } else {
+            System.out.println("skip other levels testing in OSR");
+            testAvailableLevel(AVAILABLE_COMP_LEVEL, bci);
+        }
+    }
+}
hotspot/test/compiler/tiered/TieredLevelsTest.java (new file, 88 lines)
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test TieredLevelsTest
+ * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
+ * @build TieredLevelsTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+TieredCompilation
+ *                   -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
+ *                   -XX:CompileCommand=compileonly,TestCase$Helper::*
+ *                   TieredLevelsTest
+ * @summary Verify that all levels < 'TieredStopAtLevel' can be used
+ * @author igor.ignatyev@oracle.com
+ */
+public class TieredLevelsTest extends CompLevelsTest {
+    public static void main(String[] args) throws Exception {
+        if (!TIERED_COMPILATION) {
+            System.err.println("Test isn't applicable w/ disabled "
+                    + "TieredCompilation. Skip test.");
+            return;
+        }
+        for (TestCase test : TestCase.values()) {
+            new TieredLevelsTest(test).runTest();
+        }
+    }
+
+    private TieredLevelsTest(TestCase testCase) {
+        super(testCase);
+        // to prevent inlining of #method
+        WHITE_BOX.testSetDontInlineMethod(method, true);
+    }
+
+    @Override
+    protected void test() throws Exception {
+        checkNotCompiled();
+        compile();
+        checkCompiled();
+
+        int compLevel = getCompLevel();
+        if (compLevel > TIERED_STOP_AT_LEVEL) {
+            throw new RuntimeException("method.compLevel[" + compLevel
+                    + "] > TieredStopAtLevel [" + TIERED_STOP_AT_LEVEL + "]");
+        }
+        int bci = WHITE_BOX.getMethodEntryBci(method);
+        deoptimize();
+
+        for (int testedTier = 1; testedTier <= TIERED_STOP_AT_LEVEL;
+                ++testedTier) {
+            testAvailableLevel(testedTier, bci);
+        }
+        for (int testedTier = TIERED_STOP_AT_LEVEL + 1;
+                testedTier <= COMP_LEVEL_MAX; ++testedTier) {
+            testUnavailableLevel(testedTier, bci);
+        }
+    }
+
+
+    @Override
+    protected void checkLevel(int expected, int actual) {
+        if (expected == COMP_LEVEL_FULL_PROFILE
+                && actual == COMP_LEVEL_LIMITED_PROFILE) {
+            // for simple method full_profile may be replaced by limited_profile
+            return;
+        }
+        super.checkLevel(expected, actual);
+    }
+}
@@ -80,8 +80,7 @@ public abstract class CompilerWhiteBoxTest {

     static {
         if (TIERED_COMPILATION) {
-            THRESHOLD = 150000;
-            BACKEDGE_THRESHOLD = 0xFFFFFFFFL;
+            BACKEDGE_THRESHOLD = THRESHOLD = 150000;
        } else {
            THRESHOLD = COMPILE_THRESHOLD;
            BACKEDGE_THRESHOLD = COMPILE_THRESHOLD * Long.parseLong(getVMOption(
@@ -364,7 +363,7 @@ enum TestCase {
     /** OSR constructor test case */
     OSR_CONSTRUCTOR_TEST(Helper.OSR_CONSTRUCTOR,
             Helper.OSR_CONSTRUCTOR_CALLABLE, true),
-     /** OSR method test case */
+    /** OSR method test case */
     OSR_METOD_TEST(Helper.OSR_METHOD, Helper.OSR_METHOD_CALLABLE, true),
     /** OSR static method test case */
     OSR_STATIC_TEST(Helper.OSR_STATIC, Helper.OSR_STATIC_CALLABLE, true);
@@ -373,7 +372,7 @@ enum TestCase {
     final Executable executable;
     /** object to invoke {@linkplain #executable} */
     final Callable<Integer> callable;
-     /** flag for OSR test case */
+    /** flag for OSR test case */
     final boolean isOsr;

     private TestCase(Executable executable, Callable<Integer> callable,