*nodes, Phase
ShouldNotReachHere();
}
-void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
+void MachConstantBaseNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const {
// Empty encoding
}
@@ -607,16 +607,15 @@ void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
#endif
-void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
Compile* C = ra_->C;
- C2_MacroAssembler _masm(&cbuf);
int framesize = C->output()->frame_size_in_bytes();
int bangsize = C->output()->bang_size_in_bytes();
__ verified_entry(framesize, C->output()->need_stack_bang(bangsize)?bangsize:0, C->in_24_bit_fp_mode(), C->stub_function() != nullptr);
- C->output()->set_frame_complete(cbuf.insts_size());
+ C->output()->set_frame_complete(__ offset());
if (C->has_mach_constant_base_node()) {
// NOTE: We set the table base offset here because users might be
@@ -664,18 +663,17 @@ void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
}
#endif
-void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+void MachEpilogNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
Compile *C = ra_->C;
- MacroAssembler _masm(&cbuf);
if (C->max_vector_size() > 16) {
// Clear upper bits of YMM registers when current compiled code uses
// wide vectors to avoid AVX <-> SSE transition penalty during call.
- _masm.vzeroupper();
+ __ vzeroupper();
}
// If method set FPU control word, restore to standard control word
if (C->in_24_bit_fp_mode()) {
- _masm.fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std()));
+ __ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std()));
}
int framesize = C->output()->frame_size_in_bytes();
@@ -686,16 +684,16 @@ void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
// Note that VerifyStackAtCalls' Majik cookie does not change the frame size popped here
if (framesize >= 128) {
- emit_opcode(cbuf, 0x81); // add SP, #framesize
- emit_rm(cbuf, 0x3, 0x00, ESP_enc);
- emit_d32(cbuf, framesize);
+ emit_opcode(masm, 0x81); // add SP, #framesize
+ emit_rm(masm, 0x3, 0x00, ESP_enc);
+ emit_d32(masm, framesize);
} else if (framesize) {
- emit_opcode(cbuf, 0x83); // add SP, #framesize
- emit_rm(cbuf, 0x3, 0x00, ESP_enc);
- emit_d8(cbuf, framesize);
+ emit_opcode(masm, 0x83); // add SP, #framesize
+ emit_rm(masm, 0x3, 0x00, ESP_enc);
+ emit_d8(masm, framesize);
}
- emit_opcode(cbuf, 0x58 | EBP_enc);
+ emit_opcode(masm, 0x58 | EBP_enc);
if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
__ reserved_stack_check();
@@ -703,7 +701,6 @@ void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
if (do_polling() && C->is_method_compilation()) {
Register thread = as_Register(EBX_enc);
- MacroAssembler masm(&cbuf);
__ get_thread(thread);
Label dummy_label;
Label* code_stub = &dummy_label;
@@ -712,7 +709,9 @@ void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
C->output()->add_stub(stub);
code_stub = &stub->entry();
}
+ __ set_inst_mark();
__ relocate(relocInfo::poll_return_type);
+ __ clear_inst_mark();
__ safepoint_poll(*code_stub, thread, true /* at_return */, true /* in_nmethod */);
}
}
@@ -749,11 +748,13 @@ static enum RC rc_class( OptoReg::Name reg ) {
return rc_xmm;
}
-static int impl_helper( CodeBuffer *cbuf, bool do_size, bool is_load, int offset, int reg,
+static int impl_helper( C2_MacroAssembler *masm, bool do_size, bool is_load, int offset, int reg,
int opcode, const char *op_str, int size, outputStream* st ) {
- if( cbuf ) {
- emit_opcode (*cbuf, opcode );
- encode_RegMem(*cbuf, Matcher::_regEncode[reg], ESP_enc, 0x4, 0, offset, relocInfo::none);
+ if( masm ) {
+ masm->set_inst_mark();
+ emit_opcode (masm, opcode );
+ encode_RegMem(masm, Matcher::_regEncode[reg], ESP_enc, 0x4, 0, offset, relocInfo::none);
+ masm->clear_inst_mark();
#ifndef PRODUCT
} else if( !do_size ) {
if( size != 0 ) st->print("\n\t");
@@ -770,7 +771,7 @@ static int impl_helper( CodeBuffer *cbuf, bool do_size, bool is_load, int offset
}
// Helper for XMM registers. Extra opcode bits, limited syntax.
-static int impl_x_helper( CodeBuffer *cbuf, bool do_size, bool is_load,
+static int impl_x_helper( C2_MacroAssembler *masm, bool do_size, bool is_load,
int offset, int reg_lo, int reg_hi, int size, outputStream* st ) {
int in_size_in_bits = Assembler::EVEX_32bit;
int evex_encoding = 0;
@@ -778,11 +779,10 @@ static int impl_x_helper( CodeBuffer *cbuf, bool do_size, bool is_load,
in_size_in_bits = Assembler::EVEX_64bit;
evex_encoding = Assembler::VEX_W;
}
- if (cbuf) {
- MacroAssembler _masm(cbuf);
+ if (masm) {
// EVEX spills remain EVEX: Compressed displacemement is better than AVX on spill mem operations,
// it maps more cases to single byte displacement
- _masm.set_managed();
+ __ set_managed();
if (reg_lo+1 == reg_hi) { // double move?
if (is_load) {
__ movdbl(as_XMMRegister(Matcher::_regEncode[reg_lo]), Address(rsp, offset));
@@ -829,12 +829,11 @@ static int impl_x_helper( CodeBuffer *cbuf, bool do_size, bool is_load,
}
-static int impl_movx_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
+static int impl_movx_helper( C2_MacroAssembler *masm, bool do_size, int src_lo, int dst_lo,
int src_hi, int dst_hi, int size, outputStream* st ) {
- if (cbuf) {
- MacroAssembler _masm(cbuf);
+ if (masm) {
// EVEX spills remain EVEX: logic complex between full EVEX, partial and AVX, manage EVEX spill code one way.
- _masm.set_managed();
+ __ set_managed();
if (src_lo+1 == src_hi && dst_lo+1 == dst_hi) { // double move?
__ movdbl(as_XMMRegister(Matcher::_regEncode[dst_lo]),
as_XMMRegister(Matcher::_regEncode[src_lo]));
@@ -868,13 +867,12 @@ static int impl_movx_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int dst
return size + sz;
}
-static int impl_movgpr2x_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
+static int impl_movgpr2x_helper( C2_MacroAssembler *masm, bool do_size, int src_lo, int dst_lo,
int src_hi, int dst_hi, int size, outputStream* st ) {
// 32-bit
- if (cbuf) {
- MacroAssembler _masm(cbuf);
+ if (masm) {
// EVEX spills remain EVEX: logic complex between full EVEX, partial and AVX, manage EVEX spill code one way.
- _masm.set_managed();
+ __ set_managed();
__ movdl(as_XMMRegister(Matcher::_regEncode[dst_lo]),
as_Register(Matcher::_regEncode[src_lo]));
#ifndef PRODUCT
@@ -886,13 +884,12 @@ static int impl_movgpr2x_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int
}
-static int impl_movx2gpr_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
+static int impl_movx2gpr_helper( C2_MacroAssembler *masm, bool do_size, int src_lo, int dst_lo,
int src_hi, int dst_hi, int size, outputStream* st ) {
// 32-bit
- if (cbuf) {
- MacroAssembler _masm(cbuf);
+ if (masm) {
// EVEX spills remain EVEX: logic complex between full EVEX, partial and AVX, manage EVEX spill code one way.
- _masm.set_managed();
+ __ set_managed();
__ movdl(as_Register(Matcher::_regEncode[dst_lo]),
as_XMMRegister(Matcher::_regEncode[src_lo]));
#ifndef PRODUCT
@@ -903,10 +900,10 @@ static int impl_movx2gpr_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int
return (UseAVX> 2) ? 6 : 4;
}
-static int impl_mov_helper( CodeBuffer *cbuf, bool do_size, int src, int dst, int size, outputStream* st ) {
- if( cbuf ) {
- emit_opcode(*cbuf, 0x8B );
- emit_rm (*cbuf, 0x3, Matcher::_regEncode[dst], Matcher::_regEncode[src] );
+static int impl_mov_helper( C2_MacroAssembler *masm, bool do_size, int src, int dst, int size, outputStream* st ) {
+ if( masm ) {
+ emit_opcode(masm, 0x8B );
+ emit_rm (masm, 0x3, Matcher::_regEncode[dst], Matcher::_regEncode[src] );
#ifndef PRODUCT
} else if( !do_size ) {
if( size != 0 ) st->print("\n\t");
@@ -916,12 +913,12 @@ static int impl_mov_helper( CodeBuffer *cbuf, bool do_size, int src, int dst, in
return size+2;
}
-static int impl_fp_store_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int src_hi, int dst_lo, int dst_hi,
+static int impl_fp_store_helper( C2_MacroAssembler *masm, bool do_size, int src_lo, int src_hi, int dst_lo, int dst_hi,
int offset, int size, outputStream* st ) {
if( src_lo != FPR1L_num ) { // Move value to top of FP stack, if not already there
- if( cbuf ) {
- emit_opcode( *cbuf, 0xD9 ); // FLD (i.e., push it)
- emit_d8( *cbuf, 0xC0-1+Matcher::_regEncode[src_lo] );
+ if( masm ) {
+ emit_opcode( masm, 0xD9 ); // FLD (i.e., push it)
+ emit_d8( masm, 0xC0-1+Matcher::_regEncode[src_lo] );
#ifndef PRODUCT
} else if( !do_size ) {
if( size != 0 ) st->print("\n\t");
@@ -943,20 +940,19 @@ static int impl_fp_store_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int
assert( !OptoReg::is_valid(src_hi) && !OptoReg::is_valid(dst_hi), "no non-adjacent float-stores" );
}
- return impl_helper(cbuf,do_size,false,offset,st_op,op,op_str,size, st);
+ return impl_helper(masm,do_size,false,offset,st_op,op,op_str,size, st);
}
// Next two methods are shared by 32- and 64-bit VM. They are defined in x86.ad.
-static void vec_mov_helper(CodeBuffer *cbuf, int src_lo, int dst_lo,
+static void vec_mov_helper(C2_MacroAssembler *masm, int src_lo, int dst_lo,
int src_hi, int dst_hi, uint ireg, outputStream* st);
-void vec_spill_helper(CodeBuffer *cbuf, bool is_load,
+void vec_spill_helper(C2_MacroAssembler *masm, bool is_load,
int stack_offset, int reg, uint ireg, outputStream* st);
-static void vec_stack_to_stack_helper(CodeBuffer *cbuf, int src_offset,
+static void vec_stack_to_stack_helper(C2_MacroAssembler *masm, int src_offset,
int dst_offset, uint ireg, outputStream* st) {
- if (cbuf) {
- MacroAssembler _masm(cbuf);
+ if (masm) {
switch (ireg) {
case Op_VecS:
__ pushl(Address(rsp, src_offset));
@@ -1032,7 +1028,7 @@ static void vec_stack_to_stack_helper(CodeBuffer *cbuf, int src_offset,
}
}
-uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
+uint MachSpillCopyNode::implementation( C2_MacroAssembler *masm, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
// Get registers to move
OptoReg::Name src_second = ra_->get_reg_second(in(1));
OptoReg::Name src_first = ra_->get_reg_first(in(1));
@@ -1061,15 +1057,15 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bo
// mem -> mem
int src_offset = ra_->reg2offset(src_first);
int dst_offset = ra_->reg2offset(dst_first);
- vec_stack_to_stack_helper(cbuf, src_offset, dst_offset, ireg, st);
+ vec_stack_to_stack_helper(masm, src_offset, dst_offset, ireg, st);
} else if (src_first_rc == rc_xmm && dst_first_rc == rc_xmm ) {
- vec_mov_helper(cbuf, src_first, dst_first, src_second, dst_second, ireg, st);
+ vec_mov_helper(masm, src_first, dst_first, src_second, dst_second, ireg, st);
} else if (src_first_rc == rc_xmm && dst_first_rc == rc_stack ) {
int stack_offset = ra_->reg2offset(dst_first);
- vec_spill_helper(cbuf, false, stack_offset, src_first, ireg, st);
+ vec_spill_helper(masm, false, stack_offset, src_first, ireg, st);
} else if (src_first_rc == rc_stack && dst_first_rc == rc_xmm ) {
int stack_offset = ra_->reg2offset(src_first);
- vec_spill_helper(cbuf, true, stack_offset, dst_first, ireg, st);
+ vec_spill_helper(masm, true, stack_offset, dst_first, ireg, st);
} else {
ShouldNotReachHere();
}
@@ -1081,16 +1077,16 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bo
if( src_first_rc == rc_stack && dst_first_rc == rc_stack ) {
if( src_second == dst_first ) { // overlapping stack copy ranges
assert( src_second_rc == rc_stack && dst_second_rc == rc_stack, "we only expect a stk-stk copy here" );
- size = impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_second),ESI_num,0xFF,"PUSH ",size, st);
- size = impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_second),EAX_num,0x8F,"POP ",size, st);
+ size = impl_helper(masm,do_size,true ,ra_->reg2offset(src_second),ESI_num,0xFF,"PUSH ",size, st);
+ size = impl_helper(masm,do_size,false,ra_->reg2offset(dst_second),EAX_num,0x8F,"POP ",size, st);
src_second_rc = dst_second_rc = rc_bad; // flag as already moved the second bits
}
// move low bits
- size = impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_first),ESI_num,0xFF,"PUSH ",size, st);
- size = impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_first),EAX_num,0x8F,"POP ",size, st);
+ size = impl_helper(masm,do_size,true ,ra_->reg2offset(src_first),ESI_num,0xFF,"PUSH ",size, st);
+ size = impl_helper(masm,do_size,false,ra_->reg2offset(dst_first),EAX_num,0x8F,"POP ",size, st);
if( src_second_rc == rc_stack && dst_second_rc == rc_stack ) { // mov second bits
- size = impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_second),ESI_num,0xFF,"PUSH ",size, st);
- size = impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_second),EAX_num,0x8F,"POP ",size, st);
+ size = impl_helper(masm,do_size,true ,ra_->reg2offset(src_second),ESI_num,0xFF,"PUSH ",size, st);
+ size = impl_helper(masm,do_size,false,ra_->reg2offset(dst_second),EAX_num,0x8F,"POP ",size, st);
}
return size;
}
@@ -1098,41 +1094,41 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bo
// --------------------------------------
// Check for integer reg-reg copy
if( src_first_rc == rc_int && dst_first_rc == rc_int )
- size = impl_mov_helper(cbuf,do_size,src_first,dst_first,size, st);
+ size = impl_mov_helper(masm,do_size,src_first,dst_first,size, st);
// Check for integer store
if( src_first_rc == rc_int && dst_first_rc == rc_stack )
- size = impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_first),src_first,0x89,"MOV ",size, st);
+ size = impl_helper(masm,do_size,false,ra_->reg2offset(dst_first),src_first,0x89,"MOV ",size, st);
// Check for integer load
if( src_first_rc == rc_stack && dst_first_rc == rc_int )
- size = impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_first),dst_first,0x8B,"MOV ",size, st);
+ size = impl_helper(masm,do_size,true ,ra_->reg2offset(src_first),dst_first,0x8B,"MOV ",size, st);
// Check for integer reg-xmm reg copy
if( src_first_rc == rc_int && dst_first_rc == rc_xmm ) {
assert( (src_second_rc == rc_bad && dst_second_rc == rc_bad),
"no 64 bit integer-float reg moves" );
- return impl_movgpr2x_helper(cbuf,do_size,src_first,dst_first,src_second, dst_second, size, st);
+ return impl_movgpr2x_helper(masm,do_size,src_first,dst_first,src_second, dst_second, size, st);
}
// --------------------------------------
// Check for float reg-reg copy
if( src_first_rc == rc_float && dst_first_rc == rc_float ) {
assert( (src_second_rc == rc_bad && dst_second_rc == rc_bad) ||
(src_first+1 == src_second && dst_first+1 == dst_second), "no non-adjacent float-moves" );
- if( cbuf ) {
+ if( masm ) {
// Note the mucking with the register encode to compensate for the 0/1
// indexing issue mentioned in a comment in the reg_def sections
// for FPR registers many lines above here.
if( src_first != FPR1L_num ) {
- emit_opcode (*cbuf, 0xD9 ); // FLD ST(i)
- emit_d8 (*cbuf, 0xC0+Matcher::_regEncode[src_first]-1 );
- emit_opcode (*cbuf, 0xDD ); // FSTP ST(i)
- emit_d8 (*cbuf, 0xD8+Matcher::_regEncode[dst_first] );
+ emit_opcode (masm, 0xD9 ); // FLD ST(i)
+ emit_d8 (masm, 0xC0+Matcher::_regEncode[src_first]-1 );
+ emit_opcode (masm, 0xDD ); // FSTP ST(i)
+ emit_d8 (masm, 0xD8+Matcher::_regEncode[dst_first] );
} else {
- emit_opcode (*cbuf, 0xDD ); // FST ST(i)
- emit_d8 (*cbuf, 0xD0+Matcher::_regEncode[dst_first]-1 );
+ emit_opcode (masm, 0xDD ); // FST ST(i)
+ emit_d8 (masm, 0xD0+Matcher::_regEncode[dst_first]-1 );
}
#ifndef PRODUCT
} else if( !do_size ) {
@@ -1146,7 +1142,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bo
// Check for float store
if( src_first_rc == rc_float && dst_first_rc == rc_stack ) {
- return impl_fp_store_helper(cbuf,do_size,src_first,src_second,dst_first,dst_second,ra_->reg2offset(dst_first),size, st);
+ return impl_fp_store_helper(masm,do_size,src_first,src_second,dst_first,dst_second,ra_->reg2offset(dst_first),size, st);
}
// Check for float load
@@ -1162,11 +1158,13 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bo
op = 0xD9;
assert( src_second_rc == rc_bad && dst_second_rc == rc_bad, "no non-adjacent float-loads" );
}
- if( cbuf ) {
- emit_opcode (*cbuf, op );
- encode_RegMem(*cbuf, 0x0, ESP_enc, 0x4, 0, offset, relocInfo::none);
- emit_opcode (*cbuf, 0xDD ); // FSTP ST(i)
- emit_d8 (*cbuf, 0xD8+Matcher::_regEncode[dst_first] );
+ if( masm ) {
+ masm->set_inst_mark();
+ emit_opcode (masm, op );
+ encode_RegMem(masm, 0x0, ESP_enc, 0x4, 0, offset, relocInfo::none);
+ emit_opcode (masm, 0xDD ); // FSTP ST(i)
+ emit_d8 (masm, 0xD8+Matcher::_regEncode[dst_first] );
+ masm->clear_inst_mark();
#ifndef PRODUCT
} else if( !do_size ) {
if( size != 0 ) st->print("\n\t");
@@ -1182,35 +1180,35 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bo
assert( (src_second_rc == rc_bad && dst_second_rc == rc_bad) ||
(src_first+1 == src_second && dst_first+1 == dst_second),
"no non-adjacent float-moves" );
- return impl_movx_helper(cbuf,do_size,src_first,dst_first,src_second, dst_second, size, st);
+ return impl_movx_helper(masm,do_size,src_first,dst_first,src_second, dst_second, size, st);
}
// Check for xmm reg-integer reg copy
if( src_first_rc == rc_xmm && dst_first_rc == rc_int ) {
assert( (src_second_rc == rc_bad && dst_second_rc == rc_bad),
"no 64 bit float-integer reg moves" );
- return impl_movx2gpr_helper(cbuf,do_size,src_first,dst_first,src_second, dst_second, size, st);
+ return impl_movx2gpr_helper(masm,do_size,src_first,dst_first,src_second, dst_second, size, st);
}
// Check for xmm store
if( src_first_rc == rc_xmm && dst_first_rc == rc_stack ) {
- return impl_x_helper(cbuf,do_size,false,ra_->reg2offset(dst_first), src_first, src_second, size, st);
+ return impl_x_helper(masm,do_size,false,ra_->reg2offset(dst_first), src_first, src_second, size, st);
}
// Check for float xmm load
if( src_first_rc == rc_stack && dst_first_rc == rc_xmm ) {
- return impl_x_helper(cbuf,do_size,true ,ra_->reg2offset(src_first),dst_first, dst_second, size, st);
+ return impl_x_helper(masm,do_size,true ,ra_->reg2offset(src_first),dst_first, dst_second, size, st);
}
// Copy from float reg to xmm reg
if( src_first_rc == rc_float && dst_first_rc == rc_xmm ) {
// copy to the top of stack from floating point reg
// and use LEA to preserve flags
- if( cbuf ) {
- emit_opcode(*cbuf,0x8D); // LEA ESP,[ESP-8]
- emit_rm(*cbuf, 0x1, ESP_enc, 0x04);
- emit_rm(*cbuf, 0x0, 0x04, ESP_enc);
- emit_d8(*cbuf,0xF8);
+ if( masm ) {
+ emit_opcode(masm,0x8D); // LEA ESP,[ESP-8]
+ emit_rm(masm, 0x1, ESP_enc, 0x04);
+ emit_rm(masm, 0x0, 0x04, ESP_enc);
+ emit_d8(masm,0xF8);
#ifndef PRODUCT
} else if( !do_size ) {
if( size != 0 ) st->print("\n\t");
@@ -1219,16 +1217,16 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bo
}
size += 4;
- size = impl_fp_store_helper(cbuf,do_size,src_first,src_second,dst_first,dst_second,0,size, st);
+ size = impl_fp_store_helper(masm,do_size,src_first,src_second,dst_first,dst_second,0,size, st);
// Copy from the temp memory to the xmm reg.
- size = impl_x_helper(cbuf,do_size,true ,0,dst_first, dst_second, size, st);
+ size = impl_x_helper(masm,do_size,true ,0,dst_first, dst_second, size, st);
- if( cbuf ) {
- emit_opcode(*cbuf,0x8D); // LEA ESP,[ESP+8]
- emit_rm(*cbuf, 0x1, ESP_enc, 0x04);
- emit_rm(*cbuf, 0x0, 0x04, ESP_enc);
- emit_d8(*cbuf,0x08);
+ if( masm ) {
+ emit_opcode(masm,0x8D); // LEA ESP,[ESP+8]
+ emit_rm(masm, 0x1, ESP_enc, 0x04);
+ emit_rm(masm, 0x0, 0x04, ESP_enc);
+ emit_d8(masm,0x08);
#ifndef PRODUCT
} else if( !do_size ) {
if( size != 0 ) st->print("\n\t");
@@ -1244,8 +1242,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bo
assert((src_first & 1) == 0 && src_first + 1 == src_second, "invalid register pair");
assert((dst_first & 1) == 0 && dst_first + 1 == dst_second, "invalid register pair");
int offset = ra_->reg2offset(src_first);
- if (cbuf != nullptr) {
- MacroAssembler _masm(cbuf);
+ if (masm != nullptr) {
__ kmov(as_KRegister(Matcher::_regEncode[dst_first]), Address(rsp, offset));
#ifndef PRODUCT
} else {
@@ -1259,8 +1256,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bo
assert((src_first & 1) == 0 && src_first + 1 == src_second, "invalid register pair");
assert((dst_first & 1) == 0 && dst_first + 1 == dst_second, "invalid register pair");
int offset = ra_->reg2offset(dst_first);
- if (cbuf != nullptr) {
- MacroAssembler _masm(cbuf);
+ if (masm != nullptr) {
__ kmov(Address(rsp, offset), as_KRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
@@ -1283,8 +1279,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bo
if (src_first_rc == rc_kreg && dst_first_rc == rc_kreg) {
assert((src_first & 1) == 0 && src_first + 1 == src_second, "invalid register pair");
assert((dst_first & 1) == 0 && dst_first + 1 == dst_second, "invalid register pair");
- if (cbuf != nullptr) {
- MacroAssembler _masm(cbuf);
+ if (masm != nullptr) {
__ kmov(as_KRegister(Matcher::_regEncode[dst_first]), as_KRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
@@ -1304,15 +1299,15 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bo
// Check for second word int-int move
if( src_second_rc == rc_int && dst_second_rc == rc_int )
- return impl_mov_helper(cbuf,do_size,src_second,dst_second,size, st);
+ return impl_mov_helper(masm,do_size,src_second,dst_second,size, st);
// Check for second word integer store
if( src_second_rc == rc_int && dst_second_rc == rc_stack )
- return impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_second),src_second,0x89,"MOV ",size, st);
+ return impl_helper(masm,do_size,false,ra_->reg2offset(dst_second),src_second,0x89,"MOV ",size, st);
// Check for second word integer load
if( dst_second_rc == rc_int && src_second_rc == rc_stack )
- return impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_second),dst_second,0x8B,"MOV ",size, st);
+ return impl_helper(masm,do_size,true ,ra_->reg2offset(src_second),dst_second,0x8B,"MOV ",size, st);
Unimplemented();
return 0; // Mute compiler
@@ -1324,8 +1319,8 @@ void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream* st) const {
}
#endif
-void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
- implementation( &cbuf, ra_, false, nullptr );
+void MachSpillCopyNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
+ implementation( masm, ra_, false, nullptr );
}
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
@@ -1342,20 +1337,20 @@ void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
}
#endif
-void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+void BoxLockNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
int reg = ra_->get_encode(this);
if( offset >= 128 ) {
- emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
- emit_rm(cbuf, 0x2, reg, 0x04);
- emit_rm(cbuf, 0x0, 0x04, ESP_enc);
- emit_d32(cbuf, offset);
+ emit_opcode(masm, 0x8D); // LEA reg,[SP+offset]
+ emit_rm(masm, 0x2, reg, 0x04);
+ emit_rm(masm, 0x0, 0x04, ESP_enc);
+ emit_d32(masm, offset);
}
else {
- emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
- emit_rm(cbuf, 0x1, reg, 0x04);
- emit_rm(cbuf, 0x0, 0x04, ESP_enc);
- emit_d8(cbuf, offset);
+ emit_opcode(masm, 0x8D); // LEA reg,[SP+offset]
+ emit_rm(masm, 0x1, reg, 0x04);
+ emit_rm(masm, 0x0, 0x04, ESP_enc);
+ emit_d8(masm, offset);
}
}
@@ -1381,9 +1376,8 @@ void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
}
#endif
-void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
- MacroAssembler masm(&cbuf);
- masm.ic_check(CodeEntryAlignment);
+void MachUEPNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
+ __ ic_check(CodeEntryAlignment);
}
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
@@ -1528,37 +1522,49 @@ encode %{
// adding a syntax that specifies the sizes of fields in an order,
// so that the adlc can build the emit functions automagically
+ // Set instruction mark in MacroAssembler. This is used only in
+ // instructions that emit bytes directly to the CodeBuffer wraped
+ // in the MacroAssembler. Should go away once all "instruct" are
+ // patched to emit bytes only using methods in MacroAssembler.
+ enc_class SetInstMark %{
+ __ set_inst_mark();
+ %}
+
+ enc_class ClearInstMark %{
+ __ clear_inst_mark();
+ %}
+
// Emit primary opcode
enc_class OpcP %{
- emit_opcode(cbuf, $primary);
+ emit_opcode(masm, $primary);
%}
// Emit secondary opcode
enc_class OpcS %{
- emit_opcode(cbuf, $secondary);
+ emit_opcode(masm, $secondary);
%}
// Emit opcode directly
enc_class Opcode(immI d8) %{
- emit_opcode(cbuf, $d8$$constant);
+ emit_opcode(masm, $d8$$constant);
%}
enc_class SizePrefix %{
- emit_opcode(cbuf,0x66);
+ emit_opcode(masm,0x66);
%}
enc_class RegReg (rRegI dst, rRegI src) %{ // RegReg(Many)
- emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg);
+ emit_rm(masm, 0x3, $dst$$reg, $src$$reg);
%}
enc_class OpcRegReg (immI opcode, rRegI dst, rRegI src) %{ // OpcRegReg(Many)
- emit_opcode(cbuf,$opcode$$constant);
- emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg);
+ emit_opcode(masm,$opcode$$constant);
+ emit_rm(masm, 0x3, $dst$$reg, $src$$reg);
%}
enc_class mov_r32_imm0( rRegI dst ) %{
- emit_opcode( cbuf, 0xB8 + $dst$$reg ); // 0xB8+ rd -- MOV r32 ,imm32
- emit_d32 ( cbuf, 0x0 ); // imm32==0x0
+ emit_opcode( masm, 0xB8 + $dst$$reg ); // 0xB8+ rd -- MOV r32 ,imm32
+ emit_d32 ( masm, 0x0 ); // imm32==0x0
%}
enc_class cdq_enc %{
@@ -1585,26 +1591,26 @@ encode %{
// F7 F9 idiv rax,ecx
// done:
//
- emit_opcode(cbuf,0x81); emit_d8(cbuf,0xF8);
- emit_opcode(cbuf,0x00); emit_d8(cbuf,0x00);
- emit_opcode(cbuf,0x00); emit_d8(cbuf,0x80); // cmp rax,80000000h
- emit_opcode(cbuf,0x0F); emit_d8(cbuf,0x85);
- emit_opcode(cbuf,0x0B); emit_d8(cbuf,0x00);
- emit_opcode(cbuf,0x00); emit_d8(cbuf,0x00); // jne normal_case
- emit_opcode(cbuf,0x33); emit_d8(cbuf,0xD2); // xor rdx,edx
- emit_opcode(cbuf,0x83); emit_d8(cbuf,0xF9); emit_d8(cbuf,0xFF); // cmp rcx,0FFh
- emit_opcode(cbuf,0x0F); emit_d8(cbuf,0x84);
- emit_opcode(cbuf,0x03); emit_d8(cbuf,0x00);
- emit_opcode(cbuf,0x00); emit_d8(cbuf,0x00); // je done
+ emit_opcode(masm,0x81); emit_d8(masm,0xF8);
+ emit_opcode(masm,0x00); emit_d8(masm,0x00);
+ emit_opcode(masm,0x00); emit_d8(masm,0x80); // cmp rax,80000000h
+ emit_opcode(masm,0x0F); emit_d8(masm,0x85);
+ emit_opcode(masm,0x0B); emit_d8(masm,0x00);
+ emit_opcode(masm,0x00); emit_d8(masm,0x00); // jne normal_case
+ emit_opcode(masm,0x33); emit_d8(masm,0xD2); // xor rdx,edx
+ emit_opcode(masm,0x83); emit_d8(masm,0xF9); emit_d8(masm,0xFF); // cmp rcx,0FFh
+ emit_opcode(masm,0x0F); emit_d8(masm,0x84);
+ emit_opcode(masm,0x03); emit_d8(masm,0x00);
+ emit_opcode(masm,0x00); emit_d8(masm,0x00); // je done
// normal_case:
- emit_opcode(cbuf,0x99); // cdq
+ emit_opcode(masm,0x99); // cdq
// idiv (note: must be emitted by the user of this rule)
// normal:
%}
// Dense encoding for older common ops
enc_class Opc_plus(immI opcode, rRegI reg) %{
- emit_opcode(cbuf, $opcode$$constant + $reg$$reg);
+ emit_opcode(masm, $opcode$$constant + $reg$$reg);
%}
@@ -1612,10 +1618,10 @@ encode %{
enc_class OpcSE (immI imm) %{ // Emit primary opcode and set sign-extend bit
// Check for 8-bit immediate, and set sign extend bit in opcode
if (($imm$$constant >= -128) && ($imm$$constant <= 127)) {
- emit_opcode(cbuf, $primary | 0x02);
+ emit_opcode(masm, $primary | 0x02);
}
else { // If 32-bit immediate
- emit_opcode(cbuf, $primary);
+ emit_opcode(masm, $primary);
}
%}
@@ -1623,12 +1629,12 @@ encode %{
// Emit primary opcode and set sign-extend bit
// Check for 8-bit immediate, and set sign extend bit in opcode
if (($imm$$constant >= -128) && ($imm$$constant <= 127)) {
- emit_opcode(cbuf, $primary | 0x02); }
+ emit_opcode(masm, $primary | 0x02); }
else { // If 32-bit immediate
- emit_opcode(cbuf, $primary);
+ emit_opcode(masm, $primary);
}
// Emit r/m byte with secondary opcode, after primary opcode.
- emit_rm(cbuf, 0x3, $secondary, $dst$$reg);
+ emit_rm(masm, 0x3, $secondary, $dst$$reg);
%}
enc_class Con8or32 (immI imm) %{ // Con8or32(storeImmI), 8 or 32 bits
@@ -1646,62 +1652,62 @@ encode %{
// Emit primary opcode and set sign-extend bit
// Check for 8-bit immediate, and set sign extend bit in opcode
int con = (int)$imm$$constant; // Throw away top bits
- emit_opcode(cbuf, ((con >= -128) && (con <= 127)) ? ($primary | 0x02) : $primary);
+ emit_opcode(masm, ((con >= -128) && (con <= 127)) ? ($primary | 0x02) : $primary);
// Emit r/m byte with secondary opcode, after primary opcode.
- emit_rm(cbuf, 0x3, $secondary, $dst$$reg);
- if ((con >= -128) && (con <= 127)) emit_d8 (cbuf,con);
- else emit_d32(cbuf,con);
+ emit_rm(masm, 0x3, $secondary, $dst$$reg);
+ if ((con >= -128) && (con <= 127)) emit_d8 (masm,con);
+ else emit_d32(masm,con);
%}
enc_class Long_OpcSErm_Hi(eRegL dst, immL imm) %{
// Emit primary opcode and set sign-extend bit
// Check for 8-bit immediate, and set sign extend bit in opcode
int con = (int)($imm$$constant >> 32); // Throw away bottom bits
- emit_opcode(cbuf, ((con >= -128) && (con <= 127)) ? ($primary | 0x02) : $primary);
+ emit_opcode(masm, ((con >= -128) && (con <= 127)) ? ($primary | 0x02) : $primary);
// Emit r/m byte with tertiary opcode, after primary opcode.
- emit_rm(cbuf, 0x3, $tertiary, HIGH_FROM_LOW_ENC($dst$$reg));
- if ((con >= -128) && (con <= 127)) emit_d8 (cbuf,con);
- else emit_d32(cbuf,con);
+ emit_rm(masm, 0x3, $tertiary, HIGH_FROM_LOW_ENC($dst$$reg));
+ if ((con >= -128) && (con <= 127)) emit_d8 (masm,con);
+ else emit_d32(masm,con);
%}
enc_class OpcSReg (rRegI dst) %{ // BSWAP
- emit_cc(cbuf, $secondary, $dst$$reg );
+ emit_cc(masm, $secondary, $dst$$reg );
%}
enc_class bswap_long_bytes(eRegL dst) %{ // BSWAP
int destlo = $dst$$reg;
int desthi = HIGH_FROM_LOW_ENC(destlo);
// bswap lo
- emit_opcode(cbuf, 0x0F);
- emit_cc(cbuf, 0xC8, destlo);
+ emit_opcode(masm, 0x0F);
+ emit_cc(masm, 0xC8, destlo);
// bswap hi
- emit_opcode(cbuf, 0x0F);
- emit_cc(cbuf, 0xC8, desthi);
+ emit_opcode(masm, 0x0F);
+ emit_cc(masm, 0xC8, desthi);
// xchg lo and hi
- emit_opcode(cbuf, 0x87);
- emit_rm(cbuf, 0x3, destlo, desthi);
+ emit_opcode(masm, 0x87);
+ emit_rm(masm, 0x3, destlo, desthi);
%}
enc_class RegOpc (rRegI div) %{ // IDIV, IMOD, JMP indirect, ...
- emit_rm(cbuf, 0x3, $secondary, $div$$reg );
+ emit_rm(masm, 0x3, $secondary, $div$$reg );
%}
enc_class enc_cmov(cmpOp cop ) %{ // CMOV
$$$emit8$primary;
- emit_cc(cbuf, $secondary, $cop$$cmpcode);
+ emit_cc(masm, $secondary, $cop$$cmpcode);
%}
enc_class enc_cmov_dpr(cmpOp cop, regDPR src ) %{ // CMOV
int op = 0xDA00 + $cop$$cmpcode + ($src$$reg-1);
- emit_d8(cbuf, op >> 8 );
- emit_d8(cbuf, op & 255);
+ emit_d8(masm, op >> 8 );
+ emit_d8(masm, op & 255);
%}
// emulate a CMOV with a conditional branch around a MOV
enc_class enc_cmov_branch( cmpOp cop, immI brOffs ) %{ // CMOV
// Invert sense of branch from sense of CMOV
- emit_cc( cbuf, 0x70, ($cop$$cmpcode^1) );
- emit_d8( cbuf, $brOffs$$constant );
+ emit_cc( masm, 0x70, ($cop$$cmpcode^1) );
+ emit_d8( masm, $brOffs$$constant );
%}
enc_class enc_PartialSubtypeCheck( ) %{
@@ -1711,7 +1717,6 @@ encode %{
Register Resi = as_Register(ESI_enc); // sub class
Label miss;
- MacroAssembler _masm(&cbuf);
__ check_klass_subtype_slow_path(Resi, Reax, Recx, Redi,
nullptr, &miss,
/*set_cond_codes:*/ true);
@@ -1722,43 +1727,40 @@ encode %{
%}
enc_class FFree_Float_Stack_All %{ // Free_Float_Stack_All
- MacroAssembler masm(&cbuf);
- int start = masm.offset();
+ int start = __ offset();
if (UseSSE >= 2) {
if (VerifyFPU) {
- masm.verify_FPU(0, "must be empty in SSE2+ mode");
+ __ verify_FPU(0, "must be empty in SSE2+ mode");
}
} else {
// External c_calling_convention expects the FPU stack to be 'clean'.
// Compiled code leaves it dirty. Do cleanup now.
- masm.empty_FPU_stack();
+ __ empty_FPU_stack();
}
if (sizeof_FFree_Float_Stack_All == -1) {
- sizeof_FFree_Float_Stack_All = masm.offset() - start;
+ sizeof_FFree_Float_Stack_All = __ offset() - start;
} else {
- assert(masm.offset() - start == sizeof_FFree_Float_Stack_All, "wrong size");
+ assert(__ offset() - start == sizeof_FFree_Float_Stack_All, "wrong size");
}
%}
enc_class Verify_FPU_For_Leaf %{
if( VerifyFPU ) {
- MacroAssembler masm(&cbuf);
- masm.verify_FPU( -3, "Returning from Runtime Leaf call");
+ __ verify_FPU( -3, "Returning from Runtime Leaf call");
}
%}
enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime, Java_To_Runtime_Leaf
// This is the instruction starting address for relocation info.
- MacroAssembler _masm(&cbuf);
- cbuf.set_insts_mark();
+ __ set_inst_mark();
$$$emit8$primary;
// CALL directly to the runtime
- emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
+ emit_d32_reloc(masm, ($meth$$method - (int)(__ pc()) - 4),
runtime_call_Relocation::spec(), RELOC_IMM32 );
+ __ clear_inst_mark();
__ post_call_nop();
if (UseSSE >= 2) {
- MacroAssembler _masm(&cbuf);
BasicType rt = tf()->return_type();
if ((rt == T_FLOAT || rt == T_DOUBLE) && !return_value_is_used()) {
@@ -1783,54 +1785,53 @@ encode %{
enc_class pre_call_resets %{
// If method sets FPU control word restore it here
- debug_only(int off0 = cbuf.insts_size());
+ debug_only(int off0 = __ offset());
if (ra_->C->in_24_bit_fp_mode()) {
- MacroAssembler _masm(&cbuf);
__ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std()));
}
// Clear upper bits of YMM registers when current compiled code uses
// wide vectors to avoid AVX <-> SSE transition penalty during call.
- MacroAssembler _masm(&cbuf);
__ vzeroupper();
- debug_only(int off1 = cbuf.insts_size());
+ debug_only(int off1 = __ offset());
assert(off1 - off0 == pre_call_resets_size(), "correct size prediction");
%}
enc_class post_call_FPU %{
// If method sets FPU control word do it here also
if (Compile::current()->in_24_bit_fp_mode()) {
- MacroAssembler masm(&cbuf);
- masm.fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_24()));
+ __ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_24()));
}
%}
enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
// CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
// who we intended to call.
- MacroAssembler _masm(&cbuf);
- cbuf.set_insts_mark();
+ __ set_inst_mark();
$$$emit8$primary;
if (!_method) {
- emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
+ emit_d32_reloc(masm, ($meth$$method - (int)(__ pc()) - 4),
runtime_call_Relocation::spec(),
RELOC_IMM32);
+ __ clear_inst_mark();
__ post_call_nop();
} else {
- int method_index = resolved_method_index(cbuf);
+ int method_index = resolved_method_index(masm);
RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
: static_call_Relocation::spec(method_index);
- emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
+ emit_d32_reloc(masm, ($meth$$method - (int)(__ pc()) - 4),
rspec, RELOC_DISP32);
__ post_call_nop();
- address mark = cbuf.insts_mark();
+ address mark = __ inst_mark();
if (CodeBuffer::supports_shared_stubs() && _method->can_be_statically_bound()) {
// Calls of the same statically bound method can share
// a stub to the interpreter.
- cbuf.shared_stub_to_interp_for(_method, cbuf.insts()->mark_off());
+ __ code()->shared_stub_to_interp_for(_method, __ code()->insts()->mark_off());
+ __ clear_inst_mark();
} else {
// Emit stubs for static call.
- address stub = CompiledDirectCall::emit_to_interp_stub(cbuf, mark);
+ address stub = CompiledDirectCall::emit_to_interp_stub(masm, mark);
+ __ clear_inst_mark();
if (stub == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
@@ -1840,8 +1841,7 @@ encode %{
%}
enc_class Java_Dynamic_Call (method meth) %{ // JAVA DYNAMIC CALL
- MacroAssembler _masm(&cbuf);
- __ ic_call((address)$meth$$method, resolved_method_index(cbuf));
+ __ ic_call((address)$meth$$method, resolved_method_index(masm));
__ post_call_nop();
%}
@@ -1850,57 +1850,31 @@ encode %{
assert( -128 <= disp && disp <= 127, "compiled_code_offset isn't small");
// CALL *[EAX+in_bytes(Method::from_compiled_code_entry_point_offset())]
- MacroAssembler _masm(&cbuf);
- cbuf.set_insts_mark();
+ __ set_inst_mark();
$$$emit8$primary;
- emit_rm(cbuf, 0x01, $secondary, EAX_enc ); // R/M byte
- emit_d8(cbuf, disp); // Displacement
+ emit_rm(masm, 0x01, $secondary, EAX_enc ); // R/M byte
+ emit_d8(masm, disp); // Displacement
+ __ clear_inst_mark();
__ post_call_nop();
%}
-// Following encoding is no longer used, but may be restored if calling
-// convention changes significantly.
-// Became: Xor_Reg(EBP), Java_To_Runtime( labl )
-//
-// enc_class Java_Interpreter_Call (label labl) %{ // JAVA INTERPRETER CALL
-// // int ic_reg = Matcher::inline_cache_reg();
-// // int ic_encode = Matcher::_regEncode[ic_reg];
-// // int imo_reg = Matcher::interpreter_method_reg();
-// // int imo_encode = Matcher::_regEncode[imo_reg];
-//
-// // // Interpreter expects method_ptr in EBX, currently a callee-saved register,
-// // // so we load it immediately before the call
-// // emit_opcode(cbuf, 0x8B); // MOV imo_reg,ic_reg # method_ptr
-// // emit_rm(cbuf, 0x03, imo_encode, ic_encode ); // R/M byte
-//
-// // xor rbp,ebp
-// emit_opcode(cbuf, 0x33);
-// emit_rm(cbuf, 0x3, EBP_enc, EBP_enc);
-//
-// // CALL to interpreter.
-// cbuf.set_insts_mark();
-// $$$emit8$primary;
-// emit_d32_reloc(cbuf, ($labl$$label - (int)(cbuf.insts_end()) - 4),
-// runtime_call_Relocation::spec(), RELOC_IMM32 );
-// %}
-
enc_class RegOpcImm (rRegI dst, immI8 shift) %{ // SHL, SAR, SHR
$$$emit8$primary;
- emit_rm(cbuf, 0x3, $secondary, $dst$$reg);
+ emit_rm(masm, 0x3, $secondary, $dst$$reg);
$$$emit8$shift$$constant;
%}
enc_class LdImmI (rRegI dst, immI src) %{ // Load Immediate
// Load immediate does not have a zero or sign extended version
// for 8-bit immediates
- emit_opcode(cbuf, 0xB8 + $dst$$reg);
+ emit_opcode(masm, 0xB8 + $dst$$reg);
$$$emit32$src$$constant;
%}
enc_class LdImmP (rRegI dst, immI src) %{ // Load Immediate
// Load immediate does not have a zero or sign extended version
// for 8-bit immediates
- emit_opcode(cbuf, $primary + $dst$$reg);
+ emit_opcode(masm, $primary + $dst$$reg);
$$$emit32$src$$constant;
%}
@@ -1911,11 +1885,11 @@ encode %{
int src_con = $src$$constant & 0x0FFFFFFFFL;
if (src_con == 0) {
// xor dst, dst
- emit_opcode(cbuf, 0x33);
- emit_rm(cbuf, 0x3, dst_enc, dst_enc);
+ emit_opcode(masm, 0x33);
+ emit_rm(masm, 0x3, dst_enc, dst_enc);
} else {
- emit_opcode(cbuf, $primary + dst_enc);
- emit_d32(cbuf, src_con);
+ emit_opcode(masm, $primary + dst_enc);
+ emit_d32(masm, src_con);
}
%}
@@ -1926,48 +1900,48 @@ encode %{
int src_con = ((julong)($src$$constant)) >> 32;
if (src_con == 0) {
// xor dst, dst
- emit_opcode(cbuf, 0x33);
- emit_rm(cbuf, 0x3, dst_enc, dst_enc);
+ emit_opcode(masm, 0x33);
+ emit_rm(masm, 0x3, dst_enc, dst_enc);
} else {
- emit_opcode(cbuf, $primary + dst_enc);
- emit_d32(cbuf, src_con);
+ emit_opcode(masm, $primary + dst_enc);
+ emit_d32(masm, src_con);
}
%}
// Encode a reg-reg copy. If it is useless, then empty encoding.
enc_class enc_Copy( rRegI dst, rRegI src ) %{
- encode_Copy( cbuf, $dst$$reg, $src$$reg );
+ encode_Copy( masm, $dst$$reg, $src$$reg );
%}
enc_class enc_CopyL_Lo( rRegI dst, eRegL src ) %{
- encode_Copy( cbuf, $dst$$reg, $src$$reg );
+ encode_Copy( masm, $dst$$reg, $src$$reg );
%}
enc_class RegReg (rRegI dst, rRegI src) %{ // RegReg(Many)
- emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg);
+ emit_rm(masm, 0x3, $dst$$reg, $src$$reg);
%}
enc_class RegReg_Lo(eRegL dst, eRegL src) %{ // RegReg(Many)
$$$emit8$primary;
- emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg);
+ emit_rm(masm, 0x3, $dst$$reg, $src$$reg);
%}
enc_class RegReg_Hi(eRegL dst, eRegL src) %{ // RegReg(Many)
$$$emit8$secondary;
- emit_rm(cbuf, 0x3, HIGH_FROM_LOW_ENC($dst$$reg), HIGH_FROM_LOW_ENC($src$$reg));
+ emit_rm(masm, 0x3, HIGH_FROM_LOW_ENC($dst$$reg), HIGH_FROM_LOW_ENC($src$$reg));
%}
enc_class RegReg_Lo2(eRegL dst, eRegL src) %{ // RegReg(Many)
- emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg);
+ emit_rm(masm, 0x3, $dst$$reg, $src$$reg);
%}
enc_class RegReg_Hi2(eRegL dst, eRegL src) %{ // RegReg(Many)
- emit_rm(cbuf, 0x3, HIGH_FROM_LOW_ENC($dst$$reg), HIGH_FROM_LOW_ENC($src$$reg));
+ emit_rm(masm, 0x3, HIGH_FROM_LOW_ENC($dst$$reg), HIGH_FROM_LOW_ENC($src$$reg));
%}
enc_class RegReg_HiLo( eRegL src, rRegI dst ) %{
- emit_rm(cbuf, 0x3, $dst$$reg, HIGH_FROM_LOW_ENC($src$$reg));
+ emit_rm(masm, 0x3, $dst$$reg, HIGH_FROM_LOW_ENC($src$$reg));
%}
enc_class Con32 (immI src) %{ // Con32(storeImmI)
@@ -1979,14 +1953,14 @@ encode %{
// Output Float immediate bits
jfloat jf = $src$$constant;
int jf_as_bits = jint_cast( jf );
- emit_d32(cbuf, jf_as_bits);
+ emit_d32(masm, jf_as_bits);
%}
enc_class Con32F_as_bits(immF src) %{ // storeX_imm
// Output Float immediate bits
jfloat jf = $src$$constant;
int jf_as_bits = jint_cast( jf );
- emit_d32(cbuf, jf_as_bits);
+ emit_d32(masm, jf_as_bits);
%}
enc_class Con16 (immI src) %{ // Con16(storeImmI)
@@ -1995,17 +1969,17 @@ encode %{
%}
enc_class Con_d32(immI src) %{
- emit_d32(cbuf,$src$$constant);
+ emit_d32(masm,$src$$constant);
%}
enc_class conmemref (eRegP t1) %{ // Con32(storeImmI)
// Output immediate memory reference
- emit_rm(cbuf, 0x00, $t1$$reg, 0x05 );
- emit_d32(cbuf, 0x00);
+ emit_rm(masm, 0x00, $t1$$reg, 0x05 );
+ emit_d32(masm, 0x00);
%}
enc_class lock_prefix( ) %{
- emit_opcode(cbuf,0xF0); // [Lock]
+ emit_opcode(masm,0xF0); // [Lock]
%}
// Cmp-xchg long value.
@@ -2016,71 +1990,67 @@ encode %{
enc_class enc_cmpxchg8(eSIRegP mem_ptr) %{
// XCHG rbx,ecx
- emit_opcode(cbuf,0x87);
- emit_opcode(cbuf,0xD9);
+ emit_opcode(masm,0x87);
+ emit_opcode(masm,0xD9);
// [Lock]
- emit_opcode(cbuf,0xF0);
+ emit_opcode(masm,0xF0);
// CMPXCHG8 [Eptr]
- emit_opcode(cbuf,0x0F);
- emit_opcode(cbuf,0xC7);
- emit_rm( cbuf, 0x0, 1, $mem_ptr$$reg );
+ emit_opcode(masm,0x0F);
+ emit_opcode(masm,0xC7);
+ emit_rm( masm, 0x0, 1, $mem_ptr$$reg );
// XCHG rbx,ecx
- emit_opcode(cbuf,0x87);
- emit_opcode(cbuf,0xD9);
+ emit_opcode(masm,0x87);
+ emit_opcode(masm,0xD9);
%}
enc_class enc_cmpxchg(eSIRegP mem_ptr) %{
// [Lock]
- emit_opcode(cbuf,0xF0);
+ emit_opcode(masm,0xF0);
// CMPXCHG [Eptr]
- emit_opcode(cbuf,0x0F);
- emit_opcode(cbuf,0xB1);
- emit_rm( cbuf, 0x0, 1, $mem_ptr$$reg );
+ emit_opcode(masm,0x0F);
+ emit_opcode(masm,0xB1);
+ emit_rm( masm, 0x0, 1, $mem_ptr$$reg );
%}
enc_class enc_cmpxchgb(eSIRegP mem_ptr) %{
// [Lock]
- emit_opcode(cbuf,0xF0);
+ emit_opcode(masm,0xF0);
// CMPXCHGB [Eptr]
- emit_opcode(cbuf,0x0F);
- emit_opcode(cbuf,0xB0);
- emit_rm( cbuf, 0x0, 1, $mem_ptr$$reg );
+ emit_opcode(masm,0x0F);
+ emit_opcode(masm,0xB0);
+ emit_rm( masm, 0x0, 1, $mem_ptr$$reg );
%}
enc_class enc_cmpxchgw(eSIRegP mem_ptr) %{
// [Lock]
- emit_opcode(cbuf,0xF0);
+ emit_opcode(masm,0xF0);
// 16-bit mode
- emit_opcode(cbuf, 0x66);
+ emit_opcode(masm, 0x66);
// CMPXCHGW [Eptr]
- emit_opcode(cbuf,0x0F);
- emit_opcode(cbuf,0xB1);
- emit_rm( cbuf, 0x0, 1, $mem_ptr$$reg );
+ emit_opcode(masm,0x0F);
+ emit_opcode(masm,0xB1);
+ emit_rm( masm, 0x0, 1, $mem_ptr$$reg );
%}
enc_class enc_flags_ne_to_boolean( iRegI res ) %{
int res_encoding = $res$$reg;
// MOV res,0
- emit_opcode( cbuf, 0xB8 + res_encoding);
- emit_d32( cbuf, 0 );
+ emit_opcode( masm, 0xB8 + res_encoding);
+ emit_d32( masm, 0 );
// JNE,s fail
- emit_opcode(cbuf,0x75);
- emit_d8(cbuf, 5 );
+ emit_opcode(masm,0x75);
+ emit_d8(masm, 5 );
// MOV res,1
- emit_opcode( cbuf, 0xB8 + res_encoding);
- emit_d32( cbuf, 1 );
+ emit_opcode( masm, 0xB8 + res_encoding);
+ emit_d32( masm, 1 );
// fail:
%}
- enc_class set_instruction_start( ) %{
- cbuf.set_insts_mark(); // Mark start of opcode for reloc info in mem operand
- %}
-
enc_class RegMem (rRegI ereg, memory mem) %{ // emit_reg_mem
int reg_encoding = $ereg$$reg;
int base = $mem$$base;
@@ -2088,7 +2058,7 @@ encode %{
int scale = $mem$$scale;
int displace = $mem$$disp;
relocInfo::relocType disp_reloc = $mem->disp_reloc();
- encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, disp_reloc);
+ encode_RegMem(masm, reg_encoding, base, index, scale, displace, disp_reloc);
%}
enc_class RegMem_Hi(eRegL ereg, memory mem) %{ // emit_reg_mem
@@ -2098,33 +2068,33 @@ encode %{
int scale = $mem$$scale;
int displace = $mem$$disp + 4; // Offset is 4 further in memory
assert( $mem->disp_reloc() == relocInfo::none, "Cannot add 4 to oop" );
- encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, relocInfo::none);
+ encode_RegMem(masm, reg_encoding, base, index, scale, displace, relocInfo::none);
%}
enc_class move_long_small_shift( eRegL dst, immI_1_31 cnt ) %{
int r1, r2;
if( $tertiary == 0xA4 ) { r1 = $dst$$reg; r2 = HIGH_FROM_LOW_ENC($dst$$reg); }
else { r2 = $dst$$reg; r1 = HIGH_FROM_LOW_ENC($dst$$reg); }
- emit_opcode(cbuf,0x0F);
- emit_opcode(cbuf,$tertiary);
- emit_rm(cbuf, 0x3, r1, r2);
- emit_d8(cbuf,$cnt$$constant);
- emit_d8(cbuf,$primary);
- emit_rm(cbuf, 0x3, $secondary, r1);
- emit_d8(cbuf,$cnt$$constant);
+ emit_opcode(masm,0x0F);
+ emit_opcode(masm,$tertiary);
+ emit_rm(masm, 0x3, r1, r2);
+ emit_d8(masm,$cnt$$constant);
+ emit_d8(masm,$primary);
+ emit_rm(masm, 0x3, $secondary, r1);
+ emit_d8(masm,$cnt$$constant);
%}
enc_class move_long_big_shift_sign( eRegL dst, immI_32_63 cnt ) %{
- emit_opcode( cbuf, 0x8B ); // Move
- emit_rm(cbuf, 0x3, $dst$$reg, HIGH_FROM_LOW_ENC($dst$$reg));
+ emit_opcode( masm, 0x8B ); // Move
+ emit_rm(masm, 0x3, $dst$$reg, HIGH_FROM_LOW_ENC($dst$$reg));
if( $cnt$$constant > 32 ) { // Shift, if not by zero
- emit_d8(cbuf,$primary);
- emit_rm(cbuf, 0x3, $secondary, $dst$$reg);
- emit_d8(cbuf,$cnt$$constant-32);
+ emit_d8(masm,$primary);
+ emit_rm(masm, 0x3, $secondary, $dst$$reg);
+ emit_d8(masm,$cnt$$constant-32);
}
- emit_d8(cbuf,$primary);
- emit_rm(cbuf, 0x3, $secondary, HIGH_FROM_LOW_ENC($dst$$reg));
- emit_d8(cbuf,31);
+ emit_d8(masm,$primary);
+ emit_rm(masm, 0x3, $secondary, HIGH_FROM_LOW_ENC($dst$$reg));
+ emit_d8(masm,31);
%}
enc_class move_long_big_shift_clr( eRegL dst, immI_32_63 cnt ) %{
@@ -2132,28 +2102,28 @@ encode %{
if( $secondary == 0x5 ) { r1 = $dst$$reg; r2 = HIGH_FROM_LOW_ENC($dst$$reg); }
else { r2 = $dst$$reg; r1 = HIGH_FROM_LOW_ENC($dst$$reg); }
- emit_opcode( cbuf, 0x8B ); // Move r1,r2
- emit_rm(cbuf, 0x3, r1, r2);
+ emit_opcode( masm, 0x8B ); // Move r1,r2
+ emit_rm(masm, 0x3, r1, r2);
if( $cnt$$constant > 32 ) { // Shift, if not by zero
- emit_opcode(cbuf,$primary);
- emit_rm(cbuf, 0x3, $secondary, r1);
- emit_d8(cbuf,$cnt$$constant-32);
+ emit_opcode(masm,$primary);
+ emit_rm(masm, 0x3, $secondary, r1);
+ emit_d8(masm,$cnt$$constant-32);
}
- emit_opcode(cbuf,0x33); // XOR r2,r2
- emit_rm(cbuf, 0x3, r2, r2);
+ emit_opcode(masm,0x33); // XOR r2,r2
+ emit_rm(masm, 0x3, r2, r2);
%}
// Clone of RegMem but accepts an extra parameter to access each
// half of a double in memory; it never needs relocation info.
enc_class Mov_MemD_half_to_Reg (immI opcode, memory mem, immI disp_for_half, rRegI rm_reg) %{
- emit_opcode(cbuf,$opcode$$constant);
+ emit_opcode(masm,$opcode$$constant);
int reg_encoding = $rm_reg$$reg;
int base = $mem$$base;
int index = $mem$$index;
int scale = $mem$$scale;
int displace = $mem$$disp + $disp_for_half$$constant;
relocInfo::relocType disp_reloc = relocInfo::none;
- encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, disp_reloc);
+ encode_RegMem(masm, reg_encoding, base, index, scale, displace, disp_reloc);
%}
// !!!!! Special Custom Code used by MemMove, and stack access instructions !!!!!
@@ -2168,7 +2138,7 @@ encode %{
int scale = $mem$$scale;
int displace = $mem$$disp;
assert( $mem->disp_reloc() == relocInfo::none, "No oops here because no reloc info allowed" );
- encode_RegMem(cbuf, rm_byte_opcode, base, index, scale, displace, relocInfo::none);
+ encode_RegMem(masm, rm_byte_opcode, base, index, scale, displace, relocInfo::none);
%}
enc_class RMopc_Mem (immI rm_opcode, memory mem) %{
@@ -2178,7 +2148,7 @@ encode %{
int scale = $mem$$scale;
int displace = $mem$$disp;
relocInfo::relocType disp_reloc = $mem->disp_reloc(); // disp-as-oop when working with static globals
- encode_RegMem(cbuf, rm_byte_opcode, base, index, scale, displace, disp_reloc);
+ encode_RegMem(masm, rm_byte_opcode, base, index, scale, displace, disp_reloc);
%}
enc_class RegLea (rRegI dst, rRegI src0, immI src1 ) %{ // emit_reg_lea
@@ -2188,31 +2158,31 @@ encode %{
int scale = 0x00; // 0x00 indicates no scale
int displace = $src1$$constant; // 0x00 indicates no displacement
relocInfo::relocType disp_reloc = relocInfo::none;
- encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, disp_reloc);
+ encode_RegMem(masm, reg_encoding, base, index, scale, displace, disp_reloc);
%}
enc_class min_enc (rRegI dst, rRegI src) %{ // MIN
// Compare dst,src
- emit_opcode(cbuf,0x3B);
- emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg);
+ emit_opcode(masm,0x3B);
+ emit_rm(masm, 0x3, $dst$$reg, $src$$reg);
// jmp dst < src around move
- emit_opcode(cbuf,0x7C);
- emit_d8(cbuf,2);
+ emit_opcode(masm,0x7C);
+ emit_d8(masm,2);
// move dst,src
- emit_opcode(cbuf,0x8B);
- emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg);
+ emit_opcode(masm,0x8B);
+ emit_rm(masm, 0x3, $dst$$reg, $src$$reg);
%}
enc_class max_enc (rRegI dst, rRegI src) %{ // MAX
// Compare dst,src
- emit_opcode(cbuf,0x3B);
- emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg);
+ emit_opcode(masm,0x3B);
+ emit_rm(masm, 0x3, $dst$$reg, $src$$reg);
// jmp dst > src around move
- emit_opcode(cbuf,0x7F);
- emit_d8(cbuf,2);
+ emit_opcode(masm,0x7F);
+ emit_d8(masm,2);
// move dst,src
- emit_opcode(cbuf,0x8B);
- emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg);
+ emit_opcode(masm,0x8B);
+ emit_rm(masm, 0x3, $dst$$reg, $src$$reg);
%}
enc_class enc_FPR_store(memory mem, regDPR src) %{
@@ -2226,115 +2196,116 @@ encode %{
relocInfo::relocType disp_reloc = $mem->disp_reloc(); // disp-as-oop when working with static globals
if( $src$$reg != FPR1L_enc ) {
reg_encoding = 0x3; // Store & pop
- emit_opcode( cbuf, 0xD9 ); // FLD (i.e., push it)
- emit_d8( cbuf, 0xC0-1+$src$$reg );
+ emit_opcode( masm, 0xD9 ); // FLD (i.e., push it)
+ emit_d8( masm, 0xC0-1+$src$$reg );
}
- cbuf.set_insts_mark(); // Mark start of opcode for reloc info in mem operand
- emit_opcode(cbuf,$primary);
- encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, disp_reloc);
+ __ set_inst_mark(); // Mark start of opcode for reloc info in mem operand
+ emit_opcode(masm,$primary);
+ encode_RegMem(masm, reg_encoding, base, index, scale, displace, disp_reloc);
+ __ clear_inst_mark();
%}
enc_class neg_reg(rRegI dst) %{
// NEG $dst
- emit_opcode(cbuf,0xF7);
- emit_rm(cbuf, 0x3, 0x03, $dst$$reg );
+ emit_opcode(masm,0xF7);
+ emit_rm(masm, 0x3, 0x03, $dst$$reg );
%}
enc_class setLT_reg(eCXRegI dst) %{
// SETLT $dst
- emit_opcode(cbuf,0x0F);
- emit_opcode(cbuf,0x9C);
- emit_rm( cbuf, 0x3, 0x4, $dst$$reg );
+ emit_opcode(masm,0x0F);
+ emit_opcode(masm,0x9C);
+ emit_rm( masm, 0x3, 0x4, $dst$$reg );
%}
enc_class enc_cmpLTP(ncxRegI p, ncxRegI q, ncxRegI y, eCXRegI tmp) %{ // cadd_cmpLT
int tmpReg = $tmp$$reg;
// SUB $p,$q
- emit_opcode(cbuf,0x2B);
- emit_rm(cbuf, 0x3, $p$$reg, $q$$reg);
+ emit_opcode(masm,0x2B);
+ emit_rm(masm, 0x3, $p$$reg, $q$$reg);
// SBB $tmp,$tmp
- emit_opcode(cbuf,0x1B);
- emit_rm(cbuf, 0x3, tmpReg, tmpReg);
+ emit_opcode(masm,0x1B);
+ emit_rm(masm, 0x3, tmpReg, tmpReg);
// AND $tmp,$y
- emit_opcode(cbuf,0x23);
- emit_rm(cbuf, 0x3, tmpReg, $y$$reg);
+ emit_opcode(masm,0x23);
+ emit_rm(masm, 0x3, tmpReg, $y$$reg);
// ADD $p,$tmp
- emit_opcode(cbuf,0x03);
- emit_rm(cbuf, 0x3, $p$$reg, tmpReg);
+ emit_opcode(masm,0x03);
+ emit_rm(masm, 0x3, $p$$reg, tmpReg);
%}
enc_class shift_left_long( eRegL dst, eCXRegI shift ) %{
// TEST shift,32
- emit_opcode(cbuf,0xF7);
- emit_rm(cbuf, 0x3, 0, ECX_enc);
- emit_d32(cbuf,0x20);
+ emit_opcode(masm,0xF7);
+ emit_rm(masm, 0x3, 0, ECX_enc);
+ emit_d32(masm,0x20);
// JEQ,s small
- emit_opcode(cbuf, 0x74);
- emit_d8(cbuf, 0x04);
+ emit_opcode(masm, 0x74);
+ emit_d8(masm, 0x04);
// MOV $dst.hi,$dst.lo
- emit_opcode( cbuf, 0x8B );
- emit_rm(cbuf, 0x3, HIGH_FROM_LOW_ENC($dst$$reg), $dst$$reg );
+ emit_opcode( masm, 0x8B );
+ emit_rm(masm, 0x3, HIGH_FROM_LOW_ENC($dst$$reg), $dst$$reg );
// CLR $dst.lo
- emit_opcode(cbuf, 0x33);
- emit_rm(cbuf, 0x3, $dst$$reg, $dst$$reg);
+ emit_opcode(masm, 0x33);
+ emit_rm(masm, 0x3, $dst$$reg, $dst$$reg);
// small:
// SHLD $dst.hi,$dst.lo,$shift
- emit_opcode(cbuf,0x0F);
- emit_opcode(cbuf,0xA5);
- emit_rm(cbuf, 0x3, $dst$$reg, HIGH_FROM_LOW_ENC($dst$$reg));
+ emit_opcode(masm,0x0F);
+ emit_opcode(masm,0xA5);
+ emit_rm(masm, 0x3, $dst$$reg, HIGH_FROM_LOW_ENC($dst$$reg));
// SHL $dst.lo,$shift"
- emit_opcode(cbuf,0xD3);
- emit_rm(cbuf, 0x3, 0x4, $dst$$reg );
+ emit_opcode(masm,0xD3);
+ emit_rm(masm, 0x3, 0x4, $dst$$reg );
%}
enc_class shift_right_long( eRegL dst, eCXRegI shift ) %{
// TEST shift,32
- emit_opcode(cbuf,0xF7);
- emit_rm(cbuf, 0x3, 0, ECX_enc);
- emit_d32(cbuf,0x20);
+ emit_opcode(masm,0xF7);
+ emit_rm(masm, 0x3, 0, ECX_enc);
+ emit_d32(masm,0x20);
// JEQ,s small
- emit_opcode(cbuf, 0x74);
- emit_d8(cbuf, 0x04);
+ emit_opcode(masm, 0x74);
+ emit_d8(masm, 0x04);
// MOV $dst.lo,$dst.hi
- emit_opcode( cbuf, 0x8B );
- emit_rm(cbuf, 0x3, $dst$$reg, HIGH_FROM_LOW_ENC($dst$$reg) );
+ emit_opcode( masm, 0x8B );
+ emit_rm(masm, 0x3, $dst$$reg, HIGH_FROM_LOW_ENC($dst$$reg) );
// CLR $dst.hi
- emit_opcode(cbuf, 0x33);
- emit_rm(cbuf, 0x3, HIGH_FROM_LOW_ENC($dst$$reg), HIGH_FROM_LOW_ENC($dst$$reg));
+ emit_opcode(masm, 0x33);
+ emit_rm(masm, 0x3, HIGH_FROM_LOW_ENC($dst$$reg), HIGH_FROM_LOW_ENC($dst$$reg));
// small:
// SHRD $dst.lo,$dst.hi,$shift
- emit_opcode(cbuf,0x0F);
- emit_opcode(cbuf,0xAD);
- emit_rm(cbuf, 0x3, HIGH_FROM_LOW_ENC($dst$$reg), $dst$$reg);
+ emit_opcode(masm,0x0F);
+ emit_opcode(masm,0xAD);
+ emit_rm(masm, 0x3, HIGH_FROM_LOW_ENC($dst$$reg), $dst$$reg);
// SHR $dst.hi,$shift"
- emit_opcode(cbuf,0xD3);
- emit_rm(cbuf, 0x3, 0x5, HIGH_FROM_LOW_ENC($dst$$reg) );
+ emit_opcode(masm,0xD3);
+ emit_rm(masm, 0x3, 0x5, HIGH_FROM_LOW_ENC($dst$$reg) );
%}
enc_class shift_right_arith_long( eRegL dst, eCXRegI shift ) %{
// TEST shift,32
- emit_opcode(cbuf,0xF7);
- emit_rm(cbuf, 0x3, 0, ECX_enc);
- emit_d32(cbuf,0x20);
+ emit_opcode(masm,0xF7);
+ emit_rm(masm, 0x3, 0, ECX_enc);
+ emit_d32(masm,0x20);
// JEQ,s small
- emit_opcode(cbuf, 0x74);
- emit_d8(cbuf, 0x05);
+ emit_opcode(masm, 0x74);
+ emit_d8(masm, 0x05);
// MOV $dst.lo,$dst.hi
- emit_opcode( cbuf, 0x8B );
- emit_rm(cbuf, 0x3, $dst$$reg, HIGH_FROM_LOW_ENC($dst$$reg) );
+ emit_opcode( masm, 0x8B );
+ emit_rm(masm, 0x3, $dst$$reg, HIGH_FROM_LOW_ENC($dst$$reg) );
// SAR $dst.hi,31
- emit_opcode(cbuf, 0xC1);
- emit_rm(cbuf, 0x3, 7, HIGH_FROM_LOW_ENC($dst$$reg) );
- emit_d8(cbuf, 0x1F );
+ emit_opcode(masm, 0xC1);
+ emit_rm(masm, 0x3, 7, HIGH_FROM_LOW_ENC($dst$$reg) );
+ emit_d8(masm, 0x1F );
// small:
// SHRD $dst.lo,$dst.hi,$shift
- emit_opcode(cbuf,0x0F);
- emit_opcode(cbuf,0xAD);
- emit_rm(cbuf, 0x3, HIGH_FROM_LOW_ENC($dst$$reg), $dst$$reg);
+ emit_opcode(masm,0x0F);
+ emit_opcode(masm,0xAD);
+ emit_rm(masm, 0x3, HIGH_FROM_LOW_ENC($dst$$reg), $dst$$reg);
// SAR $dst.hi,$shift"
- emit_opcode(cbuf,0xD3);
- emit_rm(cbuf, 0x3, 0x7, HIGH_FROM_LOW_ENC($dst$$reg) );
+ emit_opcode(masm,0xD3);
+ emit_rm(masm, 0x3, 0x7, HIGH_FROM_LOW_ENC($dst$$reg) );
%}
@@ -2342,136 +2313,135 @@ encode %{
// May leave result in FPU-TOS or FPU reg depending on opcodes
enc_class OpcReg_FPR(regFPR src) %{ // FMUL, FDIV
$$$emit8$primary;
- emit_rm(cbuf, 0x3, $secondary, $src$$reg );
+ emit_rm(masm, 0x3, $secondary, $src$$reg );
%}
// Pop argument in FPR0 with FSTP ST(0)
enc_class PopFPU() %{
- emit_opcode( cbuf, 0xDD );
- emit_d8( cbuf, 0xD8 );
+ emit_opcode( masm, 0xDD );
+ emit_d8( masm, 0xD8 );
%}
// !!!!! equivalent to Pop_Reg_F
enc_class Pop_Reg_DPR( regDPR dst ) %{
- emit_opcode( cbuf, 0xDD ); // FSTP ST(i)
- emit_d8( cbuf, 0xD8+$dst$$reg );
+ emit_opcode( masm, 0xDD ); // FSTP ST(i)
+ emit_d8( masm, 0xD8+$dst$$reg );
%}
enc_class Push_Reg_DPR( regDPR dst ) %{
- emit_opcode( cbuf, 0xD9 );
- emit_d8( cbuf, 0xC0-1+$dst$$reg ); // FLD ST(i-1)
+ emit_opcode( masm, 0xD9 );
+ emit_d8( masm, 0xC0-1+$dst$$reg ); // FLD ST(i-1)
%}
enc_class strictfp_bias1( regDPR dst ) %{
- emit_opcode( cbuf, 0xDB ); // FLD m80real
- emit_opcode( cbuf, 0x2D );
- emit_d32( cbuf, (int)StubRoutines::x86::addr_fpu_subnormal_bias1() );
- emit_opcode( cbuf, 0xDE ); // FMULP ST(dst), ST0
- emit_opcode( cbuf, 0xC8+$dst$$reg );
+ emit_opcode( masm, 0xDB ); // FLD m80real
+ emit_opcode( masm, 0x2D );
+ emit_d32( masm, (int)StubRoutines::x86::addr_fpu_subnormal_bias1() );
+ emit_opcode( masm, 0xDE ); // FMULP ST(dst), ST0
+ emit_opcode( masm, 0xC8+$dst$$reg );
%}
enc_class strictfp_bias2( regDPR dst ) %{
- emit_opcode( cbuf, 0xDB ); // FLD m80real
- emit_opcode( cbuf, 0x2D );
- emit_d32( cbuf, (int)StubRoutines::x86::addr_fpu_subnormal_bias2() );
- emit_opcode( cbuf, 0xDE ); // FMULP ST(dst), ST0
- emit_opcode( cbuf, 0xC8+$dst$$reg );
+ emit_opcode( masm, 0xDB ); // FLD m80real
+ emit_opcode( masm, 0x2D );
+ emit_d32( masm, (int)StubRoutines::x86::addr_fpu_subnormal_bias2() );
+ emit_opcode( masm, 0xDE ); // FMULP ST(dst), ST0
+ emit_opcode( masm, 0xC8+$dst$$reg );
%}
// Special case for moving an integer register to a stack slot.
enc_class OpcPRegSS( stackSlotI dst, rRegI src ) %{ // RegSS
- store_to_stackslot( cbuf, $primary, $src$$reg, $dst$$disp );
+ store_to_stackslot( masm, $primary, $src$$reg, $dst$$disp );
%}
// Special case for moving a register to a stack slot.
enc_class RegSS( stackSlotI dst, rRegI src ) %{ // RegSS
// Opcode already emitted
- emit_rm( cbuf, 0x02, $src$$reg, ESP_enc ); // R/M byte
- emit_rm( cbuf, 0x00, ESP_enc, ESP_enc); // SIB byte
- emit_d32(cbuf, $dst$$disp); // Displacement
+ emit_rm( masm, 0x02, $src$$reg, ESP_enc ); // R/M byte
+ emit_rm( masm, 0x00, ESP_enc, ESP_enc); // SIB byte
+ emit_d32(masm, $dst$$disp); // Displacement
%}
// Push the integer in stackSlot 'src' onto FP-stack
enc_class Push_Mem_I( memory src ) %{ // FILD [ESP+src]
- store_to_stackslot( cbuf, $primary, $secondary, $src$$disp );
+ store_to_stackslot( masm, $primary, $secondary, $src$$disp );
%}
// Push FPU's TOS float to a stack-slot, and pop FPU-stack
enc_class Pop_Mem_FPR( stackSlotF dst ) %{ // FSTP_S [ESP+dst]
- store_to_stackslot( cbuf, 0xD9, 0x03, $dst$$disp );
+ store_to_stackslot( masm, 0xD9, 0x03, $dst$$disp );
%}
// Same as Pop_Mem_F except for opcode
// Push FPU's TOS double to a stack-slot, and pop FPU-stack
enc_class Pop_Mem_DPR( stackSlotD dst ) %{ // FSTP_D [ESP+dst]
- store_to_stackslot( cbuf, 0xDD, 0x03, $dst$$disp );
+ store_to_stackslot( masm, 0xDD, 0x03, $dst$$disp );
%}
enc_class Pop_Reg_FPR( regFPR dst ) %{
- emit_opcode( cbuf, 0xDD ); // FSTP ST(i)
- emit_d8( cbuf, 0xD8+$dst$$reg );
+ emit_opcode( masm, 0xDD ); // FSTP ST(i)
+ emit_d8( masm, 0xD8+$dst$$reg );
%}
enc_class Push_Reg_FPR( regFPR dst ) %{
- emit_opcode( cbuf, 0xD9 ); // FLD ST(i-1)
- emit_d8( cbuf, 0xC0-1+$dst$$reg );
+ emit_opcode( masm, 0xD9 ); // FLD ST(i-1)
+ emit_d8( masm, 0xC0-1+$dst$$reg );
%}
// Push FPU's float to a stack-slot, and pop FPU-stack
enc_class Pop_Mem_Reg_FPR( stackSlotF dst, regFPR src ) %{
int pop = 0x02;
if ($src$$reg != FPR1L_enc) {
- emit_opcode( cbuf, 0xD9 ); // FLD ST(i-1)
- emit_d8( cbuf, 0xC0-1+$src$$reg );
+ emit_opcode( masm, 0xD9 ); // FLD ST(i-1)
+ emit_d8( masm, 0xC0-1+$src$$reg );
pop = 0x03;
}
- store_to_stackslot( cbuf, 0xD9, pop, $dst$$disp ); // FST_S [ESP+dst]
+ store_to_stackslot( masm, 0xD9, pop, $dst$$disp ); // FST_S [ESP+dst]
%}
// Push FPU's double to a stack-slot, and pop FPU-stack
enc_class Pop_Mem_Reg_DPR( stackSlotD dst, regDPR src ) %{
int pop = 0x02;
if ($src$$reg != FPR1L_enc) {
- emit_opcode( cbuf, 0xD9 ); // FLD ST(i-1)
- emit_d8( cbuf, 0xC0-1+$src$$reg );
+ emit_opcode( masm, 0xD9 ); // FLD ST(i-1)
+ emit_d8( masm, 0xC0-1+$src$$reg );
pop = 0x03;
}
- store_to_stackslot( cbuf, 0xDD, pop, $dst$$disp ); // FST_D [ESP+dst]
+ store_to_stackslot( masm, 0xDD, pop, $dst$$disp ); // FST_D [ESP+dst]
%}
// Push FPU's double to a FPU-stack-slot, and pop FPU-stack
enc_class Pop_Reg_Reg_DPR( regDPR dst, regFPR src ) %{
int pop = 0xD0 - 1; // -1 since we skip FLD
if ($src$$reg != FPR1L_enc) {
- emit_opcode( cbuf, 0xD9 ); // FLD ST(src-1)
- emit_d8( cbuf, 0xC0-1+$src$$reg );
+ emit_opcode( masm, 0xD9 ); // FLD ST(src-1)
+ emit_d8( masm, 0xC0-1+$src$$reg );
pop = 0xD8;
}
- emit_opcode( cbuf, 0xDD );
- emit_d8( cbuf, pop+$dst$$reg ); // FST<P> ST(i)
+ emit_opcode( masm, 0xDD );
+ emit_d8( masm, pop+$dst$$reg ); // FST<P> ST(i)
%}
enc_class Push_Reg_Mod_DPR( regDPR dst, regDPR src) %{
// load dst in FPR0
- emit_opcode( cbuf, 0xD9 );
- emit_d8( cbuf, 0xC0-1+$dst$$reg );
+ emit_opcode( masm, 0xD9 );
+ emit_d8( masm, 0xC0-1+$dst$$reg );
if ($src$$reg != FPR1L_enc) {
// fincstp
- emit_opcode (cbuf, 0xD9);
- emit_opcode (cbuf, 0xF7);
+ emit_opcode (masm, 0xD9);
+ emit_opcode (masm, 0xF7);
// swap src with FPR1:
// FXCH FPR1 with src
- emit_opcode(cbuf, 0xD9);
- emit_d8(cbuf, 0xC8-1+$src$$reg );
+ emit_opcode(masm, 0xD9);
+ emit_d8(masm, 0xC8-1+$src$$reg );
// fdecstp
- emit_opcode (cbuf, 0xD9);
- emit_opcode (cbuf, 0xF6);
+ emit_opcode (masm, 0xD9);
+ emit_opcode (masm, 0xF6);
}
%}
enc_class Push_ModD_encoding(regD src0, regD src1) %{
- MacroAssembler _masm(&cbuf);
__ subptr(rsp, 8);
__ movdbl(Address(rsp, 0), $src1$$XMMRegister);
__ fld_d(Address(rsp, 0));
@@ -2480,7 +2450,6 @@ encode %{
%}
enc_class Push_ModF_encoding(regF src0, regF src1) %{
- MacroAssembler _masm(&cbuf);
__ subptr(rsp, 4);
__ movflt(Address(rsp, 0), $src1$$XMMRegister);
__ fld_s(Address(rsp, 0));
@@ -2489,38 +2458,32 @@ encode %{
%}
enc_class Push_ResultD(regD dst) %{
- MacroAssembler _masm(&cbuf);
__ fstp_d(Address(rsp, 0));
__ movdbl($dst$$XMMRegister, Address(rsp, 0));
__ addptr(rsp, 8);
%}
enc_class Push_ResultF(regF dst, immI d8) %{
- MacroAssembler _masm(&cbuf);
__ fstp_s(Address(rsp, 0));
__ movflt($dst$$XMMRegister, Address(rsp, 0));
__ addptr(rsp, $d8$$constant);
%}
enc_class Push_SrcD(regD src) %{
- MacroAssembler _masm(&cbuf);
__ subptr(rsp, 8);
__ movdbl(Address(rsp, 0), $src$$XMMRegister);
__ fld_d(Address(rsp, 0));
%}
enc_class push_stack_temp_qword() %{
- MacroAssembler _masm(&cbuf);
__ subptr(rsp, 8);
%}
enc_class pop_stack_temp_qword() %{
- MacroAssembler _masm(&cbuf);
__ addptr(rsp, 8);
%}
enc_class push_xmm_to_fpr1(regD src) %{
- MacroAssembler _masm(&cbuf);
__ movdbl(Address(rsp, 0), $src$$XMMRegister);
__ fld_d(Address(rsp, 0));
%}
@@ -2528,90 +2491,86 @@ encode %{
enc_class Push_Result_Mod_DPR( regDPR src) %{
if ($src$$reg != FPR1L_enc) {
// fincstp
- emit_opcode (cbuf, 0xD9);
- emit_opcode (cbuf, 0xF7);
+ emit_opcode (masm, 0xD9);
+ emit_opcode (masm, 0xF7);
// FXCH FPR1 with src
- emit_opcode(cbuf, 0xD9);
- emit_d8(cbuf, 0xC8-1+$src$$reg );
+ emit_opcode(masm, 0xD9);
+ emit_d8(masm, 0xC8-1+$src$$reg );
// fdecstp
- emit_opcode (cbuf, 0xD9);
- emit_opcode (cbuf, 0xF6);
+ emit_opcode (masm, 0xD9);
+ emit_opcode (masm, 0xF6);
}
- // // following asm replaced with Pop_Reg_F or Pop_Mem_F
- // // FSTP FPR$dst$$reg
- // emit_opcode( cbuf, 0xDD );
- // emit_d8( cbuf, 0xD8+$dst$$reg );
%}
enc_class fnstsw_sahf_skip_parity() %{
// fnstsw ax
- emit_opcode( cbuf, 0xDF );
- emit_opcode( cbuf, 0xE0 );
+ emit_opcode( masm, 0xDF );
+ emit_opcode( masm, 0xE0 );
// sahf
- emit_opcode( cbuf, 0x9E );
+ emit_opcode( masm, 0x9E );
// jnp ::skip
- emit_opcode( cbuf, 0x7B );
- emit_opcode( cbuf, 0x05 );
+ emit_opcode( masm, 0x7B );
+ emit_opcode( masm, 0x05 );
%}
enc_class emitModDPR() %{
// fprem must be iterative
// :: loop
// fprem
- emit_opcode( cbuf, 0xD9 );
- emit_opcode( cbuf, 0xF8 );
+ emit_opcode( masm, 0xD9 );
+ emit_opcode( masm, 0xF8 );
// wait
- emit_opcode( cbuf, 0x9b );
+ emit_opcode( masm, 0x9b );
// fnstsw ax
- emit_opcode( cbuf, 0xDF );
- emit_opcode( cbuf, 0xE0 );
+ emit_opcode( masm, 0xDF );
+ emit_opcode( masm, 0xE0 );
// sahf
- emit_opcode( cbuf, 0x9E );
+ emit_opcode( masm, 0x9E );
// jp ::loop
- emit_opcode( cbuf, 0x0F );
- emit_opcode( cbuf, 0x8A );
- emit_opcode( cbuf, 0xF4 );
- emit_opcode( cbuf, 0xFF );
- emit_opcode( cbuf, 0xFF );
- emit_opcode( cbuf, 0xFF );
+ emit_opcode( masm, 0x0F );
+ emit_opcode( masm, 0x8A );
+ emit_opcode( masm, 0xF4 );
+ emit_opcode( masm, 0xFF );
+ emit_opcode( masm, 0xFF );
+ emit_opcode( masm, 0xFF );
%}
enc_class fpu_flags() %{
// fnstsw_ax
- emit_opcode( cbuf, 0xDF);
- emit_opcode( cbuf, 0xE0);
+ emit_opcode( masm, 0xDF);
+ emit_opcode( masm, 0xE0);
// test ax,0x0400
- emit_opcode( cbuf, 0x66 ); // operand-size prefix for 16-bit immediate
- emit_opcode( cbuf, 0xA9 );
- emit_d16 ( cbuf, 0x0400 );
+ emit_opcode( masm, 0x66 ); // operand-size prefix for 16-bit immediate
+ emit_opcode( masm, 0xA9 );
+ emit_d16 ( masm, 0x0400 );
// // // This sequence works, but stalls for 12-16 cycles on PPro
// // test rax,0x0400
- // emit_opcode( cbuf, 0xA9 );
- // emit_d32 ( cbuf, 0x00000400 );
+ // emit_opcode( masm, 0xA9 );
+ // emit_d32 ( masm, 0x00000400 );
//
// jz exit (no unordered comparison)
- emit_opcode( cbuf, 0x74 );
- emit_d8 ( cbuf, 0x02 );
+ emit_opcode( masm, 0x74 );
+ emit_d8 ( masm, 0x02 );
// mov ah,1 - treat as LT case (set carry flag)
- emit_opcode( cbuf, 0xB4 );
- emit_d8 ( cbuf, 0x01 );
+ emit_opcode( masm, 0xB4 );
+ emit_d8 ( masm, 0x01 );
// sahf
- emit_opcode( cbuf, 0x9E);
+ emit_opcode( masm, 0x9E);
%}
enc_class cmpF_P6_fixup() %{
// Fixup the integer flags in case comparison involved a NaN
//
// JNP exit (no unordered comparison, P-flag is set by NaN)
- emit_opcode( cbuf, 0x7B );
- emit_d8 ( cbuf, 0x03 );
+ emit_opcode( masm, 0x7B );
+ emit_d8 ( masm, 0x03 );
// MOV AH,1 - treat as LT case (set carry flag)
- emit_opcode( cbuf, 0xB4 );
- emit_d8 ( cbuf, 0x01 );
+ emit_opcode( masm, 0xB4 );
+ emit_d8 ( masm, 0x01 );
// SAHF
- emit_opcode( cbuf, 0x9E);
+ emit_opcode( masm, 0x9E);
// NOP // target for branch to avoid branch to branch
- emit_opcode( cbuf, 0x90);
+ emit_opcode( masm, 0x90);
%}
// fnstsw_ax();
@@ -2631,31 +2590,31 @@ encode %{
enc_class CmpF_Result(rRegI dst) %{
// fnstsw_ax();
- emit_opcode( cbuf, 0xDF);
- emit_opcode( cbuf, 0xE0);
+ emit_opcode( masm, 0xDF);
+ emit_opcode( masm, 0xE0);
// sahf
- emit_opcode( cbuf, 0x9E);
+ emit_opcode( masm, 0x9E);
// movl(dst, nan_result);
- emit_opcode( cbuf, 0xB8 + $dst$$reg);
- emit_d32( cbuf, -1 );
+ emit_opcode( masm, 0xB8 + $dst$$reg);
+ emit_d32( masm, -1 );
// jcc(Assembler::parity, exit);
- emit_opcode( cbuf, 0x7A );
- emit_d8 ( cbuf, 0x13 );
+ emit_opcode( masm, 0x7A );
+ emit_d8 ( masm, 0x13 );
// movl(dst, less_result);
- emit_opcode( cbuf, 0xB8 + $dst$$reg);
- emit_d32( cbuf, -1 );
+ emit_opcode( masm, 0xB8 + $dst$$reg);
+ emit_d32( masm, -1 );
// jcc(Assembler::below, exit);
- emit_opcode( cbuf, 0x72 );
- emit_d8 ( cbuf, 0x0C );
+ emit_opcode( masm, 0x72 );
+ emit_d8 ( masm, 0x0C );
// movl(dst, equal_result);
- emit_opcode( cbuf, 0xB8 + $dst$$reg);
- emit_d32( cbuf, 0 );
+ emit_opcode( masm, 0xB8 + $dst$$reg);
+ emit_d32( masm, 0 );
// jcc(Assembler::equal, exit);
- emit_opcode( cbuf, 0x74 );
- emit_d8 ( cbuf, 0x05 );
+ emit_opcode( masm, 0x74 );
+ emit_d8 ( masm, 0x05 );
// movl(dst, greater_result);
- emit_opcode( cbuf, 0xB8 + $dst$$reg);
- emit_d32( cbuf, 1 );
+ emit_opcode( masm, 0xB8 + $dst$$reg);
+ emit_d32( masm, 1 );
%}
@@ -2663,14 +2622,14 @@ encode %{
// BROKEN! Do Not use as-is
enc_class cmpl_test( eRegL src1, eRegL src2 ) %{
// CMP $src1.hi,$src2.hi
- emit_opcode( cbuf, 0x3B );
- emit_rm(cbuf, 0x3, HIGH_FROM_LOW_ENC($src1$$reg), HIGH_FROM_LOW_ENC($src2$$reg) );
+ emit_opcode( masm, 0x3B );
+ emit_rm(masm, 0x3, HIGH_FROM_LOW_ENC($src1$$reg), HIGH_FROM_LOW_ENC($src2$$reg) );
// JNE,s done
- emit_opcode(cbuf,0x75);
- emit_d8(cbuf, 2 );
+ emit_opcode(masm,0x75);
+ emit_d8(masm, 2 );
// CMP $src1.lo,$src2.lo
- emit_opcode( cbuf, 0x3B );
- emit_rm(cbuf, 0x3, $src1$$reg, $src2$$reg );
+ emit_opcode( masm, 0x3B );
+ emit_rm(masm, 0x3, $src1$$reg, $src2$$reg );
// done:
%}
@@ -2678,223 +2637,223 @@ encode %{
// mov $dst.lo,$src
int dst_encoding = $dst$$reg;
int src_encoding = $src$$reg;
- encode_Copy( cbuf, dst_encoding , src_encoding );
+ encode_Copy( masm, dst_encoding , src_encoding );
// mov $dst.hi,$src
- encode_Copy( cbuf, HIGH_FROM_LOW_ENC(dst_encoding), src_encoding );
+ encode_Copy( masm, HIGH_FROM_LOW_ENC(dst_encoding), src_encoding );
// sar $dst.hi,31
- emit_opcode( cbuf, 0xC1 );
- emit_rm(cbuf, 0x3, 7, HIGH_FROM_LOW_ENC(dst_encoding) );
- emit_d8(cbuf, 0x1F );
+ emit_opcode( masm, 0xC1 );
+ emit_rm(masm, 0x3, 7, HIGH_FROM_LOW_ENC(dst_encoding) );
+ emit_d8(masm, 0x1F );
%}
enc_class convert_long_double( eRegL src ) %{
// push $src.hi
- emit_opcode(cbuf, 0x50+HIGH_FROM_LOW_ENC($src$$reg));
+ emit_opcode(masm, 0x50+HIGH_FROM_LOW_ENC($src$$reg));
// push $src.lo
- emit_opcode(cbuf, 0x50+$src$$reg );
+ emit_opcode(masm, 0x50+$src$$reg );
// fild 64-bits at [SP]
- emit_opcode(cbuf,0xdf);
- emit_d8(cbuf, 0x6C);
- emit_d8(cbuf, 0x24);
- emit_d8(cbuf, 0x00);
+ emit_opcode(masm,0xdf);
+ emit_d8(masm, 0x6C);
+ emit_d8(masm, 0x24);
+ emit_d8(masm, 0x00);
// pop stack
- emit_opcode(cbuf, 0x83); // add SP, #8
- emit_rm(cbuf, 0x3, 0x00, ESP_enc);
- emit_d8(cbuf, 0x8);
+ emit_opcode(masm, 0x83); // add SP, #8
+ emit_rm(masm, 0x3, 0x00, ESP_enc);
+ emit_d8(masm, 0x8);
%}
enc_class multiply_con_and_shift_high( eDXRegI dst, nadxRegI src1, eADXRegL_low_only src2, immI_32_63 cnt, eFlagsReg cr ) %{
// IMUL EDX:EAX,$src1
- emit_opcode( cbuf, 0xF7 );
- emit_rm( cbuf, 0x3, 0x5, $src1$$reg );
+ emit_opcode( masm, 0xF7 );
+ emit_rm( masm, 0x3, 0x5, $src1$$reg );
// SAR EDX,$cnt-32
int shift_count = ((int)$cnt$$constant) - 32;
if (shift_count > 0) {
- emit_opcode(cbuf, 0xC1);
- emit_rm(cbuf, 0x3, 7, $dst$$reg );
- emit_d8(cbuf, shift_count);
+ emit_opcode(masm, 0xC1);
+ emit_rm(masm, 0x3, 7, $dst$$reg );
+ emit_d8(masm, shift_count);
}
%}
// this version doesn't have add sp, 8
enc_class convert_long_double2( eRegL src ) %{
// push $src.hi
- emit_opcode(cbuf, 0x50+HIGH_FROM_LOW_ENC($src$$reg));
+ emit_opcode(masm, 0x50+HIGH_FROM_LOW_ENC($src$$reg));
// push $src.lo
- emit_opcode(cbuf, 0x50+$src$$reg );
+ emit_opcode(masm, 0x50+$src$$reg );
// fild 64-bits at [SP]
- emit_opcode(cbuf,0xdf);
- emit_d8(cbuf, 0x6C);
- emit_d8(cbuf, 0x24);
- emit_d8(cbuf, 0x00);
+ emit_opcode(masm,0xdf);
+ emit_d8(masm, 0x6C);
+ emit_d8(masm, 0x24);
+ emit_d8(masm, 0x00);
%}
enc_class long_int_multiply( eADXRegL dst, nadxRegI src) %{
// Basic idea: long = (long)int * (long)int
// IMUL EDX:EAX, src
- emit_opcode( cbuf, 0xF7 );
- emit_rm( cbuf, 0x3, 0x5, $src$$reg);
+ emit_opcode( masm, 0xF7 );
+ emit_rm( masm, 0x3, 0x5, $src$$reg);
%}
enc_class long_uint_multiply( eADXRegL dst, nadxRegI src) %{
// Basic Idea: long = (int & 0xffffffffL) * (int & 0xffffffffL)
// MUL EDX:EAX, src
- emit_opcode( cbuf, 0xF7 );
- emit_rm( cbuf, 0x3, 0x4, $src$$reg);
+ emit_opcode( masm, 0xF7 );
+ emit_rm( masm, 0x3, 0x4, $src$$reg);
%}
enc_class long_multiply( eADXRegL dst, eRegL src, rRegI tmp ) %{
// Basic idea: lo(result) = lo(x_lo * y_lo)
// hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi)
// MOV $tmp,$src.lo
- encode_Copy( cbuf, $tmp$$reg, $src$$reg );
+ encode_Copy( masm, $tmp$$reg, $src$$reg );
// IMUL $tmp,EDX
- emit_opcode( cbuf, 0x0F );
- emit_opcode( cbuf, 0xAF );
- emit_rm( cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW_ENC($dst$$reg) );
+ emit_opcode( masm, 0x0F );
+ emit_opcode( masm, 0xAF );
+ emit_rm( masm, 0x3, $tmp$$reg, HIGH_FROM_LOW_ENC($dst$$reg) );
// MOV EDX,$src.hi
- encode_Copy( cbuf, HIGH_FROM_LOW_ENC($dst$$reg), HIGH_FROM_LOW_ENC($src$$reg) );
+ encode_Copy( masm, HIGH_FROM_LOW_ENC($dst$$reg), HIGH_FROM_LOW_ENC($src$$reg) );
// IMUL EDX,EAX
- emit_opcode( cbuf, 0x0F );
- emit_opcode( cbuf, 0xAF );
- emit_rm( cbuf, 0x3, HIGH_FROM_LOW_ENC($dst$$reg), $dst$$reg );
+ emit_opcode( masm, 0x0F );
+ emit_opcode( masm, 0xAF );
+ emit_rm( masm, 0x3, HIGH_FROM_LOW_ENC($dst$$reg), $dst$$reg );
// ADD $tmp,EDX
- emit_opcode( cbuf, 0x03 );
- emit_rm( cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW_ENC($dst$$reg) );
+ emit_opcode( masm, 0x03 );
+ emit_rm( masm, 0x3, $tmp$$reg, HIGH_FROM_LOW_ENC($dst$$reg) );
// MUL EDX:EAX,$src.lo
- emit_opcode( cbuf, 0xF7 );
- emit_rm( cbuf, 0x3, 0x4, $src$$reg );
+ emit_opcode( masm, 0xF7 );
+ emit_rm( masm, 0x3, 0x4, $src$$reg );
// ADD EDX,ESI
- emit_opcode( cbuf, 0x03 );
- emit_rm( cbuf, 0x3, HIGH_FROM_LOW_ENC($dst$$reg), $tmp$$reg );
+ emit_opcode( masm, 0x03 );
+ emit_rm( masm, 0x3, HIGH_FROM_LOW_ENC($dst$$reg), $tmp$$reg );
%}
enc_class long_multiply_con( eADXRegL dst, immL_127 src, rRegI tmp ) %{
// Basic idea: lo(result) = lo(src * y_lo)
// hi(result) = hi(src * y_lo) + lo(src * y_hi)
// IMUL $tmp,EDX,$src
- emit_opcode( cbuf, 0x6B );
- emit_rm( cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW_ENC($dst$$reg) );
- emit_d8( cbuf, (int)$src$$constant );
+ emit_opcode( masm, 0x6B );
+ emit_rm( masm, 0x3, $tmp$$reg, HIGH_FROM_LOW_ENC($dst$$reg) );
+ emit_d8( masm, (int)$src$$constant );
// MOV EDX,$src
- emit_opcode(cbuf, 0xB8 + EDX_enc);
- emit_d32( cbuf, (int)$src$$constant );
+ emit_opcode(masm, 0xB8 + EDX_enc);
+ emit_d32( masm, (int)$src$$constant );
// MUL EDX:EAX,EDX
- emit_opcode( cbuf, 0xF7 );
- emit_rm( cbuf, 0x3, 0x4, EDX_enc );
+ emit_opcode( masm, 0xF7 );
+ emit_rm( masm, 0x3, 0x4, EDX_enc );
// ADD EDX,ESI
- emit_opcode( cbuf, 0x03 );
- emit_rm( cbuf, 0x3, EDX_enc, $tmp$$reg );
+ emit_opcode( masm, 0x03 );
+ emit_rm( masm, 0x3, EDX_enc, $tmp$$reg );
%}
enc_class long_div( eRegL src1, eRegL src2 ) %{
// PUSH src1.hi
- emit_opcode(cbuf, HIGH_FROM_LOW_ENC(0x50+$src1$$reg) );
+ emit_opcode(masm, HIGH_FROM_LOW_ENC(0x50+$src1$$reg) );
// PUSH src1.lo
- emit_opcode(cbuf, 0x50+$src1$$reg );
+ emit_opcode(masm, 0x50+$src1$$reg );
// PUSH src2.hi
- emit_opcode(cbuf, HIGH_FROM_LOW_ENC(0x50+$src2$$reg) );
+ emit_opcode(masm, HIGH_FROM_LOW_ENC(0x50+$src2$$reg) );
// PUSH src2.lo
- emit_opcode(cbuf, 0x50+$src2$$reg );
+ emit_opcode(masm, 0x50+$src2$$reg );
// CALL directly to the runtime
- MacroAssembler _masm(&cbuf);
- cbuf.set_insts_mark();
- emit_opcode(cbuf,0xE8); // Call into runtime
- emit_d32_reloc(cbuf, (CAST_FROM_FN_PTR(address, SharedRuntime::ldiv) - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
+ __ set_inst_mark();
+ emit_opcode(masm,0xE8); // Call into runtime
+ emit_d32_reloc(masm, (CAST_FROM_FN_PTR(address, SharedRuntime::ldiv) - __ pc()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
+ __ clear_inst_mark();
__ post_call_nop();
// Restore stack
- emit_opcode(cbuf, 0x83); // add SP, #framesize
- emit_rm(cbuf, 0x3, 0x00, ESP_enc);
- emit_d8(cbuf, 4*4);
+ emit_opcode(masm, 0x83); // add SP, #framesize
+ emit_rm(masm, 0x3, 0x00, ESP_enc);
+ emit_d8(masm, 4*4);
%}
enc_class long_mod( eRegL src1, eRegL src2 ) %{
// PUSH src1.hi
- emit_opcode(cbuf, HIGH_FROM_LOW_ENC(0x50+$src1$$reg) );
+ emit_opcode(masm, HIGH_FROM_LOW_ENC(0x50+$src1$$reg) );
// PUSH src1.lo
- emit_opcode(cbuf, 0x50+$src1$$reg );
+ emit_opcode(masm, 0x50+$src1$$reg );
// PUSH src2.hi
- emit_opcode(cbuf, HIGH_FROM_LOW_ENC(0x50+$src2$$reg) );
+ emit_opcode(masm, HIGH_FROM_LOW_ENC(0x50+$src2$$reg) );
// PUSH src2.lo
- emit_opcode(cbuf, 0x50+$src2$$reg );
+ emit_opcode(masm, 0x50+$src2$$reg );
// CALL directly to the runtime
- MacroAssembler _masm(&cbuf);
- cbuf.set_insts_mark();
- emit_opcode(cbuf,0xE8); // Call into runtime
- emit_d32_reloc(cbuf, (CAST_FROM_FN_PTR(address, SharedRuntime::lrem ) - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
+ __ set_inst_mark();
+ emit_opcode(masm,0xE8); // Call into runtime
+ emit_d32_reloc(masm, (CAST_FROM_FN_PTR(address, SharedRuntime::lrem ) - __ pc()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
+ __ clear_inst_mark();
__ post_call_nop();
// Restore stack
- emit_opcode(cbuf, 0x83); // add SP, #framesize
- emit_rm(cbuf, 0x3, 0x00, ESP_enc);
- emit_d8(cbuf, 4*4);
+ emit_opcode(masm, 0x83); // add SP, #framesize
+ emit_rm(masm, 0x3, 0x00, ESP_enc);
+ emit_d8(masm, 4*4);
%}
enc_class long_cmp_flags0( eRegL src, rRegI tmp ) %{
// MOV $tmp,$src.lo
- emit_opcode(cbuf, 0x8B);
- emit_rm(cbuf, 0x3, $tmp$$reg, $src$$reg);
+ emit_opcode(masm, 0x8B);
+ emit_rm(masm, 0x3, $tmp$$reg, $src$$reg);
// OR $tmp,$src.hi
- emit_opcode(cbuf, 0x0B);
- emit_rm(cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW_ENC($src$$reg));
+ emit_opcode(masm, 0x0B);
+ emit_rm(masm, 0x3, $tmp$$reg, HIGH_FROM_LOW_ENC($src$$reg));
%}
enc_class long_cmp_flags1( eRegL src1, eRegL src2 ) %{
// CMP $src1.lo,$src2.lo
- emit_opcode( cbuf, 0x3B );
- emit_rm(cbuf, 0x3, $src1$$reg, $src2$$reg );
+ emit_opcode( masm, 0x3B );
+ emit_rm(masm, 0x3, $src1$$reg, $src2$$reg );
// JNE,s skip
- emit_cc(cbuf, 0x70, 0x5);
- emit_d8(cbuf,2);
+ emit_cc(masm, 0x70, 0x5);
+ emit_d8(masm,2);
// CMP $src1.hi,$src2.hi
- emit_opcode( cbuf, 0x3B );
- emit_rm(cbuf, 0x3, HIGH_FROM_LOW_ENC($src1$$reg), HIGH_FROM_LOW_ENC($src2$$reg) );
+ emit_opcode( masm, 0x3B );
+ emit_rm(masm, 0x3, HIGH_FROM_LOW_ENC($src1$$reg), HIGH_FROM_LOW_ENC($src2$$reg) );
%}
enc_class long_cmp_flags2( eRegL src1, eRegL src2, rRegI tmp ) %{
// CMP $src1.lo,$src2.lo\t! Long compare; set flags for low bits
- emit_opcode( cbuf, 0x3B );
- emit_rm(cbuf, 0x3, $src1$$reg, $src2$$reg );
+ emit_opcode( masm, 0x3B );
+ emit_rm(masm, 0x3, $src1$$reg, $src2$$reg );
// MOV $tmp,$src1.hi
- emit_opcode( cbuf, 0x8B );
- emit_rm(cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW_ENC($src1$$reg) );
+ emit_opcode( masm, 0x8B );
+ emit_rm(masm, 0x3, $tmp$$reg, HIGH_FROM_LOW_ENC($src1$$reg) );
// SBB $tmp,$src2.hi\t! Compute flags for long compare
- emit_opcode( cbuf, 0x1B );
- emit_rm(cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW_ENC($src2$$reg) );
+ emit_opcode( masm, 0x1B );
+ emit_rm(masm, 0x3, $tmp$$reg, HIGH_FROM_LOW_ENC($src2$$reg) );
%}
enc_class long_cmp_flags3( eRegL src, rRegI tmp ) %{
// XOR $tmp,$tmp
- emit_opcode(cbuf,0x33); // XOR
- emit_rm(cbuf,0x3, $tmp$$reg, $tmp$$reg);
+ emit_opcode(masm,0x33); // XOR
+ emit_rm(masm,0x3, $tmp$$reg, $tmp$$reg);
// CMP $tmp,$src.lo
- emit_opcode( cbuf, 0x3B );
- emit_rm(cbuf, 0x3, $tmp$$reg, $src$$reg );
+ emit_opcode( masm, 0x3B );
+ emit_rm(masm, 0x3, $tmp$$reg, $src$$reg );
// SBB $tmp,$src.hi
- emit_opcode( cbuf, 0x1B );
- emit_rm(cbuf, 0x3, $tmp$$reg, HIGH_FROM_LOW_ENC($src$$reg) );
+ emit_opcode( masm, 0x1B );
+ emit_rm(masm, 0x3, $tmp$$reg, HIGH_FROM_LOW_ENC($src$$reg) );
%}
// Sniff, sniff... smells like Gnu Superoptimizer
enc_class neg_long( eRegL dst ) %{
- emit_opcode(cbuf,0xF7); // NEG hi
- emit_rm (cbuf,0x3, 0x3, HIGH_FROM_LOW_ENC($dst$$reg));
- emit_opcode(cbuf,0xF7); // NEG lo
- emit_rm (cbuf,0x3, 0x3, $dst$$reg );
- emit_opcode(cbuf,0x83); // SBB hi,0
- emit_rm (cbuf,0x3, 0x3, HIGH_FROM_LOW_ENC($dst$$reg));
- emit_d8 (cbuf,0 );
+ emit_opcode(masm,0xF7); // NEG hi
+ emit_rm (masm,0x3, 0x3, HIGH_FROM_LOW_ENC($dst$$reg));
+ emit_opcode(masm,0xF7); // NEG lo
+ emit_rm (masm,0x3, 0x3, $dst$$reg );
+ emit_opcode(masm,0x83); // SBB hi,0
+ emit_rm (masm,0x3, 0x3, HIGH_FROM_LOW_ENC($dst$$reg));
+ emit_d8 (masm,0 );
%}
enc_class enc_pop_rdx() %{
- emit_opcode(cbuf,0x5A);
+ emit_opcode(masm,0x5A);
%}
enc_class enc_rethrow() %{
- MacroAssembler _masm(&cbuf);
- cbuf.set_insts_mark();
- emit_opcode(cbuf, 0xE9); // jmp entry
- emit_d32_reloc(cbuf, (int)OptoRuntime::rethrow_stub() - ((int)cbuf.insts_end())-4,
+ __ set_inst_mark();
+ emit_opcode(masm, 0xE9); // jmp entry
+ emit_d32_reloc(masm, (int)OptoRuntime::rethrow_stub() - ((int)__ pc())-4,
runtime_call_Relocation::spec(), RELOC_IMM32 );
+ __ clear_inst_mark();
__ post_call_nop();
%}
@@ -2912,83 +2871,83 @@ encode %{
// invalid-op exceptions hanging. We would have to clear them before
// enabling them and that is more expensive than just testing for the
// invalid value Intel stores down in the corner cases.
- emit_opcode(cbuf,0xD9); // FLDCW trunc
- emit_opcode(cbuf,0x2D);
- emit_d32(cbuf,(int)StubRoutines::x86::addr_fpu_cntrl_wrd_trunc());
+ emit_opcode(masm,0xD9); // FLDCW trunc
+ emit_opcode(masm,0x2D);
+ emit_d32(masm,(int)StubRoutines::x86::addr_fpu_cntrl_wrd_trunc());
// Allocate a word
- emit_opcode(cbuf,0x83); // SUB ESP,4
- emit_opcode(cbuf,0xEC);
- emit_d8(cbuf,0x04);
+ emit_opcode(masm,0x83); // SUB ESP,4
+ emit_opcode(masm,0xEC);
+ emit_d8(masm,0x04);
// Encoding assumes a double has been pushed into FPR0.
// Store down the double as an int, popping the FPU stack
- emit_opcode(cbuf,0xDB); // FISTP [ESP]
- emit_opcode(cbuf,0x1C);
- emit_d8(cbuf,0x24);
+ emit_opcode(masm,0xDB); // FISTP [ESP]
+ emit_opcode(masm,0x1C);
+ emit_d8(masm,0x24);
// Restore the rounding mode; mask the exception
- emit_opcode(cbuf,0xD9); // FLDCW std/24-bit mode
- emit_opcode(cbuf,0x2D);
- emit_d32( cbuf, Compile::current()->in_24_bit_fp_mode()
+ emit_opcode(masm,0xD9); // FLDCW std/24-bit mode
+ emit_opcode(masm,0x2D);
+ emit_d32( masm, Compile::current()->in_24_bit_fp_mode()
? (int)StubRoutines::x86::addr_fpu_cntrl_wrd_24()
: (int)StubRoutines::x86::addr_fpu_cntrl_wrd_std());
// Load the converted int; adjust CPU stack
- emit_opcode(cbuf,0x58); // POP EAX
- emit_opcode(cbuf,0x3D); // CMP EAX,imm
- emit_d32 (cbuf,0x80000000); // 0x80000000
- emit_opcode(cbuf,0x75); // JNE around_slow_call
- emit_d8 (cbuf,0x07); // Size of slow_call
+ emit_opcode(masm,0x58); // POP EAX
+ emit_opcode(masm,0x3D); // CMP EAX,imm
+ emit_d32 (masm,0x80000000); // 0x80000000
+ emit_opcode(masm,0x75); // JNE around_slow_call
+ emit_d8 (masm,0x07); // Size of slow_call
// Push src onto stack slow-path
- emit_opcode(cbuf,0xD9 ); // FLD ST(i)
- emit_d8 (cbuf,0xC0-1+$src$$reg );
+ emit_opcode(masm,0xD9 ); // FLD ST(i)
+ emit_d8 (masm,0xC0-1+$src$$reg );
// CALL directly to the runtime
- MacroAssembler _masm(&cbuf);
- cbuf.set_insts_mark();
- emit_opcode(cbuf,0xE8); // Call into runtime
- emit_d32_reloc(cbuf, (StubRoutines::x86::d2i_wrapper() - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
+ __ set_inst_mark();
+ emit_opcode(masm,0xE8); // Call into runtime
+ emit_d32_reloc(masm, (StubRoutines::x86::d2i_wrapper() - __ pc()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
+ __ clear_inst_mark();
__ post_call_nop();
// Carry on here...
%}
enc_class DPR2L_encoding( regDPR src ) %{
- emit_opcode(cbuf,0xD9); // FLDCW trunc
- emit_opcode(cbuf,0x2D);
- emit_d32(cbuf,(int)StubRoutines::x86::addr_fpu_cntrl_wrd_trunc());
+ emit_opcode(masm,0xD9); // FLDCW trunc
+ emit_opcode(masm,0x2D);
+ emit_d32(masm,(int)StubRoutines::x86::addr_fpu_cntrl_wrd_trunc());
// Allocate a word
- emit_opcode(cbuf,0x83); // SUB ESP,8
- emit_opcode(cbuf,0xEC);
- emit_d8(cbuf,0x08);
+ emit_opcode(masm,0x83); // SUB ESP,8
+ emit_opcode(masm,0xEC);
+ emit_d8(masm,0x08);
// Encoding assumes a double has been pushed into FPR0.
// Store down the double as a long, popping the FPU stack
- emit_opcode(cbuf,0xDF); // FISTP [ESP]
- emit_opcode(cbuf,0x3C);
- emit_d8(cbuf,0x24);
+ emit_opcode(masm,0xDF); // FISTP [ESP]
+ emit_opcode(masm,0x3C);
+ emit_d8(masm,0x24);
// Restore the rounding mode; mask the exception
- emit_opcode(cbuf,0xD9); // FLDCW std/24-bit mode
- emit_opcode(cbuf,0x2D);
- emit_d32( cbuf, Compile::current()->in_24_bit_fp_mode()
+ emit_opcode(masm,0xD9); // FLDCW std/24-bit mode
+ emit_opcode(masm,0x2D);
+ emit_d32( masm, Compile::current()->in_24_bit_fp_mode()
? (int)StubRoutines::x86::addr_fpu_cntrl_wrd_24()
: (int)StubRoutines::x86::addr_fpu_cntrl_wrd_std());
// Load the converted int; adjust CPU stack
- emit_opcode(cbuf,0x58); // POP EAX
- emit_opcode(cbuf,0x5A); // POP EDX
- emit_opcode(cbuf,0x81); // CMP EDX,imm
- emit_d8 (cbuf,0xFA); // rdx
- emit_d32 (cbuf,0x80000000); // 0x80000000
- emit_opcode(cbuf,0x75); // JNE around_slow_call
- emit_d8 (cbuf,0x07+4); // Size of slow_call
- emit_opcode(cbuf,0x85); // TEST EAX,EAX
- emit_opcode(cbuf,0xC0); // 2/rax,/rax,
- emit_opcode(cbuf,0x75); // JNE around_slow_call
- emit_d8 (cbuf,0x07); // Size of slow_call
+ emit_opcode(masm,0x58); // POP EAX
+ emit_opcode(masm,0x5A); // POP EDX
+ emit_opcode(masm,0x81); // CMP EDX,imm
+ emit_d8 (masm,0xFA); // rdx
+ emit_d32 (masm,0x80000000); // 0x80000000
+ emit_opcode(masm,0x75); // JNE around_slow_call
+ emit_d8 (masm,0x07+4); // Size of slow_call
+ emit_opcode(masm,0x85); // TEST EAX,EAX
+ emit_opcode(masm,0xC0); // 2/rax,/rax,
+ emit_opcode(masm,0x75); // JNE around_slow_call
+ emit_d8 (masm,0x07); // Size of slow_call
// Push src onto stack slow-path
- emit_opcode(cbuf,0xD9 ); // FLD ST(i)
- emit_d8 (cbuf,0xC0-1+$src$$reg );
+ emit_opcode(masm,0xD9 ); // FLD ST(i)
+ emit_d8 (masm,0xC0-1+$src$$reg );
// CALL directly to the runtime
- MacroAssembler _masm(&cbuf);
- cbuf.set_insts_mark();
- emit_opcode(cbuf,0xE8); // Call into runtime
- emit_d32_reloc(cbuf, (StubRoutines::x86::d2l_wrapper() - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
+ __ set_inst_mark();
+ emit_opcode(masm,0xE8); // Call into runtime
+ emit_d32_reloc(masm, (StubRoutines::x86::d2l_wrapper() - __ pc()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 );
+ __ clear_inst_mark();
__ post_call_nop();
// Carry on here...
%}
@@ -2996,68 +2955,68 @@ encode %{
enc_class FMul_ST_reg( eRegFPR src1 ) %{
// Operand was loaded from memory into fp ST (stack top)
// FMUL ST,$src /* D8 C8+i */
- emit_opcode(cbuf, 0xD8);
- emit_opcode(cbuf, 0xC8 + $src1$$reg);
+ emit_opcode(masm, 0xD8);
+ emit_opcode(masm, 0xC8 + $src1$$reg);
%}
enc_class FAdd_ST_reg( eRegFPR src2 ) %{
// FADDP ST,src2 /* D8 C0+i */
- emit_opcode(cbuf, 0xD8);
- emit_opcode(cbuf, 0xC0 + $src2$$reg);
+ emit_opcode(masm, 0xD8);
+ emit_opcode(masm, 0xC0 + $src2$$reg);
//could use FADDP src2,fpST /* DE C0+i */
%}
enc_class FAddP_reg_ST( eRegFPR src2 ) %{
// FADDP src2,ST /* DE C0+i */
- emit_opcode(cbuf, 0xDE);
- emit_opcode(cbuf, 0xC0 + $src2$$reg);
+ emit_opcode(masm, 0xDE);
+ emit_opcode(masm, 0xC0 + $src2$$reg);
%}
enc_class subFPR_divFPR_encode( eRegFPR src1, eRegFPR src2) %{
// Operand has been loaded into fp ST (stack top)
// FSUB ST,$src1
- emit_opcode(cbuf, 0xD8);
- emit_opcode(cbuf, 0xE0 + $src1$$reg);
+ emit_opcode(masm, 0xD8);
+ emit_opcode(masm, 0xE0 + $src1$$reg);
// FDIV
- emit_opcode(cbuf, 0xD8);
- emit_opcode(cbuf, 0xF0 + $src2$$reg);
+ emit_opcode(masm, 0xD8);
+ emit_opcode(masm, 0xF0 + $src2$$reg);
%}
enc_class MulFAddF (eRegFPR src1, eRegFPR src2) %{
// Operand was loaded from memory into fp ST (stack top)
// FADD ST,$src /* D8 C0+i */
- emit_opcode(cbuf, 0xD8);
- emit_opcode(cbuf, 0xC0 + $src1$$reg);
+ emit_opcode(masm, 0xD8);
+ emit_opcode(masm, 0xC0 + $src1$$reg);
// FMUL ST,src2 /* D8 C*+i */
- emit_opcode(cbuf, 0xD8);
- emit_opcode(cbuf, 0xC8 + $src2$$reg);
+ emit_opcode(masm, 0xD8);
+ emit_opcode(masm, 0xC8 + $src2$$reg);
%}
enc_class MulFAddFreverse (eRegFPR src1, eRegFPR src2) %{
// Operand was loaded from memory into fp ST (stack top)
// FADD ST,$src /* D8 C0+i */
- emit_opcode(cbuf, 0xD8);
- emit_opcode(cbuf, 0xC0 + $src1$$reg);
+ emit_opcode(masm, 0xD8);
+ emit_opcode(masm, 0xC0 + $src1$$reg);
// FMULP src2,ST /* DE C8+i */
- emit_opcode(cbuf, 0xDE);
- emit_opcode(cbuf, 0xC8 + $src2$$reg);
+ emit_opcode(masm, 0xDE);
+ emit_opcode(masm, 0xC8 + $src2$$reg);
%}
// Atomically load the volatile long
enc_class enc_loadL_volatile( memory mem, stackSlotL dst ) %{
- emit_opcode(cbuf,0xDF);
+ emit_opcode(masm,0xDF);
int rm_byte_opcode = 0x05;
int base = $mem$$base;
int index = $mem$$index;
int scale = $mem$$scale;
int displace = $mem$$disp;
relocInfo::relocType disp_reloc = $mem->disp_reloc(); // disp-as-oop when working with static globals
- encode_RegMem(cbuf, rm_byte_opcode, base, index, scale, displace, disp_reloc);
- store_to_stackslot( cbuf, 0x0DF, 0x07, $dst$$disp );
+ encode_RegMem(masm, rm_byte_opcode, base, index, scale, displace, disp_reloc);
+ store_to_stackslot( masm, 0x0DF, 0x07, $dst$$disp );
%}
// Volatile Store Long. Must be atomic, so move it into
@@ -3065,16 +3024,17 @@ encode %{
// target address before the store (for null-ptr checks)
// so the memory operand is used twice in the encoding.
enc_class enc_storeL_volatile( memory mem, stackSlotL src ) %{
- store_to_stackslot( cbuf, 0x0DF, 0x05, $src$$disp );
- cbuf.set_insts_mark(); // Mark start of FIST in case $mem has an oop
- emit_opcode(cbuf,0xDF);
+ store_to_stackslot( masm, 0x0DF, 0x05, $src$$disp );
+ __ set_inst_mark(); // Mark start of FIST in case $mem has an oop
+ emit_opcode(masm,0xDF);
int rm_byte_opcode = 0x07;
int base = $mem$$base;
int index = $mem$$index;
int scale = $mem$$scale;
int displace = $mem$$disp;
relocInfo::relocType disp_reloc = $mem->disp_reloc(); // disp-as-oop when working with static globals
- encode_RegMem(cbuf, rm_byte_opcode, base, index, scale, displace, disp_reloc);
+ encode_RegMem(masm, rm_byte_opcode, base, index, scale, displace, disp_reloc);
+ __ clear_inst_mark();
%}
%}
@@ -5754,7 +5714,7 @@ instruct loadRange(rRegI dst, memory mem) %{
ins_cost(125);
format %{ "MOV $dst,$mem" %}
opcode(0x8B);
- ins_encode( OpcP, RegMem(dst,mem));
+ ins_encode( SetInstMark, OpcP, RegMem(dst,mem), ClearInstMark);
ins_pipe( ialu_reg_mem );
%}
@@ -5766,7 +5726,7 @@ instruct loadP(eRegP dst, memory mem) %{
ins_cost(125);
format %{ "MOV $dst,$mem" %}
opcode(0x8B);
- ins_encode( OpcP, RegMem(dst,mem));
+ ins_encode( SetInstMark, OpcP, RegMem(dst,mem), ClearInstMark);
ins_pipe( ialu_reg_mem );
%}
@@ -5777,7 +5737,7 @@ instruct loadKlass(eRegP dst, memory mem) %{
ins_cost(125);
format %{ "MOV $dst,$mem" %}
opcode(0x8B);
- ins_encode( OpcP, RegMem(dst,mem));
+ ins_encode( SetInstMark, OpcP, RegMem(dst,mem), ClearInstMark);
ins_pipe( ialu_reg_mem );
%}
@@ -5790,8 +5750,8 @@ instruct loadDPR(regDPR dst, memory mem) %{
format %{ "FLD_D ST,$mem\n\t"
"FSTP $dst" %}
opcode(0xDD); /* DD /0 */
- ins_encode( OpcP, RMopc_Mem(0x00,mem),
- Pop_Reg_DPR(dst) );
+ ins_encode( SetInstMark, OpcP, RMopc_Mem(0x00,mem),
+ Pop_Reg_DPR(dst), ClearInstMark );
ins_pipe( fpu_reg_mem );
%}
@@ -5840,8 +5800,8 @@ instruct loadFPR(regFPR dst, memory mem) %{
format %{ "FLD_S ST,$mem\n\t"
"FSTP $dst" %}
opcode(0xD9); /* D9 /0 */
- ins_encode( OpcP, RMopc_Mem(0x00,mem),
- Pop_Reg_FPR(dst) );
+ ins_encode( SetInstMark, OpcP, RMopc_Mem(0x00,mem),
+ Pop_Reg_FPR(dst), ClearInstMark );
ins_pipe( fpu_reg_mem );
%}
@@ -5852,7 +5812,7 @@ instruct leaP8(eRegP dst, indOffset8 mem) %{
ins_cost(110);
format %{ "LEA $dst,$mem" %}
opcode(0x8D);
- ins_encode( OpcP, RegMem(dst,mem));
+ ins_encode( SetInstMark, OpcP, RegMem(dst,mem), ClearInstMark);
ins_pipe( ialu_reg_reg_fat );
%}
@@ -5862,7 +5822,7 @@ instruct leaP32(eRegP dst, indOffset32 mem) %{
ins_cost(110);
format %{ "LEA $dst,$mem" %}
opcode(0x8D);
- ins_encode( OpcP, RegMem(dst,mem));
+ ins_encode( SetInstMark, OpcP, RegMem(dst,mem), ClearInstMark);
ins_pipe( ialu_reg_reg_fat );
%}
@@ -5872,7 +5832,7 @@ instruct leaPIdxOff(eRegP dst, indIndexOffset mem) %{
ins_cost(110);
format %{ "LEA $dst,$mem" %}
opcode(0x8D);
- ins_encode( OpcP, RegMem(dst,mem));
+ ins_encode( SetInstMark, OpcP, RegMem(dst,mem), ClearInstMark);
ins_pipe( ialu_reg_reg_fat );
%}
@@ -5882,7 +5842,7 @@ instruct leaPIdxScale(eRegP dst, indIndexScale mem) %{
ins_cost(110);
format %{ "LEA $dst,$mem" %}
opcode(0x8D);
- ins_encode( OpcP, RegMem(dst,mem));
+ ins_encode( SetInstMark, OpcP, RegMem(dst,mem), ClearInstMark);
ins_pipe( ialu_reg_reg_fat );
%}
@@ -5892,7 +5852,7 @@ instruct leaPIdxScaleOff(eRegP dst, indIndexScaleOffset mem) %{
ins_cost(110);
format %{ "LEA $dst,$mem" %}
opcode(0x8D);
- ins_encode( OpcP, RegMem(dst,mem));
+ ins_encode( SetInstMark, OpcP, RegMem(dst,mem), ClearInstMark);
ins_pipe( ialu_reg_reg_fat );
%}
@@ -5901,7 +5861,7 @@ instruct loadConI(rRegI dst, immI src) %{
match(Set dst src);
format %{ "MOV $dst,$src" %}
- ins_encode( LdImmI(dst, src) );
+ ins_encode( SetInstMark, LdImmI(dst, src), ClearInstMark );
ins_pipe( ialu_reg_fat );
%}
@@ -5922,7 +5882,7 @@ instruct loadConP(eRegP dst, immP src) %{
format %{ "MOV $dst,$src" %}
opcode(0xB8); /* + rd */
- ins_encode( LdImmP(dst, src) );
+ ins_encode( SetInstMark, LdImmP(dst, src), ClearInstMark );
ins_pipe( ialu_reg_fat );
%}
@@ -6080,7 +6040,7 @@ instruct loadSSI(rRegI dst, stackSlotI src) %{
format %{ "MOV $dst,$src" %}
opcode(0x8B);
- ins_encode( OpcP, RegMem(dst,src));
+ ins_encode( SetInstMark, OpcP, RegMem(dst,src), ClearInstMark);
ins_pipe( ialu_reg_mem );
%}
@@ -6091,7 +6051,7 @@ instruct loadSSL(eRegL dst, stackSlotL src) %{
format %{ "MOV $dst,$src.lo\n\t"
"MOV $dst+4,$src.hi" %}
opcode(0x8B, 0x8B);
- ins_encode( OpcP, RegMem( dst, src ), OpcS, RegMem_Hi( dst, src ) );
+ ins_encode( SetInstMark, OpcP, RegMem( dst, src ), OpcS, RegMem_Hi( dst, src ), ClearInstMark );
ins_pipe( ialu_mem_long_reg );
%}
@@ -6102,7 +6062,7 @@ instruct loadSSP(eRegP dst, stackSlotP src) %{
format %{ "MOV $dst,$src" %}
opcode(0x8B);
- ins_encode( OpcP, RegMem(dst,src));
+ ins_encode( SetInstMark, OpcP, RegMem(dst,src), ClearInstMark);
ins_pipe( ialu_reg_mem );
%}
@@ -6114,8 +6074,8 @@ instruct loadSSF(regFPR dst, stackSlotF src) %{
format %{ "FLD_S $src\n\t"
"FSTP $dst" %}
opcode(0xD9); /* D9 /0, FLD m32real */
- ins_encode( OpcP, RMopc_Mem_no_oop(0x00,src),
- Pop_Reg_FPR(dst) );
+ ins_encode( SetInstMark, OpcP, RMopc_Mem_no_oop(0x00,src),
+ Pop_Reg_FPR(dst), ClearInstMark );
ins_pipe( fpu_reg_mem );
%}
@@ -6127,8 +6087,8 @@ instruct loadSSD(regDPR dst, stackSlotD src) %{
format %{ "FLD_D $src\n\t"
"FSTP $dst" %}
opcode(0xDD); /* DD /0, FLD m64real */
- ins_encode( OpcP, RMopc_Mem_no_oop(0x00,src),
- Pop_Reg_DPR(dst) );
+ ins_encode( SetInstMark, OpcP, RMopc_Mem_no_oop(0x00,src),
+ Pop_Reg_DPR(dst), ClearInstMark );
ins_pipe( fpu_reg_mem );
%}
@@ -6202,7 +6162,7 @@ instruct storeB(memory mem, xRegI src) %{
ins_cost(125);
format %{ "MOV8 $mem,$src" %}
opcode(0x88);
- ins_encode( OpcP, RegMem( src, mem ) );
+ ins_encode( SetInstMark, OpcP, RegMem( src, mem ), ClearInstMark );
ins_pipe( ialu_mem_reg );
%}
@@ -6213,7 +6173,7 @@ instruct storeC(memory mem, rRegI src) %{
ins_cost(125);
format %{ "MOV16 $mem,$src" %}
opcode(0x89, 0x66);
- ins_encode( OpcS, OpcP, RegMem( src, mem ) );
+ ins_encode( SetInstMark, OpcS, OpcP, RegMem( src, mem ), ClearInstMark );
ins_pipe( ialu_mem_reg );
%}
@@ -6224,7 +6184,7 @@ instruct storeI(memory mem, rRegI src) %{
ins_cost(125);
format %{ "MOV $mem,$src" %}
opcode(0x89);
- ins_encode( OpcP, RegMem( src, mem ) );
+ ins_encode( SetInstMark, OpcP, RegMem( src, mem ), ClearInstMark );
ins_pipe( ialu_mem_reg );
%}
@@ -6237,7 +6197,7 @@ instruct storeL(long_memory mem, eRegL src) %{
format %{ "MOV $mem,$src.lo\n\t"
"MOV $mem+4,$src.hi" %}
opcode(0x89, 0x89);
- ins_encode( OpcP, RegMem( src, mem ), OpcS, RegMem_Hi( src, mem ) );
+ ins_encode( SetInstMark, OpcP, RegMem( src, mem ), OpcS, RegMem_Hi( src, mem ), ClearInstMark );
ins_pipe( ialu_mem_long_reg );
%}
@@ -6265,7 +6225,7 @@ instruct storeL_volatile(memory mem, stackSlotL src, eFlagsReg cr ) %{
"FILD $src\n\t"
"FISTp $mem\t # 64-bit atomic volatile long store" %}
opcode(0x3B);
- ins_encode( OpcP, RegMem( EAX, mem ), enc_storeL_volatile(mem,src));
+ ins_encode( SetInstMark, OpcP, RegMem( EAX, mem ), enc_storeL_volatile(mem,src), ClearInstMark);
ins_pipe( fpu_reg_mem );
%}
@@ -6312,7 +6272,7 @@ instruct storeP(memory mem, anyRegP src) %{
ins_cost(125);
format %{ "MOV $mem,$src" %}
opcode(0x89);
- ins_encode( OpcP, RegMem( src, mem ) );
+ ins_encode( SetInstMark, OpcP, RegMem( src, mem ), ClearInstMark );
ins_pipe( ialu_mem_reg );
%}
@@ -6323,7 +6283,7 @@ instruct storeImmI(memory mem, immI src) %{
ins_cost(150);
format %{ "MOV $mem,$src" %}
opcode(0xC7); /* C7 /0 */
- ins_encode( OpcP, RMopc_Mem(0x00,mem), Con32( src ));
+ ins_encode( SetInstMark, OpcP, RMopc_Mem(0x00,mem), Con32(src), ClearInstMark);
ins_pipe( ialu_mem_imm );
%}
@@ -6335,7 +6295,7 @@ instruct storeImmI16(memory mem, immI16 src) %{
ins_cost(150);
format %{ "MOV16 $mem,$src" %}
opcode(0xC7); /* C7 /0 Same as 32 store immediate with prefix */
- ins_encode( SizePrefix, OpcP, RMopc_Mem(0x00,mem), Con16( src ));
+ ins_encode( SetInstMark, SizePrefix, OpcP, RMopc_Mem(0x00,mem), Con16(src), ClearInstMark);
ins_pipe( ialu_mem_imm );
%}
@@ -6347,7 +6307,7 @@ instruct storeImmP(memory mem, immP src) %{
ins_cost(150);
format %{ "MOV $mem,$src" %}
opcode(0xC7); /* C7 /0 */
- ins_encode( OpcP, RMopc_Mem(0x00,mem), Con32( src ));
+ ins_encode( SetInstMark, OpcP, RMopc_Mem(0x00,mem), Con32( src ), ClearInstMark);
ins_pipe( ialu_mem_imm );
%}
@@ -6358,7 +6318,7 @@ instruct storeImmB(memory mem, immI8 src) %{
ins_cost(150);
format %{ "MOV8 $mem,$src" %}
opcode(0xC6); /* C6 /0 */
- ins_encode( OpcP, RMopc_Mem(0x00,mem), Con8or32( src ));
+ ins_encode( SetInstMark, OpcP, RMopc_Mem(0x00,mem), Con8or32(src), ClearInstMark);
ins_pipe( ialu_mem_imm );
%}
@@ -6369,7 +6329,7 @@ instruct storeImmCM(memory mem, immI8 src) %{
ins_cost(150);
format %{ "MOV8 $mem,$src\t! CMS card-mark imm0" %}
opcode(0xC6); /* C6 /0 */
- ins_encode( OpcP, RMopc_Mem(0x00,mem), Con8or32( src ));
+ ins_encode( SetInstMark, OpcP, RMopc_Mem(0x00,mem), Con8or32(src), ClearInstMark);
ins_pipe( ialu_mem_imm );
%}
@@ -6468,7 +6428,7 @@ instruct storeFPR_imm( memory mem, immFPR src) %{
ins_cost(50);
format %{ "MOV $mem,$src\t# store float" %}
opcode(0xC7); /* C7 /0 */
- ins_encode( OpcP, RMopc_Mem(0x00,mem), Con32FPR_as_bits( src ));
+ ins_encode( SetInstMark, OpcP, RMopc_Mem(0x00,mem), Con32FPR_as_bits(src), ClearInstMark);
ins_pipe( ialu_mem_imm );
%}
@@ -6480,7 +6440,7 @@ instruct storeF_imm( memory mem, immF src) %{
ins_cost(50);
format %{ "MOV $mem,$src\t# store float" %}
opcode(0xC7); /* C7 /0 */
- ins_encode( OpcP, RMopc_Mem(0x00,mem), Con32F_as_bits( src ));
+ ins_encode( SetInstMark, OpcP, RMopc_Mem(0x00,mem), Con32F_as_bits(src), ClearInstMark);
ins_pipe( ialu_mem_imm );
%}
@@ -6514,7 +6474,7 @@ instruct storeSSL(stackSlotL dst, eRegL src) %{
format %{ "MOV $dst,$src.lo\n\t"
"MOV $dst+4,$src.hi" %}
opcode(0x89, 0x89);
- ins_encode( OpcP, RegMem( src, dst ), OpcS, RegMem_Hi( src, dst ) );
+ ins_encode( SetInstMark, OpcP, RegMem( src, dst ), OpcS, RegMem_Hi( src, dst ), ClearInstMark );
ins_pipe( ialu_mem_long_reg );
%}
@@ -6689,7 +6649,7 @@ instruct cmovI_mem(cmpOp cop, eFlagsReg cr, rRegI dst, memory src) %{
ins_cost(250);
format %{ "CMOV$cop $dst,$src" %}
opcode(0x0F,0x40);
- ins_encode( enc_cmov(cop), RegMem( dst, src ) );
+ ins_encode( SetInstMark, enc_cmov(cop), RegMem( dst, src ), ClearInstMark );
ins_pipe( pipe_cmov_mem );
%}
@@ -6700,7 +6660,7 @@ instruct cmovI_memU(cmpOpU cop, eFlagsRegU cr, rRegI dst, memory src) %{
ins_cost(250);
format %{ "CMOV$cop $dst,$src" %}
opcode(0x0F,0x40);
- ins_encode( enc_cmov(cop), RegMem( dst, src ) );
+ ins_encode( SetInstMark, enc_cmov(cop), RegMem( dst, src ), ClearInstMark );
ins_pipe( pipe_cmov_mem );
%}
@@ -6998,7 +6958,7 @@ instruct leaI_eReg_immI(rRegI dst, rRegI src0, immI src1) %{
format %{ "LEA $dst,[$src0 + $src1]" %}
opcode(0x8D); /* 0x8D /r */
- ins_encode( OpcP, RegLea( dst, src0, src1 ) );
+ ins_encode( SetInstMark, OpcP, RegLea( dst, src0, src1 ), ClearInstMark );
ins_pipe( ialu_reg_reg );
%}
@@ -7008,7 +6968,7 @@ instruct leaP_eReg_immI(eRegP dst, eRegP src0, immI src1) %{
format %{ "LEA $dst,[$src0 + $src1]\t# ptr" %}
opcode(0x8D); /* 0x8D /r */
- ins_encode( OpcP, RegLea( dst, src0, src1 ) );
+ ins_encode( SetInstMark, OpcP, RegLea( dst, src0, src1 ), ClearInstMark );
ins_pipe( ialu_reg_reg );
%}
@@ -7053,7 +7013,7 @@ instruct addI_eReg_mem(rRegI dst, memory src, eFlagsReg cr) %{
ins_cost(150);
format %{ "ADD $dst,$src" %}
opcode(0x03);
- ins_encode( OpcP, RegMem( dst, src) );
+ ins_encode( SetInstMark, OpcP, RegMem( dst, src), ClearInstMark );
ins_pipe( ialu_reg_mem );
%}
@@ -7064,7 +7024,7 @@ instruct addI_mem_eReg(memory dst, rRegI src, eFlagsReg cr) %{
ins_cost(150);
format %{ "ADD $dst,$src" %}
opcode(0x01); /* Opcode 01 /r */
- ins_encode( OpcP, RegMem( src, dst ) );
+ ins_encode( SetInstMark, OpcP, RegMem( src, dst ), ClearInstMark );
ins_pipe( ialu_mem_reg );
%}
@@ -7076,7 +7036,7 @@ instruct addI_mem_imm(memory dst, immI src, eFlagsReg cr) %{
ins_cost(125);
format %{ "ADD $dst,$src" %}
opcode(0x81); /* Opcode 81 /0 id */
- ins_encode( OpcSE( src ), RMopc_Mem(0x00,dst), Con8or32( src ) );
+ ins_encode( SetInstMark, OpcSE( src ), RMopc_Mem(0x00,dst), Con8or32(src), ClearInstMark );
ins_pipe( ialu_mem_imm );
%}
@@ -7087,7 +7047,7 @@ instruct incI_mem(memory dst, immI_1 src, eFlagsReg cr) %{
ins_cost(125);
format %{ "INC $dst" %}
opcode(0xFF); /* Opcode FF /0 */
- ins_encode( OpcP, RMopc_Mem(0x00,dst));
+ ins_encode( SetInstMark, OpcP, RMopc_Mem(0x00,dst), ClearInstMark);
ins_pipe( ialu_mem_imm );
%}
@@ -7098,7 +7058,7 @@ instruct decI_mem(memory dst, immI_M1 src, eFlagsReg cr) %{
ins_cost(125);
format %{ "DEC $dst" %}
opcode(0xFF); /* Opcode FF /1 */
- ins_encode( OpcP, RMopc_Mem(0x01,dst));
+ ins_encode( SetInstMark, OpcP, RMopc_Mem(0x01,dst), ClearInstMark);
ins_pipe( ialu_mem_imm );
%}
@@ -7420,7 +7380,7 @@ instruct subI_eReg_mem(rRegI dst, memory src, eFlagsReg cr) %{
ins_cost(150);
format %{ "SUB $dst,$src" %}
opcode(0x2B);
- ins_encode( OpcP, RegMem( dst, src) );
+ ins_encode( SetInstMark, OpcP, RegMem( dst, src), ClearInstMark );
ins_pipe( ialu_reg_mem );
%}
@@ -7431,7 +7391,7 @@ instruct subI_mem_eReg(memory dst, rRegI src, eFlagsReg cr) %{
ins_cost(150);
format %{ "SUB $dst,$src" %}
opcode(0x29); /* Opcode 29 /r */
- ins_encode( OpcP, RegMem( src, dst ) );
+ ins_encode( SetInstMark, OpcP, RegMem( src, dst ), ClearInstMark );
ins_pipe( ialu_mem_reg );
%}
@@ -7537,7 +7497,7 @@ instruct mulI_mem_imm(rRegI dst, memory src, immI imm, eFlagsReg cr) %{
ins_cost(300);
format %{ "IMUL $dst,$src,$imm" %}
opcode(0x69); /* 69 /r id */
- ins_encode( OpcSE(imm), RegMem( dst, src ), Con8or32( imm ) );
+ ins_encode( SetInstMark, OpcSE(imm), RegMem( dst, src ), Con8or32( imm ), ClearInstMark );
ins_pipe( ialu_reg_mem_alu0 );
%}
@@ -7549,7 +7509,7 @@ instruct mulI(rRegI dst, memory src, eFlagsReg cr) %{
ins_cost(350);
format %{ "IMUL $dst,$src" %}
opcode(0xAF, 0x0F);
- ins_encode( OpcS, OpcP, RegMem( dst, src) );
+ ins_encode( SetInstMark, OpcS, OpcP, RegMem( dst, src), ClearInstMark );
ins_pipe( ialu_reg_mem_alu0 );
%}
@@ -7954,7 +7914,7 @@ instruct sarI_mem_1(memory dst, immI_1 shift, eFlagsReg cr) %{
effect(KILL cr);
format %{ "SAR $dst,$shift" %}
opcode(0xD1, 0x7); /* D1 /7 */
- ins_encode( OpcP, RMopc_Mem(secondary,dst) );
+ ins_encode( SetInstMark, OpcP, RMopc_Mem(secondary,dst), ClearInstMark );
ins_pipe( ialu_mem_imm );
%}
@@ -7977,7 +7937,7 @@ instruct sarI_mem_imm(memory dst, immI8 shift, eFlagsReg cr) %{
format %{ "SAR $dst,$shift" %}
opcode(0xC1, 0x7); /* C1 /7 ib */
- ins_encode( OpcP, RMopc_Mem(secondary, dst ), Con8or32( shift ) );
+ ins_encode( SetInstMark, OpcP, RMopc_Mem(secondary, dst ), Con8or32(shift), ClearInstMark );
ins_pipe( ialu_mem_imm );
%}
@@ -8093,7 +8053,7 @@ instruct andI_eReg_mem(rRegI dst, memory src, eFlagsReg cr) %{
ins_cost(150);
format %{ "AND $dst,$src" %}
opcode(0x23);
- ins_encode( OpcP, RegMem( dst, src) );
+ ins_encode( SetInstMark, OpcP, RegMem( dst, src), ClearInstMark );
ins_pipe( ialu_reg_mem );
%}
@@ -8105,7 +8065,7 @@ instruct andI_mem_eReg(memory dst, rRegI src, eFlagsReg cr) %{
ins_cost(150);
format %{ "AND $dst,$src" %}
opcode(0x21); /* Opcode 21 /r */
- ins_encode( OpcP, RegMem( src, dst ) );
+ ins_encode( SetInstMark, OpcP, RegMem( src, dst ), ClearInstMark );
ins_pipe( ialu_mem_reg );
%}
@@ -8118,7 +8078,7 @@ instruct andI_mem_imm(memory dst, immI src, eFlagsReg cr) %{
format %{ "AND $dst,$src" %}
opcode(0x81, 0x4); /* Opcode 81 /4 id */
// ins_encode( MemImm( dst, src) );
- ins_encode( OpcSE( src ), RMopc_Mem(secondary, dst ), Con8or32( src ) );
+ ins_encode( SetInstMark, OpcSE( src ), RMopc_Mem(secondary, dst ), Con8or32(src), ClearInstMark );
ins_pipe( ialu_mem_imm );
%}
@@ -8284,7 +8244,7 @@ instruct orI_eReg_mem(rRegI dst, memory src, eFlagsReg cr) %{
ins_cost(150);
format %{ "OR $dst,$src" %}
opcode(0x0B);
- ins_encode( OpcP, RegMem( dst, src) );
+ ins_encode( SetInstMark, OpcP, RegMem( dst, src), ClearInstMark );
ins_pipe( ialu_reg_mem );
%}
@@ -8296,7 +8256,7 @@ instruct orI_mem_eReg(memory dst, rRegI src, eFlagsReg cr) %{
ins_cost(150);
format %{ "OR $dst,$src" %}
opcode(0x09); /* Opcode 09 /r */
- ins_encode( OpcP, RegMem( src, dst ) );
+ ins_encode( SetInstMark, OpcP, RegMem( src, dst ), ClearInstMark );
ins_pipe( ialu_mem_reg );
%}
@@ -8309,7 +8269,7 @@ instruct orI_mem_imm(memory dst, immI src, eFlagsReg cr) %{
format %{ "OR $dst,$src" %}
opcode(0x81,0x1); /* Opcode 81 /1 id */
// ins_encode( MemImm( dst, src) );
- ins_encode( OpcSE( src ), RMopc_Mem(secondary, dst ), Con8or32( src ) );
+ ins_encode( SetInstMark, OpcSE( src ), RMopc_Mem(secondary, dst ), Con8or32(src), ClearInstMark );
ins_pipe( ialu_mem_imm );
%}
@@ -8491,7 +8451,7 @@ instruct xorI_eReg_mem(rRegI dst, memory src, eFlagsReg cr) %{
ins_cost(150);
format %{ "XOR $dst,$src" %}
opcode(0x33);
- ins_encode( OpcP, RegMem(dst, src) );
+ ins_encode( SetInstMark, OpcP, RegMem(dst, src), ClearInstMark );
ins_pipe( ialu_reg_mem );
%}
@@ -8503,7 +8463,7 @@ instruct xorI_mem_eReg(memory dst, rRegI src, eFlagsReg cr) %{
ins_cost(150);
format %{ "XOR $dst,$src" %}
opcode(0x31); /* Opcode 31 /r */
- ins_encode( OpcP, RegMem( src, dst ) );
+ ins_encode( SetInstMark, OpcP, RegMem( src, dst ), ClearInstMark );
ins_pipe( ialu_mem_reg );
%}
@@ -8515,7 +8475,7 @@ instruct xorI_mem_imm(memory dst, immI src, eFlagsReg cr) %{
ins_cost(125);
format %{ "XOR $dst,$src" %}
opcode(0x81,0x6); /* Opcode 81 /6 id */
- ins_encode( OpcSE( src ), RMopc_Mem(secondary, dst ), Con8or32( src ) );
+ ins_encode( SetInstMark, OpcSE( src ), RMopc_Mem(secondary, dst ), Con8or32(src), ClearInstMark );
ins_pipe( ialu_mem_imm );
%}
@@ -8801,7 +8761,7 @@ instruct addL_eReg_mem(eRegL dst, load_long_memory mem, eFlagsReg cr) %{
format %{ "ADD $dst.lo,$mem\n\t"
"ADC $dst.hi,$mem+4" %}
opcode(0x03, 0x13);
- ins_encode( OpcP, RegMem( dst, mem), OpcS, RegMem_Hi(dst,mem) );
+ ins_encode( SetInstMark, OpcP, RegMem( dst, mem), OpcS, RegMem_Hi(dst,mem), ClearInstMark );
ins_pipe( ialu_reg_long_mem );
%}
@@ -8836,7 +8796,7 @@ instruct subL_eReg_mem(eRegL dst, load_long_memory mem, eFlagsReg cr) %{
format %{ "SUB $dst.lo,$mem\n\t"
"SBB $dst.hi,$mem+4" %}
opcode(0x2B, 0x1B);
- ins_encode( OpcP, RegMem( dst, mem), OpcS, RegMem_Hi(dst,mem) );
+ ins_encode( SetInstMark, OpcP, RegMem( dst, mem), OpcS, RegMem_Hi(dst,mem), ClearInstMark );
ins_pipe( ialu_reg_long_mem );
%}
@@ -8879,7 +8839,7 @@ instruct andL_eReg_mem(eRegL dst, load_long_memory mem, eFlagsReg cr) %{
format %{ "AND $dst.lo,$mem\n\t"
"AND $dst.hi,$mem+4" %}
opcode(0x23, 0x23);
- ins_encode( OpcP, RegMem( dst, mem), OpcS, RegMem_Hi(dst,mem) );
+ ins_encode( SetInstMark, OpcP, RegMem( dst, mem), OpcS, RegMem_Hi(dst,mem), ClearInstMark );
ins_pipe( ialu_reg_long_mem );
%}
@@ -9117,7 +9077,7 @@ instruct orl_eReg_mem(eRegL dst, load_long_memory mem, eFlagsReg cr) %{
format %{ "OR $dst.lo,$mem\n\t"
"OR $dst.hi,$mem+4" %}
opcode(0x0B,0x0B);
- ins_encode( OpcP, RegMem( dst, mem), OpcS, RegMem_Hi(dst,mem) );
+ ins_encode( SetInstMark, OpcP, RegMem( dst, mem), OpcS, RegMem_Hi(dst,mem), ClearInstMark );
ins_pipe( ialu_reg_long_mem );
%}
@@ -9163,7 +9123,7 @@ instruct xorl_eReg_mem(eRegL dst, load_long_memory mem, eFlagsReg cr) %{
format %{ "XOR $dst.lo,$mem\n\t"
"XOR $dst.hi,$mem+4" %}
opcode(0x33,0x33);
- ins_encode( OpcP, RegMem( dst, mem), OpcS, RegMem_Hi(dst,mem) );
+ ins_encode( SetInstMark, OpcP, RegMem( dst, mem), OpcS, RegMem_Hi(dst,mem), ClearInstMark );
ins_pipe( ialu_reg_long_mem );
%}
@@ -9445,7 +9405,7 @@ instruct cmpD_cc(eFlagsRegU cr, regD src1, regD src2) %{
"exit:" %}
ins_encode %{
__ ucomisd($src1$$XMMRegister, $src2$$XMMRegister);
- emit_cmpfp_fixup(_masm);
+ emit_cmpfp_fixup(masm);
%}
ins_pipe( pipe_slow );
%}
@@ -9474,7 +9434,7 @@ instruct cmpD_ccmem(eFlagsRegU cr, regD src1, memory src2) %{
"exit:" %}
ins_encode %{
__ ucomisd($src1$$XMMRegister, $src2$$Address);
- emit_cmpfp_fixup(_masm);
+ emit_cmpfp_fixup(masm);
%}
ins_pipe( pipe_slow );
%}
@@ -9505,7 +9465,7 @@ instruct cmpD_reg(xRegI dst, regD src1, regD src2, eFlagsReg cr) %{
"done:" %}
ins_encode %{
__ ucomisd($src1$$XMMRegister, $src2$$XMMRegister);
- emit_cmpfp3(_masm, $dst$$Register);
+ emit_cmpfp3(masm, $dst$$Register);
%}
ins_pipe( pipe_slow );
%}
@@ -9525,7 +9485,7 @@ instruct cmpD_regmem(xRegI dst, regD src1, memory src2, eFlagsReg cr) %{
"done:" %}
ins_encode %{
__ ucomisd($src1$$XMMRegister, $src2$$Address);
- emit_cmpfp3(_masm, $dst$$Register);
+ emit_cmpfp3(masm, $dst$$Register);
%}
ins_pipe( pipe_slow );
%}
@@ -9567,8 +9527,8 @@ instruct subDPR_reg_mem(regDPR dst, memory src) %{
format %{ "FLD $src\n\t"
"DSUBp $dst,ST" %}
opcode(0xDE, 0x5, 0xDD); /* DE C0+i */ /* LoadD DD /0 */
- ins_encode( Opcode(tertiary), RMopc_Mem(0x00,src),
- OpcP, RegOpc(dst) );
+ ins_encode( SetInstMark, Opcode(tertiary), RMopc_Mem(0x00,src),
+ OpcP, RegOpc(dst), ClearInstMark );
ins_pipe( fpu_reg_mem );
%}
@@ -9629,8 +9589,8 @@ instruct addDPR_reg_mem(regDPR dst, memory src) %{
format %{ "FLD $src\n\t"
"DADDp $dst,ST" %}
opcode(0xDE, 0x0, 0xDD); /* DE C0+i */ /* LoadD DD /0 */
- ins_encode( Opcode(tertiary), RMopc_Mem(0x00,src),
- OpcP, RegOpc(dst) );
+ ins_encode( SetInstMark, Opcode(tertiary), RMopc_Mem(0x00,src),
+ OpcP, RegOpc(dst), ClearInstMark );
ins_pipe( fpu_reg_mem );
%}
@@ -9644,10 +9604,11 @@ instruct addDPR_mem_reg(memory dst, regDPR src) %{
"DADD ST,$src\n\t"
"FST_D $dst" %}
opcode(0xDD, 0x0);
- ins_encode( Opcode(0xDD), RMopc_Mem(0x00,dst),
- Opcode(0xD8), RegOpc(src),
- set_instruction_start,
- Opcode(0xDD), RMopc_Mem(0x03,dst) );
+ ins_encode( SetInstMark, Opcode(0xDD), RMopc_Mem(0x00,dst),
+ Opcode(0xD8), RegOpc(src), ClearInstMark,
+ SetInstMark,
+ Opcode(0xDD), RMopc_Mem(0x03,dst),
+ ClearInstMark);
ins_pipe( fpu_reg_mem );
%}
@@ -9752,8 +9713,8 @@ instruct mulDPR_reg_mem(regDPR dst, memory src) %{
format %{ "FLD_D $src\n\t"
"DMULp $dst,ST" %}
opcode(0xDE, 0x1, 0xDD); /* DE C8+i or DE /1*/ /* LoadD DD /0 */
- ins_encode( Opcode(tertiary), RMopc_Mem(0x00,src),
- OpcP, RegOpc(dst) );
+ ins_encode( SetInstMark, Opcode(tertiary), RMopc_Mem(0x00,src),
+ OpcP, RegOpc(dst), ClearInstMark );
ins_pipe( fpu_reg_mem );
%}
@@ -9767,9 +9728,9 @@ instruct mulDPR_reg_mem_cisc(regDPR dst, regDPR src, memory mem) %{
"DMUL ST,$src\n\t"
"FSTP_D $dst" %}
opcode(0xD8, 0x1, 0xD9); /* D8 C8+i */ /* LoadD D9 /0 */
- ins_encode( Opcode(tertiary), RMopc_Mem(0x00,mem),
+ ins_encode( SetInstMark, Opcode(tertiary), RMopc_Mem(0x00,mem),
OpcReg_FPR(src),
- Pop_Reg_DPR(dst) );
+ Pop_Reg_DPR(dst), ClearInstMark );
ins_pipe( fpu_reg_reg_mem );
%}
@@ -10028,7 +9989,7 @@ instruct cmpF_cc(eFlagsRegU cr, regF src1, regF src2) %{
"exit:" %}
ins_encode %{
__ ucomiss($src1$$XMMRegister, $src2$$XMMRegister);
- emit_cmpfp_fixup(_masm);
+ emit_cmpfp_fixup(masm);
%}
ins_pipe( pipe_slow );
%}
@@ -10057,7 +10018,7 @@ instruct cmpF_ccmem(eFlagsRegU cr, regF src1, memory src2) %{
"exit:" %}
ins_encode %{
__ ucomiss($src1$$XMMRegister, $src2$$Address);
- emit_cmpfp_fixup(_masm);
+ emit_cmpfp_fixup(masm);
%}
ins_pipe( pipe_slow );
%}
@@ -10088,7 +10049,7 @@ instruct cmpF_reg(xRegI dst, regF src1, regF src2, eFlagsReg cr) %{
"done:" %}
ins_encode %{
__ ucomiss($src1$$XMMRegister, $src2$$XMMRegister);
- emit_cmpfp3(_masm, $dst$$Register);
+ emit_cmpfp3(masm, $dst$$Register);
%}
ins_pipe( pipe_slow );
%}
@@ -10108,7 +10069,7 @@ instruct cmpF_regmem(xRegI dst, regF src1, memory src2, eFlagsReg cr) %{
"done:" %}
ins_encode %{
__ ucomiss($src1$$XMMRegister, $src2$$Address);
- emit_cmpfp3(_masm, $dst$$Register);
+ emit_cmpfp3(masm, $dst$$Register);
%}
ins_pipe( pipe_slow );
%}
@@ -10194,9 +10155,9 @@ instruct addFPR24_reg_mem(stackSlotF dst, regFPR src1, memory src2) %{
"FADD ST,$src1\n\t"
"FSTP_S $dst" %}
opcode(0xD8, 0x0, 0xD9); /* D8 C0+i */ /* LoadF D9 /0 */
- ins_encode( Opcode(tertiary), RMopc_Mem(0x00,src2),
+ ins_encode( SetInstMark, Opcode(tertiary), RMopc_Mem(0x00,src2),
OpcReg_FPR(src1),
- Pop_Mem_FPR(dst) );
+ Pop_Mem_FPR(dst), ClearInstMark );
ins_pipe( fpu_mem_reg_mem );
%}
//
@@ -10208,8 +10169,8 @@ instruct addFPR_reg_mem(regFPR dst, memory src) %{
format %{ "FADD $dst,$src" %}
opcode(0xDE, 0x0, 0xD9); /* DE C0+i or DE /0*/ /* LoadF D9 /0 */
- ins_encode( Opcode(tertiary), RMopc_Mem(0x00,src),
- OpcP, RegOpc(dst) );
+ ins_encode( SetInstMark, Opcode(tertiary), RMopc_Mem(0x00,src),
+ OpcP, RegOpc(dst), ClearInstMark );
ins_pipe( fpu_reg_mem );
%}
@@ -10221,9 +10182,9 @@ instruct addFPR24_mem_reg(stackSlotF dst, regFPR src2, memory src1 ) %{
format %{ "FADD $dst,$src1,$src2" %}
opcode(0xD8, 0x0, 0xD9); /* D8 C0+i */ /* LoadF D9 /0 */
- ins_encode( Opcode(tertiary), RMopc_Mem(0x00,src1),
+ ins_encode( SetInstMark, Opcode(tertiary), RMopc_Mem(0x00,src1),
OpcReg_FPR(src2),
- Pop_Mem_FPR(dst) );
+ Pop_Mem_FPR(dst), ClearInstMark );
ins_pipe( fpu_mem_reg_mem );
%}
@@ -10235,10 +10196,10 @@ instruct addFPR24_mem_cisc(stackSlotF dst, memory src1, memory src2) %{
format %{ "FADD $dst,$src1,$src2 cisc" %}
opcode(0xD8, 0x0, 0xD9); /* D8 C0+i */ /* LoadF D9 /0 */
- ins_encode( Opcode(tertiary), RMopc_Mem(0x00,src2),
- set_instruction_start,
+ ins_encode( SetInstMark, Opcode(tertiary), RMopc_Mem(0x00,src2),
OpcP, RMopc_Mem(secondary,src1),
- Pop_Mem_FPR(dst) );
+ Pop_Mem_FPR(dst),
+ ClearInstMark);
ins_pipe( fpu_mem_mem_mem );
%}
@@ -10249,10 +10210,10 @@ instruct addFPR24_mem_mem(stackSlotF dst, memory src1, memory src2) %{
format %{ "FADD $dst,$src1,$src2" %}
opcode(0xD8, 0x0, 0xD9); /* D8 /0 */ /* LoadF D9 /0 */
- ins_encode( Opcode(tertiary), RMopc_Mem(0x00,src2),
- set_instruction_start,
+ ins_encode( SetInstMark, Opcode(tertiary), RMopc_Mem(0x00,src2),
OpcP, RMopc_Mem(secondary,src1),
- Pop_Mem_FPR(dst) );
+ Pop_Mem_FPR(dst),
+ ClearInstMark);
ins_pipe( fpu_mem_mem_mem );
%}
@@ -10328,9 +10289,9 @@ instruct mulFPR24_reg_mem(stackSlotF dst, regFPR src1, memory src2) %{
"FMUL $src1\n\t"
"FSTP_S $dst" %}
opcode(0xD8, 0x1, 0xD9); /* D8 C8+i or DE /1*/ /* LoadF D9 /0 */
- ins_encode( Opcode(tertiary), RMopc_Mem(0x00,src2),
+ ins_encode( SetInstMark, Opcode(tertiary), RMopc_Mem(0x00,src2),
OpcReg_FPR(src1),
- Pop_Mem_FPR(dst) );
+ Pop_Mem_FPR(dst), ClearInstMark );
ins_pipe( fpu_mem_reg_mem );
%}
//
@@ -10342,9 +10303,9 @@ instruct mulFPR_reg_mem(regFPR dst, regFPR src1, memory src2) %{
format %{ "FMUL $dst,$src1,$src2" %}
opcode(0xD8, 0x1, 0xD9); /* D8 C8+i */ /* LoadF D9 /0 */
- ins_encode( Opcode(tertiary), RMopc_Mem(0x00,src2),
+ ins_encode( SetInstMark, Opcode(tertiary), RMopc_Mem(0x00,src2),
OpcReg_FPR(src1),
- Pop_Reg_FPR(dst) );
+ Pop_Reg_FPR(dst), ClearInstMark );
ins_pipe( fpu_reg_reg_mem );
%}
@@ -10355,10 +10316,10 @@ instruct mulFPR24_mem_mem(stackSlotF dst, memory src1, memory src2) %{
format %{ "FMUL $dst,$src1,$src2" %}
opcode(0xD8, 0x1, 0xD9); /* D8 /1 */ /* LoadF D9 /0 */
- ins_encode( Opcode(tertiary), RMopc_Mem(0x00,src2),
- set_instruction_start,
+ ins_encode( SetInstMark, Opcode(tertiary), RMopc_Mem(0x00,src2),
OpcP, RMopc_Mem(secondary,src1),
- Pop_Mem_FPR(dst) );
+ Pop_Mem_FPR(dst),
+ ClearInstMark );
ins_pipe( fpu_mem_mem_mem );
%}
@@ -10406,9 +10367,9 @@ instruct mulFPR_reg_load1(regFPR dst, regFPR src, memory mem1 ) %{
"FMUL ST,$src\n\t"
"FSTP $dst" %}
opcode(0xD8, 0x1, 0xD9); /* D8 C8+i or D8 /1 */ /* LoadF D9 /0 */
- ins_encode( Opcode(tertiary), RMopc_Mem(0x00,mem1),
+ ins_encode( SetInstMark, Opcode(tertiary), RMopc_Mem(0x00,mem1),
OpcReg_FPR(src),
- Pop_Reg_FPR(dst) );
+ Pop_Reg_FPR(dst), ClearInstMark );
ins_pipe( fpu_reg_reg_mem );
%}
//
@@ -10424,10 +10385,10 @@ instruct addFPR_mulFPR_reg_load1(regFPR dst, memory mem1, regFPR src1, regFPR sr
"FADD ST,$src2\n\t"
"FSTP $dst" %}
opcode(0xD9); /* LoadF D9 /0 */
- ins_encode( OpcP, RMopc_Mem(0x00,mem1),
+ ins_encode( SetInstMark, OpcP, RMopc_Mem(0x00,mem1),
FMul_ST_reg(src1),
FAdd_ST_reg(src2),
- Pop_Reg_FPR(dst) );
+ Pop_Reg_FPR(dst), ClearInstMark );
ins_pipe( fpu_reg_mem_reg_reg );
%}
@@ -10966,8 +10927,8 @@ instruct convI2DPR_mem(regDPR dst, memory mem) %{
format %{ "FILD $mem\n\t"
"FSTP $dst" %}
opcode(0xDB); /* DB /0 */
- ins_encode( OpcP, RMopc_Mem(0x00,mem),
- Pop_Reg_DPR(dst));
+ ins_encode( SetInstMark, OpcP, RMopc_Mem(0x00,mem),
+ Pop_Reg_DPR(dst), ClearInstMark);
ins_pipe( fpu_reg_mem );
%}
@@ -11004,8 +10965,8 @@ instruct convI2FPR_SSF_mem(stackSlotF dst, memory mem) %{
format %{ "FILD $mem\n\t"
"FSTP_S $dst" %}
opcode(0xDB); /* DB /0 */
- ins_encode( OpcP, RMopc_Mem(0x00,mem),
- Pop_Mem_FPR(dst));
+ ins_encode( SetInstMark, OpcP, RMopc_Mem(0x00,mem),
+ Pop_Mem_FPR(dst), ClearInstMark);
ins_pipe( fpu_mem_mem );
%}
@@ -11028,8 +10989,8 @@ instruct convI2FPR_mem(regFPR dst, memory mem) %{
format %{ "FILD $mem\n\t"
"FSTP $dst" %}
opcode(0xDB); /* DB /0 */
- ins_encode( OpcP, RMopc_Mem(0x00,mem),
- Pop_Reg_FPR(dst));
+ ins_encode( SetInstMark, OpcP, RMopc_Mem(0x00,mem),
+ Pop_Reg_FPR(dst), ClearInstMark);
ins_pipe( fpu_reg_mem );
%}
@@ -11227,8 +11188,8 @@ instruct MoveI2FPR_stack_reg(regFPR dst, stackSlotI src) %{
format %{ "FLD_S $src\n\t"
"FSTP $dst\t# MoveI2F_stack_reg" %}
opcode(0xD9); /* D9 /0, FLD m32real */
- ins_encode( OpcP, RMopc_Mem_no_oop(0x00,src),
- Pop_Reg_FPR(dst) );
+ ins_encode( SetInstMark, OpcP, RMopc_Mem_no_oop(0x00,src),
+ Pop_Reg_FPR(dst), ClearInstMark );
ins_pipe( fpu_reg_mem );
%}
@@ -11266,7 +11227,7 @@ instruct MoveD2L_stack_reg(eRegL dst, stackSlotD src) %{
format %{ "MOV $dst.lo,$src\n\t"
"MOV $dst.hi,$src+4\t# MoveD2L_stack_reg" %}
opcode(0x8B, 0x8B);
- ins_encode( OpcP, RegMem(dst,src), OpcS, RegMem_Hi(dst,src));
+ ins_encode( SetInstMark, OpcP, RegMem(dst,src), OpcS, RegMem_Hi(dst,src), ClearInstMark);
ins_pipe( ialu_mem_long_reg );
%}
@@ -11317,7 +11278,7 @@ instruct MoveL2D_reg_stack(stackSlotD dst, eRegL src) %{
format %{ "MOV $dst,$src.lo\n\t"
"MOV $dst+4,$src.hi\t# MoveL2D_reg_stack" %}
opcode(0x89, 0x89);
- ins_encode( OpcP, RegMem( src, dst ), OpcS, RegMem_Hi( src, dst ) );
+ ins_encode( SetInstMark, OpcP, RegMem( src, dst ), OpcS, RegMem_Hi( src, dst ), ClearInstMark );
ins_pipe( ialu_mem_long_reg );
%}
@@ -11331,8 +11292,8 @@ instruct MoveL2DPR_stack_reg(regDPR dst, stackSlotL src) %{
format %{ "FLD_D $src\n\t"
"FSTP $dst\t# MoveL2D_stack_reg" %}
opcode(0xDD); /* DD /0, FLD m64real */
- ins_encode( OpcP, RMopc_Mem_no_oop(0x00,src),
- Pop_Reg_DPR(dst) );
+ ins_encode( SetInstMark, OpcP, RMopc_Mem_no_oop(0x00,src),
+ Pop_Reg_DPR(dst), ClearInstMark );
ins_pipe( fpu_reg_mem );
%}
@@ -12245,7 +12206,7 @@ instruct compI_eReg_mem(eFlagsReg cr, rRegI op1, memory op2) %{
format %{ "CMP $op1,$op2" %}
ins_cost(500);
opcode(0x3B); /* Opcode 3B /r */
- ins_encode( OpcP, RegMem( op1, op2) );
+ ins_encode( SetInstMark, OpcP, RegMem( op1, op2), ClearInstMark );
ins_pipe( ialu_cr_reg_mem );
%}
@@ -12273,7 +12234,7 @@ instruct testI_reg_mem( eFlagsReg cr, rRegI src, memory mem, immI_0 zero ) %{
format %{ "TEST $src,$mem" %}
opcode(0x85);
- ins_encode( OpcP, RegMem( src, mem ) );
+ ins_encode( SetInstMark, OpcP, RegMem( src, mem ), ClearInstMark );
ins_pipe( ialu_cr_reg_mem );
%}
@@ -12304,7 +12265,7 @@ instruct compU_eReg_mem(eFlagsRegU cr, rRegI op1, memory op2) %{
format %{ "CMPu $op1,$op2" %}
ins_cost(500);
opcode(0x3B); /* Opcode 3B /r */
- ins_encode( OpcP, RegMem( op1, op2) );
+ ins_encode( SetInstMark, OpcP, RegMem( op1, op2), ClearInstMark );
ins_pipe( ialu_cr_reg_mem );
%}
@@ -12342,7 +12303,7 @@ instruct compP_eReg_imm(eFlagsRegU cr, eRegP op1, immP op2) %{
format %{ "CMPu $op1,$op2" %}
opcode(0x81,0x07); /* Opcode 81 /7 */
- ins_encode( OpcSErm( op1, op2 ), Con8or32( op2 ) );
+ ins_encode( SetInstMark, OpcSErm( op1, op2 ), Con8or32( op2 ), ClearInstMark );
ins_pipe( ialu_cr_reg_imm );
%}
@@ -12353,7 +12314,7 @@ instruct compP_eReg_mem(eFlagsRegU cr, eRegP op1, memory op2) %{
format %{ "CMPu $op1,$op2" %}
ins_cost(500);
opcode(0x3B); /* Opcode 3B /r */
- ins_encode( OpcP, RegMem( op1, op2) );
+ ins_encode( SetInstMark, OpcP, RegMem( op1, op2), ClearInstMark );
ins_pipe( ialu_cr_reg_mem );
%}
@@ -12376,7 +12337,7 @@ instruct compP_mem_eReg( eFlagsRegU cr, eRegP op1, memory op2 ) %{
format %{ "CMPu $op1,$op2" %}
opcode(0x3B); /* Opcode 3B /r */
- ins_encode( OpcP, RegMem( op1, op2) );
+ ins_encode( SetInstMark, OpcP, RegMem( op1, op2), ClearInstMark );
ins_pipe( ialu_cr_reg_mem );
%}
@@ -12401,7 +12362,7 @@ instruct testP_Reg_mem( eFlagsReg cr, memory op, immI_0 zero ) %{
format %{ "TEST $op,0xFFFFFFFF" %}
ins_cost(500);
opcode(0xF7); /* Opcode F7 /0 */
- ins_encode( OpcP, RMopc_Mem(0x00,op), Con_d32(0xFFFFFFFF) );
+ ins_encode( SetInstMark, OpcP, RMopc_Mem(0x00,op), Con_d32(0xFFFFFFFF), ClearInstMark );
ins_pipe( ialu_cr_reg_imm );
%}
@@ -12969,7 +12930,7 @@ instruct cmovLL_mem_LTGE(cmpOp cmp, flagsReg_long_LTGE flags, eRegL dst, load_lo
format %{ "CMOV$cmp $dst.lo,$src.lo\n\t"
"CMOV$cmp $dst.hi,$src.hi" %}
opcode(0x0F,0x40);
- ins_encode( enc_cmov(cmp), RegMem(dst, src), enc_cmov(cmp), RegMem_Hi(dst, src) );
+ ins_encode( SetInstMark, enc_cmov(cmp), RegMem(dst, src), enc_cmov(cmp), RegMem_Hi(dst, src), ClearInstMark );
ins_pipe( pipe_cmov_reg_long );
%}
@@ -13008,7 +12969,7 @@ instruct cmovII_mem_LTGE(cmpOp cmp, flagsReg_long_LTGE flags, rRegI dst, memory
ins_cost(250);
format %{ "CMOV$cmp $dst,$src" %}
opcode(0x0F,0x40);
- ins_encode( enc_cmov(cmp), RegMem( dst, src ) );
+ ins_encode( SetInstMark, enc_cmov(cmp), RegMem( dst, src ), ClearInstMark );
ins_pipe( pipe_cmov_mem );
%}
@@ -13178,7 +13139,7 @@ instruct cmovLL_mem_EQNE(cmpOp cmp, flagsReg_long_EQNE flags, eRegL dst, load_lo
format %{ "CMOV$cmp $dst.lo,$src.lo\n\t"
"CMOV$cmp $dst.hi,$src.hi" %}
opcode(0x0F,0x40);
- ins_encode( enc_cmov(cmp), RegMem(dst, src), enc_cmov(cmp), RegMem_Hi(dst, src) );
+ ins_encode( SetInstMark, enc_cmov(cmp), RegMem(dst, src), enc_cmov(cmp), RegMem_Hi(dst, src), ClearInstMark );
ins_pipe( pipe_cmov_reg_long );
%}
@@ -13199,7 +13160,7 @@ instruct cmovII_mem_EQNE(cmpOp cmp, flagsReg_long_EQNE flags, rRegI dst, memory
ins_cost(250);
format %{ "CMOV$cmp $dst,$src" %}
opcode(0x0F,0x40);
- ins_encode( enc_cmov(cmp), RegMem( dst, src ) );
+ ins_encode( SetInstMark, enc_cmov(cmp), RegMem( dst, src ), ClearInstMark );
ins_pipe( pipe_cmov_mem );
%}
@@ -13379,7 +13340,7 @@ instruct cmovLL_mem_LEGT(cmpOp_commute cmp, flagsReg_long_LEGT flags, eRegL dst,
format %{ "CMOV$cmp $dst.lo,$src.lo\n\t"
"CMOV$cmp $dst.hi,$src.hi+4" %}
opcode(0x0F,0x40);
- ins_encode( enc_cmov(cmp), RegMem(dst, src), enc_cmov(cmp), RegMem_Hi(dst, src) );
+ ins_encode( SetInstMark, enc_cmov(cmp), RegMem(dst, src), enc_cmov(cmp), RegMem_Hi(dst, src), ClearInstMark );
ins_pipe( pipe_cmov_reg_long );
%}
@@ -13418,7 +13379,7 @@ instruct cmovII_mem_LEGT(cmpOp_commute cmp, flagsReg_long_LEGT flags, rRegI dst,
ins_cost(250);
format %{ "CMOV$cmp $dst,$src" %}
opcode(0x0F,0x40);
- ins_encode( enc_cmov(cmp), RegMem( dst, src ) );
+ ins_encode( SetInstMark, enc_cmov(cmp), RegMem( dst, src ), ClearInstMark );
ins_pipe( pipe_cmov_mem );
%}
@@ -13597,6 +13558,8 @@ instruct Ret() %{
// Also known as an 'interprocedural jump'.
// Target of jump will eventually return to caller.
// TailJump below removes the return address.
+// Don't use ebp for 'jump_target' because a MachEpilogNode has already been
+// emitted just above the TailCall which has reset ebp to the caller state.
instruct TailCalljmpInd(eRegP_no_EBP jump_target, eBXRegP method_ptr) %{
match(TailCall jump_target method_ptr);
ins_cost(300);
@@ -13765,7 +13728,9 @@ instruct safePoint_poll_tls(eFlagsReg cr, eRegP_no_EBP poll) %{
// EBP would need size(3)
size(2); /* setting an explicit size will cause debug builds to assert if size is incorrect */
ins_encode %{
+ __ set_inst_mark();
__ relocate(relocInfo::poll_type);
+ __ clear_inst_mark();
address pre_pc = __ pc();
__ testl(rax, Address($poll$$Register, 0));
address post_pc = __ pc();
diff --git a/src/hotspot/cpu/x86/x86_64.ad b/src/hotspot/cpu/x86/x86_64.ad
index 4e57f3e1bbe..5d1c513b10f 100644
--- a/src/hotspot/cpu/x86/x86_64.ad
+++ b/src/hotspot/cpu/x86/x86_64.ad
@@ -296,6 +296,9 @@ reg_class long_rcx_reg(RCX, RCX_H);
// Singleton class for RDX long register
reg_class long_rdx_reg(RDX, RDX_H);
+// Singleton class for R11 long register
+reg_class long_r11_reg(R11, R11_H);
+
// Singleton class for RAX int register
reg_class int_rax_reg(RAX);
@@ -358,7 +361,7 @@ source %{
#define RELOC_IMM64 Assembler::imm_operand
#define RELOC_DISP32 Assembler::disp32_operand
-#define __ _masm.
+#define __ masm->
RegMask _ANY_REG_mask;
RegMask _PTR_REG_mask;
@@ -519,7 +522,7 @@ int CallDynamicJavaDirectNode::compute_padding(int current_offset) const
}
// This could be in MacroAssembler but it's fairly C2 specific
-static void emit_cmpfp_fixup(MacroAssembler& _masm) {
+static void emit_cmpfp_fixup(MacroAssembler* masm) {
Label exit;
__ jccb(Assembler::noParity, exit);
__ pushf();
@@ -539,7 +542,7 @@ static void emit_cmpfp_fixup(MacroAssembler& _masm) {
__ bind(exit);
}
-static void emit_cmpfp3(MacroAssembler& _masm, Register dst) {
+static void emit_cmpfp3(MacroAssembler* masm, Register dst) {
Label done;
__ movl(dst, -1);
__ jcc(Assembler::parity, done);
@@ -558,7 +561,7 @@ static void emit_cmpfp3(MacroAssembler& _masm, Register dst) {
// je #
// |-jz -> a | b # a & b
// | -> a #
-static void emit_fp_min_max(MacroAssembler& _masm, XMMRegister dst,
+static void emit_fp_min_max(MacroAssembler* masm, XMMRegister dst,
XMMRegister a, XMMRegister b,
XMMRegister xmmt, Register rt,
bool min, bool single) {
@@ -643,7 +646,7 @@ void MachConstantBaseNode::postalloc_expand(GrowableArray *nodes, Phase
ShouldNotReachHere();
}
-void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
+void MachConstantBaseNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const {
// Empty encoding
}
@@ -719,9 +722,8 @@ void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
}
#endif
-void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
Compile* C = ra_->C;
- C2_MacroAssembler _masm(&cbuf);
int framesize = C->output()->frame_size_in_bytes();
int bangsize = C->output()->bang_size_in_bytes();
@@ -743,7 +745,7 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
__ verified_entry(framesize, C->output()->need_stack_bang(bangsize)?bangsize:0, false, C->stub_function() != nullptr);
- C->output()->set_frame_complete(cbuf.insts_size());
+ C->output()->set_frame_complete(__ offset());
if (C->has_mach_constant_base_node()) {
// NOTE: We set the table base offset here because users might be
@@ -795,10 +797,9 @@ void MachEpilogNode::format(PhaseRegAlloc* ra_, outputStream* st) const
}
#endif
-void MachEpilogNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
+void MachEpilogNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const
{
Compile* C = ra_->C;
- MacroAssembler _masm(&cbuf);
if (generate_vzeroupper(C)) {
// Clear upper bits of YMM registers when current compiled code uses
@@ -825,7 +826,6 @@ void MachEpilogNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
}
if (do_polling() && C->is_method_compilation()) {
- MacroAssembler _masm(&cbuf);
Label dummy_label;
Label* code_stub = &dummy_label;
if (!C->output()->in_scratch_emit_size()) {
@@ -881,16 +881,15 @@ static enum RC rc_class(OptoReg::Name reg)
}
// Next two methods are shared by 32- and 64-bit VM. They are defined in x86.ad.
-static void vec_mov_helper(CodeBuffer *cbuf, int src_lo, int dst_lo,
+static void vec_mov_helper(C2_MacroAssembler *masm, int src_lo, int dst_lo,
int src_hi, int dst_hi, uint ireg, outputStream* st);
-void vec_spill_helper(CodeBuffer *cbuf, bool is_load,
+void vec_spill_helper(C2_MacroAssembler *masm, bool is_load,
int stack_offset, int reg, uint ireg, outputStream* st);
-static void vec_stack_to_stack_helper(CodeBuffer *cbuf, int src_offset,
+static void vec_stack_to_stack_helper(C2_MacroAssembler *masm, int src_offset,
int dst_offset, uint ireg, outputStream* st) {
- if (cbuf) {
- MacroAssembler _masm(cbuf);
+ if (masm) {
switch (ireg) {
case Op_VecS:
__ movq(Address(rsp, -8), rax);
@@ -966,11 +965,11 @@ static void vec_stack_to_stack_helper(CodeBuffer *cbuf, int src_offset,
}
}
-uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
+uint MachSpillCopyNode::implementation(C2_MacroAssembler* masm,
PhaseRegAlloc* ra_,
bool do_size,
outputStream* st) const {
- assert(cbuf != nullptr || st != nullptr, "sanity");
+ assert(masm != nullptr || st != nullptr, "sanity");
// Get registers to move
OptoReg::Name src_second = ra_->get_reg_second(in(1));
OptoReg::Name src_first = ra_->get_reg_first(in(1));
@@ -997,15 +996,15 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
// mem -> mem
int src_offset = ra_->reg2offset(src_first);
int dst_offset = ra_->reg2offset(dst_first);
- vec_stack_to_stack_helper(cbuf, src_offset, dst_offset, ireg, st);
+ vec_stack_to_stack_helper(masm, src_offset, dst_offset, ireg, st);
} else if (src_first_rc == rc_float && dst_first_rc == rc_float ) {
- vec_mov_helper(cbuf, src_first, dst_first, src_second, dst_second, ireg, st);
+ vec_mov_helper(masm, src_first, dst_first, src_second, dst_second, ireg, st);
} else if (src_first_rc == rc_float && dst_first_rc == rc_stack ) {
int stack_offset = ra_->reg2offset(dst_first);
- vec_spill_helper(cbuf, false, stack_offset, src_first, ireg, st);
+ vec_spill_helper(masm, false, stack_offset, src_first, ireg, st);
} else if (src_first_rc == rc_stack && dst_first_rc == rc_float ) {
int stack_offset = ra_->reg2offset(src_first);
- vec_spill_helper(cbuf, true, stack_offset, dst_first, ireg, st);
+ vec_spill_helper(masm, true, stack_offset, dst_first, ireg, st);
} else {
ShouldNotReachHere();
}
@@ -1021,8 +1020,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
// 64-bit
int src_offset = ra_->reg2offset(src_first);
int dst_offset = ra_->reg2offset(dst_first);
- if (cbuf) {
- MacroAssembler _masm(cbuf);
+ if (masm) {
__ pushq(Address(rsp, src_offset));
__ popq (Address(rsp, dst_offset));
#ifndef PRODUCT
@@ -1039,8 +1037,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
// No pushl/popl, so:
int src_offset = ra_->reg2offset(src_first);
int dst_offset = ra_->reg2offset(dst_first);
- if (cbuf) {
- MacroAssembler _masm(cbuf);
+ if (masm) {
__ movq(Address(rsp, -8), rax);
__ movl(rax, Address(rsp, src_offset));
__ movl(Address(rsp, dst_offset), rax);
@@ -1062,8 +1059,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
(dst_first & 1) == 0 && dst_first + 1 == dst_second) {
// 64-bit
int offset = ra_->reg2offset(src_first);
- if (cbuf) {
- MacroAssembler _masm(cbuf);
+ if (masm) {
__ movq(as_Register(Matcher::_regEncode[dst_first]), Address(rsp, offset));
#ifndef PRODUCT
} else {
@@ -1077,8 +1073,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
int offset = ra_->reg2offset(src_first);
- if (cbuf) {
- MacroAssembler _masm(cbuf);
+ if (masm) {
__ movl(as_Register(Matcher::_regEncode[dst_first]), Address(rsp, offset));
#ifndef PRODUCT
} else {
@@ -1095,8 +1090,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
(dst_first & 1) == 0 && dst_first + 1 == dst_second) {
// 64-bit
int offset = ra_->reg2offset(src_first);
- if (cbuf) {
- MacroAssembler _masm(cbuf);
+ if (masm) {
__ movdbl( as_XMMRegister(Matcher::_regEncode[dst_first]), Address(rsp, offset));
#ifndef PRODUCT
} else {
@@ -1111,8 +1105,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
int offset = ra_->reg2offset(src_first);
- if (cbuf) {
- MacroAssembler _masm(cbuf);
+ if (masm) {
__ movflt( as_XMMRegister(Matcher::_regEncode[dst_first]), Address(rsp, offset));
#ifndef PRODUCT
} else {
@@ -1129,8 +1122,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
(dst_first & 1) == 0 && dst_first + 1 == dst_second) {
// 64-bit
int offset = ra_->reg2offset(src_first);
- if (cbuf) {
- MacroAssembler _masm(cbuf);
+ if (masm) {
__ kmov(as_KRegister(Matcher::_regEncode[dst_first]), Address(rsp, offset));
#ifndef PRODUCT
} else {
@@ -1150,8 +1142,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
(dst_first & 1) == 0 && dst_first + 1 == dst_second) {
// 64-bit
int offset = ra_->reg2offset(dst_first);
- if (cbuf) {
- MacroAssembler _masm(cbuf);
+ if (masm) {
__ movq(Address(rsp, offset), as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
@@ -1165,8 +1156,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
int offset = ra_->reg2offset(dst_first);
- if (cbuf) {
- MacroAssembler _masm(cbuf);
+ if (masm) {
__ movl(Address(rsp, offset), as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
@@ -1182,8 +1172,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
if ((src_first & 1) == 0 && src_first + 1 == src_second &&
(dst_first & 1) == 0 && dst_first + 1 == dst_second) {
// 64-bit
- if (cbuf) {
- MacroAssembler _masm(cbuf);
+ if (masm) {
__ movq(as_Register(Matcher::_regEncode[dst_first]),
as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
@@ -1198,8 +1187,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
// 32-bit
assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
- if (cbuf) {
- MacroAssembler _masm(cbuf);
+ if (masm) {
__ movl(as_Register(Matcher::_regEncode[dst_first]),
as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
@@ -1216,8 +1204,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
if ((src_first & 1) == 0 && src_first + 1 == src_second &&
(dst_first & 1) == 0 && dst_first + 1 == dst_second) {
// 64-bit
- if (cbuf) {
- MacroAssembler _masm(cbuf);
+ if (masm) {
__ movdq( as_XMMRegister(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
@@ -1230,8 +1217,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
// 32-bit
assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
- if (cbuf) {
- MacroAssembler _masm(cbuf);
+ if (masm) {
__ movdl( as_XMMRegister(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
@@ -1246,8 +1232,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
if ((src_first & 1) == 0 && src_first + 1 == src_second &&
(dst_first & 1) == 0 && dst_first + 1 == dst_second) {
// 64-bit
- if (cbuf) {
- MacroAssembler _masm(cbuf);
+ if (masm) {
__ kmov(as_KRegister(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
@@ -1268,8 +1253,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
(dst_first & 1) == 0 && dst_first + 1 == dst_second) {
// 64-bit
int offset = ra_->reg2offset(dst_first);
- if (cbuf) {
- MacroAssembler _masm(cbuf);
+ if (masm) {
__ movdbl( Address(rsp, offset), as_XMMRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
@@ -1283,8 +1267,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
int offset = ra_->reg2offset(dst_first);
- if (cbuf) {
- MacroAssembler _masm(cbuf);
+ if (masm) {
__ movflt(Address(rsp, offset), as_XMMRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
@@ -1300,8 +1283,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
if ((src_first & 1) == 0 && src_first + 1 == src_second &&
(dst_first & 1) == 0 && dst_first + 1 == dst_second) {
// 64-bit
- if (cbuf) {
- MacroAssembler _masm(cbuf);
+ if (masm) {
__ movdq( as_Register(Matcher::_regEncode[dst_first]), as_XMMRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
@@ -1314,8 +1296,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
// 32-bit
assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
- if (cbuf) {
- MacroAssembler _masm(cbuf);
+ if (masm) {
__ movdl( as_Register(Matcher::_regEncode[dst_first]), as_XMMRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
@@ -1331,8 +1312,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
if ((src_first & 1) == 0 && src_first + 1 == src_second &&
(dst_first & 1) == 0 && dst_first + 1 == dst_second) {
// 64-bit
- if (cbuf) {
- MacroAssembler _masm(cbuf);
+ if (masm) {
__ movdbl( as_XMMRegister(Matcher::_regEncode[dst_first]), as_XMMRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
@@ -1346,8 +1326,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
// 32-bit
assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
- if (cbuf) {
- MacroAssembler _masm(cbuf);
+ if (masm) {
__ movflt( as_XMMRegister(Matcher::_regEncode[dst_first]), as_XMMRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
@@ -1370,8 +1349,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
(dst_first & 1) == 0 && dst_first + 1 == dst_second) {
// 64-bit
int offset = ra_->reg2offset(dst_first);
- if (cbuf) {
- MacroAssembler _masm(cbuf);
+ if (masm) {
__ kmov(Address(rsp, offset), as_KRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
@@ -1386,8 +1364,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
if ((src_first & 1) == 0 && src_first + 1 == src_second &&
(dst_first & 1) == 0 && dst_first + 1 == dst_second) {
// 64-bit
- if (cbuf) {
- MacroAssembler _masm(cbuf);
+ if (masm) {
__ kmov(as_Register(Matcher::_regEncode[dst_first]), as_KRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
@@ -1403,8 +1380,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
if ((src_first & 1) == 0 && src_first + 1 == src_second &&
(dst_first & 1) == 0 && dst_first + 1 == dst_second) {
// 64-bit
- if (cbuf) {
- MacroAssembler _masm(cbuf);
+ if (masm) {
__ kmov(as_KRegister(Matcher::_regEncode[dst_first]), as_KRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
@@ -1432,8 +1408,8 @@ void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream* st) const {
}
#endif
-void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
- implementation(&cbuf, ra_, false, nullptr);
+void MachSpillCopyNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
+ implementation(masm, ra_, false, nullptr);
}
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
@@ -1451,13 +1427,12 @@ void BoxLockNode::format(PhaseRegAlloc* ra_, outputStream* st) const
}
#endif
-void BoxLockNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
+void BoxLockNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const
{
int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
int reg = ra_->get_encode(this);
- MacroAssembler masm(&cbuf);
- masm.lea(as_Register(reg), Address(rsp, offset));
+ __ lea(as_Register(reg), Address(rsp, offset));
}
uint BoxLockNode::size(PhaseRegAlloc *ra_) const
@@ -1481,10 +1456,9 @@ void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
}
#endif
-void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
+void MachUEPNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const
{
- MacroAssembler masm(&cbuf);
- masm.ic_check(InteriorEntryAlignment);
+ __ ic_check(InteriorEntryAlignment);
}
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
@@ -1663,7 +1637,6 @@ encode %{
// [REX_B]
// f: f7 f9 idiv $div
// 0000000000000011 :
- MacroAssembler _masm(&cbuf);
Label normal;
Label done;
@@ -1719,7 +1692,6 @@ encode %{
// 17: 48 99 cqto
// 19: 48 f7 f9 idiv $div
// 000000000000001c :
- MacroAssembler _masm(&cbuf);
Label normal;
Label done;
@@ -1761,7 +1733,6 @@ encode %{
Label miss;
const bool set_cond_codes = true;
- MacroAssembler _masm(&cbuf);
__ check_klass_subtype_slow_path(Rrsi, Rrax, Rrcx, Rrdi,
nullptr, &miss,
/*set_cond_codes:*/ true);
@@ -1772,21 +1743,19 @@ encode %{
%}
enc_class clear_avx %{
- debug_only(int off0 = cbuf.insts_size());
+ debug_only(int off0 = __ offset());
if (generate_vzeroupper(Compile::current())) {
// Clear upper bits of YMM registers to avoid AVX <-> SSE transition penalty
// Clear upper bits of YMM registers when current compiled code uses
// wide vectors to avoid AVX <-> SSE transition penalty during call.
- MacroAssembler _masm(&cbuf);
__ vzeroupper();
}
- debug_only(int off1 = cbuf.insts_size());
+ debug_only(int off1 = __ offset());
assert(off1 - off0 == clear_avx_size(), "correct size prediction");
%}
enc_class Java_To_Runtime(method meth) %{
// No relocation needed
- MacroAssembler _masm(&cbuf);
__ mov64(r10, (int64_t) $meth$$method);
__ call(r10);
__ post_call_nop();
@@ -1797,8 +1766,6 @@ encode %{
// JAVA STATIC CALL
// CALL to fixup routine. Fixup routine uses ScopeDesc info to
// determine who we intended to call.
- MacroAssembler _masm(&cbuf);
-
if (!_method) {
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, $meth$$method)));
} else if (_method->intrinsic_id() == vmIntrinsicID::_ensureMaterializedForStackWalk) {
@@ -1807,7 +1774,7 @@ encode %{
__ addr_nop_5();
__ block_comment("call JVM_EnsureMaterializedForStackWalk (elided)");
} else {
- int method_index = resolved_method_index(cbuf);
+ int method_index = resolved_method_index(masm);
RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
: static_call_Relocation::spec(method_index);
address mark = __ pc();
@@ -1816,10 +1783,11 @@ encode %{
if (CodeBuffer::supports_shared_stubs() && _method->can_be_statically_bound()) {
// Calls of the same statically bound method can share
// a stub to the interpreter.
- cbuf.shared_stub_to_interp_for(_method, call_offset);
+ __ code()->shared_stub_to_interp_for(_method, call_offset);
} else {
// Emit stubs for static call.
- address stub = CompiledDirectCall::emit_to_interp_stub(cbuf, mark);
+ address stub = CompiledDirectCall::emit_to_interp_stub(masm, mark);
+ __ clear_inst_mark();
if (stub == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
@@ -1830,8 +1798,7 @@ encode %{
%}
enc_class Java_Dynamic_Call(method meth) %{
- MacroAssembler _masm(&cbuf);
- __ ic_call((address)$meth$$method, resolved_method_index(cbuf));
+ __ ic_call((address)$meth$$method, resolved_method_index(masm));
__ post_call_nop();
%}
@@ -2703,6 +2670,16 @@ operand rdx_RegL()
interface(REG_INTER);
%}
+operand r11_RegL()
+%{
+ constraint(ALLOC_IN_RC(long_r11_reg));
+ match(RegL);
+ match(rRegL);
+
+ format %{ %}
+ interface(REG_INTER);
+%}
+
operand no_rbp_r13_RegL()
%{
constraint(ALLOC_IN_RC(long_no_rbp_r13_reg));
@@ -4351,7 +4328,7 @@ instruct maxF_reduction_reg(legRegF dst, legRegF a, legRegF b, legRegF xmmt, rRe
format %{ "$dst = max($a, $b)\t# intrinsic (float)" %}
ins_encode %{
- emit_fp_min_max(_masm, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $xmmt$$XMMRegister, $tmp$$Register,
+ emit_fp_min_max(masm, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $xmmt$$XMMRegister, $tmp$$Register,
false /*min*/, true /*single*/);
%}
ins_pipe( pipe_slow );
@@ -4376,7 +4353,7 @@ instruct maxD_reduction_reg(legRegD dst, legRegD a, legRegD b, legRegD xmmt, rRe
format %{ "$dst = max($a, $b)\t# intrinsic (double)" %}
ins_encode %{
- emit_fp_min_max(_masm, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $xmmt$$XMMRegister, $tmp$$Register,
+ emit_fp_min_max(masm, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $xmmt$$XMMRegister, $tmp$$Register,
false /*min*/, false /*single*/);
%}
ins_pipe( pipe_slow );
@@ -4401,7 +4378,7 @@ instruct minF_reduction_reg(legRegF dst, legRegF a, legRegF b, legRegF xmmt, rRe
format %{ "$dst = min($a, $b)\t# intrinsic (float)" %}
ins_encode %{
- emit_fp_min_max(_masm, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $xmmt$$XMMRegister, $tmp$$Register,
+ emit_fp_min_max(masm, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $xmmt$$XMMRegister, $tmp$$Register,
true /*min*/, true /*single*/);
%}
ins_pipe( pipe_slow );
@@ -4426,7 +4403,7 @@ instruct minD_reduction_reg(legRegD dst, legRegD a, legRegD b, legRegD xmmt, rRe
format %{ "$dst = min($a, $b)\t# intrinsic (double)" %}
ins_encode %{
- emit_fp_min_max(_masm, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $xmmt$$XMMRegister, $tmp$$Register,
+ emit_fp_min_max(masm, $dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, $xmmt$$XMMRegister, $tmp$$Register,
true /*min*/, false /*single*/);
%}
ins_pipe( pipe_slow );
@@ -9732,7 +9709,7 @@ instruct cmpF_cc_reg(rFlagsRegU cr, regF src1, regF src2)
"exit:" %}
ins_encode %{
__ ucomiss($src1$$XMMRegister, $src2$$XMMRegister);
- emit_cmpfp_fixup(_masm);
+ emit_cmpfp_fixup(masm);
%}
ins_pipe(pipe_slow);
%}
@@ -9783,7 +9760,7 @@ instruct cmpD_cc_reg(rFlagsRegU cr, regD src1, regD src2)
"exit:" %}
ins_encode %{
__ ucomisd($src1$$XMMRegister, $src2$$XMMRegister);
- emit_cmpfp_fixup(_masm);
+ emit_cmpfp_fixup(masm);
%}
ins_pipe(pipe_slow);
%}
@@ -9836,7 +9813,7 @@ instruct cmpF_reg(rRegI dst, regF src1, regF src2, rFlagsReg cr)
"done:" %}
ins_encode %{
__ ucomiss($src1$$XMMRegister, $src2$$XMMRegister);
- emit_cmpfp3(_masm, $dst$$Register);
+ emit_cmpfp3(masm, $dst$$Register);
%}
ins_pipe(pipe_slow);
%}
@@ -9857,7 +9834,7 @@ instruct cmpF_mem(rRegI dst, regF src1, memory src2, rFlagsReg cr)
"done:" %}
ins_encode %{
__ ucomiss($src1$$XMMRegister, $src2$$Address);
- emit_cmpfp3(_masm, $dst$$Register);
+ emit_cmpfp3(masm, $dst$$Register);
%}
ins_pipe(pipe_slow);
%}
@@ -9877,7 +9854,7 @@ instruct cmpF_imm(rRegI dst, regF src, immF con, rFlagsReg cr) %{
"done:" %}
ins_encode %{
__ ucomiss($src$$XMMRegister, $constantaddress($con));
- emit_cmpfp3(_masm, $dst$$Register);
+ emit_cmpfp3(masm, $dst$$Register);
%}
ins_pipe(pipe_slow);
%}
@@ -9898,7 +9875,7 @@ instruct cmpD_reg(rRegI dst, regD src1, regD src2, rFlagsReg cr)
"done:" %}
ins_encode %{
__ ucomisd($src1$$XMMRegister, $src2$$XMMRegister);
- emit_cmpfp3(_masm, $dst$$Register);
+ emit_cmpfp3(masm, $dst$$Register);
%}
ins_pipe(pipe_slow);
%}
@@ -9919,7 +9896,7 @@ instruct cmpD_mem(rRegI dst, regD src1, memory src2, rFlagsReg cr)
"done:" %}
ins_encode %{
__ ucomisd($src1$$XMMRegister, $src2$$Address);
- emit_cmpfp3(_masm, $dst$$Register);
+ emit_cmpfp3(masm, $dst$$Register);
%}
ins_pipe(pipe_slow);
%}
@@ -9939,7 +9916,7 @@ instruct cmpD_imm(rRegI dst, regD src, immD con, rFlagsReg cr) %{
"done:" %}
ins_encode %{
__ ucomisd($src$$XMMRegister, $constantaddress($con));
- emit_cmpfp3(_masm, $dst$$Register);
+ emit_cmpfp3(masm, $dst$$Register);
%}
ins_pipe(pipe_slow);
%}
@@ -12108,6 +12085,31 @@ instruct partialSubtypeCheck(rdi_RegP result,
ins_pipe(pipe_slow);
%}
+instruct partialSubtypeCheckConstSuper(rsi_RegP sub, rax_RegP super_reg, immP super_con, rdi_RegP result,
+ rdx_RegL temp1, rcx_RegL temp2, rbx_RegP temp3, r11_RegL temp4,
+ rFlagsReg cr)
+%{
+ match(Set result (PartialSubtypeCheck sub (Binary super_reg super_con)));
+ predicate(UseSecondarySupersTable);
+ effect(KILL cr, TEMP temp1, TEMP temp2, TEMP temp3, TEMP temp4);
+
+ ins_cost(700); // smaller than the next version
+ format %{ "partialSubtypeCheck $result, $sub, super" %}
+
+ ins_encode %{
+ u1 super_klass_slot = ((Klass*)$super_con$$constant)->hash_slot();
+ if (InlineSecondarySupersTest) {
+ __ lookup_secondary_supers_table($sub$$Register, $super_reg$$Register, $temp1$$Register, $temp2$$Register,
+ $temp3$$Register, $temp4$$Register, $result$$Register,
+ super_klass_slot);
+ } else {
+ __ call(RuntimeAddress(StubRoutines::lookup_secondary_supers_table_stub(super_klass_slot)));
+ }
+ %}
+
+ ins_pipe(pipe_slow);
+%}
+
instruct partialSubtypeCheck_vs_Zero(rFlagsReg cr,
rsi_RegP sub, rax_RegP super, rcx_RegI rcx,
immP0 zero,
@@ -12467,6 +12469,8 @@ instruct Ret()
// Also known as an 'interprocedural jump'.
// Target of jump will eventually return to caller.
// TailJump below removes the return address.
+// Don't use rbp for 'jump_target' because a MachEpilogNode has already been
+// emitted just above the TailCall which has reset rbp to the caller state.
instruct TailCalljmpInd(no_rbp_RegP jump_target, rbx_RegP method_ptr)
%{
match(TailCall jump_target method_ptr);
diff --git a/test/langtools/tools/javac/diags/examples/StringTemplate.java b/src/hotspot/cpu/zero/c2_MacroAssembler_zero.hpp
similarity index 78%
rename from test/langtools/tools/javac/diags/examples/StringTemplate.java
rename to src/hotspot/cpu/zero/c2_MacroAssembler_zero.hpp
index e35cbab11e4..3efa561c8e6 100644
--- a/test/langtools/tools/javac/diags/examples/StringTemplate.java
+++ b/src/hotspot/cpu/zero/c2_MacroAssembler_zero.hpp
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -19,15 +20,12 @@
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
+ *
*/
- // key: compiler.misc.feature.string.templates
- // key: compiler.warn.preview.feature.use.plural
- // options: --enable-preview -source ${jdk.version} -Xlint:preview
+#ifndef CPU_ZERO_C2_MACROASSEMBLER_ZERO_HPP
+#define CPU_ZERO_C2_MACROASSEMBLER_ZERO_HPP
-class StringTemplate {
- String m() {
- int x = 10, y = 20;
- return STR."\{x} + \{y} = \{x + y}";
- }
-}
+// C2_MacroAssembler contains high-level macros for C2
+
+#endif // CPU_ZERO_C2_MACROASSEMBLER_ZERO_HPP
diff --git a/src/hotspot/cpu/zero/compiledIC_zero.cpp b/src/hotspot/cpu/zero/compiledIC_zero.cpp
index 24153aeacc5..7db93a5f3cf 100644
--- a/src/hotspot/cpu/zero/compiledIC_zero.cpp
+++ b/src/hotspot/cpu/zero/compiledIC_zero.cpp
@@ -42,7 +42,7 @@
// ----------------------------------------------------------------------------
-address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
+address CompiledDirectCall::emit_to_interp_stub(MacroAssembler *masm, address mark) {
ShouldNotReachHere(); // Only needed for COMPILER2.
return nullptr;
}
diff --git a/src/hotspot/cpu/zero/relocInfo_zero.cpp b/src/hotspot/cpu/zero/relocInfo_zero.cpp
index 1e9fdc1081d..b926f20cfe7 100644
--- a/src/hotspot/cpu/zero/relocInfo_zero.cpp
+++ b/src/hotspot/cpu/zero/relocInfo_zero.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -30,7 +30,7 @@
#include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp"
-void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
+void Relocation::pd_set_data_value(address x, bool verify_only) {
ShouldNotCallThis();
}
diff --git a/src/hotspot/cpu/zero/stubGenerator_zero.cpp b/src/hotspot/cpu/zero/stubGenerator_zero.cpp
index c2d6e82622c..5c7772021ea 100644
--- a/src/hotspot/cpu/zero/stubGenerator_zero.cpp
+++ b/src/hotspot/cpu/zero/stubGenerator_zero.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2010, 2015 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -148,6 +148,9 @@ class StubGenerator: public StubCodeGenerator {
// Shared code tests for "null" to discover the stub is not generated.
StubRoutines::_unsafe_arraycopy = nullptr;
+ // Shared code tests for "null" to discover the stub is not generated.
+ StubRoutines::_unsafe_setmemory = nullptr;
+
// We don't generate specialized code for HeapWord-aligned source
// arrays, so just use the code we've already generated
StubRoutines::_arrayof_jbyte_disjoint_arraycopy =
diff --git a/src/hotspot/os/aix/loadlib_aix.cpp b/src/hotspot/os/aix/loadlib_aix.cpp
index 107f2783ff5..bc21aef3836 100644
--- a/src/hotspot/os/aix/loadlib_aix.cpp
+++ b/src/hotspot/os/aix/loadlib_aix.cpp
@@ -117,8 +117,8 @@ static void print_entry(const loaded_module_t* lm, outputStream* os) {
", data: " INTPTR_FORMAT " - " INTPTR_FORMAT " "
"%s",
(lm->is_in_vm ? '*' : ' '),
- lm->text, (uintptr_t)lm->text + lm->text_len,
- lm->data, (uintptr_t)lm->data + lm->data_len,
+ p2i(lm->text), (uintptr_t)lm->text + lm->text_len,
+ p2i(lm->data), (uintptr_t)lm->data + lm->data_len,
lm->path);
if (lm->member) {
os->print("(%s)", lm->member);
diff --git a/src/hotspot/os/aix/os_aix.cpp b/src/hotspot/os/aix/os_aix.cpp
index 3309d3aa77e..f0a984d3d1f 100644
--- a/src/hotspot/os/aix/os_aix.cpp
+++ b/src/hotspot/os/aix/os_aix.cpp
@@ -23,10 +23,6 @@
*
*/
-// According to the AIX OS doc #pragma alloca must be used
-// with C++ compiler before referencing the function alloca()
-#pragma alloca
-
// no precompiled headers
#include "classfile/vmSymbols.hpp"
#include "code/vtableStubs.hpp"
@@ -291,46 +287,6 @@ julong os::physical_memory() {
return Aix::physical_memory();
}
-// Helper function, emulates disclaim64 using multiple 32bit disclaims
-// because we cannot use disclaim64() on old AIX releases.
-static bool my_disclaim64(char* addr, size_t size) {
-
- if (size == 0) {
- return true;
- }
-
- // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
- const unsigned int maxDisclaimSize = 0x40000000;
-
- const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
- const unsigned int lastDisclaimSize = (size % maxDisclaimSize);
-
- char* p = addr;
-
- for (unsigned int i = 0; i < numFullDisclaimsNeeded; i ++) {
- if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
- ErrnoPreserver ep;
- log_trace(os, map)("disclaim failed: " RANGEFMT " errno=(%s)",
- RANGEFMTARGS(p, maxDisclaimSize),
- os::strerror(ep.saved_errno()));
- return false;
- }
- p += maxDisclaimSize;
- }
-
- if (lastDisclaimSize > 0) {
- if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
- ErrnoPreserver ep;
- log_trace(os, map)("disclaim failed: " RANGEFMT " errno=(%s)",
- RANGEFMTARGS(p, lastDisclaimSize),
- os::strerror(ep.saved_errno()));
- return false;
- }
- }
-
- return true;
-}
-
// Cpu architecture string
#if defined(PPC32)
static char cpu_arch[] = "ppc";
@@ -646,8 +602,8 @@ static void *thread_native_entry(Thread *thread) {
address low_address = thread->stack_end();
address high_address = thread->stack_base();
lt.print("Thread is alive (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT
- ", stack [" PTR_FORMAT " - " PTR_FORMAT " (" SIZE_FORMAT "k using %uk pages)).",
- os::current_thread_id(), (uintx) kernel_thread_id, low_address, high_address,
+ ", stack [" PTR_FORMAT " - " PTR_FORMAT " (" SIZE_FORMAT "k using %luk pages)).",
+ os::current_thread_id(), (uintx) kernel_thread_id, p2i(low_address), p2i(high_address),
(high_address - low_address) / K, os::Aix::query_pagesize(low_address) / K);
}
@@ -1394,8 +1350,8 @@ struct vmembk_t {
void print_on(outputStream* os) const {
os->print("[" PTR_FORMAT " - " PTR_FORMAT "] (" UINTX_FORMAT
- " bytes, %d %s pages), %s",
- addr, addr + size - 1, size, size / pagesize, describe_pagesize(pagesize),
+ " bytes, %ld %s pages), %s",
+ p2i(addr), p2i(addr) + size - 1, size, size / pagesize, describe_pagesize(pagesize),
(type == VMEM_SHMATED ? "shmat" : "mmap")
);
}
@@ -1597,10 +1553,11 @@ static bool uncommit_shmated_memory(char* addr, size_t size) {
trcVerbose("uncommit_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
p2i(addr), p2i(addr + size - 1));
- const bool rc = my_disclaim64(addr, size);
+ const int rc = disclaim64(addr, size, DISCLAIM_ZEROMEM);
- if (!rc) {
- log_warning(os)("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", p2i(addr), size);
+ if (rc != 0) {
+ ErrnoPreserver ep;
+ log_warning(os)("disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed, %s\n", p2i(addr), size, os::strerror(ep.saved_errno()));
return false;
}
return true;
@@ -1973,12 +1930,12 @@ static bool checked_mprotect(char* addr, size_t size, int prot) {
//
// See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm
- Events::log(nullptr, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(addr), p2i(addr+size), prot);
+ Events::log_memprotect(nullptr, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(addr), p2i(addr+size), prot);
bool rc = ::mprotect(addr, size, prot) == 0 ? true : false;
if (!rc) {
const char* const s_errno = os::errno_name(errno);
- warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
+ warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", p2i(addr), p2i(addr) + size, prot, s_errno);
return false;
}
@@ -2102,15 +2059,6 @@ size_t os::vm_min_address() {
return _vm_min_address_default;
}
-// Used to convert frequent JVM_Yield() to nops
-bool os::dont_yield() {
- return DontYieldALot;
-}
-
-void os::naked_yield() {
- sched_yield();
-}
-
////////////////////////////////////////////////////////////////////////////////
// thread priority support
@@ -2395,7 +2343,7 @@ void os::set_native_thread_name(const char *name) {
bool os::find(address addr, outputStream* st) {
- st->print(PTR_FORMAT ": ", addr);
+ st->print(PTR_FORMAT ": ", p2i(addr));
loaded_module_t lm;
if (LoadedLibraries::find_for_text_address(addr, &lm) ||
@@ -2494,13 +2442,6 @@ int os::open(const char *path, int oflag, int mode) {
return fd;
}
-// create binary file, rewriting existing file if required
-int os::create_binary_file(const char* path, bool rewrite_existing) {
- int oflags = O_WRONLY | O_CREAT;
- oflags |= rewrite_existing ? O_TRUNC : O_EXCL;
- return ::open(path, oflags, S_IREAD | S_IWRITE);
-}
-
// return current position of file pointer
jlong os::current_file_offset(int fd) {
return (jlong)::lseek(fd, (off_t)0, SEEK_CUR);
diff --git a/src/hotspot/os/aix/os_aix.hpp b/src/hotspot/os/aix/os_aix.hpp
index 7f4f3c7e8cc..759bc552bb7 100644
--- a/src/hotspot/os/aix/os_aix.hpp
+++ b/src/hotspot/os/aix/os_aix.hpp
@@ -120,19 +120,19 @@ class os::Aix {
struct meminfo_t {
// Amount of virtual memory (in units of 4 KB pages)
- unsigned long long virt_total;
+ size_t virt_total;
// Amount of real memory, in bytes
- unsigned long long real_total;
+ size_t real_total;
// Amount of free real memory, in bytes
- unsigned long long real_free;
+ size_t real_free;
// Total amount of paging space, in bytes
- unsigned long long pgsp_total;
+ size_t pgsp_total;
// Amount of free paging space, in bytes
- unsigned long long pgsp_free;
+ size_t pgsp_free;
};
diff --git a/src/hotspot/os/aix/porting_aix.cpp b/src/hotspot/os/aix/porting_aix.cpp
index 68233097b49..ac0cc8d53d8 100644
--- a/src/hotspot/os/aix/porting_aix.cpp
+++ b/src/hotspot/os/aix/porting_aix.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2023 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -741,7 +741,7 @@ void AixNativeCallstack::print_callstack_for_context(outputStream* st, const uco
st->print("(invalid) ");
goto cleanup;
} else {
- st->print("(base - 0x%X) ", PTRDIFF_BYTES(stack_base, cur_sp));
+ st->print("(base - 0x%lX) ", PTRDIFF_BYTES(stack_base, cur_sp));
}
st->cr();
@@ -797,7 +797,7 @@ void AixNativeCallstack::print_callstack_for_context(outputStream* st, const uco
st->print_cr("trying to recover and find backchain...");
sp = try_find_backchain(sp_last, stack_base, stack_size);
if (sp) {
- st->print_cr("found something which looks like a backchain at " PTR_FORMAT ", after 0x%x bytes... ",
+ st->print_cr("found something which looks like a backchain at " PTR_FORMAT ", after 0x%lx bytes... ",
p2i(sp), PTRDIFF_BYTES(sp, sp_last));
} else {
st->print_cr("did not find a backchain, giving up.");
@@ -906,10 +906,11 @@ struct TableLocker {
~TableLocker() { pthread_mutex_unlock(&g_handletable_mutex); }
};
struct handletableentry{
- void* handle;
- ino64_t inode;
- dev64_t devid;
- uint refcount;
+ void* handle;
+ ino64_t inode;
+ dev64_t devid;
+ char* member;
+ uint refcount;
};
constexpr unsigned init_num_handles = 128;
static unsigned max_handletable = 0;
@@ -1049,6 +1050,14 @@ void* Aix_dlopen(const char* filename, int Flags, const char** error_report) {
return nullptr;
}
else {
+ // Extract the member string "(name)" if the filename contains one, duplicate
+ // it and store the pointer; store nullptr if there is no member.
+ char* member = nullptr;
+ const char* substr;
+ if (filename[strlen(filename) - 1] == ')' && (substr = strrchr(filename, '('))) {
+ member = os::strdup(substr);
+ }
+
unsigned i = 0;
TableLocker lock;
// check if library belonging to filename is already loaded.
@@ -1056,7 +1065,10 @@ void* Aix_dlopen(const char* filename, int Flags, const char** error_report) {
for (i = 0; i < g_handletable_used; i++) {
if ((p_handletable + i)->handle &&
(p_handletable + i)->inode == libstat.st_ino &&
- (p_handletable + i)->devid == libstat.st_dev) {
+ (p_handletable + i)->devid == libstat.st_dev &&
+ (((p_handletable + i)->member == nullptr && member == nullptr) ||
+ ((p_handletable + i)->member != nullptr && member != nullptr &&
+ strcmp((p_handletable + i)->member, member) == 0))) {
(p_handletable + i)->refcount++;
result = (p_handletable + i)->handle;
break;
@@ -1084,6 +1096,7 @@ void* Aix_dlopen(const char* filename, int Flags, const char** error_report) {
(p_handletable + i)->handle = result;
(p_handletable + i)->inode = libstat.st_ino;
(p_handletable + i)->devid = libstat.st_dev;
+ (p_handletable + i)->member = member;
(p_handletable + i)->refcount = 1;
}
else {
@@ -1131,7 +1144,7 @@ bool os::pd_dll_unload(void* libhandle, char* ebuf, int ebuflen) {
// while in the second case we simply have to nag.
res = (0 == ::dlclose(libhandle));
if (!res) {
- // error analysis when dlopen fails
+ // error analysis when dlclose fails
const char* error_report = ::dlerror();
if (error_report == nullptr) {
error_report = "dlerror returned no error description";
@@ -1145,7 +1158,11 @@ bool os::pd_dll_unload(void* libhandle, char* ebuf, int ebuflen) {
if (i < g_handletable_used) {
if (res) {
// First case: libhandle was found (with refcount == 0) and ::dlclose successful,
- // so delete entry from array
+ // so delete entry from array (do not forget to free member-string space if member exists)
+ if ((p_handletable + i)->member) {
+ os::free((p_handletable + i)->member);
+ (p_handletable + i)->member = nullptr;
+ }
g_handletable_used--;
// If the entry was the last one of the array, the previous g_handletable_used--
// is sufficient to remove the entry from the array, otherwise we move the last
diff --git a/src/hotspot/os/bsd/attachListener_bsd.cpp b/src/hotspot/os/bsd/attachListener_bsd.cpp
deleted file mode 100644
index 2b7e93e33ed..00000000000
--- a/src/hotspot/os/bsd/attachListener_bsd.cpp
+++ /dev/null
@@ -1,548 +0,0 @@
-/*
- * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "logging/log.hpp"
-#include "os_posix.hpp"
-#include "runtime/interfaceSupport.inline.hpp"
-#include "runtime/os.inline.hpp"
-#include "services/attachListener.hpp"
-#include "utilities/checkedCast.hpp"
-
-#include <unistd.h>
-#include <signal.h>
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <sys/un.h>
-#include <sys/stat.h>
-
-#ifndef UNIX_PATH_MAX
-#define UNIX_PATH_MAX sizeof(((struct sockaddr_un *)0)->sun_path)
-#endif
-
-// The attach mechanism on Bsd uses a UNIX domain socket. An attach listener
-// thread is created at startup or is created on-demand via a signal from
-// the client tool. The attach listener creates a socket and binds it to a file
-// in the filesystem. The attach listener then acts as a simple (single-
-// threaded) server - it waits for a client to connect, reads the request,
-// executes it, and returns the response to the client via the socket
-// connection.
-//
-// As the socket is a UNIX domain socket it means that only clients on the
-// local machine can connect. In addition there are two other aspects to
-// the security:
-// 1. The well known file that the socket is bound to has permission 400
-// 2. When a client connect, the SO_PEERCRED socket option is used to
-// obtain the credentials of client. We check that the effective uid
-// of the client matches this process.
-
-// forward reference
-class BsdAttachOperation;
-
-class BsdAttachListener: AllStatic {
- private:
- // the path to which we bind the UNIX domain socket
- static char _path[UNIX_PATH_MAX];
- static bool _has_path;
-
- // the file descriptor for the listening socket
- static volatile int _listener;
-
- static bool _atexit_registered;
-
- // reads a request from the given connected socket
- static BsdAttachOperation* read_request(int s);
-
- public:
- enum {
- ATTACH_PROTOCOL_VER = 1 // protocol version
- };
- enum {
- ATTACH_ERROR_BADVERSION = 101 // error codes
- };
-
- static void set_path(char* path) {
- if (path == nullptr) {
- _path[0] = '\0';
- _has_path = false;
- } else {
- strncpy(_path, path, UNIX_PATH_MAX);
- _path[UNIX_PATH_MAX-1] = '\0';
- _has_path = true;
- }
- }
-
- static void set_listener(int s) { _listener = s; }
-
- // initialize the listener, returns 0 if okay
- static int init();
-
- static char* path() { return _path; }
- static bool has_path() { return _has_path; }
- static int listener() { return _listener; }
-
- // write the given buffer to a socket
- static int write_fully(int s, char* buf, size_t len);
-
- static BsdAttachOperation* dequeue();
-};
-
-class BsdAttachOperation: public AttachOperation {
- private:
- // the connection to the client
- int _socket;
-
- public:
- void complete(jint res, bufferedStream* st);
-
- void set_socket(int s) { _socket = s; }
- int socket() const { return _socket; }
-
- BsdAttachOperation(char* name) : AttachOperation(name) {
- set_socket(-1);
- }
-};
-
-// statics
-char BsdAttachListener::_path[UNIX_PATH_MAX];
-bool BsdAttachListener::_has_path;
-volatile int BsdAttachListener::_listener = -1;
-bool BsdAttachListener::_atexit_registered = false;
-
-// Supporting class to help split a buffer into individual components
-class ArgumentIterator : public StackObj {
- private:
- char* _pos;
- char* _end;
- public:
- ArgumentIterator(char* arg_buffer, size_t arg_size) {
- _pos = arg_buffer;
- _end = _pos + arg_size - 1;
- }
- char* next() {
- if (*_pos == '\0') {
- // advance the iterator if possible (null arguments)
- if (_pos < _end) {
- _pos += 1;
- }
- return nullptr;
- }
- char* res = _pos;
- char* next_pos = strchr(_pos, '\0');
- if (next_pos < _end) {
- next_pos++;
- }
- _pos = next_pos;
- return res;
- }
-};
-
-
-// atexit hook to stop listener and unlink the file that it is
-// bound too.
-extern "C" {
- static void listener_cleanup() {
- int s = BsdAttachListener::listener();
- if (s != -1) {
- BsdAttachListener::set_listener(-1);
- ::shutdown(s, SHUT_RDWR);
- ::close(s);
- }
- if (BsdAttachListener::has_path()) {
- ::unlink(BsdAttachListener::path());
- BsdAttachListener::set_path(nullptr);
- }
- }
-}
-
-// Initialization - create a listener socket and bind it to a file
-
-int BsdAttachListener::init() {
- char path[UNIX_PATH_MAX]; // socket file
- char initial_path[UNIX_PATH_MAX]; // socket file during setup
- int listener; // listener socket (file descriptor)
-
- // register function to cleanup
- if (!_atexit_registered) {
- _atexit_registered = true;
- ::atexit(listener_cleanup);
- }
-
- int n = snprintf(path, UNIX_PATH_MAX, "%s/.java_pid%d",
- os::get_temp_directory(), os::current_process_id());
- if (n < (int)UNIX_PATH_MAX) {
- n = snprintf(initial_path, UNIX_PATH_MAX, "%s.tmp", path);
- }
- if (n >= (int)UNIX_PATH_MAX) {
- return -1;
- }
-
- // create the listener socket
- listener = ::socket(PF_UNIX, SOCK_STREAM, 0);
- if (listener == -1) {
- return -1;
- }
-
- // bind socket
- struct sockaddr_un addr;
- memset((void *)&addr, 0, sizeof(addr));
- addr.sun_family = AF_UNIX;
- strcpy(addr.sun_path, initial_path);
- ::unlink(initial_path);
- int res = ::bind(listener, (struct sockaddr*)&addr, sizeof(addr));
- if (res == -1) {
- ::close(listener);
- return -1;
- }
-
- // put in listen mode, set permissions, and rename into place
- res = ::listen(listener, 5);
- if (res == 0) {
- RESTARTABLE(::chmod(initial_path, S_IREAD|S_IWRITE), res);
- if (res == 0) {
- // make sure the file is owned by the effective user and effective group
- // e.g. default behavior on mac is that new files inherit the group of
- // the directory that they are created in
- RESTARTABLE(::chown(initial_path, geteuid(), getegid()), res);
- if (res == 0) {
- res = ::rename(initial_path, path);
- }
- }
- }
- if (res == -1) {
- ::close(listener);
- ::unlink(initial_path);
- return -1;
- }
- set_path(path);
- set_listener(listener);
-
- return 0;
-}
-
-// Given a socket that is connected to a peer we read the request and
-// create an AttachOperation. As the socket is blocking there is potential
-// for a denial-of-service if the peer does not response. However this happens
-// after the peer credentials have been checked and in the worst case it just
-// means that the attach listener thread is blocked.
-//
-BsdAttachOperation* BsdAttachListener::read_request(int s) {
- char ver_str[8];
- size_t ver_str_len = os::snprintf_checked(ver_str, sizeof(ver_str), "%d", ATTACH_PROTOCOL_VER);
-
- // The request is a sequence of strings so we first figure out the
- // expected count and the maximum possible length of the request.
- // The request is:
- // 00000
- // where is the protocol version (1), is the command
- // name ("load", "datadump", ...), and is an argument
- int expected_str_count = 2 + AttachOperation::arg_count_max;
- const size_t max_len = (sizeof(ver_str) + 1) + (AttachOperation::name_length_max + 1) +
- AttachOperation::arg_count_max*(AttachOperation::arg_length_max + 1);
-
- char buf[max_len];
- int str_count = 0;
-
- // Read until all (expected) strings have been read, the buffer is
- // full, or EOF.
-
- size_t off = 0;
- size_t left = max_len;
-
- do {
- ssize_t n;
- RESTARTABLE(read(s, buf+off, left), n);
- assert(n <= checked_cast<ssize_t>(left), "buffer was too small, impossible!");
- buf[max_len - 1] = '\0';
- if (n == -1) {
- return nullptr; // reset by peer or other error
- }
- if (n == 0) {
- break;
- }
- for (ssize_t i=0; i so check it now to
- // check for protocol mismatch
- if (str_count == 1) {
- if ((strlen(buf) != ver_str_len) ||
- (atoi(buf) != ATTACH_PROTOCOL_VER)) {
- char msg[32];
- int msg_len = os::snprintf_checked(msg, sizeof(msg), "%d\n", ATTACH_ERROR_BADVERSION);
- write_fully(s, msg, msg_len);
- return nullptr;
- }
- }
- }
- }
- off += n;
- left -= n;
- } while (left > 0 && str_count < expected_str_count);
-
- if (str_count != expected_str_count) {
- return nullptr; // incomplete request
- }
-
- // parse request
-
- ArgumentIterator args(buf, (max_len)-left);
-
- // version already checked
- char* v = args.next();
-
- char* name = args.next();
- if (name == nullptr || strlen(name) > AttachOperation::name_length_max) {
- return nullptr;
- }
-
- BsdAttachOperation* op = new BsdAttachOperation(name);
-
- for (int i=0; iset_arg(i, nullptr);
- } else {
- if (strlen(arg) > AttachOperation::arg_length_max) {
- delete op;
- return nullptr;
- }
- op->set_arg(i, arg);
- }
- }
-
- op->set_socket(s);
- return op;
-}
-
-
-// Dequeue an operation
-//
-// In the Bsd implementation there is only a single operation and clients
-// cannot queue commands (except at the socket level).
-//
-BsdAttachOperation* BsdAttachListener::dequeue() {
- for (;;) {
- int s;
-
- // wait for client to connect
- struct sockaddr addr;
- socklen_t len = sizeof(addr);
- RESTARTABLE(::accept(listener(), &addr, &len), s);
- if (s == -1) {
- return nullptr; // log a warning?
- }
-
- // get the credentials of the peer and check the effective uid/guid
- uid_t puid;
- gid_t pgid;
- if (::getpeereid(s, &puid, &pgid) != 0) {
- log_debug(attach)("Failed to get peer id");
- ::close(s);
- continue;
- }
-
- if (!os::Posix::matches_effective_uid_and_gid_or_root(puid, pgid)) {
- log_debug(attach)("euid/egid check failed (%d/%d vs %d/%d)", puid, pgid,
- geteuid(), getegid());
- ::close(s);
- continue;
- }
-
- // peer credential look okay so we read the request
- BsdAttachOperation* op = read_request(s);
- if (op == nullptr) {
- ::close(s);
- continue;
- } else {
- return op;
- }
- }
-}
-
-// write the given buffer to the socket
-int BsdAttachListener::write_fully(int s, char* buf, size_t len) {
- do {
- ssize_t n = ::write(s, buf, len);
- if (n == -1) {
- if (errno != EINTR) return -1;
- } else {
- buf += n;
- len -= n;
- }
- }
- while (len > 0);
- return 0;
-}
-
-// Complete an operation by sending the operation result and any result
-// output to the client. At this time the socket is in blocking mode so
-// potentially we can block if there is a lot of data and the client is
-// non-responsive. For most operations this is a non-issue because the
-// default send buffer is sufficient to buffer everything. In the future
-// if there are operations that involves a very big reply then it the
-// socket could be made non-blocking and a timeout could be used.
-
-void BsdAttachOperation::complete(jint result, bufferedStream* st) {
- JavaThread* thread = JavaThread::current();
- ThreadBlockInVM tbivm(thread);
-
- // write operation result
- char msg[32];
- int msg_len = os::snprintf_checked(msg, sizeof(msg), "%d\n", result);
- int rc = BsdAttachListener::write_fully(this->socket(), msg, msg_len);
-
- // write any result data
- if (rc == 0) {
- BsdAttachListener::write_fully(this->socket(), (char*) st->base(), st->size());
- ::shutdown(this->socket(), 2);
- }
-
- // done
- ::close(this->socket());
-
- delete this;
-}
-
-
-// AttachListener functions
-
-AttachOperation* AttachListener::dequeue() {
- JavaThread* thread = JavaThread::current();
- ThreadBlockInVM tbivm(thread);
-
- AttachOperation* op = BsdAttachListener::dequeue();
-
- return op;
-}
-
-// Performs initialization at vm startup
-// For BSD we remove any stale .java_pid file which could cause
-// an attaching process to think we are ready to receive on the
-// domain socket before we are properly initialized
-
-void AttachListener::vm_start() {
- char fn[UNIX_PATH_MAX];
- struct stat st;
- int ret;
-
- int n = snprintf(fn, UNIX_PATH_MAX, "%s/.java_pid%d",
- os::get_temp_directory(), os::current_process_id());
- assert(n < (int)UNIX_PATH_MAX, "java_pid file name buffer overflow");
-
- RESTARTABLE(::stat(fn, &st), ret);
- if (ret == 0) {
- ret = ::unlink(fn);
- if (ret == -1) {
- log_debug(attach)("Failed to remove stale attach pid file at %s", fn);
- }
- }
-}
-
-int AttachListener::pd_init() {
- JavaThread* thread = JavaThread::current();
- ThreadBlockInVM tbivm(thread);
-
- int ret_code = BsdAttachListener::init();
-
- return ret_code;
-}
-
-bool AttachListener::check_socket_file() {
- int ret;
- struct stat st;
- ret = stat(BsdAttachListener::path(), &st);
- if (ret == -1) { // need to restart attach listener.
- log_debug(attach)("Socket file %s does not exist - Restart Attach Listener",
- BsdAttachListener::path());
-
- listener_cleanup();
-
- // wait to terminate current attach listener instance...
- {
- // avoid deadlock if AttachListener thread is blocked at safepoint
- ThreadBlockInVM tbivm(JavaThread::current());
- while (AttachListener::transit_state(AL_INITIALIZING,
- AL_NOT_INITIALIZED) != AL_NOT_INITIALIZED) {
- os::naked_yield();
- }
- }
- return is_init_trigger();
- }
- return false;
-}
-
-// Attach Listener is started lazily except in the case when
-// +ReduseSignalUsage is used
-bool AttachListener::init_at_startup() {
- if (ReduceSignalUsage) {
- return true;
- } else {
- return false;
- }
-}
-
-// If the file .attach_pid exists in the working directory
-// or /tmp then this is the trigger to start the attach mechanism
-bool AttachListener::is_init_trigger() {
- if (init_at_startup() || is_initialized()) {
- return false; // initialized at startup or already initialized
- }
- char fn[PATH_MAX + 1];
- int ret;
- struct stat st;
- snprintf(fn, PATH_MAX + 1, "%s/.attach_pid%d",
- os::get_temp_directory(), os::current_process_id());
- RESTARTABLE(::stat(fn, &st), ret);
- if (ret == -1) {
- log_debug(attach)("Failed to find attach file: %s", fn);
- }
- if (ret == 0) {
- // simple check to avoid starting the attach mechanism when
- // a bogus non-root user creates the file
- if (os::Posix::matches_effective_uid_or_root(st.st_uid)) {
- init();
- log_trace(attach)("Attach triggered by %s", fn);
- return true;
- } else {
- log_debug(attach)("File %s has wrong user id %d (vs %d). Attach is not triggered", fn, st.st_uid, geteuid());
- }
- }
- return false;
-}
-
-// if VM aborts then remove listener
-void AttachListener::abort() {
- listener_cleanup();
-}
-
-void AttachListener::pd_data_dump() {
- os::signal_notify(SIGQUIT);
-}
-
-void AttachListener::pd_detachall() {
- // do nothing for now
-}
diff --git a/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp b/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp
index bd1d7174c12..29825a9eab2 100644
--- a/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp
+++ b/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp
@@ -103,7 +103,7 @@ bool ZPhysicalMemoryBacking::commit_inner(zoffset offset, size_t length) const {
assert(is_aligned(length, os::vm_page_size()), "Invalid length");
log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
- untype(offset) / M, untype(offset) + length / M, length / M);
+ untype(offset) / M, untype(to_zoffset_end(offset, length)) / M, length / M);
const uintptr_t addr = _base + untype(offset);
const void* const res = mmap((void*)addr, length, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
@@ -150,7 +150,7 @@ size_t ZPhysicalMemoryBacking::uncommit(zoffset offset, size_t length) const {
assert(is_aligned(length, os::vm_page_size()), "Invalid length");
log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
- untype(offset) / M, untype(offset) + length / M, length / M);
+ untype(offset) / M, untype(to_zoffset_end(offset, length)) / M, length / M);
const uintptr_t start = _base + untype(offset);
const void* const res = mmap((void*)start, length, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
diff --git a/src/hotspot/os/bsd/os_bsd.cpp b/src/hotspot/os/bsd/os_bsd.cpp
index bf615d1b37a..e5b6c74ce2f 100644
--- a/src/hotspot/os/bsd/os_bsd.cpp
+++ b/src/hotspot/os/bsd/os_bsd.cpp
@@ -1590,7 +1590,7 @@ bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
#if defined(__OpenBSD__)
// XXX: Work-around mmap/MAP_FIXED bug temporarily on OpenBSD
- Events::log(nullptr, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(addr), p2i(addr+size), prot);
+ Events::log_memprotect(nullptr, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(addr), p2i(addr+size), prot);
if (::mprotect(addr, size, prot) == 0) {
return true;
} else {
@@ -1711,7 +1711,7 @@ bool os::numa_get_group_ids_for_range(const void** addresses, int* lgrp_ids, siz
bool os::pd_uncommit_memory(char* addr, size_t size, bool exec) {
#if defined(__OpenBSD__)
// XXX: Work-around mmap/MAP_FIXED bug temporarily on OpenBSD
- Events::log(nullptr, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with PROT_NONE", p2i(addr), p2i(addr+size));
+ Events::log_memprotect(nullptr, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with PROT_NONE", p2i(addr), p2i(addr+size));
if (::mprotect(addr, size, PROT_NONE) == 0) {
return true;
} else {
@@ -1829,7 +1829,7 @@ static bool bsd_mprotect(char* addr, size_t size, int prot) {
assert(addr == bottom, "sanity check");
size = align_up(pointer_delta(addr, bottom, 1) + size, os::vm_page_size());
- Events::log(nullptr, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(bottom), p2i(bottom+size), prot);
+ Events::log_memprotect(nullptr, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(bottom), p2i(bottom+size), prot);
return ::mprotect(bottom, size, prot) == 0;
}
@@ -1936,15 +1936,6 @@ size_t os::vm_min_address() {
#endif
}
-// Used to convert frequent JVM_Yield() to nops
-bool os::dont_yield() {
- return DontYieldALot;
-}
-
-void os::naked_yield() {
- sched_yield();
-}
-
////////////////////////////////////////////////////////////////////////////////
// thread priority support
@@ -2140,16 +2131,25 @@ jint os::init_2(void) {
if (status != 0) {
log_info(os)("os::init_2 getrlimit failed: %s", os::strerror(errno));
} else {
- nbr_files.rlim_cur = nbr_files.rlim_max;
+ rlim_t rlim_original = nbr_files.rlim_cur;
-#ifdef __APPLE__
- // Darwin returns RLIM_INFINITY for rlim_max, but fails with EINVAL if
- // you attempt to use RLIM_INFINITY. As per setrlimit(2), OPEN_MAX must
- // be used instead
- nbr_files.rlim_cur = MIN(OPEN_MAX, nbr_files.rlim_cur);
-#endif
+ // On macOS according to setrlimit(2), OPEN_MAX must be used instead
+ // of RLIM_INFINITY, but testing on macOS >= 10.6, reveals that
+ // we can, in fact, use even RLIM_INFINITY, so try the max value
+ // that the system claims can be used first, same as other BSD OSes.
+ // However, some terminals (ksh) will internally use "int" type
+ // to store this value and since RLIM_INFINITY overflows an "int"
+ // we might end up with a negative value, so cap the system limit max
+ // at INT_MAX instead, just in case, for everyone.
+ nbr_files.rlim_cur = MIN(INT_MAX, nbr_files.rlim_max);
status = setrlimit(RLIMIT_NOFILE, &nbr_files);
+ if (status != 0) {
+ // If that fails then try lowering the limit to either OPEN_MAX
+ // (which is safe) or the original limit, whichever was greater.
+ nbr_files.rlim_cur = MAX(OPEN_MAX, rlim_original);
+ status = setrlimit(RLIMIT_NOFILE, &nbr_files);
+ }
if (status != 0) {
log_info(os)("os::init_2 setrlimit failed: %s", os::strerror(errno));
}
@@ -2384,14 +2384,6 @@ int os::open(const char *path, int oflag, int mode) {
return fd;
}
-
-// create binary file, rewriting existing file if required
-int os::create_binary_file(const char* path, bool rewrite_existing) {
- int oflags = O_WRONLY | O_CREAT;
- oflags |= rewrite_existing ? O_TRUNC : O_EXCL;
- return ::open(path, oflags, S_IREAD | S_IWRITE);
-}
-
// return current position of file pointer
jlong os::current_file_offset(int fd) {
return (jlong)::lseek(fd, (off_t)0, SEEK_CUR);
diff --git a/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp b/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp
index ff891509365..76f9d90cd71 100644
--- a/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp
+++ b/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp
@@ -597,7 +597,7 @@ ZErrno ZPhysicalMemoryBacking::fallocate(bool punch_hole, zoffset offset, size_t
bool ZPhysicalMemoryBacking::commit_inner(zoffset offset, size_t length) const {
log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
- untype(offset) / M, untype(offset + length) / M, length / M);
+ untype(offset) / M, untype(to_zoffset_end(offset, length)) / M, length / M);
retry:
const ZErrno err = fallocate(false /* punch_hole */, offset, length);
@@ -697,7 +697,7 @@ size_t ZPhysicalMemoryBacking::commit(zoffset offset, size_t length) const {
size_t ZPhysicalMemoryBacking::uncommit(zoffset offset, size_t length) const {
log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
- untype(offset) / M, untype(offset + length) / M, length / M);
+ untype(offset) / M, untype(to_zoffset_end(offset, length)) / M, length / M);
const ZErrno err = fallocate(true /* punch_hole */, offset, length);
if (err) {
diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp
index 18a9a38db7f..20d3c99cd3e 100644
--- a/src/hotspot/os/linux/os_linux.cpp
+++ b/src/hotspot/os/linux/os_linux.cpp
@@ -2972,7 +2972,7 @@ void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
#define MADV_POPULATE_WRITE MADV_POPULATE_WRITE_value
#else
// Sanity-check our assumed default value if we build with a new enough libc.
- static_assert(MADV_POPULATE_WRITE == MADV_POPULATE_WRITE_value);
+ STATIC_ASSERT(MADV_POPULATE_WRITE == MADV_POPULATE_WRITE_value);
#endif
// Note that the value for MAP_FIXED_NOREPLACE differs between architectures, but all architectures
@@ -2982,7 +2982,7 @@ void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
#define MAP_FIXED_NOREPLACE MAP_FIXED_NOREPLACE_value
#else
// Sanity-check our assumed default value if we build with a new enough libc.
- static_assert(MAP_FIXED_NOREPLACE == MAP_FIXED_NOREPLACE_value, "MAP_FIXED_NOREPLACE != MAP_FIXED_NOREPLACE_value");
+ STATIC_ASSERT(MAP_FIXED_NOREPLACE == MAP_FIXED_NOREPLACE_value);
#endif
int os::Linux::commit_memory_impl(char* addr, size_t size,
@@ -3786,7 +3786,7 @@ static bool linux_mprotect(char* addr, size_t size, int prot) {
#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
if (addr != g_assert_poison)
#endif
- Events::log(nullptr, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(bottom), p2i(bottom+size), prot);
+ Events::log_memprotect(nullptr, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(bottom), p2i(bottom+size), prot);
return ::mprotect(bottom, size, prot) == 0;
}
@@ -4310,25 +4310,6 @@ size_t os::vm_min_address() {
return value;
}
-// Used to convert frequent JVM_Yield() to nops
-bool os::dont_yield() {
- return DontYieldALot;
-}
-
-// Linux CFS scheduler (since 2.6.23) does not guarantee sched_yield(2) will
-// actually give up the CPU. Since skip buddy (v2.6.28):
-//
-// * Sets the yielding task as skip buddy for current CPU's run queue.
-// * Picks next from run queue, if empty, picks a skip buddy (can be the yielding task).
-// * Clears skip buddies for this run queue (yielding task no longer a skip buddy).
-//
-// An alternative is calling os::naked_short_nanosleep with a small number to avoid
-// getting re-scheduled immediately.
-//
-void os::naked_yield() {
- sched_yield();
-}
-
////////////////////////////////////////////////////////////////////////////////
// thread priority support
@@ -5140,14 +5121,6 @@ int os::open(const char *path, int oflag, int mode) {
return fd;
}
-
-// create binary file, rewriting existing file if required
-int os::create_binary_file(const char* path, bool rewrite_existing) {
- int oflags = O_WRONLY | O_CREAT;
- oflags |= rewrite_existing ? O_TRUNC : O_EXCL;
- return ::open(path, oflags, S_IREAD | S_IWRITE);
-}
-
// return current position of file pointer
jlong os::current_file_offset(int fd) {
return (jlong)::lseek(fd, (off_t)0, SEEK_CUR);
diff --git a/src/hotspot/os/linux/os_perf_linux.cpp b/src/hotspot/os/linux/os_perf_linux.cpp
index cca41a12ee1..87abbebe25f 100644
--- a/src/hotspot/os/linux/os_perf_linux.cpp
+++ b/src/hotspot/os/linux/os_perf_linux.cpp
@@ -847,7 +847,7 @@ SystemProcessInterface::SystemProcesses::ProcessIterator::ProcessIterator() {
bool SystemProcessInterface::SystemProcesses::ProcessIterator::initialize() {
_dir = os::opendir("/proc");
_entry = nullptr;
- _valid = true;
+ _valid = _dir != nullptr; // May be null if /proc is not accessible.
next_process();
return true;
diff --git a/src/hotspot/os/linux/systemMemoryBarrier_linux.cpp b/src/hotspot/os/linux/systemMemoryBarrier_linux.cpp
index 892d825b40c..a6b26928520 100644
--- a/src/hotspot/os/linux/systemMemoryBarrier_linux.cpp
+++ b/src/hotspot/os/linux/systemMemoryBarrier_linux.cpp
@@ -41,6 +41,8 @@
#define SYS_membarrier 365
#elif defined(AARCH64)
#define SYS_membarrier 283
+ #elif defined(ARM32)
+ #define SYS_membarrier 389
#elif defined(ALPHA)
#define SYS_membarrier 517
#else
diff --git a/src/hotspot/os/linux/attachListener_linux.cpp b/src/hotspot/os/posix/attachListener_posix.cpp
similarity index 81%
rename from src/hotspot/os/linux/attachListener_linux.cpp
rename to src/hotspot/os/posix/attachListener_posix.cpp
index 0e98bca0607..bdd97afd47e 100644
--- a/src/hotspot/os/linux/attachListener_linux.cpp
+++ b/src/hotspot/os/posix/attachListener_posix.cpp
@@ -30,6 +30,7 @@
#include "os_posix.hpp"
#include "services/attachListener.hpp"
#include "utilities/checkedCast.hpp"
+#include "utilities/macros.hpp"
#include
#include
@@ -38,14 +39,17 @@
#include
#include
+#if INCLUDE_SERVICES
+#ifndef AIX
+
#ifndef UNIX_PATH_MAX
#define UNIX_PATH_MAX sizeof(((struct sockaddr_un *)0)->sun_path)
#endif
-// The attach mechanism on Linux uses a UNIX domain socket. An attach listener
-// thread is created at startup or is created on-demand via a signal from
-// the client tool. The attach listener creates a socket and binds it to a file
-// in the filesystem. The attach listener then acts as a simple (single-
+// The attach mechanism on Linux and BSD uses a UNIX domain socket. An attach
+// listener thread is created at startup or is created on-demand via a signal
+// from the client tool. The attach listener creates a socket and binds it to a
+// file in the filesystem. The attach listener then acts as a simple (single-
// threaded) server - it waits for a client to connect, reads the request,
// executes it, and returns the response to the client via the socket
// connection.
@@ -59,9 +63,9 @@
// of the client matches this process.
// forward reference
-class LinuxAttachOperation;
+class PosixAttachOperation;
-class LinuxAttachListener: AllStatic {
+class PosixAttachListener: AllStatic {
private:
// the path to which we bind the UNIX domain socket
static char _path[UNIX_PATH_MAX];
@@ -73,7 +77,7 @@ class LinuxAttachListener: AllStatic {
static bool _atexit_registered;
// reads a request from the given connected socket
- static LinuxAttachOperation* read_request(int s);
+ static PosixAttachOperation* read_request(int s);
public:
enum {
@@ -106,10 +110,10 @@ class LinuxAttachListener: AllStatic {
// write the given buffer to a socket
static int write_fully(int s, char* buf, size_t len);
- static LinuxAttachOperation* dequeue();
+ static PosixAttachOperation* dequeue();
};
-class LinuxAttachOperation: public AttachOperation {
+class PosixAttachOperation: public AttachOperation {
private:
// the connection to the client
int _socket;
@@ -120,16 +124,16 @@ class LinuxAttachOperation: public AttachOperation {
void set_socket(int s) { _socket = s; }
int socket() const { return _socket; }
- LinuxAttachOperation(char* name) : AttachOperation(name) {
+ PosixAttachOperation(char* name) : AttachOperation(name) {
set_socket(-1);
}
};
// statics
-char LinuxAttachListener::_path[UNIX_PATH_MAX];
-bool LinuxAttachListener::_has_path;
-volatile int LinuxAttachListener::_listener = -1;
-bool LinuxAttachListener::_atexit_registered = false;
+char PosixAttachListener::_path[UNIX_PATH_MAX];
+bool PosixAttachListener::_has_path;
+volatile int PosixAttachListener::_listener = -1;
+bool PosixAttachListener::_atexit_registered = false;
// Supporting class to help split a buffer into individual components
class ArgumentIterator : public StackObj {
@@ -164,22 +168,22 @@ class ArgumentIterator : public StackObj {
// bound too.
extern "C" {
static void listener_cleanup() {
- int s = LinuxAttachListener::listener();
+ int s = PosixAttachListener::listener();
if (s != -1) {
- LinuxAttachListener::set_listener(-1);
+ PosixAttachListener::set_listener(-1);
::shutdown(s, SHUT_RDWR);
::close(s);
}
- if (LinuxAttachListener::has_path()) {
- ::unlink(LinuxAttachListener::path());
- LinuxAttachListener::set_path(nullptr);
+ if (PosixAttachListener::has_path()) {
+ ::unlink(PosixAttachListener::path());
+ PosixAttachListener::set_path(nullptr);
}
}
}
// Initialization - create a listener socket and bind it to a file
-int LinuxAttachListener::init() {
+int PosixAttachListener::init() {
char path[UNIX_PATH_MAX]; // socket file
char initial_path[UNIX_PATH_MAX]; // socket file during setup
int listener; // listener socket (file descriptor)
@@ -225,7 +229,9 @@ int LinuxAttachListener::init() {
RESTARTABLE(::chmod(initial_path, S_IREAD|S_IWRITE), res);
if (res == 0) {
// make sure the file is owned by the effective user and effective group
- // e.g. the group could be inherited from the directory in case the s bit is set
+ // e.g. the group could be inherited from the directory in case the s bit
+ // is set. The default behavior on mac is that new files inherit the group
+ // of the directory that they are created in.
RESTARTABLE(::chown(initial_path, geteuid(), getegid()), res);
if (res == 0) {
res = ::rename(initial_path, path);
@@ -249,7 +255,7 @@ int LinuxAttachListener::init() {
// after the peer credentials have been checked and in the worst case it just
// means that the attach listener thread is blocked.
//
-LinuxAttachOperation* LinuxAttachListener::read_request(int s) {
+PosixAttachOperation* PosixAttachListener::read_request(int s) {
char ver_str[8];
os::snprintf_checked(ver_str, sizeof(ver_str), "%d", ATTACH_PROTOCOL_VER);
@@ -321,7 +327,7 @@ LinuxAttachOperation* LinuxAttachListener::read_request(int s) {
return nullptr;
}
- LinuxAttachOperation* op = new LinuxAttachOperation(name);
+ PosixAttachOperation* op = new PosixAttachOperation(name);
- int rc = LinuxAttachListener::write_fully(this->socket(), msg, strlen(msg));
+ int rc = PosixAttachListener::write_fully(this->socket(), msg, strlen(msg));
// write any result data
if (rc == 0) {
- LinuxAttachListener::write_fully(this->socket(), (char*) st->base(), st->size());
+ PosixAttachListener::write_fully(this->socket(), (char*) st->base(), st->size());
::shutdown(this->socket(), 2);
}
@@ -436,13 +461,13 @@ AttachOperation* AttachListener::dequeue() {
JavaThread* thread = JavaThread::current();
ThreadBlockInVM tbivm(thread);
- AttachOperation* op = LinuxAttachListener::dequeue();
+ AttachOperation* op = PosixAttachListener::dequeue();
return op;
}
// Performs initialization at vm startup
-// For Linux we remove any stale .java_pid file which could cause
+// For Linux and BSD we remove any stale .java_pid file which could cause
// an attaching process to think we are ready to receive on the
// domain socket before we are properly initialized
@@ -468,7 +493,7 @@ int AttachListener::pd_init() {
JavaThread* thread = JavaThread::current();
ThreadBlockInVM tbivm(thread);
- int ret_code = LinuxAttachListener::init();
+ int ret_code = PosixAttachListener::init();
return ret_code;
}
@@ -476,10 +501,10 @@ int AttachListener::pd_init() {
bool AttachListener::check_socket_file() {
int ret;
struct stat st;
- ret = stat(LinuxAttachListener::path(), &st);
+ ret = stat(PosixAttachListener::path(), &st);
if (ret == -1) { // need to restart attach listener.
log_debug(attach)("Socket file %s does not exist - Restart Attach Listener",
- LinuxAttachListener::path());
+ PosixAttachListener::path());
listener_cleanup();
@@ -516,12 +541,13 @@ bool AttachListener::is_init_trigger() {
char fn[PATH_MAX + 1];
int ret;
struct stat st;
- os::snprintf_checked(fn, sizeof(fn), ".attach_pid%d", os::current_process_id());
+ os::snprintf_checked(fn, sizeof(fn), ".attach_pid%d",
+ os::current_process_id());
RESTARTABLE(::stat(fn, &st), ret);
if (ret == -1) {
log_trace(attach)("Failed to find attach file: %s, trying alternate", fn);
- snprintf(fn, sizeof(fn), "%s/.attach_pid%d",
- os::get_temp_directory(), os::current_process_id());
+ snprintf(fn, sizeof(fn), "%s/.attach_pid%d", os::get_temp_directory(),
+ os::current_process_id());
RESTARTABLE(::stat(fn, &st), ret);
if (ret == -1) {
log_debug(attach)("Failed to find attach file: %s", fn);
@@ -553,3 +579,7 @@ void AttachListener::pd_data_dump() {
void AttachListener::pd_detachall() {
// do nothing for now
}
+
+#endif // !AIX
+
+#endif // INCLUDE_SERVICES
diff --git a/src/hotspot/os/posix/os_posix.cpp b/src/hotspot/os/posix/os_posix.cpp
index d2de5b30484..a18a633002c 100644
--- a/src/hotspot/os/posix/os_posix.cpp
+++ b/src/hotspot/os/posix/os_posix.cpp
@@ -508,7 +508,7 @@ void os::Posix::print_rlimit_info(outputStream* st) {
#if defined(AIX)
st->print(", NPROC ");
- st->print("%d", sysconf(_SC_CHILD_MAX));
+ st->print("%ld", sysconf(_SC_CHILD_MAX));
print_rlimit(st, ", THREADS", RLIMIT_THREADS);
#else
@@ -854,6 +854,14 @@ void os::_exit(int num) {
ALLOW_C_FUNCTION(::_exit, ::_exit(num);)
}
+bool os::dont_yield() {
+ return DontYieldALot;
+}
+
+void os::naked_yield() {
+ sched_yield();
+}
+
// Builds a platform dependent Agent_OnLoad_ function name
// which is used to find statically linked in agents.
// Parameters:
diff --git a/src/hotspot/os/posix/signals_posix.cpp b/src/hotspot/os/posix/signals_posix.cpp
index 5172853ecd5..a9d3bb9284c 100644
--- a/src/hotspot/os/posix/signals_posix.cpp
+++ b/src/hotspot/os/posix/signals_posix.cpp
@@ -614,7 +614,6 @@ int JVM_HANDLE_XXX_SIGNAL(int sig, siginfo_t* info,
if (NativeDeoptInstruction::is_deopt_at(pc)) {
CodeBlob* cb = CodeCache::find_blob(pc);
if (cb != nullptr && cb->is_nmethod()) {
- MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, t);) // can call PcDescCache::add_pc_desc
nmethod* nm = cb->as_nmethod();
assert(nm->insts_contains_inclusive(pc), "");
address deopt = nm->is_method_handle_return(pc) ?
diff --git a/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.cpp b/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.cpp
index 9a38352d1d7..181d0fada59 100644
--- a/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.cpp
+++ b/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.cpp
@@ -225,14 +225,14 @@ void ZPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const {
size_t ZPhysicalMemoryBacking::commit(zoffset offset, size_t length) {
log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
- untype(offset) / M, (untype(offset) + length) / M, length / M);
+ untype(offset) / M, untype(to_zoffset_end(offset, length)) / M, length / M);
return _impl->commit(offset, length);
}
size_t ZPhysicalMemoryBacking::uncommit(zoffset offset, size_t length) {
log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
- untype(offset) / M, (untype(offset) + length) / M, length / M);
+ untype(offset) / M, untype(to_zoffset_end(offset, length)) / M, length / M);
return _impl->uncommit(offset, length);
}
diff --git a/src/hotspot/os/windows/gc/z/zVirtualMemory_windows.cpp b/src/hotspot/os/windows/gc/z/zVirtualMemory_windows.cpp
index c7c0d700d79..7448fd18e0b 100644
--- a/src/hotspot/os/windows/gc/z/zVirtualMemory_windows.cpp
+++ b/src/hotspot/os/windows/gc/z/zVirtualMemory_windows.cpp
@@ -40,6 +40,11 @@ public:
};
// Implements small pages (paged) support using placeholder reservation.
+//
+// When a memory area is free (kept by the virtual memory manager) a
+// single placeholder is covering that memory area. When memory is
+// allocated from the manager the placeholder is split into granule
+// sized placeholders to allow mapping operations on that granularity.
class ZVirtualMemoryManagerSmallPages : public ZVirtualMemoryManagerImpl {
private:
class PlaceholderCallbacks : public AllStatic {
@@ -52,49 +57,87 @@ private:
ZMapper::coalesce_placeholders(ZOffset::address_unsafe(start), size);
}
- static void split_into_placeholder_granules(zoffset start, size_t size) {
- for (uintptr_t addr = untype(start); addr < untype(start) + size; addr += ZGranuleSize) {
- split_placeholder(to_zoffset(addr), ZGranuleSize);
+ // Turn the single placeholder covering the memory area into granule
+ // sized placeholders.
+ static void split_into_granule_sized_placeholders(zoffset start, size_t size) {
+ assert(size >= ZGranuleSize, "Must be at least one granule");
+ assert(is_aligned(size, ZGranuleSize), "Must be granule aligned");
+
+ // Don't call split_placeholder on the last granule, since it is already
+ // a placeholder and the system call would therefore fail.
+ const size_t limit = size - ZGranuleSize;
+ for (size_t offset = 0; offset < limit; offset += ZGranuleSize) {
+ split_placeholder(start + offset, ZGranuleSize);
}
}
static void coalesce_into_one_placeholder(zoffset start, size_t size) {
assert(is_aligned(size, ZGranuleSize), "Must be granule aligned");
+ // Granule sized areas are already covered by a single placeholder
if (size > ZGranuleSize) {
coalesce_placeholders(start, size);
}
}
+ // Called when a memory area is returned to the memory manager but can't
+ // be merged with an already existing area. Make sure this area is covered
+ // by a single placeholder.
static void create_callback(const ZMemory* area) {
assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned");
+
coalesce_into_one_placeholder(area->start(), area->size());
}
+ // Called when a complete memory area in the memory manager is allocated.
+ // Create granule sized placeholders for the entire area.
static void destroy_callback(const ZMemory* area) {
assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned");
- // Don't try split the last granule - VirtualFree will fail
- split_into_placeholder_granules(area->start(), area->size() - ZGranuleSize);
+
+ split_into_granule_sized_placeholders(area->start(), area->size());
}
+ // Called when a memory area is allocated at the front of an existing memory area.
+ // Turn the first part of the memory area into granule sized placeholders.
static void shrink_from_front_callback(const ZMemory* area, size_t size) {
+ assert(area->size() > size, "Must be larger than what we try to split out");
assert(is_aligned(size, ZGranuleSize), "Must be granule aligned");
- split_into_placeholder_granules(area->start(), size);
+
+ // Split the area into two placeholders
+ split_placeholder(area->start(), size);
+
+ // Split the first part into granule sized placeholders
+ split_into_granule_sized_placeholders(area->start(), size);
}
+ // Called when a memory area is allocated at the end of an existing memory area.
+ // Turn the second part of the memory area into granule sized placeholders.
static void shrink_from_back_callback(const ZMemory* area, size_t size) {
+ assert(area->size() > size, "Must be larger than what we try to split out");
assert(is_aligned(size, ZGranuleSize), "Must be granule aligned");
- // Don't try split the last granule - VirtualFree will fail
- split_into_placeholder_granules(to_zoffset(untype(area->end()) - size), size - ZGranuleSize);
+
+ // Split the area into two placeholders
+ const zoffset start = to_zoffset(area->end() - size);
+ split_placeholder(start, size);
+
+ // Split the second part into granule sized placeholders
+ split_into_granule_sized_placeholders(start, size);
}
+ // Called when freeing a memory area and it can be merged at the start of an
+ // existing area. Coalesce the underlying placeholders into one.
static void grow_from_front_callback(const ZMemory* area, size_t size) {
assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned");
- coalesce_into_one_placeholder(to_zoffset(untype(area->start()) - size), area->size() + size);
+
+ const zoffset start = area->start() - size;
+ coalesce_into_one_placeholder(start, area->size() + size);
}
+ // Called when freeing a memory area and it can be merged at the end of an
+ // existing area. Coalesce the underlying placeholders into one.
static void grow_from_back_callback(const ZMemory* area, size_t size) {
assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned");
+
coalesce_into_one_placeholder(area->start(), area->size() + size);
}
diff --git a/src/hotspot/os/windows/os_windows.cpp b/src/hotspot/os/windows/os_windows.cpp
index ddc1b1c335d..20422e83cb7 100644
--- a/src/hotspot/os/windows/os_windows.cpp
+++ b/src/hotspot/os/windows/os_windows.cpp
@@ -2795,12 +2795,12 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
nm = (cb != nullptr) ? cb->as_nmethod_or_null() : nullptr;
}
- bool is_unsafe_arraycopy = (in_native || in_java) && UnsafeCopyMemory::contains_pc(pc);
- if (((in_vm || in_native || is_unsafe_arraycopy) && thread->doing_unsafe_access()) ||
+ bool is_unsafe_memory_access = (in_native || in_java) && UnsafeMemoryAccess::contains_pc(pc);
+ if (((in_vm || in_native || is_unsafe_memory_access) && thread->doing_unsafe_access()) ||
(nm != nullptr && nm->has_unsafe_access())) {
address next_pc = Assembler::locate_next_instruction(pc);
- if (is_unsafe_arraycopy) {
- next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
+ if (is_unsafe_memory_access) {
+ next_pc = UnsafeMemoryAccess::page_error_continue_pc(pc);
}
return Handle_Exception(exceptionInfo, SharedRuntime::handle_unsafe_access(thread, next_pc));
}
@@ -4889,13 +4889,6 @@ bool os::dir_is_empty(const char* path) {
return is_empty;
}
-// create binary file, rewriting existing file if required
-int os::create_binary_file(const char* path, bool rewrite_existing) {
- int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
- oflags |= rewrite_existing ? _O_TRUNC : _O_EXCL;
- return ::open(path, oflags, _S_IREAD | _S_IWRITE);
-}
-
// return current position of file pointer
jlong os::current_file_offset(int fd) {
return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
diff --git a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp
index e1e81d673a7..b45d38650c0 100644
--- a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp
+++ b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2023 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -261,7 +261,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
else if (sig == SIGTRAP && TrapBasedICMissChecks &&
nativeInstruction_at(pc)->is_sigtrap_ic_miss_check()) {
if (TraceTraps) {
- tty->print_cr("trap: ic_miss_check at " INTPTR_FORMAT " (SIGTRAP)", pc);
+ tty->print_cr("trap: ic_miss_check at " INTPTR_FORMAT " (SIGTRAP)", p2i(pc));
}
stub = SharedRuntime::get_ic_miss_stub();
goto run_stub;
@@ -271,7 +271,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
else if (sig == SIGTRAP && TrapBasedNullChecks &&
nativeInstruction_at(pc)->is_sigtrap_null_check()) {
if (TraceTraps) {
- tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGTRAP)", pc);
+ tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGTRAP)", p2i(pc));
}
stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
goto run_stub;
@@ -282,7 +282,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
CodeCache::contains((void*) pc) &&
MacroAssembler::uses_implicit_null_check(info->si_addr)) {
if (TraceTraps) {
- tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc);
+ tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", p2i(pc));
}
stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
}
@@ -292,7 +292,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
else if (sig == SIGTRAP && TrapBasedRangeChecks &&
nativeInstruction_at(pc)->is_sigtrap_range_check()) {
if (TraceTraps) {
- tty->print_cr("trap: range_check at " INTPTR_FORMAT " (SIGTRAP)", pc);
+ tty->print_cr("trap: range_check at " INTPTR_FORMAT " (SIGTRAP)", p2i(pc));
}
stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
goto run_stub;
@@ -340,11 +340,11 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
// underlying file has been truncated. Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob(pc);
nmethod* nm = cb ? cb->as_nmethod_or_null() : nullptr;
- bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc));
- if ((nm != nullptr && nm->has_unsafe_access()) || is_unsafe_arraycopy) {
+ bool is_unsafe_memory_access = (thread->doing_unsafe_access() && UnsafeMemoryAccess::contains_pc(pc));
+ if ((nm != nullptr && nm->has_unsafe_access()) || is_unsafe_memory_access) {
address next_pc = pc + 4;
- if (is_unsafe_arraycopy) {
- next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
+ if (is_unsafe_memory_access) {
+ next_pc = UnsafeMemoryAccess::page_error_continue_pc(pc);
}
next_pc = SharedRuntime::handle_unsafe_access(thread, next_pc);
os::Posix::ucontext_set_pc(uc, next_pc);
@@ -368,8 +368,8 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
thread->thread_state() == _thread_in_native) &&
sig == SIGBUS && thread->doing_unsafe_access()) {
address next_pc = pc + 4;
- if (UnsafeCopyMemory::contains_pc(pc)) {
- next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
+ if (UnsafeMemoryAccess::contains_pc(pc)) {
+ next_pc = UnsafeMemoryAccess::page_error_continue_pc(pc);
}
next_pc = SharedRuntime::handle_unsafe_access(thread, next_pc);
os::Posix::ucontext_set_pc(uc, next_pc);
@@ -435,12 +435,12 @@ void os::print_context(outputStream *st, const void *context) {
const ucontext_t* uc = (const ucontext_t*)context;
st->print_cr("Registers:");
- st->print("pc =" INTPTR_FORMAT " ", uc->uc_mcontext.jmp_context.iar);
- st->print("lr =" INTPTR_FORMAT " ", uc->uc_mcontext.jmp_context.lr);
- st->print("ctr=" INTPTR_FORMAT " ", uc->uc_mcontext.jmp_context.ctr);
+ st->print("pc =" INTPTR_FORMAT " ", (unsigned long)uc->uc_mcontext.jmp_context.iar);
+ st->print("lr =" INTPTR_FORMAT " ", (unsigned long)uc->uc_mcontext.jmp_context.lr);
+ st->print("ctr=" INTPTR_FORMAT " ", (unsigned long)uc->uc_mcontext.jmp_context.ctr);
st->cr();
for (int i = 0; i < 32; i++) {
- st->print("r%-2d=" INTPTR_FORMAT " ", i, uc->uc_mcontext.jmp_context.gpr[i]);
+ st->print("r%-2d=" INTPTR_FORMAT " ", i, (unsigned long)uc->uc_mcontext.jmp_context.gpr[i]);
if (i % 3 == 2) st->cr();
}
st->cr();
@@ -464,7 +464,7 @@ void os::print_tos_pc(outputStream *st, const void *context) {
st->cr();
// Try to decode the instructions.
- st->print_cr("Decoded instructions: (pc=" PTR_FORMAT ")", pc);
+ st->print_cr("Decoded instructions: (pc=" PTR_FORMAT ")", p2i(pc));
st->print("");
// TODO: PPC port Disassembler::decode(pc, 16, 16, st);
st->cr();
diff --git a/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp b/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp
index 3dfe9e30f79..ba14cb2ac12 100644
--- a/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp
+++ b/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp
@@ -257,11 +257,11 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
// Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob(pc);
nmethod* nm = (cb != nullptr) ? cb->as_nmethod_or_null() : nullptr;
- bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc));
- if ((nm != nullptr && nm->has_unsafe_access()) || is_unsafe_arraycopy) {
+ bool is_unsafe_memory_access = (thread->doing_unsafe_access() && UnsafeMemoryAccess::contains_pc(pc));
+ if ((nm != nullptr && nm->has_unsafe_access()) || is_unsafe_memory_access) {
address next_pc = pc + NativeCall::instruction_size;
- if (is_unsafe_arraycopy) {
- next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
+ if (is_unsafe_memory_access) {
+ next_pc = UnsafeMemoryAccess::page_error_continue_pc(pc);
}
stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
}
@@ -299,8 +299,8 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
thread->doing_unsafe_access()) {
address next_pc = pc + NativeCall::instruction_size;
- if (UnsafeCopyMemory::contains_pc(pc)) {
- next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
+ if (UnsafeMemoryAccess::contains_pc(pc)) {
+ next_pc = UnsafeMemoryAccess::page_error_continue_pc(pc);
}
stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
}
diff --git a/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp b/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp
index 593f6494540..4e7316e0661 100644
--- a/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp
+++ b/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp
@@ -441,19 +441,17 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
// Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob(pc);
nmethod* nm = (cb != nullptr) ? cb->as_nmethod_or_null() : nullptr;
- bool is_unsafe_arraycopy = thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc);
- if ((nm != nullptr && nm->has_unsafe_access()) || is_unsafe_arraycopy) {
+ bool is_unsafe_memory_access = thread->doing_unsafe_access() && UnsafeMemoryAccess::contains_pc(pc);
+ if ((nm != nullptr && nm->has_unsafe_access()) || is_unsafe_memory_access) {
address next_pc = Assembler::locate_next_instruction(pc);
- if (is_unsafe_arraycopy) {
- next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
+ if (is_unsafe_memory_access) {
+ next_pc = UnsafeMemoryAccess::page_error_continue_pc(pc);
}
stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
}
- }
- else
-
+ } else
#ifdef AMD64
- if (sig == SIGFPE &&
+ if (sig == SIGFPE &&
(info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV
// Workaround for macOS ARM incorrectly reporting FPE_FLTINV for "div by 0"
// instead of the expected FPE_FLTDIV when running x86_64 binary under Rosetta emulation
@@ -523,8 +521,8 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
thread->doing_unsafe_access()) {
address next_pc = Assembler::locate_next_instruction(pc);
- if (UnsafeCopyMemory::contains_pc(pc)) {
- next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
+ if (UnsafeMemoryAccess::contains_pc(pc)) {
+ next_pc = UnsafeMemoryAccess::page_error_continue_pc(pc);
}
stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
}
diff --git a/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp b/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp
index e1c9dc8a13a..9f44fe3aa25 100644
--- a/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp
+++ b/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp
@@ -240,11 +240,11 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
// Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob(pc);
nmethod* nm = (cb != nullptr) ? cb->as_nmethod_or_null() : nullptr;
- bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc));
- if ((nm != nullptr && nm->has_unsafe_access()) || is_unsafe_arraycopy) {
+ bool is_unsafe_memory_access = (thread->doing_unsafe_access() && UnsafeMemoryAccess::contains_pc(pc));
+ if ((nm != nullptr && nm->has_unsafe_access()) || is_unsafe_memory_access) {
address next_pc = pc + NativeCall::instruction_size;
- if (is_unsafe_arraycopy) {
- next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
+ if (is_unsafe_memory_access) {
+ next_pc = UnsafeMemoryAccess::page_error_continue_pc(pc);
}
stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
}
@@ -286,8 +286,8 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
thread->doing_unsafe_access()) {
address next_pc = pc + NativeCall::instruction_size;
- if (UnsafeCopyMemory::contains_pc(pc)) {
- next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
+ if (UnsafeMemoryAccess::contains_pc(pc)) {
+ next_pc = UnsafeMemoryAccess::page_error_continue_pc(pc);
}
stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
}
diff --git a/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp b/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp
index 6f9ac548ce1..807fa765897 100644
--- a/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp
+++ b/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp
@@ -324,17 +324,22 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
// Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob(pc);
nmethod* nm = (cb != nullptr) ? cb->as_nmethod_or_null() : nullptr;
- if ((nm != nullptr && nm->has_unsafe_access()) || (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc))) {
+ if ((nm != nullptr && nm->has_unsafe_access()) ||
+ (thread->doing_unsafe_access() &&
+ UnsafeMemoryAccess::contains_pc(pc))) {
unsafe_access = true;
}
} else if (sig == SIGSEGV &&
MacroAssembler::uses_implicit_null_check(info->si_addr)) {
- // Determination of interpreter/vtable stub/compiled code null exception
- CodeBlob* cb = CodeCache::find_blob(pc);
- if (cb != nullptr) {
- stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
- }
- } else if (sig == SIGILL && *(int *)pc == NativeInstruction::not_entrant_illegal_instruction) {
+ // Determination of interpreter/vtable stub/compiled code null exception
+ CodeBlob* cb = CodeCache::find_blob(pc);
+ if (cb != nullptr) {
+ stub = SharedRuntime::continuation_for_implicit_exception(
+ thread, pc, SharedRuntime::IMPLICIT_NULL);
+ }
+ } else if (sig == SIGILL &&
+ *(int*)pc ==
+ NativeInstruction::not_entrant_illegal_instruction) {
// Not entrant
stub = SharedRuntime::get_handle_wrong_method_stub();
}
@@ -359,8 +364,8 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
// any other suitable exception reason,
// so assume it is an unsafe access.
address next_pc = pc + Assembler::InstructionSize;
- if (UnsafeCopyMemory::contains_pc(pc)) {
- next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
+ if (UnsafeMemoryAccess::contains_pc(pc)) {
+ next_pc = UnsafeMemoryAccess::page_error_continue_pc(pc);
}
#ifdef __thumb__
if (uc->uc_mcontext.arm_cpsr & PSR_T_BIT) {
diff --git a/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp b/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp
index 55963b06806..1857097bd52 100644
--- a/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp
+++ b/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2021 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -254,7 +254,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
stub = SharedRuntime::get_handle_wrong_method_stub();
}
- else if ((sig == USE_POLL_BIT_ONLY ? SIGTRAP : SIGSEGV) &&
+ else if ((sig == (USE_POLL_BIT_ONLY ? SIGTRAP : SIGSEGV)) &&
// A linux-ppc64 kernel before 2.6.6 doesn't set si_addr on some segfaults
// in 64bit mode (cf. http://www.kernel.org/pub/linux/kernel/v2.6/ChangeLog-2.6.6),
// especially when we try to read from the safepoint polling page. So the check
@@ -355,11 +355,11 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
// underlying file has been truncated. Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob(pc);
nmethod* nm = (cb != nullptr) ? cb->as_nmethod_or_null() : nullptr;
- bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc));
- if ((nm != nullptr && nm->has_unsafe_access()) || is_unsafe_arraycopy) {
+ bool is_unsafe_memory_access = (thread->doing_unsafe_access() && UnsafeMemoryAccess::contains_pc(pc));
+ if ((nm != nullptr && nm->has_unsafe_access()) || is_unsafe_memory_access) {
address next_pc = pc + 4;
- if (is_unsafe_arraycopy) {
- next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
+ if (is_unsafe_memory_access) {
+ next_pc = UnsafeMemoryAccess::page_error_continue_pc(pc);
}
next_pc = SharedRuntime::handle_unsafe_access(thread, next_pc);
os::Posix::ucontext_set_pc(uc, next_pc);
@@ -379,8 +379,8 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
thread->thread_state() == _thread_in_native) &&
sig == SIGBUS && thread->doing_unsafe_access()) {
address next_pc = pc + 4;
- if (UnsafeCopyMemory::contains_pc(pc)) {
- next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
+ if (UnsafeMemoryAccess::contains_pc(pc)) {
+ next_pc = UnsafeMemoryAccess::page_error_continue_pc(pc);
}
next_pc = SharedRuntime::handle_unsafe_access(thread, next_pc);
os::Posix::ucontext_set_pc(uc, next_pc);
diff --git a/src/hotspot/os_cpu/linux_riscv/atomic_linux_riscv.hpp b/src/hotspot/os_cpu/linux_riscv/atomic_linux_riscv.hpp
index 6546adb6ff3..b3e35d6cc10 100644
--- a/src/hotspot/os_cpu/linux_riscv/atomic_linux_riscv.hpp
+++ b/src/hotspot/os_cpu/linux_riscv/atomic_linux_riscv.hpp
@@ -35,16 +35,10 @@
#if defined(__clang_major__)
#define FULL_COMPILER_ATOMIC_SUPPORT
-#elif (__GNUC__ > 13) || ((__GNUC__ == 13) && (__GNUC_MINOR__ >= 2))
+#elif (__GNUC__ > 13) || ((__GNUC__ == 13) && (__GNUC_MINOR__ > 2))
#define FULL_COMPILER_ATOMIC_SUPPORT
#endif
-#if defined(__clang_major__)
-#define CORRECT_COMPILER_ATOMIC_SUPPORT
-#elif defined(__GNUC__) && (__riscv_xlen <= 32 || __GNUC__ > 13)
-#define CORRECT_COMPILER_ATOMIC_SUPPORT
-#endif
-
template
struct Atomic::PlatformAdd {
template
@@ -120,9 +114,9 @@ inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest __attribute__((
}
#endif
-#ifndef CORRECT_COMPILER_ATOMIC_SUPPORT
+#ifndef FULL_COMPILER_ATOMIC_SUPPORT
// The implementation of `__atomic_compare_exchange` lacks sign extensions
-// in GCC 13 and lower when using with 32-bit unsigned integers on RV64,
+// in GCC 13.2 and lower when using with 32-bit unsigned integers on RV64,
// so we should implement it manually.
// GCC bug: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=114130.
// See also JDK-8326936.
@@ -192,11 +186,7 @@ inline T Atomic::PlatformCmpxchg::operator()(T volatile* dest __attri
atomic_memory_order order) const {
#ifndef FULL_COMPILER_ATOMIC_SUPPORT
- STATIC_ASSERT(byte_size >= 4);
-#endif
-
-#ifndef CORRECT_COMPILER_ATOMIC_SUPPORT
- STATIC_ASSERT(byte_size != 4);
+ STATIC_ASSERT(byte_size > 4);
#endif
STATIC_ASSERT(byte_size == sizeof(T));
@@ -235,6 +225,5 @@ struct Atomic::PlatformOrderedStore
};
#undef FULL_COMPILER_ATOMIC_SUPPORT
-#undef CORRECT_COMPILER_ATOMIC_SUPPORT
#endif // OS_CPU_LINUX_RISCV_ATOMIC_LINUX_RISCV_HPP
diff --git a/src/hotspot/os_cpu/linux_riscv/os_linux_riscv.cpp b/src/hotspot/os_cpu/linux_riscv/os_linux_riscv.cpp
index 079c3b42a9c..fcb0e170af1 100644
--- a/src/hotspot/os_cpu/linux_riscv/os_linux_riscv.cpp
+++ b/src/hotspot/os_cpu/linux_riscv/os_linux_riscv.cpp
@@ -230,11 +230,11 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
// Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob(pc);
nmethod* nm = (cb != nullptr) ? cb->as_nmethod_or_null() : nullptr;
- bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc));
- if ((nm != nullptr && nm->has_unsafe_access()) || is_unsafe_arraycopy) {
+ bool is_unsafe_memory_access = (thread->doing_unsafe_access() && UnsafeMemoryAccess::contains_pc(pc));
+ if ((nm != nullptr && nm->has_unsafe_access()) || is_unsafe_memory_access) {
address next_pc = Assembler::locate_next_instruction(pc);
- if (is_unsafe_arraycopy) {
- next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
+ if (is_unsafe_memory_access) {
+ next_pc = UnsafeMemoryAccess::page_error_continue_pc(pc);
}
stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
}
@@ -272,8 +272,8 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
thread->doing_unsafe_access()) {
address next_pc = Assembler::locate_next_instruction(pc);
- if (UnsafeCopyMemory::contains_pc(pc)) {
- next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
+ if (UnsafeMemoryAccess::contains_pc(pc)) {
+ next_pc = UnsafeMemoryAccess::page_error_continue_pc(pc);
}
stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
}
diff --git a/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp b/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp
index b37a8d1f3a6..6cdb2a5e3f6 100644
--- a/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp
+++ b/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp
@@ -260,19 +260,17 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
// Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob(pc);
nmethod* nm = (cb != nullptr) ? cb->as_nmethod_or_null() : nullptr;
- bool is_unsafe_arraycopy = thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc);
- if ((nm != nullptr && nm->has_unsafe_access()) || is_unsafe_arraycopy) {
+ bool is_unsafe_memory_access = thread->doing_unsafe_access() && UnsafeMemoryAccess::contains_pc(pc);
+ if ((nm != nullptr && nm->has_unsafe_access()) || is_unsafe_memory_access) {
address next_pc = Assembler::locate_next_instruction(pc);
- if (is_unsafe_arraycopy) {
- next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
+ if (is_unsafe_memory_access) {
+ next_pc = UnsafeMemoryAccess::page_error_continue_pc(pc);
}
stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
}
- }
- else
-
+ } else
#ifdef AMD64
- if (sig == SIGFPE &&
+ if (sig == SIGFPE &&
(info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV)) {
stub =
SharedRuntime::
@@ -317,8 +315,8 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
(sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
thread->doing_unsafe_access())) {
address next_pc = Assembler::locate_next_instruction(pc);
- if (UnsafeCopyMemory::contains_pc(pc)) {
- next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
+ if (UnsafeMemoryAccess::contains_pc(pc)) {
+ next_pc = UnsafeMemoryAccess::page_error_continue_pc(pc);
}
stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
}
diff --git a/src/hotspot/os_cpu/linux_x86/safefetch_linux_x86_32.S b/src/hotspot/os_cpu/linux_x86/safefetch_linux_x86_32.S
index 54775cb7e8e..a0f8577d8d1 100644
--- a/src/hotspot/os_cpu/linux_x86/safefetch_linux_x86_32.S
+++ b/src/hotspot/os_cpu/linux_x86/safefetch_linux_x86_32.S
@@ -36,7 +36,7 @@
# 8(%esp) : default value
# 4(%esp) : crash address
# 0(%esp) : return pc
- .type _SafeFetch32_impl,@function
+ .type SafeFetch32_impl,@function
SafeFetch32_impl:
movl 4(%esp),%ecx # load address from stack
_SafeFetch32_fault:
diff --git a/src/hotspot/share/adlc/adlparse.cpp b/src/hotspot/share/adlc/adlparse.cpp
index 57c6b0c5a99..033e8d26ca7 100644
--- a/src/hotspot/share/adlc/adlparse.cpp
+++ b/src/hotspot/share/adlc/adlparse.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -2896,14 +2896,6 @@ void ADLParser::ins_encode_parse_block(InstructForm& inst) {
encoding->add_parameter(opForm->_ident, param);
}
- if (!inst._is_postalloc_expand) {
- // Define a MacroAssembler instance for use by the encoding. The
- // name is chosen to match the __ idiom used for assembly in other
- // parts of hotspot and assumes the existence of the standard
- // #define __ _masm.
- encoding->add_code(" C2_MacroAssembler _masm(&cbuf);\n");
- }
-
// Parse the following %{ }% block
ins_encode_parse_block_impl(inst, encoding, ec_name);
@@ -5233,7 +5225,7 @@ void ADLParser::skipws_common(bool do_preproc) {
if (*_ptr == '\n') { // keep proper track of new lines
if (!do_preproc) break; // let caller handle the newline
next_line();
- _ptr = _curline; next = _ptr + 1;
+ _ptr = _curline; if (_ptr != nullptr) next = _ptr + 1;
}
else if ((*_ptr == '/') && (*next == '/')) // C++ comment
do { _ptr++; next++; } while(*_ptr != '\n'); // So go to end of line
diff --git a/src/hotspot/share/adlc/output_c.cpp b/src/hotspot/share/adlc/output_c.cpp
index b54e62663e7..77332b21c01 100644
--- a/src/hotspot/share/adlc/output_c.cpp
+++ b/src/hotspot/share/adlc/output_c.cpp
@@ -1902,7 +1902,7 @@ void ArchDesc::defineExpand(FILE *fp, InstructForm *node) {
// target specific instruction object encodings.
// Define the ___Node::emit() routine
//
-// (1) void ___Node::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+// (1) void ___Node::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
// (2) // ... encoding defined by user
// (3)
// (4) }
@@ -2301,7 +2301,7 @@ public:
// Check results of prior scan
if ( ! _may_reloc ) {
// Definitely don't need relocation information
- fprintf( _fp, "emit_%s(cbuf, ", d32_hi_lo );
+ fprintf( _fp, "emit_%s(masm, ", d32_hi_lo );
emit_replacement(); fprintf(_fp, ")");
}
else {
@@ -2315,26 +2315,26 @@ public:
fprintf(_fp,"if ( opnd_array(%d)->%s_reloc() != relocInfo::none ) {\n",
_operand_idx, disp_constant);
fprintf(_fp," ");
- fprintf(_fp,"emit_%s_reloc(cbuf, ", d32_hi_lo );
+ fprintf(_fp,"emit_%s_reloc(masm, ", d32_hi_lo );
emit_replacement(); fprintf(_fp,", ");
fprintf(_fp,"opnd_array(%d)->%s_reloc(), ",
_operand_idx, disp_constant);
fprintf(_fp, "%d", _reloc_form);fprintf(_fp, ");");
fprintf(_fp,"\n");
fprintf(_fp,"} else {\n");
- fprintf(_fp," emit_%s(cbuf, ", d32_hi_lo);
+ fprintf(_fp," emit_%s(masm, ", d32_hi_lo);
emit_replacement(); fprintf(_fp, ");\n"); fprintf(_fp,"}");
}
}
else if ( _doing_emit_d16 ) {
// Relocation of 16-bit values is not supported
- fprintf(_fp,"emit_d16(cbuf, ");
+ fprintf(_fp,"emit_d16(masm, ");
emit_replacement(); fprintf(_fp, ")");
// No relocation done for 16-bit values
}
else if ( _doing_emit8 ) {
// Relocation of 8-bit values is not supported
- fprintf(_fp,"emit_d8(cbuf, ");
+ fprintf(_fp,"emit_d8(masm, ");
emit_replacement(); fprintf(_fp, ")");
// No relocation done for 8-bit values
}
@@ -2675,7 +2675,7 @@ void ArchDesc::defineEmit(FILE* fp, InstructForm& inst) {
// (1)
// Output instruction's emit prototype
- fprintf(fp, "void %sNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {\n", inst._ident);
+ fprintf(fp, "void %sNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const {\n", inst._ident);
// If user did not define an encode section,
// provide stub that does not generate any machine code.
@@ -2685,12 +2685,9 @@ void ArchDesc::defineEmit(FILE* fp, InstructForm& inst) {
return;
}
- // Save current instruction's starting address (helps with relocation).
- fprintf(fp, " cbuf.set_insts_mark();\n");
-
// For MachConstantNodes which are ideal jump nodes, fill the jump table.
if (inst.is_mach_constant() && inst.is_ideal_jump()) {
- fprintf(fp, " ra_->C->output()->constant_table().fill_jump_table(cbuf, (MachConstantNode*) this, _index2label);\n");
+ fprintf(fp, " ra_->C->output()->constant_table().fill_jump_table(masm, (MachConstantNode*) this, _index2label);\n");
}
// Output each operand's offset into the array of registers.
diff --git a/src/hotspot/share/adlc/output_h.cpp b/src/hotspot/share/adlc/output_h.cpp
index 17a0fd0e01e..9d54d840688 100644
--- a/src/hotspot/share/adlc/output_h.cpp
+++ b/src/hotspot/share/adlc/output_h.cpp
@@ -1629,7 +1629,7 @@ void ArchDesc::declareClasses(FILE *fp) {
fprintf(fp," virtual bool requires_postalloc_expand() const { return true; }\n");
fprintf(fp," virtual void postalloc_expand(GrowableArray *nodes, PhaseRegAlloc *ra_);\n");
} else {
- fprintf(fp," virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;\n");
+ fprintf(fp," virtual void emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const;\n");
}
}
diff --git a/src/hotspot/share/asm/assembler.hpp b/src/hotspot/share/asm/assembler.hpp
index a533b963844..d05278f7fe1 100644
--- a/src/hotspot/share/asm/assembler.hpp
+++ b/src/hotspot/share/asm/assembler.hpp
@@ -227,7 +227,8 @@ class AbstractAssembler : public ResourceObj {
bool isByte(int x) const { return 0 <= x && x < 0x100; }
bool isShiftCount(int x) const { return 0 <= x && x < 32; }
- // Instruction boundaries (required when emitting relocatable values).
+ // Mark instruction boundaries, this is required when emitting relocatable values.
+ // Basically, all instructions that directly or indirectly use Assembler::emit_data* methods.
class InstructionMark: public StackObj {
private:
AbstractAssembler* _assm;
@@ -366,6 +367,7 @@ class AbstractAssembler : public ResourceObj {
CodeBuffer* code() const { return code_section()->outer(); }
int sect() const { return code_section()->index(); }
address pc() const { return code_section()->end(); }
+ address begin() const { return code_section()->start(); }
int offset() const { return code_section()->size(); }
int locator() const { return CodeBuffer::locator(offset(), sect()); }
@@ -374,10 +376,11 @@ class AbstractAssembler : public ResourceObj {
void register_skipped(int size) { code_section()->register_skipped(size); }
- address inst_mark() const { return code_section()->mark(); }
- void set_inst_mark() { code_section()->set_mark(); }
- void clear_inst_mark() { code_section()->clear_mark(); }
-
+ address inst_mark() const { return code_section()->mark(); }
+ void set_inst_mark() { code_section()->set_mark(); }
+ void set_inst_mark(address addr) { code_section()->set_mark(addr); }
+ void clear_inst_mark() { code_section()->clear_mark(); }
+ void set_inst_end(address addr) { code_section()->set_end(addr); }
// Constants in code
void relocate(RelocationHolder const& rspec, int format = 0) {
@@ -389,6 +392,12 @@ class AbstractAssembler : public ResourceObj {
void relocate( relocInfo::relocType rtype, int format = 0) {
code_section()->relocate(code_section()->end(), rtype, format);
}
+ void relocate(address addr, relocInfo::relocType rtype, int format = 0) {
+ code_section()->relocate(addr, rtype, format);
+ }
+ void relocate(address addr, RelocationHolder const& rspec, int format = 0) {
+ code_section()->relocate(addr, rspec, format);
+ }
static int code_fill_byte(); // used to pad out odd-sized code buffers
diff --git a/src/hotspot/share/asm/codeBuffer.cpp b/src/hotspot/share/asm/codeBuffer.cpp
index 4bc33cc89a6..e818853870e 100644
--- a/src/hotspot/share/asm/codeBuffer.cpp
+++ b/src/hotspot/share/asm/codeBuffer.cpp
@@ -1012,6 +1012,8 @@ void CodeBuffer::log_section_sizes(const char* name) {
}
bool CodeBuffer::finalize_stubs() {
+ // Record size of code before we generate stubs in instructions section
+ _main_code_size = _insts.size();
if (_finalize_stubs && !pd_finalize_stubs()) {
// stub allocation failure
return false;
diff --git a/src/hotspot/share/asm/codeBuffer.hpp b/src/hotspot/share/asm/codeBuffer.hpp
index 48476bedfe2..5cff78a96cc 100644
--- a/src/hotspot/share/asm/codeBuffer.hpp
+++ b/src/hotspot/share/asm/codeBuffer.hpp
@@ -427,6 +427,9 @@ class CodeBuffer: public StackObj DEBUG_ONLY(COMMA private Scrubber) {
address _total_start; // first address of combined memory buffer
csize_t _total_size; // size in bytes of combined memory buffer
+ // Size of code without stubs generated at the end of instructions section
+ csize_t _main_code_size;
+
OopRecorder* _oop_recorder;
OopRecorder _default_oop_recorder; // override with initialize_oop_recorder
@@ -457,6 +460,7 @@ class CodeBuffer: public StackObj DEBUG_ONLY(COMMA private Scrubber) {
_oop_recorder = nullptr;
_overflow_arena = nullptr;
_last_insn = nullptr;
+ _main_code_size = 0;
_finalize_stubs = false;
_shared_stub_to_interp_requests = nullptr;
_shared_trampoline_requests = nullptr;
@@ -630,6 +634,9 @@ class CodeBuffer: public StackObj DEBUG_ONLY(COMMA private Scrubber) {
// number of bytes remaining in the insts section
csize_t insts_remaining() const { return _insts.remaining(); }
+ // size of code without stubs in instruction section
+ csize_t main_code_size() const { return _main_code_size; }
+
// is a given address in the insts section? (2nd version is end-inclusive)
bool insts_contains(address pc) const { return _insts.contains(pc); }
bool insts_contains2(address pc) const { return _insts.contains2(pc); }
diff --git a/src/hotspot/share/c1/c1_Compiler.cpp b/src/hotspot/share/c1/c1_Compiler.cpp
index bdbeb39f89a..6e518b0213b 100644
--- a/src/hotspot/share/c1/c1_Compiler.cpp
+++ b/src/hotspot/share/c1/c1_Compiler.cpp
@@ -235,6 +235,9 @@ bool Compiler::is_intrinsic_supported(vmIntrinsics::ID id) {
case vmIntrinsics::_counterTime:
#endif
case vmIntrinsics::_getObjectSize:
+#if defined(X86) || defined(AARCH64)
+ case vmIntrinsics::_clone:
+#endif
break;
case vmIntrinsics::_blackhole:
break;
diff --git a/src/hotspot/share/c1/c1_GraphBuilder.cpp b/src/hotspot/share/c1/c1_GraphBuilder.cpp
index 396c83c6ab9..db025883b78 100644
--- a/src/hotspot/share/c1/c1_GraphBuilder.cpp
+++ b/src/hotspot/share/c1/c1_GraphBuilder.cpp
@@ -1556,8 +1556,7 @@ void GraphBuilder::call_register_finalizer() {
void GraphBuilder::method_return(Value x, bool ignore_return) {
- if (RegisterFinalizersAtInit &&
- method()->intrinsic_id() == vmIntrinsics::_Object_init) {
+ if (method()->intrinsic_id() == vmIntrinsics::_Object_init) {
call_register_finalizer();
}
@@ -2027,8 +2026,11 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
int index = state()->stack_size() - (target->arg_size_no_receiver() + 1);
receiver = state()->stack_at(index);
ciType* type = receiver->exact_type();
- if (type != nullptr && type->is_loaded() &&
- type->is_instance_klass() && !type->as_instance_klass()->is_interface()) {
+ if (type != nullptr && type->is_loaded()) {
+ assert(!type->is_instance_klass() || !type->as_instance_klass()->is_interface(), "Must not be an interface");
+ // Detects non-interface instances, primitive arrays, and some object arrays.
+ // Array receivers can only call Object methods, so we should be able to allow
+ // all object arrays here too, even those with unloaded types.
receiver_klass = (ciInstanceKlass*) type;
type_is_exact = true;
}
@@ -2244,7 +2246,7 @@ void GraphBuilder::new_instance(int klass_index) {
void GraphBuilder::new_type_array() {
ValueStack* state_before = copy_state_exhandling();
- apush(append_split(new NewTypeArray(ipop(), (BasicType)stream()->get_index(), state_before)));
+ apush(append_split(new NewTypeArray(ipop(), (BasicType)stream()->get_index(), state_before, true)));
}
@@ -3651,9 +3653,13 @@ void GraphBuilder::build_graph_for_intrinsic(ciMethod* callee, bool ignore_retur
case vmIntrinsics::_getAndSetReference : append_unsafe_get_and_set(callee, false); return;
case vmIntrinsics::_getCharStringU : append_char_access(callee, false); return;
case vmIntrinsics::_putCharStringU : append_char_access(callee, true); return;
+ case vmIntrinsics::_clone : append_alloc_array_copy(callee); return;
default:
break;
}
+ if (_inline_bailout_msg != nullptr) {
+ return;
+ }
// create intrinsic node
const bool has_receiver = !callee->is_static();
@@ -3715,6 +3721,9 @@ bool GraphBuilder::try_inline_intrinsics(ciMethod* callee, bool ignore_return) {
}
}
build_graph_for_intrinsic(callee, ignore_return);
+ if (_inline_bailout_msg != nullptr) {
+ return false;
+ }
return true;
}
@@ -4428,6 +4437,43 @@ void GraphBuilder::append_char_access(ciMethod* callee, bool is_store) {
}
}
+void GraphBuilder::append_alloc_array_copy(ciMethod* callee) {
+ const int args_base = state()->stack_size() - callee->arg_size();
+ ciType* receiver_type = state()->stack_at(args_base)->exact_type();
+ if (receiver_type == nullptr) {
+ inline_bailout("must have a receiver");
+ return;
+ }
+ if (!receiver_type->is_type_array_klass()) {
+ inline_bailout("clone array not primitive");
+ return;
+ }
+
+ ValueStack* state_before = copy_state_before();
+ state_before->set_force_reexecute();
+ Value src = apop();
+ BasicType basic_type = src->exact_type()->as_array_klass()->element_type()->basic_type();
+ Value length = append(new ArrayLength(src, state_before));
+ Value new_array = append_split(new NewTypeArray(length, basic_type, state_before, false));
+
+ ValueType* result_type = as_ValueType(callee->return_type());
+ vmIntrinsics::ID id = vmIntrinsics::_arraycopy;
+ Values* args = new Values(5);
+ args->push(src);
+ args->push(append(new Constant(new IntConstant(0))));
+ args->push(new_array);
+ args->push(append(new Constant(new IntConstant(0))));
+ args->push(length);
+ const bool has_receiver = true;
+ Intrinsic* array_copy = new Intrinsic(result_type, id,
+ args, has_receiver, state_before,
+ vmIntrinsics::preserves_state(id),
+ vmIntrinsics::can_trap(id));
+ array_copy->set_flag(Instruction::OmitChecksFlag, true);
+ append_split(array_copy);
+ apush(new_array);
+}
+
void GraphBuilder::print_inlining(ciMethod* callee, const char* msg, bool success) {
CompileLog* log = compilation()->log();
if (log != nullptr) {
diff --git a/src/hotspot/share/c1/c1_GraphBuilder.hpp b/src/hotspot/share/c1/c1_GraphBuilder.hpp
index 42233455d4c..92b9a518a20 100644
--- a/src/hotspot/share/c1/c1_GraphBuilder.hpp
+++ b/src/hotspot/share/c1/c1_GraphBuilder.hpp
@@ -379,6 +379,7 @@ class GraphBuilder {
void append_unsafe_CAS(ciMethod* callee);
void append_unsafe_get_and_set(ciMethod* callee, bool is_add);
void append_char_access(ciMethod* callee, bool is_store);
+ void append_alloc_array_copy(ciMethod* callee);
void print_inlining(ciMethod* callee, const char* msg, bool success = true);
diff --git a/src/hotspot/share/c1/c1_Instruction.hpp b/src/hotspot/share/c1/c1_Instruction.hpp
index 8f7fd698e79..32ff3d9f61c 100644
--- a/src/hotspot/share/c1/c1_Instruction.hpp
+++ b/src/hotspot/share/c1/c1_Instruction.hpp
@@ -364,6 +364,7 @@ class Instruction: public CompilationResourceObj {
InWorkListFlag,
DeoptimizeOnException,
KillsMemoryFlag,
+ OmitChecksFlag,
InstructionLastFlag
};
@@ -1327,16 +1328,19 @@ BASE(NewArray, StateSplit)
LEAF(NewTypeArray, NewArray)
private:
BasicType _elt_type;
+ bool _zero_array;
public:
// creation
- NewTypeArray(Value length, BasicType elt_type, ValueStack* state_before)
+ NewTypeArray(Value length, BasicType elt_type, ValueStack* state_before, bool zero_array)
: NewArray(length, state_before)
, _elt_type(elt_type)
+ , _zero_array(zero_array)
{}
// accessors
BasicType elt_type() const { return _elt_type; }
+ bool zero_array() const { return _zero_array; }
ciType* exact_type() const;
};
diff --git a/src/hotspot/share/c1/c1_LIR.cpp b/src/hotspot/share/c1/c1_LIR.cpp
index dee208c11be..4017a5324b5 100644
--- a/src/hotspot/share/c1/c1_LIR.cpp
+++ b/src/hotspot/share/c1/c1_LIR.cpp
@@ -353,7 +353,15 @@ LIR_OpArrayCopy::LIR_OpArrayCopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_
, _tmp(tmp)
, _expected_type(expected_type)
, _flags(flags) {
+#if defined(X86) || defined(AARCH64)
+ if (expected_type != nullptr && flags == 0) {
+ _stub = nullptr;
+ } else {
+ _stub = new ArrayCopyStub(this);
+ }
+#else
_stub = new ArrayCopyStub(this);
+#endif
}
LIR_OpUpdateCRC32::LIR_OpUpdateCRC32(LIR_Opr crc, LIR_Opr val, LIR_Opr res)
@@ -999,7 +1007,10 @@ void LIR_OpLabel::emit_code(LIR_Assembler* masm) {
void LIR_OpArrayCopy::emit_code(LIR_Assembler* masm) {
masm->emit_arraycopy(this);
- masm->append_code_stub(stub());
+ ArrayCopyStub* code_stub = stub();
+ if (code_stub != nullptr) {
+ masm->append_code_stub(code_stub);
+ }
}
void LIR_OpUpdateCRC32::emit_code(LIR_Assembler* masm) {
@@ -1365,7 +1376,7 @@ void LIR_List::allocate_object(LIR_Opr dst, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3,
stub));
}
-void LIR_List::allocate_array(LIR_Opr dst, LIR_Opr len, LIR_Opr t1,LIR_Opr t2, LIR_Opr t3,LIR_Opr t4, BasicType type, LIR_Opr klass, CodeStub* stub) {
+void LIR_List::allocate_array(LIR_Opr dst, LIR_Opr len, LIR_Opr t1,LIR_Opr t2, LIR_Opr t3,LIR_Opr t4, BasicType type, LIR_Opr klass, CodeStub* stub, bool zero_array) {
append(new LIR_OpAllocArray(
klass,
len,
@@ -1375,7 +1386,8 @@ void LIR_List::allocate_array(LIR_Opr dst, LIR_Opr len, LIR_Opr t1,LIR_Opr t2, L
t3,
t4,
type,
- stub));
+ stub,
+ zero_array));
}
void LIR_List::shift_left(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp) {
diff --git a/src/hotspot/share/c1/c1_LIR.hpp b/src/hotspot/share/c1/c1_LIR.hpp
index 6f527135fbe..c69d29f8d61 100644
--- a/src/hotspot/share/c1/c1_LIR.hpp
+++ b/src/hotspot/share/c1/c1_LIR.hpp
@@ -1750,9 +1750,10 @@ class LIR_OpAllocArray : public LIR_Op {
LIR_Opr _tmp4;
BasicType _type;
CodeStub* _stub;
+ bool _zero_array;
public:
- LIR_OpAllocArray(LIR_Opr klass, LIR_Opr len, LIR_Opr result, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, BasicType type, CodeStub* stub)
+ LIR_OpAllocArray(LIR_Opr klass, LIR_Opr len, LIR_Opr result, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, BasicType type, CodeStub* stub, bool zero_array)
: LIR_Op(lir_alloc_array, result, nullptr)
, _klass(klass)
, _len(len)
@@ -1761,7 +1762,8 @@ class LIR_OpAllocArray : public LIR_Op {
, _tmp3(t3)
, _tmp4(t4)
, _type(type)
- , _stub(stub) {}
+ , _stub(stub)
+ , _zero_array(zero_array) {}
LIR_Opr klass() const { return _klass; }
LIR_Opr len() const { return _len; }
@@ -1772,6 +1774,7 @@ class LIR_OpAllocArray : public LIR_Op {
LIR_Opr tmp4() const { return _tmp4; }
BasicType type() const { return _type; }
CodeStub* stub() const { return _stub; }
+ bool zero_array() const { return _zero_array; }
virtual void emit_code(LIR_Assembler* masm);
virtual LIR_OpAllocArray * as_OpAllocArray () { return this; }
@@ -2302,7 +2305,7 @@ class LIR_List: public CompilationResourceObj {
void irem(LIR_Opr left, int right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
void allocate_object(LIR_Opr dst, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, int header_size, int object_size, LIR_Opr klass, bool init_check, CodeStub* stub);
- void allocate_array(LIR_Opr dst, LIR_Opr len, LIR_Opr t1,LIR_Opr t2, LIR_Opr t3,LIR_Opr t4, BasicType type, LIR_Opr klass, CodeStub* stub);
+ void allocate_array(LIR_Opr dst, LIR_Opr len, LIR_Opr t1,LIR_Opr t2, LIR_Opr t3,LIR_Opr t4, BasicType type, LIR_Opr klass, CodeStub* stub, bool zero_array = true);
// jump is an unconditional branch
void jump(BlockBegin* block) {
diff --git a/src/hotspot/share/c1/c1_LIRGenerator.cpp b/src/hotspot/share/c1/c1_LIRGenerator.cpp
index 1cb350f1e39..cd084135658 100644
--- a/src/hotspot/share/c1/c1_LIRGenerator.cpp
+++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp
@@ -35,6 +35,7 @@
#include "ci/ciObjArray.hpp"
#include "ci/ciUtilities.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
+#include "compiler/compilerOracle.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "oops/klass.inline.hpp"
@@ -3216,7 +3217,7 @@ void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
// Notify the runtime very infrequently only to take care of counter overflows
int freq_log = Tier23InlineeNotifyFreqLog;
double scale;
- if (_method->has_option_value(CompileCommand::CompileThresholdScaling, scale)) {
+ if (_method->has_option_value(CompileCommandEnum::CompileThresholdScaling, scale)) {
freq_log = CompilerConfig::scaled_freq_log(freq_log, scale);
}
increment_event_counter_impl(info, x->inlinee(), LIR_OprFact::intConst(InvocationCounter::count_increment), right_n_bits(freq_log), InvocationEntryBci, false, true);
@@ -3257,7 +3258,7 @@ void LIRGenerator::increment_event_counter(CodeEmitInfo* info, LIR_Opr step, int
}
// Increment the appropriate invocation/backedge counter and notify the runtime.
double scale;
- if (_method->has_option_value(CompileCommand::CompileThresholdScaling, scale)) {
+ if (_method->has_option_value(CompileCommandEnum::CompileThresholdScaling, scale)) {
freq_log = CompilerConfig::scaled_freq_log(freq_log, scale);
}
increment_event_counter_impl(info, info->scope()->method(), step, right_n_bits(freq_log), bci, backedge, true);
diff --git a/src/hotspot/share/c1/c1_Runtime1.cpp b/src/hotspot/share/c1/c1_Runtime1.cpp
index ffebe389644..31ae3f6bee0 100644
--- a/src/hotspot/share/c1/c1_Runtime1.cpp
+++ b/src/hotspot/share/c1/c1_Runtime1.cpp
@@ -1059,8 +1059,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_
break;
}
case Bytecodes::_invokedynamic: {
- int indy_index = pool->decode_invokedynamic_index(index);
- appendix = Handle(current, pool->cache()->set_dynamic_call(info, indy_index));
+ appendix = Handle(current, pool->cache()->set_dynamic_call(info, index));
break;
}
default: fatal("unexpected bytecode for load_appendix_patching_id");
diff --git a/src/hotspot/share/c1/c1_ValueStack.hpp b/src/hotspot/share/c1/c1_ValueStack.hpp
index 0a75fa39bf6..bb0c475585c 100644
--- a/src/hotspot/share/c1/c1_ValueStack.hpp
+++ b/src/hotspot/share/c1/c1_ValueStack.hpp
@@ -58,6 +58,7 @@ class ValueStack: public CompilationResourceObj {
Values _locals; // the locals
Values _stack; // the expression stack
Values* _locks; // the monitor stack (holding the locked values)
+ bool _force_reexecute; // force the reexecute flag on, used for patching stub
Value check(ValueTag tag, Value t) {
assert(tag == t->type()->tag() || (tag == objectTag && t->type()->tag() == addressTag), "types must correspond");
@@ -225,6 +226,9 @@ class ValueStack: public CompilationResourceObj {
void setup_phi_for_stack(BlockBegin* b, int index);
void setup_phi_for_local(BlockBegin* b, int index);
+ bool force_reexecute() const { return _force_reexecute; }
+ void set_force_reexecute() { _force_reexecute = true; }
+
// debugging
void print() PRODUCT_RETURN;
void verify() PRODUCT_RETURN;
diff --git a/src/hotspot/share/cds/archiveBuilder.cpp b/src/hotspot/share/cds/archiveBuilder.cpp
index 841b68a9fb2..a98fd04ba68 100644
--- a/src/hotspot/share/cds/archiveBuilder.cpp
+++ b/src/hotspot/share/cds/archiveBuilder.cpp
@@ -39,6 +39,7 @@
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "interpreter/abstractInterpreter.hpp"
+#include "jvm.h"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allStatic.hpp"
@@ -76,6 +77,7 @@ ArchiveBuilder::SourceObjList::~SourceObjList() {
void ArchiveBuilder::SourceObjList::append(SourceObjInfo* src_info) {
// Save this source object for copying
+ src_info->set_id(_objs->length());
_objs->append(src_info);
// Prepare for marking the pointers in this source object
@@ -93,6 +95,7 @@ void ArchiveBuilder::SourceObjList::append(SourceObjInfo* src_info) {
void ArchiveBuilder::SourceObjList::remember_embedded_pointer(SourceObjInfo* src_info, MetaspaceClosure::Ref* ref) {
// src_obj contains a pointer. Remember the location of this pointer in _ptrmap,
// so that we can copy/relocate it later.
+ src_info->set_has_embedded_pointer();
address src_obj = src_info->source_addr();
address* field_addr = ref->addr();
assert(src_info->ptrmap_start() < _total_bytes, "sanity");
@@ -143,7 +146,7 @@ void ArchiveBuilder::SourceObjList::relocate(int i, ArchiveBuilder* builder) {
}
ArchiveBuilder::ArchiveBuilder() :
- _current_dump_space(nullptr),
+ _current_dump_region(nullptr),
_buffer_bottom(nullptr),
_last_verified_top(nullptr),
_num_dump_regions_used(0),
@@ -158,6 +161,8 @@ ArchiveBuilder::ArchiveBuilder() :
_rw_region("rw", MAX_SHARED_DELTA),
_ro_region("ro", MAX_SHARED_DELTA),
_ptrmap(mtClassShared),
+ _rw_ptrmap(mtClassShared),
+ _ro_ptrmap(mtClassShared),
_rw_src_objs(),
_ro_src_objs(),
_src_obj_table(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE),
@@ -168,7 +173,7 @@ ArchiveBuilder::ArchiveBuilder() :
{
_klasses = new (mtClassShared) GrowableArray(4 * K, mtClassShared);
_symbols = new (mtClassShared) GrowableArray(256 * K, mtClassShared);
-
+ _entropy_seed = 0x12345678;
assert(_current == nullptr, "must be");
_current = this;
}
@@ -188,6 +193,16 @@ ArchiveBuilder::~ArchiveBuilder() {
}
}
+// Returns a deterministic sequence of pseudo random numbers. The main purpose is NOT
+// for randomness but to get good entropy for the identity_hash() of archived Symbols,
+// while keeping the contents of static CDS archives deterministic to ensure
+// reproducibility of JDK builds.
+int ArchiveBuilder::entropy() {
+ assert(SafepointSynchronize::is_at_safepoint(), "needed to ensure deterministic sequence");
+ _entropy_seed = os::next_random(_entropy_seed);
+ return static_cast(_entropy_seed);
+}
+
class GatherKlassesAndSymbols : public UniqueMetaspaceClosure {
ArchiveBuilder* _builder;
@@ -326,10 +341,10 @@ address ArchiveBuilder::reserve_buffer() {
_buffer_bottom = buffer_bottom;
_last_verified_top = buffer_bottom;
- _current_dump_space = &_rw_region;
+ _current_dump_region = &_rw_region;
_num_dump_regions_used = 1;
_other_region_used_bytes = 0;
- _current_dump_space->init(&_shared_rs, &_shared_vs);
+ _current_dump_region->init(&_shared_rs, &_shared_vs);
ArchivePtrMarker::initialize(&_ptrmap, &_shared_vs);
@@ -545,21 +560,21 @@ ArchiveBuilder::FollowMode ArchiveBuilder::get_follow_mode(MetaspaceClosure::Ref
}
}
-void ArchiveBuilder::start_dump_space(DumpRegion* next) {
+void ArchiveBuilder::start_dump_region(DumpRegion* next) {
address bottom = _last_verified_top;
- address top = (address)(current_dump_space()->top());
+ address top = (address)(current_dump_region()->top());
_other_region_used_bytes += size_t(top - bottom);
- current_dump_space()->pack(next);
- _current_dump_space = next;
+ current_dump_region()->pack(next);
+ _current_dump_region = next;
_num_dump_regions_used ++;
- _last_verified_top = (address)(current_dump_space()->top());
+ _last_verified_top = (address)(current_dump_region()->top());
}
void ArchiveBuilder::verify_estimate_size(size_t estimate, const char* which) {
address bottom = _last_verified_top;
- address top = (address)(current_dump_space()->top());
+ address top = (address)(current_dump_region()->top());
size_t used = size_t(top - bottom) + _other_region_used_bytes;
int diff = int(estimate) - int(used);
@@ -576,6 +591,26 @@ char* ArchiveBuilder::ro_strdup(const char* s) {
return archived_str;
}
+// The objects that have embedded pointers will sink
+// towards the end of the list. This ensures we have a maximum
+// number of leading zero bits in the relocation bitmap.
+int ArchiveBuilder::compare_src_objs(SourceObjInfo** a, SourceObjInfo** b) {
+ if ((*a)->has_embedded_pointer() && !(*b)->has_embedded_pointer()) {
+ return 1;
+ } else if (!(*a)->has_embedded_pointer() && (*b)->has_embedded_pointer()) {
+ return -1;
+ } else {
+ // This is necessary to keep the sorting order stable. Otherwise the
+ // archive's contents may not be deterministic.
+ return (*a)->id() - (*b)->id();
+ }
+}
+
+void ArchiveBuilder::sort_metadata_objs() {
+ _rw_src_objs.objs()->sort(compare_src_objs);
+ _ro_src_objs.objs()->sort(compare_src_objs);
+}
+
void ArchiveBuilder::dump_rw_metadata() {
ResourceMark rm;
log_info(cds)("Allocating RW objects ... ");
@@ -595,7 +630,7 @@ void ArchiveBuilder::dump_ro_metadata() {
ResourceMark rm;
log_info(cds)("Allocating RO objects ... ");
- start_dump_space(&_ro_region);
+ start_dump_region(&_ro_region);
make_shallow_copies(&_ro_region, &_ro_src_objs);
#if INCLUDE_CDS_JAVA_HEAP
@@ -1275,8 +1310,11 @@ void ArchiveBuilder::write_archive(FileMapInfo* mapinfo, ArchiveHeapInfo* heap_i
write_region(mapinfo, MetaspaceShared::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
+ // Split pointer map into read-write and read-only bitmaps
+ ArchivePtrMarker::initialize_rw_ro_maps(&_rw_ptrmap, &_ro_ptrmap);
+
size_t bitmap_size_in_bytes;
- char* bitmap = mapinfo->write_bitmap_region(ArchivePtrMarker::ptrmap(), heap_info,
+ char* bitmap = mapinfo->write_bitmap_region(ArchivePtrMarker::rw_ptrmap(), ArchivePtrMarker::ro_ptrmap(), heap_info,
bitmap_size_in_bytes);
if (heap_info->is_used()) {
diff --git a/src/hotspot/share/cds/archiveBuilder.hpp b/src/hotspot/share/cds/archiveBuilder.hpp
index 4f811a0c512..cbde5a7e02c 100644
--- a/src/hotspot/share/cds/archiveBuilder.hpp
+++ b/src/hotspot/share/cds/archiveBuilder.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -91,7 +91,7 @@ const int SharedSpaceObjectAlignment = KlassAlignmentInBytes;
//
class ArchiveBuilder : public StackObj {
protected:
- DumpRegion* _current_dump_space;
+ DumpRegion* _current_dump_region;
address _buffer_bottom; // for writing the contents of rw/ro regions
address _last_verified_top;
int _num_dump_regions_used;
@@ -114,7 +114,7 @@ protected:
intx _buffer_to_requested_delta;
- DumpRegion* current_dump_space() const { return _current_dump_space; }
+ DumpRegion* current_dump_region() const { return _current_dump_region; }
public:
enum FollowMode {
@@ -126,15 +126,18 @@ private:
uintx _ptrmap_start; // The bit-offset of the start of this object (inclusive)
uintx _ptrmap_end; // The bit-offset of the end of this object (exclusive)
bool _read_only;
+ bool _has_embedded_pointer;
FollowMode _follow_mode;
int _size_in_bytes;
+ int _id; // Each object has a unique serial ID, starting from zero. The ID is assigned
+ // when the object is added into _source_objs.
MetaspaceObj::Type _msotype;
address _source_addr; // The source object to be copied.
address _buffered_addr; // The copy of this object insider the buffer.
public:
SourceObjInfo(MetaspaceClosure::Ref* ref, bool read_only, FollowMode follow_mode) :
- _ptrmap_start(0), _ptrmap_end(0), _read_only(read_only), _follow_mode(follow_mode),
- _size_in_bytes(ref->size() * BytesPerWord), _msotype(ref->msotype()),
+ _ptrmap_start(0), _ptrmap_end(0), _read_only(read_only), _has_embedded_pointer(false), _follow_mode(follow_mode),
+ _size_in_bytes(ref->size() * BytesPerWord), _id(0), _msotype(ref->msotype()),
_source_addr(ref->obj()) {
if (follow_mode == point_to_it) {
_buffered_addr = ref->obj();
@@ -164,7 +167,11 @@ private:
uintx ptrmap_start() const { return _ptrmap_start; } // inclusive
uintx ptrmap_end() const { return _ptrmap_end; } // exclusive
bool read_only() const { return _read_only; }
+ bool has_embedded_pointer() const { return _has_embedded_pointer; }
+ void set_has_embedded_pointer() { _has_embedded_pointer = true; }
int size_in_bytes() const { return _size_in_bytes; }
+ int id() const { return _id; }
+ void set_id(int i) { _id = i; }
address source_addr() const { return _source_addr; }
address buffered_addr() const {
if (_follow_mode != set_to_null) {
@@ -204,7 +211,14 @@ private:
DumpRegion _rw_region;
DumpRegion _ro_region;
- CHeapBitMap _ptrmap; // bitmap used by ArchivePtrMarker
+
+ // Combined bitmap to track pointers in both RW and RO regions. This is updated
+ // as objects are copied into RW and RO.
+ CHeapBitMap _ptrmap;
+
+ // _ptrmap is split into these two bitmaps which are written into the archive.
+ CHeapBitMap _rw_ptrmap; // marks pointers in the RW region
+ CHeapBitMap _ro_ptrmap; // marks pointers in the RO region
SourceObjList _rw_src_objs; // objs to put in rw region
SourceObjList _ro_src_objs; // objs to put in ro region
@@ -212,6 +226,7 @@ private:
ResizeableResourceHashtable _buffered_to_src_table;
GrowableArray* _klasses;
GrowableArray* _symbols;
+ unsigned int _entropy_seed;
// statistics
DumpAllocStats _alloc_stats;
@@ -263,17 +278,17 @@ protected:
size_t estimate_archive_size();
- void start_dump_space(DumpRegion* next);
+ void start_dump_region(DumpRegion* next);
void verify_estimate_size(size_t estimate, const char* which);
public:
address reserve_buffer();
- address buffer_bottom() const { return _buffer_bottom; }
- address buffer_top() const { return (address)current_dump_space()->top(); }
- address requested_static_archive_bottom() const { return _requested_static_archive_bottom; }
- address mapped_static_archive_bottom() const { return _mapped_static_archive_bottom; }
- intx buffer_to_requested_delta() const { return _buffer_to_requested_delta; }
+ address buffer_bottom() const { return _buffer_bottom; }
+ address buffer_top() const { return (address)current_dump_region()->top(); }
+ address requested_static_archive_bottom() const { return _requested_static_archive_bottom; }
+ address mapped_static_archive_bottom() const { return _mapped_static_archive_bottom; }
+ intx buffer_to_requested_delta() const { return _buffer_to_requested_delta; }
bool is_in_buffer_space(address p) const {
return (buffer_bottom() <= p && p < buffer_top());
@@ -334,6 +349,7 @@ public:
ArchiveBuilder();
~ArchiveBuilder();
+ int entropy();
void gather_klasses_and_symbols();
void gather_source_objs();
bool gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool read_only);
@@ -375,6 +391,8 @@ public:
char* ro_strdup(const char* s);
+ static int compare_src_objs(SourceObjInfo** a, SourceObjInfo** b);
+ void sort_metadata_objs();
void dump_rw_metadata();
void dump_ro_metadata();
void relocate_metaspaceobj_embedded_pointers();
diff --git a/src/hotspot/share/cds/archiveHeapLoader.cpp b/src/hotspot/share/cds/archiveHeapLoader.cpp
index fe30be16427..2ef502a3643 100644
--- a/src/hotspot/share/cds/archiveHeapLoader.cpp
+++ b/src/hotspot/share/cds/archiveHeapLoader.cpp
@@ -442,7 +442,7 @@ void ArchiveHeapLoader::patch_native_pointers() {
FileMapRegion* r = FileMapInfo::current_info()->region_at(MetaspaceShared::hp);
if (r->mapped_base() != nullptr && r->has_ptrmap()) {
log_info(cds, heap)("Patching native pointers in heap region");
- BitMapView bm = r->ptrmap_view();
+ BitMapView bm = FileMapInfo::current_info()->ptrmap_view(MetaspaceShared::hp);
PatchNativePointers patcher((Metadata**)r->mapped_base() + FileMapInfo::current_info()->heap_ptrmap_start_pos());
bm.iterate(&patcher);
}
diff --git a/src/hotspot/share/cds/archiveHeapWriter.cpp b/src/hotspot/share/cds/archiveHeapWriter.cpp
index a86996d3b1f..d526a961b7f 100644
--- a/src/hotspot/share/cds/archiveHeapWriter.cpp
+++ b/src/hotspot/share/cds/archiveHeapWriter.cpp
@@ -67,8 +67,10 @@ ArchiveHeapWriter::BufferOffsetToSourceObjectTable*
ArchiveHeapWriter::_buffer_offset_to_source_obj_table = nullptr;
-typedef ResourceHashtable FillersTable;
static FillersTable* _fillers;
@@ -361,12 +363,12 @@ void ArchiveHeapWriter::maybe_fill_gc_region_gap(size_t required_byte_size) {
array_length, fill_bytes, _buffer_used);
HeapWord* filler = init_filler_array_at_buffer_top(array_length, fill_bytes);
_buffer_used = filler_end;
- _fillers->put((address)filler, fill_bytes);
+ _fillers->put(buffered_address_to_offset((address)filler), fill_bytes);
}
}
size_t ArchiveHeapWriter::get_filler_size_at(address buffered_addr) {
- size_t* p = _fillers->get(buffered_addr);
+ size_t* p = _fillers->get(buffered_address_to_offset(buffered_addr));
if (p != nullptr) {
assert(*p > 0, "filler must be larger than zero bytes");
return *p;
@@ -530,10 +532,8 @@ void ArchiveHeapWriter::update_header_for_requested_obj(oop requested_obj, oop s
fake_oop->set_narrow_klass(nk);
// We need to retain the identity_hash, because it may have been used by some hashtables
- // in the shared heap. This also has the side effect of pre-initializing the
- // identity_hash for all shared objects, so they are less likely to be written
- // into during run time, increasing the potential of memory sharing.
- if (src_obj != nullptr) {
+ // in the shared heap.
+ if (src_obj != nullptr && !src_obj->fast_no_hash_check()) {
intptr_t src_hash = src_obj->identity_hash();
fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
assert(fake_oop->mark().is_unlocked(), "sanity");
diff --git a/src/hotspot/share/cds/archiveUtils.cpp b/src/hotspot/share/cds/archiveUtils.cpp
index b14dfc8c33e..8fd20e20267 100644
--- a/src/hotspot/share/cds/archiveUtils.cpp
+++ b/src/hotspot/share/cds/archiveUtils.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -46,12 +46,16 @@
#include "utilities/globalDefinitions.hpp"
CHeapBitMap* ArchivePtrMarker::_ptrmap = nullptr;
+CHeapBitMap* ArchivePtrMarker::_rw_ptrmap = nullptr;
+CHeapBitMap* ArchivePtrMarker::_ro_ptrmap = nullptr;
VirtualSpace* ArchivePtrMarker::_vs;
bool ArchivePtrMarker::_compacted;
void ArchivePtrMarker::initialize(CHeapBitMap* ptrmap, VirtualSpace* vs) {
assert(_ptrmap == nullptr, "initialize only once");
+ assert(_rw_ptrmap == nullptr, "initialize only once");
+ assert(_ro_ptrmap == nullptr, "initialize only once");
_vs = vs;
_compacted = false;
_ptrmap = ptrmap;
@@ -67,6 +71,37 @@ void ArchivePtrMarker::initialize(CHeapBitMap* ptrmap, VirtualSpace* vs) {
_ptrmap->initialize(estimated_archive_size / sizeof(intptr_t));
}
+void ArchivePtrMarker::initialize_rw_ro_maps(CHeapBitMap* rw_ptrmap, CHeapBitMap* ro_ptrmap) {
+ address* rw_bottom = (address*)ArchiveBuilder::current()->rw_region()->base();
+ address* ro_bottom = (address*)ArchiveBuilder::current()->ro_region()->base();
+
+ _rw_ptrmap = rw_ptrmap;
+ _ro_ptrmap = ro_ptrmap;
+
+ size_t rw_size = ArchiveBuilder::current()->rw_region()->used() / sizeof(address);
+ size_t ro_size = ArchiveBuilder::current()->ro_region()->used() / sizeof(address);
+ // ro_start is the first bit in _ptrmap that covers the pointer that would sit at ro_bottom.
+ // E.g., if rw_bottom = (address*)100
+ // ro_bottom = (address*)116
+ // then for 64-bit platform:
+ // ro_start = ro_bottom - rw_bottom = (116 - 100) / sizeof(address) = 2;
+ size_t ro_start = ro_bottom - rw_bottom;
+
+ // Note: ptrmap is big enough only to cover the last pointer in ro_region.
+ // See ArchivePtrMarker::compact()
+ _rw_ptrmap->initialize(rw_size);
+ _ro_ptrmap->initialize(_ptrmap->size() - ro_start);
+
+ for (size_t rw_bit = 0; rw_bit < _rw_ptrmap->size(); rw_bit++) {
+ _rw_ptrmap->at_put(rw_bit, _ptrmap->at(rw_bit));
+ }
+
+ for(size_t ro_bit = ro_start; ro_bit < _ptrmap->size(); ro_bit++) {
+ _ro_ptrmap->at_put(ro_bit-ro_start, _ptrmap->at(ro_bit));
+ }
+ assert(_ptrmap->size() - ro_start == _ro_ptrmap->size(), "must be");
+}
+
void ArchivePtrMarker::mark_pointer(address* ptr_loc) {
assert(_ptrmap != nullptr, "not initialized");
assert(!_compacted, "cannot mark anymore");
@@ -275,18 +310,11 @@ void WriteClosure::do_ptr(void** p) {
if (ptr != nullptr && !ArchiveBuilder::current()->is_in_buffer_space(ptr)) {
ptr = ArchiveBuilder::current()->get_buffered_addr(ptr);
}
- _dump_region->append_intptr_t((intptr_t)ptr, true);
-}
-
-void WriteClosure::do_region(u_char* start, size_t size) {
- assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
- assert(size % sizeof(intptr_t) == 0, "bad size");
- do_tag((int)size);
- while (size > 0) {
- do_ptr((void**)start);
- start += sizeof(intptr_t);
- size -= sizeof(intptr_t);
+ // null pointers do not need to be converted to offsets
+ if (ptr != nullptr) {
+ ptr = (address)ArchiveBuilder::current()->buffer_to_offset(ptr);
}
+ _dump_region->append_intptr_t((intptr_t)ptr, false);
}
void ReadClosure::do_ptr(void** p) {
@@ -294,7 +322,7 @@ void ReadClosure::do_ptr(void** p) {
intptr_t obj = nextPtr();
assert((intptr_t)obj >= 0 || (intptr_t)obj < -100,
"hit tag while initializing ptrs.");
- *p = (void*)obj;
+ *p = (void*)obj != nullptr ? (void*)(SharedBaseAddress + obj) : (void*)obj;
}
void ReadClosure::do_u4(u4* p) {
@@ -320,17 +348,6 @@ void ReadClosure::do_tag(int tag) {
FileMapInfo::assert_mark(tag == old_tag);
}
-void ReadClosure::do_region(u_char* start, size_t size) {
- assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
- assert(size % sizeof(intptr_t) == 0, "bad size");
- do_tag((int)size);
- while (size > 0) {
- *(intptr_t*)start = nextPtr();
- start += sizeof(intptr_t);
- size -= sizeof(intptr_t);
- }
-}
-
void ArchiveUtils::log_to_classlist(BootstrapInfo* bootstrap_specifier, TRAPS) {
if (ClassListWriter::is_enabled()) {
if (SystemDictionaryShared::is_supported_invokedynamic(bootstrap_specifier)) {
diff --git a/src/hotspot/share/cds/archiveUtils.hpp b/src/hotspot/share/cds/archiveUtils.hpp
index 2c965f8fe9c..32cef97886f 100644
--- a/src/hotspot/share/cds/archiveUtils.hpp
+++ b/src/hotspot/share/cds/archiveUtils.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -42,6 +42,8 @@ class VirtualSpace;
// fixed, but _ptr_end can be expanded as more objects are dumped.
class ArchivePtrMarker : AllStatic {
static CHeapBitMap* _ptrmap;
+ static CHeapBitMap* _rw_ptrmap;
+ static CHeapBitMap* _ro_ptrmap;
static VirtualSpace* _vs;
// Once _ptrmap is compacted, we don't allow bit marking anymore. This is to
@@ -53,6 +55,7 @@ class ArchivePtrMarker : AllStatic {
public:
static void initialize(CHeapBitMap* ptrmap, VirtualSpace* vs);
+ static void initialize_rw_ro_maps(CHeapBitMap* rw_ptrmap, CHeapBitMap* ro_ptrmap);
static void mark_pointer(address* ptr_loc);
static void clear_pointer(address* ptr_loc);
static void compact(address relocatable_base, address relocatable_end);
@@ -73,8 +76,18 @@ public:
return _ptrmap;
}
+ static CHeapBitMap* rw_ptrmap() {
+ return _rw_ptrmap;
+ }
+
+ static CHeapBitMap* ro_ptrmap() {
+ return _ro_ptrmap;
+ }
+
static void reset_map_and_vs() {
_ptrmap = nullptr;
+ _rw_ptrmap = nullptr;
+ _ro_ptrmap = nullptr;
_vs = nullptr;
}
};
@@ -202,7 +215,10 @@ public:
_dump_region->append_intptr_t((intptr_t)tag);
}
- void do_region(u_char* start, size_t size);
+ char* region_top() {
+ return _dump_region->top();
+ }
+
bool reading() const { return false; }
};
@@ -225,8 +241,8 @@ public:
void do_int(int* p);
void do_bool(bool *p);
void do_tag(int tag);
- void do_region(u_char* start, size_t size);
bool reading() const { return true; }
+ char* region_top() { return nullptr; }
};
class ArchiveUtils {
diff --git a/src/hotspot/share/cds/classListParser.cpp b/src/hotspot/share/cds/classListParser.cpp
index e2099620a77..79119a997de 100644
--- a/src/hotspot/share/cds/classListParser.cpp
+++ b/src/hotspot/share/cds/classListParser.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -50,30 +50,24 @@
#include "runtime/javaCalls.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/macros.hpp"
+#include "utilities/utf8.hpp"
volatile Thread* ClassListParser::_parsing_thread = nullptr;
ClassListParser* ClassListParser::_instance = nullptr;
-ClassListParser::ClassListParser(const char* file, ParseMode parse_mode) : _id2klass_table(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE) {
+ClassListParser::ClassListParser(const char* file, ParseMode parse_mode) :
+ _classlist_file(file),
+ _id2klass_table(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE),
+ _file_input(do_open(file), /* need_close=*/true),
+ _input_stream(&_file_input) {
log_info(cds)("Parsing %s%s", file,
- (parse_mode == _parse_lambda_forms_invokers_only) ? " (lambda form invokers only)" : "");
- _classlist_file = file;
- _file = nullptr;
- // Use os::open() because neither fopen() nor os::fopen()
- // can handle long path name on Windows.
- int fd = os::open(file, O_RDONLY, S_IREAD);
- if (fd != -1) {
- // Obtain a File* from the file descriptor so that fgets()
- // can be used in parse_one_line()
- _file = os::fdopen(fd, "r");
- }
- if (_file == nullptr) {
+ parse_lambda_forms_invokers_only() ? " (lambda form invokers only)" : "");
+ if (!_file_input.is_open()) {
char errmsg[JVM_MAXPATHLEN];
os::lasterror(errmsg, JVM_MAXPATHLEN);
vm_exit_during_initialization("Loading classlist failed", errmsg);
}
- _line_no = 0;
- _token = _line;
+ _token = _line = nullptr;
_interfaces = new (mtClass) GrowableArray(10, mtClass);
_indy_items = new (mtClass) GrowableArray(9, mtClass);
_parse_mode = parse_mode;
@@ -84,142 +78,132 @@ ClassListParser::ClassListParser(const char* file, ParseMode parse_mode) : _id2k
Atomic::store(&_parsing_thread, Thread::current());
}
+FILE* ClassListParser::do_open(const char* file) {
+ // Use os::open() because neither fopen() nor os::fopen()
+ // can handle long path name on Windows. (See JDK-8216184)
+ int fd = os::open(file, O_RDONLY, S_IREAD);
+ FILE* fp = nullptr;
+ if (fd != -1) {
+ // Obtain a FILE* from the file descriptor so that _input_stream
+ // can be used in ClassListParser::parse()
+ fp = os::fdopen(fd, "r");
+ }
+ return fp;
+}
+
bool ClassListParser::is_parsing_thread() {
return Atomic::load(&_parsing_thread) == Thread::current();
}
ClassListParser::~ClassListParser() {
- if (_file != nullptr) {
- fclose(_file);
- }
Atomic::store(&_parsing_thread, (Thread*)nullptr);
delete _indy_items;
delete _interfaces;
_instance = nullptr;
}
-int ClassListParser::parse(TRAPS) {
- int class_count = 0;
+void ClassListParser::parse(TRAPS) {
+ for (; !_input_stream.done(); _input_stream.next()) {
+ _line = _input_stream.current_line();
+ clean_up_input_line();
- while (parse_one_line()) {
- if (lambda_form_line()) {
- // The current line is "@lambda-form-invoker ...". It has been recorded in LambdaFormInvokers,
- // and will be processed later.
- continue;
+ // Each line in the classlist can be one of three forms:
+ if (_line[0] == '#') {
+ // A comment; ignore it
+ } else if (_line[0] == '@') {
+ // @xxx - a tag like @lambda-proxy, to be parsed by parse_at_tags()
+ parse_at_tags(CHECK);
+ } else {
+ // A class name, followed by optional attributes. E.g.
+ // java/lang/String
+ // java/lang/Object id: 1
+ // my/pkg/TestClass id: 5 super: 1 interfaces: 3 4 source: foo.jar
+ parse_class_name_and_attributes(CHECK);
}
-
- if (_parse_mode == _parse_lambda_forms_invokers_only) {
- continue;
- }
-
- TempNewSymbol class_name_symbol = SymbolTable::new_symbol(_class_name);
- if (_indy_items->length() > 0) {
- // The current line is "@lambda-proxy class_name". Load the proxy class.
- resolve_indy(THREAD, class_name_symbol);
- class_count++;
- continue;
- }
-
- Klass* klass = load_current_class(class_name_symbol, THREAD);
- if (HAS_PENDING_EXCEPTION) {
- if (PENDING_EXCEPTION->is_a(vmClasses::OutOfMemoryError_klass())) {
- // If we have run out of memory, don't try to load the rest of the classes in
- // the classlist. Throw an exception, which will terminate the dumping process.
- return 0; // THROW
- }
-
- ResourceMark rm(THREAD);
- char* ex_msg = (char*)"";
- oop message = java_lang_Throwable::message(PENDING_EXCEPTION);
- if (message != nullptr) {
- ex_msg = java_lang_String::as_utf8_string(message);
- }
- log_warning(cds)("%s: %s", PENDING_EXCEPTION->klass()->external_name(), ex_msg);
- // We might have an invalid class name or an bad class. Warn about it
- // and keep going to the next line.
- CLEAR_PENDING_EXCEPTION;
- log_warning(cds)("Preload Warning: Cannot find %s", _class_name);
- continue;
- }
-
- assert(klass != nullptr, "sanity");
- if (log_is_enabled(Trace, cds)) {
- ResourceMark rm(THREAD);
- log_trace(cds)("Shared spaces preloaded: %s", klass->external_name());
- }
-
- if (klass->is_instance_klass()) {
- InstanceKlass* ik = InstanceKlass::cast(klass);
-
- // Link the class to cause the bytecodes to be rewritten and the
- // cpcache to be created. The linking is done as soon as classes
- // are loaded in order that the related data structures (klass and
- // cpCache) are located together.
- MetaspaceShared::try_link_class(THREAD, ik);
- }
-
- class_count++;
}
-
- return class_count;
}
-bool ClassListParser::parse_one_line() {
- for (;;) {
- if (fgets(_line, sizeof(_line), _file) == nullptr) {
- return false;
- }
- ++ _line_no;
- _line_len = (int)strlen(_line);
- if (_line_len > _max_allowed_line_len) {
- error("input line too long (must be no longer than %d chars)", _max_allowed_line_len);
- }
- if (*_line == '#') { // comment
- continue;
- }
+void ClassListParser::parse_class_name_and_attributes(TRAPS) {
+ read_class_name_and_attributes();
- {
- int len = (int)strlen(_line);
- int i;
- // Replace \t\r\n\f with ' '
- for (i=0; i 0) {
- if (_line[len-1] == ' ') {
- _line[len-1] = '\0';
- len --;
- } else {
- break;
- }
- }
- _line_len = len;
- }
-
- // valid line
- break;
+ if (parse_lambda_forms_invokers_only()) {
+ return;
}
+ check_class_name(_class_name);
+ TempNewSymbol class_name_symbol = SymbolTable::new_symbol(_class_name);
+ Klass* klass = load_current_class(class_name_symbol, THREAD);
+ if (HAS_PENDING_EXCEPTION) {
+ if (PENDING_EXCEPTION->is_a(vmClasses::OutOfMemoryError_klass())) {
+ // If we have run out of memory, don't try to load the rest of the classes in
+ // the classlist. Throw an exception, which will terminate the dumping process.
+ return; // THROW
+ }
+
+ ResourceMark rm(THREAD);
+ char* ex_msg = (char*)"";
+ oop message = java_lang_Throwable::message(PENDING_EXCEPTION);
+ if (message != nullptr) {
+ ex_msg = java_lang_String::as_utf8_string(message);
+ }
+ log_warning(cds)("%s: %s", PENDING_EXCEPTION->klass()->external_name(), ex_msg);
+ // We might have an invalid class name or a bad class. Warn about it
+ // and keep going to the next line.
+ CLEAR_PENDING_EXCEPTION;
+ log_warning(cds)("Preload Warning: Cannot find %s", _class_name);
+ return;
+ }
+
+ assert(klass != nullptr, "sanity");
+ if (log_is_enabled(Trace, cds)) {
+ ResourceMark rm(THREAD);
+ log_trace(cds)("Shared spaces preloaded: %s", klass->external_name());
+ }
+
+ if (klass->is_instance_klass()) {
+ InstanceKlass* ik = InstanceKlass::cast(klass);
+
+ // Link the class to cause the bytecodes to be rewritten and the
+ // cpcache to be created. The linking is done as soon as classes
+ // are loaded in order that the related data structures (klass and
+ // cpCache) are located together.
+ MetaspaceShared::try_link_class(THREAD, ik);
+ }
+}
+
+void ClassListParser::clean_up_input_line() {
+ int len = (int)strlen(_line);
+ int i;
+ // Replace \t\r\n\f with ' '
+ for (i=0; i 0) {
+ if (_line[len-1] == ' ') {
+ _line[len-1] = '\0';
+ len --;
+ } else {
+ break;
+ }
+ }
+ _line_len = len;
+}
+
+void ClassListParser::read_class_name_and_attributes() {
_class_name = _line;
_id = _unspecified;
_super = _unspecified;
_interfaces->clear();
_source = nullptr;
_interfaces_specified = false;
- _indy_items->clear();
- _lambda_form_line = false;
-
- if (_line[0] == '@') {
- return parse_at_tags();
- }
if ((_token = strchr(_line, ' ')) == nullptr) {
- // No optional arguments are specified.
- return true;
+ // No optional attributes are specified.
+ return;
}
// Mark the end of the name, and go to the next input char
@@ -261,10 +245,9 @@ bool ClassListParser::parse_one_line() {
// # the class is loaded from classpath
// id may be specified
// super, interfaces, loader must not be specified
- return true;
}
-void ClassListParser::split_tokens_by_whitespace(int offset) {
+void ClassListParser::split_tokens_by_whitespace(int offset, GrowableArray* items) {
int start = offset;
int end;
bool done = false;
@@ -277,7 +260,7 @@ void ClassListParser::split_tokens_by_whitespace(int offset) {
} else {
_line[end] = '\0';
}
- _indy_items->append(_line + start);
+ items->append(_line + start);
start = ++end;
}
}
@@ -286,7 +269,7 @@ int ClassListParser::split_at_tag_from_line() {
_token = _line;
char* ptr;
if ((ptr = strchr(_line, ' ')) == nullptr) {
- error("Too few items following the @ tag \"%s\" line #%d", _line, _line_no);
+ error("Too few items following the @ tag \"%s\" line #%zu", _line, lineno());
return 0;
}
*ptr++ = '\0';
@@ -294,29 +277,30 @@ int ClassListParser::split_at_tag_from_line() {
return (int)(ptr - _line);
}
-bool ClassListParser::parse_at_tags() {
+void ClassListParser::parse_at_tags(TRAPS) {
assert(_line[0] == '@', "must be");
- int offset;
- if ((offset = split_at_tag_from_line()) == 0) {
- return false;
- }
+ int offset = split_at_tag_from_line();
+ assert(offset > 0, "would have exited VM");
if (strcmp(_token, LAMBDA_PROXY_TAG) == 0) {
- split_tokens_by_whitespace(offset);
+ _indy_items->clear();
+ split_tokens_by_whitespace(offset, _indy_items);
if (_indy_items->length() < 2) {
- error("Line with @ tag has too few items \"%s\" line #%d", _token, _line_no);
- return false;
+ error("Line with @ tag has too few items \"%s\" line #%zu", _token, lineno());
+ }
+ if (!parse_lambda_forms_invokers_only()) {
+ _class_name = _indy_items->at(0);
+ check_class_name(_class_name);
+ TempNewSymbol class_name_symbol = SymbolTable::new_symbol(_class_name);
+ if (_indy_items->length() > 0) {
+ // The current line is "@lambda-proxy class_name". Load the proxy class.
+ resolve_indy(THREAD, class_name_symbol);
+ }
}
- // set the class name
- _class_name = _indy_items->at(0);
- return true;
} else if (strcmp(_token, LAMBDA_FORM_TAG) == 0) {
LambdaFormInvokers::append(os::strdup((const char*)(_line + offset), mtInternal));
- _lambda_form_line = true;
- return true;
} else {
- error("Invalid @ tag at the beginning of line \"%s\" line #%d", _token, _line_no);
- return false;
+ error("Invalid @ tag at the beginning of line \"%s\" line #%zu", _token, lineno());
}
}
@@ -423,8 +407,8 @@ void ClassListParser::error(const char* msg, ...) {
}
jio_fprintf(defaultStream::error_stream(),
- "An error has occurred while processing class list file %s %d:%d.\n",
- _classlist_file, _line_no, (error_index + 1));
+ "An error has occurred while processing class list file %s %zu:%d.\n",
+ _classlist_file, lineno(), (error_index + 1));
jio_vfprintf(defaultStream::error_stream(), msg, ap);
if (_line_len <= 0) {
@@ -445,9 +429,28 @@ void ClassListParser::error(const char* msg, ...) {
}
jio_fprintf(defaultStream::error_stream(), "^\n");
}
+ va_end(ap);
vm_exit_during_initialization("class list format error.", nullptr);
- va_end(ap);
+}
+
+void ClassListParser::check_class_name(const char* class_name) {
+ const char* err = nullptr;
+ size_t len = strlen(class_name);
+ if (len > (size_t)Symbol::max_length()) {
+ err = "class name too long";
+ } else {
+ assert(Symbol::max_length() < INT_MAX && len < INT_MAX, "must be");
+ if (!UTF8::is_legal_utf8((const unsigned char*)class_name, (int)len, /*version_leq_47*/false)) {
+ err = "class name is not valid UTF8";
+ }
+ }
+ if (err != nullptr) {
+ jio_fprintf(defaultStream::error_stream(),
+ "An error has occurred while processing class list file %s:%zu %s\n",
+ _classlist_file, lineno(), err);
+ vm_exit_during_initialization("class list format error.", nullptr);
+ }
}
// This function is used for loading classes for customized class loaders
@@ -591,7 +594,7 @@ void ClassListParser::resolve_indy_impl(Symbol* class_name_symbol, TRAPS) {
LinkResolver::resolve_invoke(info,
recv,
pool,
- ConstantPool::encode_invokedynamic_index(indy_index),
+ indy_index,
Bytecodes::_invokedynamic, CHECK);
break;
}
diff --git a/src/hotspot/share/cds/classListParser.hpp b/src/hotspot/share/cds/classListParser.hpp
index 74a2ff10515..50ede4f6dff 100644
--- a/src/hotspot/share/cds/classListParser.hpp
+++ b/src/hotspot/share/cds/classListParser.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
#include "utilities/exceptions.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"
+#include "utilities/istream.hpp"
#include "utilities/resizeableResourceHash.hpp"
#define LAMBDA_PROXY_TAG "@lambda-proxy"
@@ -80,14 +81,6 @@ private:
enum {
_unspecified = -999,
-
- // Max number of bytes allowed per line in the classlist.
- // Theoretically Java class names could be 65535 bytes in length. Also, an input line
- // could have a very long path name up to JVM_MAXPATHLEN bytes in length. In reality,
- // 4K bytes is more than enough.
- _max_allowed_line_len = 4096,
- _line_buf_extra = 10, // for detecting input too long
- _line_buf_size = _max_allowed_line_len + _line_buf_extra
};
// Use a small initial size in debug build to test resizing logic
@@ -96,16 +89,14 @@ private:
static volatile Thread* _parsing_thread; // the thread that created _instance
static ClassListParser* _instance; // the singleton.
const char* _classlist_file;
- FILE* _file;
ID2KlassTable _id2klass_table;
- // The following field contains information from the *current* line being
- // parsed.
- char _line[_line_buf_size]; // The buffer that holds the current line. Some characters in
+ FileInput _file_input;
+ inputStream _input_stream;
+ char* _line; // The buffer that holds the current line. Some characters in
// the buffer may be overwritten by '\0' during parsing.
int _line_len; // Original length of the input line.
- int _line_no; // Line number for current line being parsed
const char* _class_name;
GrowableArray* _indy_items; // items related to invoke dynamic for archiving lambda proxy classes
int _id;
@@ -113,7 +104,6 @@ private:
GrowableArray* _interfaces;
bool _interfaces_specified;
const char* _source;
- bool _lambda_form_line;
ParseMode _parse_mode;
bool parse_int_option(const char* option_name, int* value);
@@ -129,16 +119,20 @@ private:
void resolve_indy(JavaThread* current, Symbol* class_name_symbol);
void resolve_indy_impl(Symbol* class_name_symbol, TRAPS);
- bool parse_one_line();
+ void clean_up_input_line();
+ void read_class_name_and_attributes();
+ void parse_class_name_and_attributes(TRAPS);
Klass* load_current_class(Symbol* class_name_symbol, TRAPS);
+ size_t lineno() { return _input_stream.lineno(); }
+ FILE* do_open(const char* file);
ClassListParser(const char* file, ParseMode _parse_mode);
~ClassListParser();
public:
- static int parse_classlist(const char* classlist_path, ParseMode parse_mode, TRAPS) {
+ static void parse_classlist(const char* classlist_path, ParseMode parse_mode, TRAPS) {
ClassListParser parser(classlist_path, parse_mode);
- return parser.parse(THREAD); // returns the number of classes loaded.
+ parser.parse(THREAD);
}
static bool is_parsing_thread();
@@ -148,10 +142,10 @@ public:
return _instance;
}
- int parse(TRAPS);
- void split_tokens_by_whitespace(int offset);
+ void parse(TRAPS);
+ void split_tokens_by_whitespace(int offset, GrowableArray* items);
int split_at_tag_from_line();
- bool parse_at_tags();
+ void parse_at_tags(TRAPS);
char* _token;
void error(const char* msg, ...);
void parse_int(int* value);
@@ -161,6 +155,9 @@ public:
void skip_whitespaces();
void skip_non_whitespaces();
+ bool parse_lambda_forms_invokers_only() {
+ return _parse_mode == _parse_lambda_forms_invokers_only;
+ }
bool is_id_specified() {
return _id != _unspecified;
}
@@ -183,6 +180,7 @@ public:
error("%s id %d is not yet loaded", which, id);
}
}
+ void check_class_name(const char* class_name);
const char* current_class_name() {
return _class_name;
@@ -190,8 +188,6 @@ public:
bool is_loading_from_source();
- bool lambda_form_line() { return _lambda_form_line; }
-
// Look up the super or interface of the current class being loaded
// (in this->load_current_class()).
InstanceKlass* lookup_super_for_current_class(Symbol* super_name);
diff --git a/src/hotspot/share/cds/cppVtables.cpp b/src/hotspot/share/cds/cppVtables.cpp
index c339ce9c0de..f17d94a82fd 100644
--- a/src/hotspot/share/cds/cppVtables.cpp
+++ b/src/hotspot/share/cds/cppVtables.cpp
@@ -213,23 +213,30 @@ void CppVtableCloner::init_orig_cpp_vtptr(int kind) {
// the following holds true:
// _index[ConstantPool_Kind]->cloned_vtable() == ((intptr_t**)cp)[0]
// _index[InstanceKlass_Kind]->cloned_vtable() == ((intptr_t**)ik)[0]
-CppVtableInfo** CppVtables::_index = nullptr;
+static CppVtableInfo* _index[_num_cloned_vtable_kinds];
-char* CppVtables::dumptime_init(ArchiveBuilder* builder) {
+// Vtables are all fixed offsets from ArchiveBuilder::current()->mapped_base()
+// E.g. ConstantPool is at offset 0x58. We can archive these offsets in the
+// RO region and use them to calculate their location at runtime without storing
+// the pointers in the RW region
+char* CppVtables::_vtables_serialized_base = nullptr;
+
+void CppVtables::dumptime_init(ArchiveBuilder* builder) {
assert(CDSConfig::is_dumping_static_archive(), "cpp tables are only dumped into static archive");
- size_t vtptrs_bytes = _num_cloned_vtable_kinds * sizeof(CppVtableInfo*);
- _index = (CppVtableInfo**)builder->rw_region()->allocate(vtptrs_bytes);
CPP_VTABLE_TYPES_DO(ALLOCATE_AND_INITIALIZE_VTABLE);
size_t cpp_tables_size = builder->rw_region()->top() - builder->rw_region()->base();
builder->alloc_stats()->record_cpp_vtables((int)cpp_tables_size);
-
- return (char*)_index;
}
void CppVtables::serialize(SerializeClosure* soc) {
- soc->do_ptr(&_index);
+ if (!soc->reading()) {
+ _vtables_serialized_base = (char*)ArchiveBuilder::current()->buffer_top();
+ }
+ for (int i = 0; i < _num_cloned_vtable_kinds; i++) {
+ soc->do_ptr(&_index[i]);
+ }
if (soc->reading()) {
CPP_VTABLE_TYPES_DO(INITIALIZE_VTABLE);
}
diff --git a/src/hotspot/share/cds/cppVtables.hpp b/src/hotspot/share/cds/cppVtables.hpp
index 5318a9de2ba..973502909dd 100644
--- a/src/hotspot/share/cds/cppVtables.hpp
+++ b/src/hotspot/share/cds/cppVtables.hpp
@@ -36,13 +36,14 @@ class CppVtableInfo;
// Support for C++ vtables in CDS archive.
class CppVtables : AllStatic {
- static CppVtableInfo** _index;
+ static char* _vtables_serialized_base;
public:
- static char* dumptime_init(ArchiveBuilder* builder);
+ static void dumptime_init(ArchiveBuilder* builder);
static void zero_archived_vtables();
static intptr_t* get_archived_vtable(MetaspaceObj::Type msotype, address obj);
static void serialize(SerializeClosure* sc);
static bool is_valid_shared_method(const Method* m) NOT_CDS_RETURN_(false);
+ static char* vtables_serialized_base() { return _vtables_serialized_base; }
};
#endif // SHARE_CDS_CPPVTABLES_HPP
diff --git a/src/hotspot/share/cds/dynamicArchive.cpp b/src/hotspot/share/cds/dynamicArchive.cpp
index cd5dd88b099..f255b337d14 100644
--- a/src/hotspot/share/cds/dynamicArchive.cpp
+++ b/src/hotspot/share/cds/dynamicArchive.cpp
@@ -137,7 +137,7 @@ public:
// Note that these tables still point to the *original* objects, so
// they would need to call DynamicArchive::original_to_target() to
// get the correct addresses.
- assert(current_dump_space() == ro_region(), "Must be RO space");
+ assert(current_dump_region() == ro_region(), "Must be RO space");
SymbolTable::write_to_archive(symbols());
ArchiveBuilder::OtherROAllocMark mark;
diff --git a/src/hotspot/share/cds/filemap.cpp b/src/hotspot/share/cds/filemap.cpp
index 2b35e64c264..2182865ebaa 100644
--- a/src/hotspot/share/cds/filemap.cpp
+++ b/src/hotspot/share/cds/filemap.cpp
@@ -211,6 +211,7 @@ void FileMapHeader::populate(FileMapInfo *info, size_t core_region_alignment,
}
_compressed_oops = UseCompressedOops;
_compressed_class_ptrs = UseCompressedClassPointers;
+ _use_secondary_supers_table = UseSecondarySupersTable;
_max_heap_size = MaxHeapSize;
_use_optimized_module_handling = CDSConfig::is_using_optimized_module_handling();
_has_full_module_graph = CDSConfig::is_dumping_full_module_graph();
@@ -274,6 +275,7 @@ void FileMapHeader::print(outputStream* st) {
st->print_cr("- narrow_oop_mode: %d", _narrow_oop_mode);
st->print_cr("- compressed_oops: %d", _compressed_oops);
st->print_cr("- compressed_class_ptrs: %d", _compressed_class_ptrs);
+ st->print_cr("- use_secondary_supers_table: %d", _use_secondary_supers_table);
st->print_cr("- cloned_vtables_offset: " SIZE_FORMAT_X, _cloned_vtables_offset);
st->print_cr("- serialized_data_offset: " SIZE_FORMAT_X, _serialized_data_offset);
st->print_cr("- jvm_ident: %s", _jvm_ident);
@@ -291,10 +293,11 @@ void FileMapHeader::print(outputStream* st) {
st->print_cr("- heap_roots_offset: " SIZE_FORMAT, _heap_roots_offset);
st->print_cr("- _heap_oopmap_start_pos: " SIZE_FORMAT, _heap_oopmap_start_pos);
st->print_cr("- _heap_ptrmap_start_pos: " SIZE_FORMAT, _heap_ptrmap_start_pos);
+ st->print_cr("- _rw_ptrmap_start_pos: " SIZE_FORMAT, _rw_ptrmap_start_pos);
+ st->print_cr("- _ro_ptrmap_start_pos: " SIZE_FORMAT, _ro_ptrmap_start_pos);
st->print_cr("- allow_archiving_with_java_agent:%d", _allow_archiving_with_java_agent);
st->print_cr("- use_optimized_module_handling: %d", _use_optimized_module_handling);
st->print_cr("- has_full_module_graph %d", _has_full_module_graph);
- st->print_cr("- ptrmap_size_in_bits: " SIZE_FORMAT, _ptrmap_size_in_bits);
}
void SharedClassPathEntry::init_as_non_existent(const char* path, TRAPS) {
@@ -1453,22 +1456,6 @@ void FileMapRegion::init_ptrmap(size_t offset, size_t size_in_bits) {
_ptrmap_size_in_bits = size_in_bits;
}
-BitMapView FileMapRegion::bitmap_view(bool is_oopmap) {
- char* bitmap_base = FileMapInfo::current_info()->map_bitmap_region();
- bitmap_base += is_oopmap ? _oopmap_offset : _ptrmap_offset;
- size_t size_in_bits = is_oopmap ? _oopmap_size_in_bits : _ptrmap_size_in_bits;
- return BitMapView((BitMap::bm_word_t*)(bitmap_base), size_in_bits);
-}
-
-BitMapView FileMapRegion::oopmap_view() {
- return bitmap_view(true);
-}
-
-BitMapView FileMapRegion::ptrmap_view() {
- assert(has_ptrmap(), "must be");
- return bitmap_view(false);
-}
-
bool FileMapRegion::check_region_crc(char* base) const {
// This function should be called after the region has been properly
// loaded into memory via FileMapInfo::map_region() or FileMapInfo::read_region().
@@ -1497,6 +1484,27 @@ static const char* region_name(int region_index) {
return names[region_index];
}
+BitMapView FileMapInfo::bitmap_view(int region_index, bool is_oopmap) {
+ FileMapRegion* r = region_at(region_index);
+ char* bitmap_base = is_static() ? FileMapInfo::current_info()->map_bitmap_region() : FileMapInfo::dynamic_info()->map_bitmap_region();
+ bitmap_base += is_oopmap ? r->oopmap_offset() : r->ptrmap_offset();
+ size_t size_in_bits = is_oopmap ? r->oopmap_size_in_bits() : r->ptrmap_size_in_bits();
+
+ log_debug(cds, reloc)("mapped %s relocation %smap @ " INTPTR_FORMAT " (" SIZE_FORMAT " bits)",
+ region_name(region_index), is_oopmap ? "oop" : "ptr",
+ p2i(bitmap_base), size_in_bits);
+
+ return BitMapView((BitMap::bm_word_t*)(bitmap_base), size_in_bits);
+}
+
+BitMapView FileMapInfo::oopmap_view(int region_index) {
+ return bitmap_view(region_index, /*is_oopmap*/true);
+ }
+
+BitMapView FileMapInfo::ptrmap_view(int region_index) {
+ return bitmap_view(region_index, /*is_oopmap*/false);
+}
+
void FileMapRegion::print(outputStream* st, int region_index) {
st->print_cr("============ region ============= %d \"%s\"", region_index, region_name(region_index));
st->print_cr("- crc: 0x%08x", _crc);
@@ -1510,6 +1518,8 @@ void FileMapRegion::print(outputStream* st, int region_index) {
st->print_cr("- used: " SIZE_FORMAT, _used);
st->print_cr("- oopmap_offset: " SIZE_FORMAT_X, _oopmap_offset);
st->print_cr("- oopmap_size_in_bits: " SIZE_FORMAT, _oopmap_size_in_bits);
+ st->print_cr("- ptrmap_offset: " SIZE_FORMAT_X, _ptrmap_offset);
+ st->print_cr("- ptrmap_size_in_bits: " SIZE_FORMAT, _ptrmap_size_in_bits);
st->print_cr("- mapped_base: " INTPTR_FORMAT, p2i(_mapped_base));
}
@@ -1572,7 +1582,7 @@ static size_t write_bitmap(const CHeapBitMap* map, char* output, size_t offset)
// lots of leading zeros.
size_t FileMapInfo::remove_bitmap_leading_zeros(CHeapBitMap* map) {
size_t old_zeros = map->find_first_set_bit(0);
- size_t old_size = map->size_in_bytes();
+ size_t old_size = map->size();
// Slice and resize bitmap
map->truncate(old_zeros, map->size());
@@ -1581,14 +1591,17 @@ size_t FileMapInfo::remove_bitmap_leading_zeros(CHeapBitMap* map) {
size_t new_zeros = map->find_first_set_bit(0);
assert(new_zeros == 0, "Should have removed leading zeros");
)
-
- assert(map->size_in_bytes() < old_size, "Map size should have decreased");
+ assert(map->size() <= old_size, "sanity");
return old_zeros;
}
-char* FileMapInfo::write_bitmap_region(const CHeapBitMap* ptrmap, ArchiveHeapInfo* heap_info,
+char* FileMapInfo::write_bitmap_region(CHeapBitMap* rw_ptrmap, CHeapBitMap* ro_ptrmap, ArchiveHeapInfo* heap_info,
size_t &size_in_bytes) {
- size_in_bytes = ptrmap->size_in_bytes();
+ size_t removed_rw_zeros = remove_bitmap_leading_zeros(rw_ptrmap);
+ size_t removed_ro_zeros = remove_bitmap_leading_zeros(ro_ptrmap);
+ header()->set_rw_ptrmap_start_pos(removed_rw_zeros);
+ header()->set_ro_ptrmap_start_pos(removed_ro_zeros);
+ size_in_bytes = rw_ptrmap->size_in_bytes() + ro_ptrmap->size_in_bytes();
if (heap_info->is_used()) {
// Remove leading zeros
@@ -1602,14 +1615,19 @@ char* FileMapInfo::write_bitmap_region(const CHeapBitMap* ptrmap, ArchiveHeapInf
size_in_bytes += heap_info->ptrmap()->size_in_bytes();
}
- // The bitmap region contains up to 3 parts:
- // ptrmap: metaspace pointers inside the ro/rw regions
+ // The bitmap region contains up to 4 parts:
+ // rw_ptrmap: metaspace pointers inside the read-write region
+ // ro_ptrmap: metaspace pointers inside the read-only region
// heap_info->oopmap(): Java oop pointers in the heap region
// heap_info->ptrmap(): metaspace pointers in the heap region
char* buffer = NEW_C_HEAP_ARRAY(char, size_in_bytes, mtClassShared);
size_t written = 0;
- written = write_bitmap(ptrmap, buffer, written);
- header()->set_ptrmap_size_in_bits(ptrmap->size());
+
+ region_at(MetaspaceShared::rw)->init_ptrmap(0, rw_ptrmap->size());
+ written = write_bitmap(rw_ptrmap, buffer, written);
+
+ region_at(MetaspaceShared::ro)->init_ptrmap(written, ro_ptrmap->size());
+ written = write_bitmap(ro_ptrmap, buffer, written);
if (heap_info->is_used()) {
FileMapRegion* r = region_at(MetaspaceShared::hp);
@@ -1904,15 +1922,19 @@ bool FileMapInfo::relocate_pointers_in_core_regions(intx addr_delta) {
if (bitmap_base == nullptr) {
return false; // OOM, or CRC check failure
} else {
- size_t ptrmap_size_in_bits = header()->ptrmap_size_in_bits();
- log_debug(cds, reloc)("mapped relocation bitmap @ " INTPTR_FORMAT " (" SIZE_FORMAT " bits)",
- p2i(bitmap_base), ptrmap_size_in_bits);
+ BitMapView rw_ptrmap = ptrmap_view(MetaspaceShared::rw);
+ BitMapView ro_ptrmap = ptrmap_view(MetaspaceShared::ro);
- BitMapView ptrmap((BitMap::bm_word_t*)bitmap_base, ptrmap_size_in_bits);
+ FileMapRegion* rw_region = first_core_region();
+ FileMapRegion* ro_region = last_core_region();
- // Patch all pointers in the mapped region that are marked by ptrmap.
- address patch_base = (address)mapped_base();
- address patch_end = (address)mapped_end();
+ // Patch all pointers inside the RW region
+ address rw_patch_base = (address)rw_region->mapped_base();
+ address rw_patch_end = (address)rw_region->mapped_end();
+
+ // Patch all pointers inside the RO region
+ address ro_patch_base = (address)ro_region->mapped_base();
+ address ro_patch_end = (address)ro_region->mapped_end();
// the current value of the pointers to be patched must be within this
// range (i.e., must be between the requested base address and the address of the current archive).
@@ -1925,9 +1947,12 @@ bool FileMapInfo::relocate_pointers_in_core_regions(intx addr_delta) {
address valid_new_base = (address)header()->mapped_base_address();
address valid_new_end = (address)mapped_end();
- SharedDataRelocator patcher((address*)patch_base, (address*)patch_end, valid_old_base, valid_old_end,
+ SharedDataRelocator rw_patcher((address*)rw_patch_base + header()->rw_ptrmap_start_pos(), (address*)rw_patch_end, valid_old_base, valid_old_end,
valid_new_base, valid_new_end, addr_delta);
- ptrmap.iterate(&patcher);
+ SharedDataRelocator ro_patcher((address*)ro_patch_base + header()->ro_ptrmap_start_pos(), (address*)ro_patch_end, valid_old_base, valid_old_end,
+ valid_new_base, valid_new_end, addr_delta);
+ rw_ptrmap.iterate(&rw_patcher);
+ ro_ptrmap.iterate(&ro_patcher);
// The MetaspaceShared::bm region will be unmapped in MetaspaceShared::initialize_shared_spaces().
@@ -2420,6 +2445,11 @@ bool FileMapHeader::validate() {
return false;
}
+ if (! _use_secondary_supers_table && UseSecondarySupersTable) {
+ log_warning(cds)("The shared archive was created without UseSecondarySupersTable.");
+ return false;
+ }
+
if (!_use_optimized_module_handling) {
CDSConfig::stop_using_optimized_module_handling();
log_info(cds)("optimized module handling: disabled because archive was created without optimized module handling");
diff --git a/src/hotspot/share/cds/filemap.hpp b/src/hotspot/share/cds/filemap.hpp
index e7c131ee5bb..54881b8d237 100644
--- a/src/hotspot/share/cds/filemap.hpp
+++ b/src/hotspot/share/cds/filemap.hpp
@@ -131,7 +131,6 @@ public:
class FileMapRegion: private CDSFileMapRegion {
- BitMapView bitmap_view(bool is_oopmap);
public:
void assert_is_heap_region() const {
assert(_is_heap_region, "must be heap region");
@@ -158,6 +157,8 @@ public:
bool mapped_from_file() const { return _mapped_from_file != 0; }
size_t oopmap_offset() const { assert_is_heap_region(); return _oopmap_offset; }
size_t oopmap_size_in_bits() const { assert_is_heap_region(); return _oopmap_size_in_bits; }
+ size_t ptrmap_offset() const { return _ptrmap_offset; }
+ size_t ptrmap_size_in_bits() const { return _ptrmap_size_in_bits; }
void set_file_offset(size_t s) { _file_offset = s; }
void set_read_only(bool v) { _read_only = v; }
@@ -167,8 +168,6 @@ public:
bool allow_exec, int crc);
void init_oopmap(size_t offset, size_t size_in_bits);
void init_ptrmap(size_t offset, size_t size_in_bits);
- BitMapView oopmap_view();
- BitMapView ptrmap_view();
bool has_ptrmap() { return _ptrmap_size_in_bits != 0; }
bool check_region_crc(char* base) const;
@@ -192,6 +191,7 @@ private:
CompressedOops::Mode _narrow_oop_mode; // compressed oop encoding mode
bool _compressed_oops; // save the flag UseCompressedOops
bool _compressed_class_ptrs; // save the flag UseCompressedClassPointers
+ bool _use_secondary_supers_table; // save the flag UseSecondarySupersTable
size_t _cloned_vtables_offset; // The address of the first cloned vtable
size_t _serialized_data_offset; // Data accessed using {ReadClosure,WriteClosure}::serialize()
bool _has_non_jar_in_classpath; // non-jar file entry exists in classpath
@@ -225,11 +225,12 @@ private:
bool _use_optimized_module_handling;// No module-relation VM options were specified, so we can skip
// some expensive operations.
bool _has_full_module_graph; // Does this CDS archive contain the full archived module graph?
- size_t _ptrmap_size_in_bits; // Size of pointer relocation bitmap
size_t _heap_roots_offset; // Offset of the HeapShared::roots() object, from the bottom
// of the archived heap objects, in bytes.
size_t _heap_oopmap_start_pos; // The first bit in the oopmap corresponds to this position in the heap.
size_t _heap_ptrmap_start_pos; // The first bit in the ptrmap corresponds to this position in the heap.
+ size_t _rw_ptrmap_start_pos; // The first bit in the ptrmap corresponds to this position in the rw region
+ size_t _ro_ptrmap_start_pos; // The first bit in the ptrmap corresponds to this position in the ro region
char* from_mapped_offset(size_t offset) const {
return mapped_base_address() + offset;
}
@@ -267,12 +268,13 @@ public:
char* mapped_base_address() const { return _mapped_base_address; }
bool has_platform_or_app_classes() const { return _has_platform_or_app_classes; }
bool has_non_jar_in_classpath() const { return _has_non_jar_in_classpath; }
- size_t ptrmap_size_in_bits() const { return _ptrmap_size_in_bits; }
bool compressed_oops() const { return _compressed_oops; }
bool compressed_class_pointers() const { return _compressed_class_ptrs; }
size_t heap_roots_offset() const { return _heap_roots_offset; }
- size_t heap_oopmap_start_pos() const { return _heap_oopmap_start_pos;}
- size_t heap_ptrmap_start_pos() const { return _heap_ptrmap_start_pos;}
+ size_t heap_oopmap_start_pos() const { return _heap_oopmap_start_pos; }
+ size_t heap_ptrmap_start_pos() const { return _heap_ptrmap_start_pos; }
+ size_t rw_ptrmap_start_pos() const { return _rw_ptrmap_start_pos; }
+ size_t ro_ptrmap_start_pos() const { return _ro_ptrmap_start_pos; }
// FIXME: These should really return int
jshort max_used_path_index() const { return _max_used_path_index; }
jshort app_module_paths_start_index() const { return _app_module_paths_start_index; }
@@ -282,11 +284,12 @@ public:
void set_has_platform_or_app_classes(bool v) { _has_platform_or_app_classes = v; }
void set_cloned_vtables(char* p) { set_as_offset(p, &_cloned_vtables_offset); }
void set_serialized_data(char* p) { set_as_offset(p, &_serialized_data_offset); }
- void set_ptrmap_size_in_bits(size_t s) { _ptrmap_size_in_bits = s; }
void set_mapped_base_address(char* p) { _mapped_base_address = p; }
void set_heap_roots_offset(size_t n) { _heap_roots_offset = n; }
void set_heap_oopmap_start_pos(size_t n) { _heap_oopmap_start_pos = n; }
void set_heap_ptrmap_start_pos(size_t n) { _heap_ptrmap_start_pos = n; }
+ void set_rw_ptrmap_start_pos(size_t n) { _rw_ptrmap_start_pos = n; }
+ void set_ro_ptrmap_start_pos(size_t n) { _ro_ptrmap_start_pos = n; }
void copy_base_archive_name(const char* name);
void set_shared_path_table(SharedPathTable table) {
@@ -443,7 +446,7 @@ public:
void write_region(int region, char* base, size_t size,
bool read_only, bool allow_exec);
size_t remove_bitmap_leading_zeros(CHeapBitMap* map);
- char* write_bitmap_region(const CHeapBitMap* ptrmap, ArchiveHeapInfo* heap_info,
+ char* write_bitmap_region(CHeapBitMap* rw_ptrmap, CHeapBitMap* ro_ptrmap, ArchiveHeapInfo* heap_info,
size_t &size_in_bytes);
size_t write_heap_region(ArchiveHeapInfo* heap_info);
void write_bytes(const void* buffer, size_t count);
@@ -526,6 +529,10 @@ public:
return header()->region_at(i);
}
+ BitMapView bitmap_view(int region_index, bool is_oopmap);
+ BitMapView oopmap_view(int region_index);
+ BitMapView ptrmap_view(int region_index);
+
void print(outputStream* st) const;
const char* vm_version() {
diff --git a/src/hotspot/share/cds/heapShared.cpp b/src/hotspot/share/cds/heapShared.cpp
index a9dd5de9f4a..b9f5d963c85 100644
--- a/src/hotspot/share/cds/heapShared.cpp
+++ b/src/hotspot/share/cds/heapShared.cpp
@@ -278,12 +278,6 @@ bool HeapShared::archive_object(oop obj) {
} else {
count_allocation(obj->size());
ArchiveHeapWriter::add_source_obj(obj);
-
- // The archived objects are discovered in a predictable order. Compute
- // their identity_hash() as soon as we see them. This ensures that the
- // the identity_hash in the object header will have a predictable value,
- // making the archive reproducible.
- obj->identity_hash();
CachedOopInfo info = make_cached_oop_info(obj);
archived_object_cache()->put_when_absent(obj, info);
archived_object_cache()->maybe_grow();
@@ -1395,7 +1389,7 @@ void HeapShared::check_default_subgraph_classes() {
name == vmSymbols::java_lang_String() ||
name == vmSymbols::java_lang_ArithmeticException() ||
name == vmSymbols::java_lang_NullPointerException() ||
- name == vmSymbols::java_lang_VirtualMachineError() ||
+ name == vmSymbols::java_lang_InternalError() ||
name == vmSymbols::object_array_signature() ||
name == vmSymbols::byte_array_signature() ||
name == vmSymbols::char_array_signature(),
diff --git a/src/hotspot/share/cds/metaspaceShared.cpp b/src/hotspot/share/cds/metaspaceShared.cpp
index 95cf4dfa791..6213b9848f1 100644
--- a/src/hotspot/share/cds/metaspaceShared.cpp
+++ b/src/hotspot/share/cds/metaspaceShared.cpp
@@ -225,6 +225,14 @@ static bool shared_base_too_high(char* specified_base, char* aligned_base, size_
static char* compute_shared_base(size_t cds_max) {
char* specified_base = (char*)SharedBaseAddress;
char* aligned_base = align_up(specified_base, MetaspaceShared::core_region_alignment());
+ if (UseCompressedClassPointers) {
+ aligned_base = align_up(specified_base, Metaspace::reserve_alignment());
+ }
+
+ if (aligned_base != specified_base) {
+ log_info(cds)("SharedBaseAddress (" INTPTR_FORMAT ") aligned up to " INTPTR_FORMAT,
+ p2i(specified_base), p2i(aligned_base));
+ }
const char* err = nullptr;
if (shared_base_too_high(specified_base, aligned_base, cds_max)) {
@@ -511,11 +519,9 @@ void VM_PopulateDumpSharedSpace::doit() {
builder.gather_source_objs();
builder.reserve_buffer();
- char* cloned_vtables = CppVtables::dumptime_init(&builder);
-
- // Initialize random for updating the hash of symbols
- os::init_random(0x12345678);
+ CppVtables::dumptime_init(&builder);
+ builder.sort_metadata_objs();
builder.dump_rw_metadata();
builder.dump_ro_metadata();
builder.relocate_metaspaceobj_embedded_pointers();
@@ -544,7 +550,7 @@ void VM_PopulateDumpSharedSpace::doit() {
FileMapInfo* mapinfo = new FileMapInfo(static_archive, true);
mapinfo->populate_header(MetaspaceShared::core_region_alignment());
mapinfo->set_serialized_data(serialized_data);
- mapinfo->set_cloned_vtables(cloned_vtables);
+ mapinfo->set_cloned_vtables(CppVtables::vtables_serialized_base());
mapinfo->open_for_write();
builder.write_archive(mapinfo, &_heap_info);
@@ -665,10 +671,12 @@ void MetaspaceShared::preload_and_dump() {
if (PENDING_EXCEPTION->is_a(vmClasses::OutOfMemoryError_klass())) {
log_error(cds)("Out of memory. Please run with a larger Java heap, current MaxHeapSize = "
SIZE_FORMAT "M", MaxHeapSize/M);
+ CLEAR_PENDING_EXCEPTION;
MetaspaceShared::unrecoverable_writing_error();
} else {
log_error(cds)("%s: %s", PENDING_EXCEPTION->klass()->external_name(),
java_lang_String::as_utf8_string(java_lang_Throwable::message(PENDING_EXCEPTION)));
+ CLEAR_PENDING_EXCEPTION;
MetaspaceShared::unrecoverable_writing_error("VM exits due to exception, use -Xlog:cds,exceptions=trace for detail");
}
}
@@ -737,18 +745,18 @@ void MetaspaceShared::preload_classes(TRAPS) {
}
log_info(cds)("Loading classes to share ...");
- int class_count = ClassListParser::parse_classlist(classlist_path,
- ClassListParser::_parse_all, CHECK);
+ ClassListParser::parse_classlist(classlist_path,
+ ClassListParser::_parse_all, CHECK);
if (ExtraSharedClassListFile) {
- class_count += ClassListParser::parse_classlist(ExtraSharedClassListFile,
- ClassListParser::_parse_all, CHECK);
+ ClassListParser::parse_classlist(ExtraSharedClassListFile,
+ ClassListParser::_parse_all, CHECK);
}
if (classlist_path != default_classlist) {
struct stat statbuf;
if (os::stat(default_classlist, &statbuf) == 0) {
// File exists, let's use it.
- class_count += ClassListParser::parse_classlist(default_classlist,
- ClassListParser::_parse_lambda_forms_invokers_only, CHECK);
+ ClassListParser::parse_classlist(default_classlist,
+ ClassListParser::_parse_lambda_forms_invokers_only, CHECK);
}
}
@@ -758,7 +766,6 @@ void MetaspaceShared::preload_classes(TRAPS) {
CDSProtectionDomain::create_jar_manifest(dummy, strlen(dummy), CHECK);
log_info(cds)("Loading classes to share: done.");
- log_info(cds)("Shared spaces: preloaded %d classes", class_count);
}
void MetaspaceShared::preload_and_dump_impl(TRAPS) {
@@ -1260,15 +1267,15 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
size_t archive_end_offset = (dynamic_mapinfo == nullptr) ? static_mapinfo->mapping_end_offset() : dynamic_mapinfo->mapping_end_offset();
size_t archive_space_size = align_up(archive_end_offset, archive_space_alignment);
- // If a base address is given, it must have valid alignment and be suitable as encoding base.
- if (base_address != nullptr) {
- assert(is_aligned(base_address, archive_space_alignment),
- "Archive base address invalid: " PTR_FORMAT ".", p2i(base_address));
- }
-
if (!Metaspace::using_class_space()) {
// Get the simple case out of the way first:
// no compressed class space, simple allocation.
+
+ // When running without class space, requested archive base should be aligned to cds core alignment.
+ assert(is_aligned(base_address, archive_space_alignment),
+ "Archive base address unaligned: " PTR_FORMAT ", needs alignment: %zu.",
+ p2i(base_address), archive_space_alignment);
+
archive_space_rs = ReservedSpace(archive_space_size, archive_space_alignment,
os::vm_page_size(), (char*)base_address);
if (archive_space_rs.is_reserved()) {
@@ -1289,12 +1296,12 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
const size_t class_space_alignment = Metaspace::reserve_alignment();
- // To simplify matters, lets assume that metaspace alignment will always be
- // equal or a multiple of archive alignment.
- assert(is_power_of_2(class_space_alignment) &&
- is_power_of_2(archive_space_alignment) &&
- class_space_alignment >= archive_space_alignment,
- "Sanity");
+ // When running with class space, requested archive base must satisfy both cds core alignment
+ // and class space alignment.
+ const size_t base_address_alignment = MAX2(class_space_alignment, archive_space_alignment);
+ assert(is_aligned(base_address, base_address_alignment),
+ "Archive base address unaligned: " PTR_FORMAT ", needs alignment: %zu.",
+ p2i(base_address), base_address_alignment);
const size_t class_space_size = CompressedClassSpaceSize;
assert(CompressedClassSpaceSize > 0 &&
@@ -1302,12 +1309,11 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
"CompressedClassSpaceSize malformed: "
SIZE_FORMAT, CompressedClassSpaceSize);
- const size_t ccs_begin_offset = align_up(base_address + archive_space_size,
- class_space_alignment) - base_address;
+ const size_t ccs_begin_offset = align_up(archive_space_size, class_space_alignment);
const size_t gap_size = ccs_begin_offset - archive_space_size;
const size_t total_range_size =
- align_up(archive_space_size + gap_size + class_space_size, core_region_alignment());
+ archive_space_size + gap_size + class_space_size;
assert(total_range_size > ccs_begin_offset, "must be");
if (use_windows_memory_mapping() && use_archive_base_addr) {
@@ -1332,7 +1338,7 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
MemTracker::record_virtual_memory_type(class_space_rs.base(), mtClass);
} else {
if (use_archive_base_addr && base_address != nullptr) {
- total_space_rs = ReservedSpace(total_range_size, archive_space_alignment,
+ total_space_rs = ReservedSpace(total_range_size, base_address_alignment,
os::vm_page_size(), (char*) base_address);
} else {
// We did not manage to reserve at the preferred address, or were instructed to relocate. In that
@@ -1350,7 +1356,7 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
// Paranoid checks:
assert(base_address == nullptr || (address)total_space_rs.base() == base_address,
"Sanity (" PTR_FORMAT " vs " PTR_FORMAT ")", p2i(base_address), p2i(total_space_rs.base()));
- assert(is_aligned(total_space_rs.base(), archive_space_alignment), "Sanity");
+ assert(is_aligned(total_space_rs.base(), base_address_alignment), "Sanity");
assert(total_space_rs.size() == total_range_size, "Sanity");
// Now split up the space into ccs and cds archive. For simplicity, just leave
diff --git a/src/hotspot/share/cds/serializeClosure.hpp b/src/hotspot/share/cds/serializeClosure.hpp
index 275009286cb..3d401407f37 100644
--- a/src/hotspot/share/cds/serializeClosure.hpp
+++ b/src/hotspot/share/cds/serializeClosure.hpp
@@ -48,8 +48,20 @@ public:
// Read/write the bool pointed to by p.
virtual void do_bool(bool* p) = 0;
- // Read/write the region specified.
- virtual void do_region(u_char* start, size_t size) = 0;
+ // Iterate on the pointers from p[0] through p[num_pointers-1]
+ void do_ptrs(void** p, size_t size) {
+ assert((intptr_t)p % sizeof(intptr_t) == 0, "bad alignment");
+ assert(size % sizeof(intptr_t) == 0, "bad size");
+ do_tag((int)size);
+ while (size > 0) {
+ do_ptr(p);
+ p++;
+ size -= sizeof(intptr_t);
+ }
+ }
+
+ // Address of the first element being written (write only)
+ virtual char* region_top() = 0;
// Check/write the tag. If reading, then compare the tag against
// the passed in value and fail is they don't match. This allows
diff --git a/src/hotspot/share/ci/ciEnv.cpp b/src/hotspot/share/ci/ciEnv.cpp
index 40abaa06df2..6d11a436f65 100644
--- a/src/hotspot/share/ci/ciEnv.cpp
+++ b/src/hotspot/share/ci/ciEnv.cpp
@@ -869,10 +869,8 @@ ciMethod* ciEnv::get_method_by_index_impl(const constantPoolHandle& cpool,
// Jump through a patchable call site, which is initially a deopt routine.
// Patch the call site to the nmethod entry point of the static compiled lambda form.
// As with other two-component call sites, both values must be independently verified.
- int indy_index = cpool->decode_invokedynamic_index(index);
- assert (indy_index >= 0, "should be");
- assert(indy_index < cpool->cache()->resolved_indy_entries_length(), "impossible");
- Method* adapter = cpool->resolved_indy_entry_at(indy_index)->method();
+ assert(index < cpool->cache()->resolved_indy_entries_length(), "impossible");
+ Method* adapter = cpool->resolved_indy_entry_at(index)->method();
// Resolved if the adapter is non null.
if (adapter != nullptr) {
return get_method(adapter);
@@ -1141,15 +1139,17 @@ void ciEnv::register_method(ciMethod* target,
#endif
if (entry_bci == InvocationEntryBci) {
- // If there is an old version we're done with it
- nmethod* old = method->code();
- if (TraceMethodReplacement && old != nullptr) {
- ResourceMark rm;
- char *method_name = method->name_and_sig_as_C_string();
- tty->print_cr("Replacing method %s", method_name);
- }
- if (old != nullptr) {
- old->make_not_used();
+ if (TieredCompilation) {
+ // If there is an old version we're done with it
+ nmethod* old = method->code();
+ if (TraceMethodReplacement && old != nullptr) {
+ ResourceMark rm;
+ char *method_name = method->name_and_sig_as_C_string();
+ tty->print_cr("Replacing method %s", method_name);
+ }
+ if (old != nullptr) {
+ old->make_not_used();
+ }
}
LogTarget(Info, nmethod, install) lt;
@@ -1499,21 +1499,20 @@ void ciEnv::record_call_site_method(Thread* thread, Method* adapter) {
// Process an invokedynamic call site and record any dynamic locations.
void ciEnv::process_invokedynamic(const constantPoolHandle &cp, int indy_index, JavaThread* thread) {
- int index = cp->decode_invokedynamic_index(indy_index);
- ResolvedIndyEntry* indy_info = cp->resolved_indy_entry_at(index);
+ ResolvedIndyEntry* indy_info = cp->resolved_indy_entry_at(indy_index);
if (indy_info->method() != nullptr) {
// process the adapter
Method* adapter = indy_info->method();
record_call_site_method(thread, adapter);
// process the appendix
- oop appendix = cp->resolved_reference_from_indy(index);
+ oop appendix = cp->resolved_reference_from_indy(indy_index);
{
RecordLocation fp(this, "");
record_call_site_obj(thread, appendix);
}
// process the BSM
int pool_index = indy_info->constant_pool_index();
- BootstrapInfo bootstrap_specifier(cp, pool_index, index);
+ BootstrapInfo bootstrap_specifier(cp, pool_index, indy_index);
oop bsm = cp->resolve_possibly_cached_constant_at(bootstrap_specifier.bsm_index(), thread);
{
RecordLocation fp(this, "");
diff --git a/src/hotspot/share/ci/ciMethod.cpp b/src/hotspot/share/ci/ciMethod.cpp
index 0b41a257a4b..532ac7856b8 100644
--- a/src/hotspot/share/ci/ciMethod.cpp
+++ b/src/hotspot/share/ci/ciMethod.cpp
@@ -36,6 +36,7 @@
#include "ci/ciUtilities.inline.hpp"
#include "compiler/abstractCompiler.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
+#include "compiler/compilerOracle.hpp"
#include "compiler/methodLiveness.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/linkResolver.hpp"
@@ -1062,7 +1063,7 @@ MethodCounters* ciMethod::ensure_method_counters() {
// ------------------------------------------------------------------
// ciMethod::has_option
//
-bool ciMethod::has_option(enum CompileCommand option) {
+bool ciMethod::has_option(CompileCommandEnum option) {
check_is_loaded();
VM_ENTRY_MARK;
methodHandle mh(THREAD, get_Method());
@@ -1072,7 +1073,7 @@ bool ciMethod::has_option(enum CompileCommand option) {
// ------------------------------------------------------------------
// ciMethod::has_option_value
//
-bool ciMethod::has_option_value(enum CompileCommand option, double& value) {
+bool ciMethod::has_option_value(CompileCommandEnum option, double& value) {
check_is_loaded();
VM_ENTRY_MARK;
methodHandle mh(THREAD, get_Method());
@@ -1125,13 +1126,14 @@ int ciMethod::code_size_for_inlining() {
// not highly relevant to an inlined method. So we use the more
// specific accessor nmethod::insts_size.
// Also some instructions inside the code are excluded from inline
-// heuristic (e.g. post call nop instructions; see InlineSkippedInstructionsCounter)
+// heuristic (e.g. post call nop instructions and GC barriers;
+// see InlineSkippedInstructionsCounter).
int ciMethod::inline_instructions_size() {
if (_inline_instructions_size == -1) {
GUARDED_VM_ENTRY(
nmethod* code = get_Method()->code();
if (code != nullptr && (code->comp_level() == CompLevel_full_optimization)) {
- int isize = code->insts_end() - code->verified_entry_point() - code->skipped_instructions_size();
+ int isize = code->inline_insts_size();
_inline_instructions_size = isize > 0 ? isize : 0;
} else {
_inline_instructions_size = 0;
diff --git a/src/hotspot/share/ci/ciMethod.hpp b/src/hotspot/share/ci/ciMethod.hpp
index 0c171d0cf0b..a7c18b09f13 100644
--- a/src/hotspot/share/ci/ciMethod.hpp
+++ b/src/hotspot/share/ci/ciMethod.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,10 +31,10 @@
#include "ci/ciSignature.hpp"
#include "classfile/vmIntrinsics.hpp"
#include "compiler/methodLiveness.hpp"
-#include "compiler/compilerOracle.hpp"
#include "oops/method.hpp"
#include "runtime/handles.hpp"
#include "utilities/bitMap.hpp"
+#include "utilities/vmEnums.hpp"
class ciMethodBlocks;
class MethodLiveness;
@@ -303,8 +303,8 @@ class ciMethod : public ciMetadata {
// Find the proper vtable index to invoke this method.
int resolve_vtable_index(ciKlass* caller, ciKlass* receiver);
- bool has_option(enum CompileCommand option);
- bool has_option_value(enum CompileCommand option, double& value);
+ bool has_option(CompileCommandEnum option);
+ bool has_option_value(CompileCommandEnum option, double& value);
bool can_be_compiled();
bool can_be_parsed() const { return _can_be_parsed; }
bool has_compiled_code();
diff --git a/src/hotspot/share/ci/ciReplay.cpp b/src/hotspot/share/ci/ciReplay.cpp
index 6edbadcec4f..5fa30f86411 100644
--- a/src/hotspot/share/ci/ciReplay.cpp
+++ b/src/hotspot/share/ci/ciReplay.cpp
@@ -416,7 +416,6 @@ class CompileReplay : public StackObj {
int pool_index = 0;
if (bytecode.is_invokedynamic()) {
- index = cp->decode_invokedynamic_index(index);
cp->cache()->set_dynamic_call(callInfo, index);
appendix = cp->resolved_reference_from_indy(index);
diff --git a/src/hotspot/share/ci/ciStreams.cpp b/src/hotspot/share/ci/ciStreams.cpp
index 47c1ee76bf3..8220910d74c 100644
--- a/src/hotspot/share/ci/ciStreams.cpp
+++ b/src/hotspot/share/ci/ciStreams.cpp
@@ -470,7 +470,7 @@ ciMethod* ciBytecodeStream::get_method(bool& will_link, ciSignature* *declared_s
bool ciBytecodeStream::has_appendix() {
VM_ENTRY_MARK;
constantPoolHandle cpool(THREAD, _method->get_Method()->constants());
- return ConstantPool::has_appendix_at_if_loaded(cpool, get_method_index());
+ return ConstantPool::has_appendix_at_if_loaded(cpool, get_method_index(), cur_bc());
}
// ------------------------------------------------------------------
@@ -481,7 +481,7 @@ bool ciBytecodeStream::has_appendix() {
ciObject* ciBytecodeStream::get_appendix() {
VM_ENTRY_MARK;
constantPoolHandle cpool(THREAD, _method->get_Method()->constants());
- oop appendix_oop = ConstantPool::appendix_at_if_loaded(cpool, get_method_index());
+ oop appendix_oop = ConstantPool::appendix_at_if_loaded(cpool, get_method_index(), cur_bc());
return CURRENT_ENV->get_object(appendix_oop);
}
@@ -493,7 +493,7 @@ ciObject* ciBytecodeStream::get_appendix() {
bool ciBytecodeStream::has_local_signature() {
GUARDED_VM_ENTRY(
constantPoolHandle cpool(Thread::current(), _method->get_Method()->constants());
- return ConstantPool::has_local_signature_at_if_loaded(cpool, get_method_index());
+ return ConstantPool::has_local_signature_at_if_loaded(cpool, get_method_index(), cur_bc());
)
}
@@ -543,4 +543,3 @@ int ciBytecodeStream::get_method_signature_index(const constantPoolHandle& cpool
return cpool->signature_ref_index_at(name_and_type_index);
)
}
-
diff --git a/src/hotspot/share/classfile/classFileParser.cpp b/src/hotspot/share/classfile/classFileParser.cpp
index fa7ca41e0c5..734b425cb45 100644
--- a/src/hotspot/share/classfile/classFileParser.cpp
+++ b/src/hotspot/share/classfile/classFileParser.cpp
@@ -4188,8 +4188,7 @@ void ClassFileParser::set_precomputed_flags(InstanceKlass* ik) {
// If it cannot be fast-path allocated, set a bit in the layout helper.
// See documentation of InstanceKlass::can_be_fastpath_allocated().
assert(ik->size_helper() > 0, "layout_helper is initialized");
- if ((!RegisterFinalizersAtInit && ik->has_finalizer())
- || ik->is_abstract() || ik->is_interface()
+ if (ik->is_abstract() || ik->is_interface()
|| (ik->name() == vmSymbols::java_lang_Class() && ik->class_loader() == nullptr)
|| ik->size_helper() >= FastAllocateSizeLimit) {
// Forbid fast-path allocation.
diff --git a/src/hotspot/share/classfile/classLoader.cpp b/src/hotspot/share/classfile/classLoader.cpp
index 3fc9b5f7976..76084ff1685 100644
--- a/src/hotspot/share/classfile/classLoader.cpp
+++ b/src/hotspot/share/classfile/classLoader.cpp
@@ -119,6 +119,7 @@ PerfCounter* ClassLoader::_perf_define_appclass_selftime = nullptr;
PerfCounter* ClassLoader::_perf_app_classfile_bytes_read = nullptr;
PerfCounter* ClassLoader::_perf_sys_classfile_bytes_read = nullptr;
PerfCounter* ClassLoader::_unsafe_defineClassCallCounter = nullptr;
+PerfCounter* ClassLoader::_perf_secondary_hash_time = nullptr;
GrowableArray* ClassLoader::_patch_mod_entries = nullptr;
GrowableArray* ClassLoader::_exploded_entries = nullptr;
@@ -1337,6 +1338,7 @@ void ClassLoader::initialize(TRAPS) {
NEWPERFBYTECOUNTER(_perf_sys_classfile_bytes_read, SUN_CLS, "sysClassBytes");
NEWPERFEVENTCOUNTER(_unsafe_defineClassCallCounter, SUN_CLS, "unsafeDefineClassCalls");
+ NEWPERFTICKCOUNTER(_perf_secondary_hash_time, SUN_CLS, "secondarySuperHashTime");
}
// lookup java library entry points
diff --git a/src/hotspot/share/classfile/classLoader.hpp b/src/hotspot/share/classfile/classLoader.hpp
index c8ea47435dd..d3ca476ac95 100644
--- a/src/hotspot/share/classfile/classLoader.hpp
+++ b/src/hotspot/share/classfile/classLoader.hpp
@@ -169,6 +169,9 @@ class ClassLoader: AllStatic {
static PerfCounter* _unsafe_defineClassCallCounter;
+ // Count the time taken to hash the secondary superclass arrays.
+ static PerfCounter* _perf_secondary_hash_time;
+
// The boot class path consists of 3 ordered pieces:
// 1. the module/path pairs specified to --patch-module
// --patch-module==()*
@@ -269,6 +272,9 @@ class ClassLoader: AllStatic {
static PerfCounter* perf_class_link_time() { return _perf_class_link_time; }
static PerfCounter* perf_class_link_selftime() { return _perf_class_link_selftime; }
static PerfCounter* perf_shared_classload_time() { return _perf_shared_classload_time; }
+ static PerfCounter* perf_secondary_hash_time() {
+ return _perf_secondary_hash_time;
+ }
static PerfCounter* perf_sys_classload_time() { return _perf_sys_classload_time; }
static PerfCounter* perf_app_classload_time() { return _perf_app_classload_time; }
static PerfCounter* perf_app_classload_selftime() { return _perf_app_classload_selftime; }
diff --git a/src/hotspot/share/classfile/javaClasses.cpp b/src/hotspot/share/classfile/javaClasses.cpp
index b942c155d1a..077b05f936e 100644
--- a/src/hotspot/share/classfile/javaClasses.cpp
+++ b/src/hotspot/share/classfile/javaClasses.cpp
@@ -2081,18 +2081,17 @@ oop java_lang_Throwable::message(oop throwable) {
return throwable->obj_field(_detailMessage_offset);
}
-oop java_lang_Throwable::cause(oop throwable) {
- return throwable->obj_field(_cause_offset);
+const char* java_lang_Throwable::message_as_utf8(oop throwable) {
+ oop msg = java_lang_Throwable::message(throwable);
+ const char* msg_utf8 = nullptr;
+ if (msg != nullptr) {
+ msg_utf8 = java_lang_String::as_utf8_string(msg);
+ }
+ return msg_utf8;
}
-// Return Symbol for detailed_message or null
-Symbol* java_lang_Throwable::detail_message(oop throwable) {
- PreserveExceptionMark pm(Thread::current());
- oop detailed_message = java_lang_Throwable::message(throwable);
- if (detailed_message != nullptr) {
- return java_lang_String::as_symbol(detailed_message);
- }
- return nullptr;
+oop java_lang_Throwable::cause(oop throwable) {
+ return throwable->obj_field(_cause_offset);
}
void java_lang_Throwable::set_message(oop throwable, oop value) {
diff --git a/src/hotspot/share/classfile/javaClasses.hpp b/src/hotspot/share/classfile/javaClasses.hpp
index b7660c1cdd3..e3bb453ae0a 100644
--- a/src/hotspot/share/classfile/javaClasses.hpp
+++ b/src/hotspot/share/classfile/javaClasses.hpp
@@ -598,12 +598,14 @@ class java_lang_Throwable: AllStatic {
static void set_backtrace(oop throwable, oop value);
static int depth(oop throwable);
static void set_depth(oop throwable, int value);
- static int get_detailMessage_offset() { CHECK_INIT(_detailMessage_offset); }
// Message
+ static int get_detailMessage_offset() { CHECK_INIT(_detailMessage_offset); }
static oop message(oop throwable);
- static oop cause(oop throwable);
+ static const char* message_as_utf8(oop throwable);
static void set_message(oop throwable, oop value);
- static Symbol* detail_message(oop throwable);
+
+ static oop cause(oop throwable);
+
static void print_stack_element(outputStream *st, Method* method, int bci);
static void compute_offsets();
diff --git a/src/hotspot/share/classfile/resolutionErrors.cpp b/src/hotspot/share/classfile/resolutionErrors.cpp
index dc916eefaf2..cf598df59d7 100644
--- a/src/hotspot/share/classfile/resolutionErrors.cpp
+++ b/src/hotspot/share/classfile/resolutionErrors.cpp
@@ -66,8 +66,8 @@ void ResolutionErrorTable::initialize() {
// create new error entry
void ResolutionErrorTable::add_entry(const constantPoolHandle& pool, int cp_index,
- Symbol* error, Symbol* message,
- Symbol* cause, Symbol* cause_msg)
+ Symbol* error, const char* message,
+ Symbol* cause, const char* cause_msg)
{
assert_locked_or_safepoint(SystemDictionary_lock);
assert(!pool.is_null() && error != nullptr, "adding null obj");
@@ -97,26 +97,30 @@ ResolutionErrorEntry* ResolutionErrorTable::find_entry(const constantPoolHandle&
return entry == nullptr ? nullptr : *entry;
}
-ResolutionErrorEntry::ResolutionErrorEntry(Symbol* error, Symbol* message,
- Symbol* cause, Symbol* cause_msg):
+ResolutionErrorEntry::ResolutionErrorEntry(Symbol* error, const char* message,
+ Symbol* cause, const char* cause_msg):
_error(error),
- _message(message),
+ _message(message != nullptr ? os::strdup(message) : nullptr),
_cause(cause),
- _cause_msg(cause_msg),
+ _cause_msg(cause_msg != nullptr ? os::strdup(cause_msg) : nullptr),
_nest_host_error(nullptr) {
Symbol::maybe_increment_refcount(_error);
- Symbol::maybe_increment_refcount(_message);
Symbol::maybe_increment_refcount(_cause);
- Symbol::maybe_increment_refcount(_cause_msg);
}
ResolutionErrorEntry::~ResolutionErrorEntry() {
// decrement error refcount
Symbol::maybe_decrement_refcount(_error);
- Symbol::maybe_decrement_refcount(_message);
Symbol::maybe_decrement_refcount(_cause);
- Symbol::maybe_decrement_refcount(_cause_msg);
+
+ if (_message != nullptr) {
+ FREE_C_HEAP_ARRAY(char, _message);
+ }
+
+ if (_cause_msg != nullptr) {
+ FREE_C_HEAP_ARRAY(char, _cause_msg);
+ }
if (nest_host_error() != nullptr) {
FREE_C_HEAP_ARRAY(char, nest_host_error());
diff --git a/src/hotspot/share/classfile/resolutionErrors.hpp b/src/hotspot/share/classfile/resolutionErrors.hpp
index 388ab6c4ec6..7eff7816b2e 100644
--- a/src/hotspot/share/classfile/resolutionErrors.hpp
+++ b/src/hotspot/share/classfile/resolutionErrors.hpp
@@ -36,10 +36,11 @@ class ResolutionErrorTable : AllStatic {
public:
static void initialize();
- static void add_entry(const constantPoolHandle& pool, int which, Symbol* error, Symbol* message,
- Symbol* cause, Symbol* cause_msg);
+ static void add_entry(const constantPoolHandle& pool, int cp_index,
+ Symbol* error, const char* error_msg,
+ Symbol* cause, const char* cause_msg);
- static void add_entry(const constantPoolHandle& pool, int which, const char* message);
+ static void add_entry(const constantPoolHandle& pool, int cp_index, const char* message);
// find error given the constant pool and constant pool index
static ResolutionErrorEntry* find_entry(const constantPoolHandle& pool, int cp_index);
@@ -57,9 +58,8 @@ public:
static const int CPCACHE_INDEX_MANGLE_VALUE = 1000000;
// This function is used to encode an invokedynamic index to differentiate it from a
- // constant pool index. It assumes it is being called with a index that is less than 0
+ // constant pool index.
static int encode_indy_index(int index) {
- assert(index < 0, "Unexpected non-negative cpCache index");
return index + CPCACHE_INDEX_MANGLE_VALUE;
}
};
@@ -68,34 +68,38 @@ public:
class ResolutionErrorEntry : public CHeapObj {
private:
Symbol* _error;
- Symbol* _message;
+ const char* _message;
Symbol* _cause;
- Symbol* _cause_msg;
+ const char* _cause_msg;
const char* _nest_host_error;
NONCOPYABLE(ResolutionErrorEntry);
public:
- ResolutionErrorEntry(Symbol* error, Symbol* message, Symbol* cause, Symbol* cause_msg);
+ // The incoming message and cause_msg are copied to the C-Heap.
+ ResolutionErrorEntry(Symbol* error, const char* message,
+ Symbol* cause, const char* cause_msg);
- ResolutionErrorEntry(const char* message):
+ // The incoming nest host error message is already in the C-Heap.
+ ResolutionErrorEntry(const char* message):
_error(nullptr),
_message(nullptr),
_cause(nullptr),
_cause_msg(nullptr),
_nest_host_error(message) {}
- ~ResolutionErrorEntry();
+ ~ResolutionErrorEntry();
- void set_nest_host_error(const char* message) {
- _nest_host_error = message;
- }
+ // The incoming nest host error message is already in the C-Heap.
+ void set_nest_host_error(const char* message) {
+ _nest_host_error = message;
+ }
Symbol* error() const { return _error; }
- Symbol* message() const { return _message; }
+ const char* message() const { return _message; }
Symbol* cause() const { return _cause; }
- Symbol* cause_msg() const { return _cause_msg; }
+ const char* cause_msg() const { return _cause_msg; }
const char* nest_host_error() const { return _nest_host_error; }
};
diff --git a/src/hotspot/share/classfile/systemDictionary.cpp b/src/hotspot/share/classfile/systemDictionary.cpp
index 6a8d2975059..3a6c372cc50 100644
--- a/src/hotspot/share/classfile/systemDictionary.cpp
+++ b/src/hotspot/share/classfile/systemDictionary.cpp
@@ -1799,8 +1799,8 @@ bool SystemDictionary::add_loader_constraint(Symbol* class_name,
// Add entry to resolution error table to record the error when the first
// attempt to resolve a reference to a class has failed.
void SystemDictionary::add_resolution_error(const constantPoolHandle& pool, int which,
- Symbol* error, Symbol* message,
- Symbol* cause, Symbol* cause_msg) {
+ Symbol* error, const char* message,
+ Symbol* cause, const char* cause_msg) {
{
MutexLocker ml(Thread::current(), SystemDictionary_lock);
ResolutionErrorEntry* entry = ResolutionErrorTable::find_entry(pool, which);
@@ -1817,7 +1817,8 @@ void SystemDictionary::delete_resolution_error(ConstantPool* pool) {
// Lookup resolution error table. Returns error if found, otherwise null.
Symbol* SystemDictionary::find_resolution_error(const constantPoolHandle& pool, int which,
- Symbol** message, Symbol** cause, Symbol** cause_msg) {
+ const char** message,
+ Symbol** cause, const char** cause_msg) {
{
MutexLocker ml(Thread::current(), SystemDictionary_lock);
@@ -2008,9 +2009,9 @@ Method* SystemDictionary::find_method_handle_intrinsic(vmIntrinsicID iid,
}
}
- // Throw VirtualMachineError or the pending exception in the JavaThread
+ // Throw OOM or the pending exception in the JavaThread
if (throw_error && !HAS_PENDING_EXCEPTION) {
- THROW_MSG_NULL(vmSymbols::java_lang_VirtualMachineError(),
+ THROW_MSG_NULL(vmSymbols::java_lang_OutOfMemoryError(),
"Out of space in CodeCache for method handle intrinsic");
}
return nullptr;
diff --git a/src/hotspot/share/classfile/systemDictionary.hpp b/src/hotspot/share/classfile/systemDictionary.hpp
index a65606e8cf6..9f65cb593d3 100644
--- a/src/hotspot/share/classfile/systemDictionary.hpp
+++ b/src/hotspot/share/classfile/systemDictionary.hpp
@@ -272,12 +272,13 @@ public:
// Record the error when the first attempt to resolve a reference from a constant
// pool entry to a class fails.
- static void add_resolution_error(const constantPoolHandle& pool, int which, Symbol* error,
- Symbol* message, Symbol* cause = nullptr, Symbol* cause_msg = nullptr);
+ static void add_resolution_error(const constantPoolHandle& pool, int which,
+ Symbol* error, const char* message,
+ Symbol* cause = nullptr, const char* cause_msg = nullptr);
static void delete_resolution_error(ConstantPool* pool);
static Symbol* find_resolution_error(const constantPoolHandle& pool, int which,
- Symbol** message, Symbol** cause, Symbol** cause_msg);
-
+ const char** message,
+ Symbol** cause, const char** cause_msg);
// Record a nest host resolution/validation error
static void add_nest_host_error(const constantPoolHandle& pool, int which,
diff --git a/src/hotspot/share/classfile/verificationType.hpp b/src/hotspot/share/classfile/verificationType.hpp
index 5d674e19037..4f0609f0cce 100644
--- a/src/hotspot/share/classfile/verificationType.hpp
+++ b/src/hotspot/share/classfile/verificationType.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -49,10 +49,10 @@ class ClassVerifier;
class VerificationType {
private:
- // Least significant bits of _handle are always 0, so we use these as
- // the indicator that the _handle is valid. Otherwise, the _data field
+ // Least significant 2 bits of _sym are always 0, so we use these as
+ // the indicator that _sym is a valid pointer. Otherwise, the _data field
// contains encoded data (as specified below). Should the VM change
- // and the lower bits on oops aren't 0, the assert in the constructor
+ // and the lower 2 bits of Symbol* aren't 0, the assert in the constructor
// will catch this and we'll have to add a descriminator tag to this
// structure.
union {
diff --git a/src/hotspot/share/classfile/verifier.cpp b/src/hotspot/share/classfile/verifier.cpp
index 743bd9d06ba..a66fbf645f5 100644
--- a/src/hotspot/share/classfile/verifier.cpp
+++ b/src/hotspot/share/classfile/verifier.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -255,7 +255,7 @@ bool Verifier::verify(InstanceKlass* klass, bool should_verify_class, TRAPS) {
// or one of it's superclasses, we're in trouble and are going
// to infinitely recurse when we try to initialize the exception.
// So bail out here by throwing the preallocated VM error.
- THROW_OOP_(Universe::virtual_machine_error_instance(), false);
+ THROW_OOP_(Universe::internal_error_instance(), false);
}
kls = kls->super();
}
diff --git a/src/hotspot/share/classfile/vmIntrinsics.cpp b/src/hotspot/share/classfile/vmIntrinsics.cpp
index 6e72b4cbac3..8d4f57165e1 100644
--- a/src/hotspot/share/classfile/vmIntrinsics.cpp
+++ b/src/hotspot/share/classfile/vmIntrinsics.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -506,6 +506,9 @@ bool vmIntrinsics::disabled_by_jvm_flags(vmIntrinsics::ID id) {
case vmIntrinsics::_copyMemory:
if (!InlineArrayCopy || !InlineUnsafeOps) return true;
break;
+ case vmIntrinsics::_setMemory:
+ if (!InlineUnsafeOps) return true;
+ break;
#ifdef COMPILER2
case vmIntrinsics::_clone:
case vmIntrinsics::_copyOf:
diff --git a/src/hotspot/share/classfile/vmIntrinsics.hpp b/src/hotspot/share/classfile/vmIntrinsics.hpp
index 6104fb5683b..a0db1a65d3a 100644
--- a/src/hotspot/share/classfile/vmIntrinsics.hpp
+++ b/src/hotspot/share/classfile/vmIntrinsics.hpp
@@ -620,6 +620,9 @@ class methodHandle;
do_intrinsic(_copyMemory, jdk_internal_misc_Unsafe, copyMemory_name, copyMemory_signature, F_RN) \
do_name( copyMemory_name, "copyMemory0") \
do_signature(copyMemory_signature, "(Ljava/lang/Object;JLjava/lang/Object;JJ)V") \
+ do_intrinsic(_setMemory, jdk_internal_misc_Unsafe, setMemory_name, setMemory_signature, F_RN) \
+ do_name( setMemory_name, "setMemory0") \
+ do_signature(setMemory_signature, "(Ljava/lang/Object;JJB)V") \
do_intrinsic(_loadFence, jdk_internal_misc_Unsafe, loadFence_name, loadFence_signature, F_R) \
do_name( loadFence_name, "loadFence") \
do_alias( loadFence_signature, void_method_signature) \
diff --git a/src/hotspot/share/classfile/vmSymbols.cpp b/src/hotspot/share/classfile/vmSymbols.cpp
index 05cd4767e9a..172d074255b 100644
--- a/src/hotspot/share/classfile/vmSymbols.cpp
+++ b/src/hotspot/share/classfile/vmSymbols.cpp
@@ -205,9 +205,9 @@ void vmSymbols::metaspace_pointers_do(MetaspaceClosure *closure) {
}
void vmSymbols::serialize(SerializeClosure* soc) {
- soc->do_region((u_char*)&Symbol::_vm_symbols[FIRST_SID],
+ soc->do_ptrs((void**)&Symbol::_vm_symbols[FIRST_SID],
(SID_LIMIT - FIRST_SID) * sizeof(Symbol::_vm_symbols[0]));
- soc->do_region((u_char*)_type_signatures, sizeof(_type_signatures));
+ soc->do_ptrs((void**)_type_signatures, sizeof(_type_signatures));
}
#ifndef PRODUCT
diff --git a/src/hotspot/share/classfile/vmSymbols.hpp b/src/hotspot/share/classfile/vmSymbols.hpp
index f35858fe586..f79af705f0b 100644
--- a/src/hotspot/share/classfile/vmSymbols.hpp
+++ b/src/hotspot/share/classfile/vmSymbols.hpp
@@ -432,7 +432,7 @@ class SerializeClosure;
template(yieldInfo_name, "yieldInfo") \
template(tail_name, "tail") \
template(size_name, "size") \
- template(argsize_name, "argsize") \
+ template(bottom_name, "bottom") \
template(mode_name, "mode") \
template(numFrames_name, "numFrames") \
template(numOops_name, "numOops") \
diff --git a/src/hotspot/share/code/codeBlob.cpp b/src/hotspot/share/code/codeBlob.cpp
index 83418742988..81c4d001078 100644
--- a/src/hotspot/share/code/codeBlob.cpp
+++ b/src/hotspot/share/code/codeBlob.cpp
@@ -73,74 +73,48 @@ unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
return size;
}
-#ifdef ASSERT
-void CodeBlob::verify_parameters() {
- assert(is_aligned(_size, oopSize), "unaligned size");
- assert(is_aligned(_header_size, oopSize), "unaligned size");
- assert(is_aligned(_relocation_size, oopSize), "unaligned size");
- assert(_data_offset <= size(), "codeBlob is too small");
- assert(code_end() == content_end(), "must be the same - see code_end()");
-#ifdef COMPILER1
- // probably wrong for tiered
- assert(frame_size() >= -1, "must use frame size or -1 for runtime stubs");
-#endif // COMPILER1
-}
-#endif
-
-CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, int size, int header_size, int relocation_size,
- int content_offset, int code_offset, int frame_complete_offset, int data_offset,
- int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments) :
- _oop_maps(oop_maps),
- _name(name),
- _size(size),
- _header_size(header_size),
- _relocation_size(relocation_size),
- _content_offset(content_offset),
- _code_offset(code_offset),
- _frame_complete_offset(frame_complete_offset),
- _data_offset(data_offset),
- _frame_size(frame_size),
- S390_ONLY(_ctable_offset(0) COMMA)
- _kind(kind),
- _caller_must_gc_arguments(caller_must_gc_arguments)
-{
- DEBUG_ONLY( verify_parameters(); )
-}
-
-CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size, int header_size,
- int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) :
+CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size, uint16_t header_size,
+ int16_t frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) :
_oop_maps(nullptr), // will be set by set_oop_maps() call
_name(name),
_size(size),
- _header_size(header_size),
_relocation_size(align_up(cb->total_relocation_size(), oopSize)),
- _content_offset(CodeBlob::align_code_offset(_header_size + _relocation_size)),
+ _content_offset(CodeBlob::align_code_offset(header_size + _relocation_size)),
_code_offset(_content_offset + cb->total_offset_of(cb->insts())),
- _frame_complete_offset(frame_complete_offset),
_data_offset(_content_offset + align_up(cb->total_content_size(), oopSize)),
_frame_size(frame_size),
S390_ONLY(_ctable_offset(0) COMMA)
+ _header_size(header_size),
+ _frame_complete_offset(frame_complete_offset),
_kind(kind),
_caller_must_gc_arguments(caller_must_gc_arguments)
{
- DEBUG_ONLY( verify_parameters(); )
+ assert(is_aligned(_size, oopSize), "unaligned size");
+ assert(is_aligned(header_size, oopSize), "unaligned size");
+ assert(is_aligned(_relocation_size, oopSize), "unaligned size");
+ assert(_data_offset <= _size, "codeBlob is too small: %d > %d", _data_offset, _size);
+ assert(code_end() == content_end(), "must be the same - see code_end()");
+#ifdef COMPILER1
+ // probably wrong for tiered
+ assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
+#endif // COMPILER1
set_oop_maps(oop_maps);
}
// Simple CodeBlob used for simple BufferBlob.
-CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, int size, int header_size) :
+CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, int size, uint16_t header_size) :
_oop_maps(nullptr),
_name(name),
_size(size),
- _header_size(header_size),
_relocation_size(0),
_content_offset(CodeBlob::align_code_offset(header_size)),
_code_offset(_content_offset),
- _frame_complete_offset(CodeOffsets::frame_never_safe),
_data_offset(size),
_frame_size(0),
S390_ONLY(_ctable_offset(0) COMMA)
+ _header_size(header_size),
+ _frame_complete_offset(CodeOffsets::frame_never_safe),
_kind(kind),
_caller_must_gc_arguments(false)
{
@@ -148,7 +122,7 @@ CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, int size, int header_siz
assert(is_aligned(header_size, oopSize), "unaligned size");
}
-void CodeBlob::purge(bool free_code_cache_data, bool unregister_nmethod) {
+void CodeBlob::purge() {
if (_oop_maps != nullptr) {
delete _oop_maps;
_oop_maps = nullptr;
@@ -185,8 +159,8 @@ RuntimeBlob::RuntimeBlob(
CodeBlobKind kind,
CodeBuffer* cb,
int size,
- int header_size,
- int frame_complete,
+ uint16_t header_size,
+ int16_t frame_complete,
int frame_size,
OopMapSet* oop_maps,
bool caller_must_gc_arguments)
@@ -198,7 +172,7 @@ RuntimeBlob::RuntimeBlob(
void RuntimeBlob::free(RuntimeBlob* blob) {
assert(blob != nullptr, "caller must check for nullptr");
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
- blob->purge(true /* free_code_cache_data */, true /* unregister_nmethod */);
+ blob->purge();
{
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
CodeCache::free(blob);
@@ -408,7 +382,7 @@ RuntimeStub::RuntimeStub(
const char* name,
CodeBuffer* cb,
int size,
- int frame_complete,
+ int16_t frame_complete,
int frame_size,
OopMapSet* oop_maps,
bool caller_must_gc_arguments
@@ -420,7 +394,7 @@ RuntimeStub::RuntimeStub(
RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
CodeBuffer* cb,
- int frame_complete,
+ int16_t frame_complete,
int frame_size,
OopMapSet* oop_maps,
bool caller_must_gc_arguments,
@@ -668,10 +642,6 @@ void UpcallStub::free(UpcallStub* blob) {
RuntimeBlob::free(blob);
}
-void UpcallStub::preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) {
- ShouldNotReachHere(); // caller should never have to gc arguments
-}
-
//----------------------------------------------------------------------------------------------------
// Verification and printing
diff --git a/src/hotspot/share/code/codeBlob.hpp b/src/hotspot/share/code/codeBlob.hpp
index 134d60e5cb5..26ae5f7df59 100644
--- a/src/hotspot/share/code/codeBlob.hpp
+++ b/src/hotspot/share/code/codeBlob.hpp
@@ -90,8 +90,8 @@ enum class CodeBlobKind : u1 {
Number_Of_Kinds
};
-class UpcallStub; // for as_upcall_stub()
-class RuntimeStub; // for as_runtime_stub()
+class UpcallStub; // for as_upcall_stub()
+class RuntimeStub; // for as_runtime_stub()
class JavaFrameAnchor; // for UpcallStub::jfa_for_frame
class CodeBlob {
@@ -101,43 +101,39 @@ class CodeBlob {
protected:
// order fields from large to small to minimize padding between fields
- ImmutableOopMapSet* _oop_maps; // OopMap for this CodeBlob
+ ImmutableOopMapSet* _oop_maps; // OopMap for this CodeBlob
const char* _name;
- int _size; // total size of CodeBlob in bytes
- int _header_size; // size of header (depends on subclass)
- int _relocation_size; // size of relocation
- int _content_offset; // offset to where content region begins (this includes consts, insts, stubs)
- int _code_offset; // offset to where instructions region begins (this includes insts, stubs)
- int _frame_complete_offset; // instruction offsets in [0.._frame_complete_offset) have
- // not finished setting up their frame. Beware of pc's in
- // that range. There is a similar range(s) on returns
- // which we don't detect.
- int _data_offset; // offset to where data region begins
- int _frame_size; // size of stack frame in words (NOT slots. On x64 these are 64bit words)
+ int _size; // total size of CodeBlob in bytes
+ int _relocation_size; // size of relocation (could be bigger than 64Kb)
+ int _content_offset; // offset to where content region begins (this includes consts, insts, stubs)
+ int _code_offset; // offset to where instructions region begins (this includes insts, stubs)
- S390_ONLY(int _ctable_offset;)
+ int _data_offset; // offset to where data region begins
+ int _frame_size; // size of stack frame in words (NOT slots. On x64 these are 64bit words)
- CodeBlobKind _kind; // Kind of this code blob
+ S390_ONLY(int _ctable_offset;)
- bool _caller_must_gc_arguments;
+ uint16_t _header_size; // size of header (depends on subclass)
+ int16_t _frame_complete_offset; // instruction offsets in [0.._frame_complete_offset) have
+ // not finished setting up their frame. Beware of pc's in
+ // that range. There is a similar range(s) on returns
+ // which we don't detect.
+
+ CodeBlobKind _kind; // Kind of this code blob
+
+ bool _caller_must_gc_arguments;
#ifndef PRODUCT
AsmRemarks _asm_remarks;
DbgStrings _dbg_strings;
-#endif // not PRODUCT
+#endif
- DEBUG_ONLY( void verify_parameters() );
-
- CodeBlob(const char* name, CodeBlobKind kind, int size, int header_size, int relocation_size,
- int content_offset, int code_offset, int data_offset, int frame_complete_offset,
- int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments);
-
- CodeBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size, int header_size,
- int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments);
+ CodeBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size, uint16_t header_size,
+ int16_t frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments);
// Simple CodeBlob used for simple BufferBlob.
- CodeBlob(const char* name, CodeBlobKind kind, int size, int header_size);
+ CodeBlob(const char* name, CodeBlobKind kind, int size, uint16_t header_size);
void operator delete(void* p) { }
@@ -152,7 +148,7 @@ public:
static unsigned int align_code_offset(int offset);
// Deletion
- virtual void purge(bool free_code_cache_data, bool unregister_nmethod);
+ void purge();
// Typing
bool is_nmethod() const { return _kind == CodeBlobKind::Nmethod; }
@@ -225,7 +221,6 @@ public:
const ImmutableOopMap* oop_map_for_slot(int slot, address return_address) const;
const ImmutableOopMap* oop_map_for_return_address(address return_address) const;
- virtual void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) = 0;
// Frame support. Sizes are in word units.
int frame_size() const { return _frame_size; }
@@ -273,7 +268,7 @@ class RuntimeBlob : public CodeBlob {
// Creation
// a) simple CodeBlob
- RuntimeBlob(const char* name, CodeBlobKind kind, int size, int header_size)
+ RuntimeBlob(const char* name, CodeBlobKind kind, int size, uint16_t header_size)
: CodeBlob(name, kind, size, header_size)
{}
@@ -285,8 +280,8 @@ class RuntimeBlob : public CodeBlob {
CodeBlobKind kind,
CodeBuffer* cb,
int size,
- int header_size,
- int frame_complete,
+ uint16_t header_size,
+ int16_t frame_complete,
int frame_size,
OopMapSet* oop_maps,
bool caller_must_gc_arguments = false
@@ -324,10 +319,9 @@ class BufferBlob: public RuntimeBlob {
static void free(BufferBlob* buf);
- // GC/Verification support
- void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) override { /* nothing to do */ }
-
+ // Verification support
void verify() override;
+
void print_on(outputStream* st) const override;
void print_value_on(outputStream* st) const override;
};
@@ -381,7 +375,7 @@ class RuntimeStub: public RuntimeBlob {
const char* name,
CodeBuffer* cb,
int size,
- int frame_complete,
+ int16_t frame_complete,
int frame_size,
OopMapSet* oop_maps,
bool caller_must_gc_arguments
@@ -394,7 +388,7 @@ class RuntimeStub: public RuntimeBlob {
static RuntimeStub* new_runtime_stub(
const char* stub_name,
CodeBuffer* cb,
- int frame_complete,
+ int16_t frame_complete,
int frame_size,
OopMapSet* oop_maps,
bool caller_must_gc_arguments,
@@ -405,10 +399,9 @@ class RuntimeStub: public RuntimeBlob {
address entry_point() const { return code_begin(); }
- // GC/Verification support
- void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) override { /* nothing to do */ }
-
+ // Verification support
void verify() override;
+
void print_on(outputStream* st) const override;
void print_value_on(outputStream* st) const override;
};
@@ -429,7 +422,7 @@ class SingletonBlob: public RuntimeBlob {
CodeBlobKind kind,
CodeBuffer* cb,
int size,
- int header_size,
+ uint16_t header_size,
int frame_size,
OopMapSet* oop_maps
)
@@ -438,9 +431,9 @@ class SingletonBlob: public RuntimeBlob {
address entry_point() { return code_begin(); }
- // GC/Verification support
- void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) override { /* nothing to do */ }
+ // Verification support
void verify() override; // does nothing
+
void print_on(outputStream* st) const override;
void print_value_on(outputStream* st) const override;
};
@@ -632,7 +625,6 @@ class UpcallStub: public RuntimeBlob {
// GC/Verification support
void oops_do(OopClosure* f, const frame& frame);
- void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) override;
void verify() override;
// Misc.
diff --git a/src/hotspot/share/code/codeCache.cpp b/src/hotspot/share/code/codeCache.cpp
index 73eca19f086..20583ce492d 100644
--- a/src/hotspot/share/code/codeCache.cpp
+++ b/src/hotspot/share/code/codeCache.cpp
@@ -34,7 +34,6 @@
#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
-#include "compiler/compilerDirectives.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/classUnloadingContext.hpp"
@@ -1330,67 +1329,6 @@ void CodeCache::mark_all_nmethods_for_evol_deoptimization(DeoptimizationScope* d
#endif // INCLUDE_JVMTI
-void CodeCache::mark_directives_matches(bool top_only) {
- MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
- Thread *thread = Thread::current();
- HandleMark hm(thread);
-
- NMethodIterator iter(NMethodIterator::not_unloading);
- while(iter.next()) {
- nmethod* nm = iter.method();
- methodHandle mh(thread, nm->method());
- if (DirectivesStack::hasMatchingDirectives(mh, top_only)) {
- ResourceMark rm;
- log_trace(codecache)("Mark because of matching directives %s", mh->external_name());
- mh->set_has_matching_directives();
- }
- }
-}
-
-void CodeCache::recompile_marked_directives_matches() {
- Thread *thread = Thread::current();
- HandleMark hm(thread);
-
- // Try the max level and let the directives be applied during the compilation.
- int comp_level = CompilationPolicy::highest_compile_level();
- RelaxedNMethodIterator iter(RelaxedNMethodIterator::not_unloading);
- while(iter.next()) {
- nmethod* nm = iter.method();
- methodHandle mh(thread, nm->method());
- if (mh->has_matching_directives()) {
- ResourceMark rm;
- mh->clear_directive_flags();
- bool deopt = false;
-
- if (!nm->is_osr_method()) {
- log_trace(codecache)("Recompile to level %d because of matching directives %s",
- comp_level, mh->external_name());
- nmethod * comp_nm = CompileBroker::compile_method(mh, InvocationEntryBci, comp_level,
- methodHandle(), 0,
- CompileTask::Reason_DirectivesChanged,
- (JavaThread*)thread);
- if (comp_nm == nullptr) {
- log_trace(codecache)("Recompilation to level %d failed, deoptimize %s",
- comp_level, mh->external_name());
- deopt = true;
- }
- } else {
- log_trace(codecache)("Deoptimize OSR %s", mh->external_name());
- deopt = true;
- }
- // For some reason the method cannot be compiled by C2, e.g. the new directives forbid it.
- // Deoptimize the method and let the usual hotspot logic do the rest.
- if (deopt) {
- if (!nm->has_been_deoptimized() && nm->can_be_deoptimized()) {
- nm->make_not_entrant();
- nm->make_deoptimized();
- }
- }
- gc_on_allocation(); // Flush unused methods from CodeCache if required.
- }
- }
-}
-
// Mark methods for deopt (if safe or possible).
void CodeCache::mark_all_nmethods_for_deoptimization(DeoptimizationScope* deopt_scope) {
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
diff --git a/src/hotspot/share/code/codeCache.hpp b/src/hotspot/share/code/codeCache.hpp
index 7d8835abd11..8fcbf32b644 100644
--- a/src/hotspot/share/code/codeCache.hpp
+++ b/src/hotspot/share/code/codeCache.hpp
@@ -295,9 +295,6 @@ class CodeCache : AllStatic {
static void mark_for_deoptimization(DeoptimizationScope* deopt_scope, Method* dependee);
static void make_marked_nmethods_deoptimized();
- static void mark_directives_matches(bool top_only = false);
- static void recompile_marked_directives_matches();
-
// Marks dependents during classloading
static void mark_dependents_on(DeoptimizationScope* deopt_scope, InstanceKlass* dependee);
diff --git a/src/hotspot/share/code/compiledIC.hpp b/src/hotspot/share/code/compiledIC.hpp
index 4439ff958f7..22b93c1760a 100644
--- a/src/hotspot/share/code/compiledIC.hpp
+++ b/src/hotspot/share/code/compiledIC.hpp
@@ -28,6 +28,7 @@
#include "code/nativeInst.hpp"
#include "interpreter/linkResolver.hpp"
#include "runtime/safepointVerifiers.hpp"
+#include "opto/c2_MacroAssembler.hpp"
//-----------------------------------------------------------------------------
// The CompiledIC represents a compiled inline cache.
@@ -185,7 +186,7 @@ private:
public:
// Returns null if CodeBuffer::expand fails
- static address emit_to_interp_stub(CodeBuffer &cbuf, address mark = nullptr);
+ static address emit_to_interp_stub(MacroAssembler *masm, address mark = nullptr);
static int to_interp_stub_size();
static int to_trampoline_stub_size();
static int reloc_to_interp_stub();
diff --git a/src/hotspot/share/code/debugInfoRec.cpp b/src/hotspot/share/code/debugInfoRec.cpp
index 85be80dbf0b..71da21f1e99 100644
--- a/src/hotspot/share/code/debugInfoRec.cpp
+++ b/src/hotspot/share/code/debugInfoRec.cpp
@@ -244,14 +244,11 @@ static
struct dir_stats_struct {
int chunks_queried;
int chunks_shared;
- int chunks_reshared;
int chunks_elided;
void print() {
- tty->print_cr("Debug Data Chunks: %d, shared %d+%d, non-SP's elided %d",
- chunks_queried,
- chunks_shared, chunks_reshared,
- chunks_elided);
+ tty->print_cr("Debug Data Chunks: %d, shared %d, non-SP's elided %d",
+ chunks_queried, chunks_shared, chunks_elided);
}
} dir_stats;
#endif //PRODUCT
diff --git a/src/hotspot/share/code/dependencies.cpp b/src/hotspot/share/code/dependencies.cpp
index 93a2bf22ba5..26e53ed9027 100644
--- a/src/hotspot/share/code/dependencies.cpp
+++ b/src/hotspot/share/code/dependencies.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -388,9 +388,7 @@ void Dependencies::copy_to(nmethod* nm) {
address beg = nm->dependencies_begin();
address end = nm->dependencies_end();
guarantee(end - beg >= (ptrdiff_t) size_in_bytes(), "bad sizing");
- Copy::disjoint_words((HeapWord*) content_bytes(),
- (HeapWord*) beg,
- size_in_bytes() / sizeof(HeapWord));
+ (void)memcpy(beg, content_bytes(), size_in_bytes());
assert(size_in_bytes() % sizeof(HeapWord) == 0, "copy by words");
}
diff --git a/src/hotspot/share/code/nmethod.cpp b/src/hotspot/share/code/nmethod.cpp
index 51ba872b3ac..8cc36ba1b64 100644
--- a/src/hotspot/share/code/nmethod.cpp
+++ b/src/hotspot/share/code/nmethod.cpp
@@ -36,6 +36,7 @@
#include "compiler/compileLog.hpp"
#include "compiler/compileTask.hpp"
#include "compiler/compilerDirectives.hpp"
+#include "compiler/compilerOracle.hpp"
#include "compiler/directivesParser.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.inline.hpp"
@@ -109,6 +110,11 @@
#endif
+// Cast from int value to narrow type
+#define CHECKED_CAST(result, T, thing) \
+ result = static_cast<T>(thing); \
+ assert(static_cast<int>(result) == thing, "failed: %d != %d", static_cast<int>(result), thing);
+
//---------------------------------------------------------------------------------
// NMethod statistics
// They are printed under various flags, including:
@@ -120,32 +126,33 @@
// and make it simpler to print from the debugger.
struct java_nmethod_stats_struct {
uint nmethod_count;
- uint total_size;
+ uint total_nm_size;
+ uint total_immut_size;
uint relocation_size;
uint consts_size;
uint insts_size;
+ uint inline_insts_size;
uint stub_size;
- uint scopes_data_size;
- uint scopes_pcs_size;
+ uint oops_size;
+ uint metadata_size;
uint dependencies_size;
- uint handler_table_size;
uint nul_chk_table_size;
+ uint handler_table_size;
+ uint scopes_pcs_size;
+ uint scopes_data_size;
#if INCLUDE_JVMCI
uint speculations_size;
uint jvmci_data_size;
#endif
- uint oops_size;
- uint metadata_size;
-
- uint size_gt_32k;
- int size_max;
void note_nmethod(nmethod* nm) {
nmethod_count += 1;
- total_size += nm->size();
+ total_nm_size += nm->size();
+ total_immut_size += nm->immutable_data_size();
relocation_size += nm->relocation_size();
consts_size += nm->consts_size();
insts_size += nm->insts_size();
+ inline_insts_size += nm->inline_insts_size();
stub_size += nm->stub_size();
oops_size += nm->oops_size();
metadata_size += nm->metadata_size();
@@ -158,33 +165,68 @@ struct java_nmethod_stats_struct {
speculations_size += nm->speculations_size();
jvmci_data_size += nm->jvmci_data_size();
#endif
- int short_pos_max = ((1<<15) - 1);
- if (nm->size() > short_pos_max) size_gt_32k++;
- if (nm->size() > size_max) size_max = nm->size();
}
void print_nmethod_stats(const char* name) {
if (nmethod_count == 0) return;
tty->print_cr("Statistics for %u bytecoded nmethods for %s:", nmethod_count, name);
- if (total_size != 0) tty->print_cr(" total in heap = %u (100%%)", total_size);
+ uint total_size = total_nm_size + total_immut_size;
+ if (total_nm_size != 0) {
+ tty->print_cr(" total size = %u (100%%)", total_size);
+ tty->print_cr(" in CodeCache = %u (%f%%)", total_nm_size, (total_nm_size * 100.0f)/total_size);
+ }
uint header_size = (uint)(nmethod_count * sizeof(nmethod));
- if (nmethod_count != 0) tty->print_cr(" header = %u (%f%%)", header_size, (header_size * 100.0f)/total_size);
- if (relocation_size != 0) tty->print_cr(" relocation = %u (%f%%)", relocation_size, (relocation_size * 100.0f)/total_size);
- if (consts_size != 0) tty->print_cr(" constants = %u (%f%%)", consts_size, (consts_size * 100.0f)/total_size);
- if (insts_size != 0) tty->print_cr(" main code = %u (%f%%)", insts_size, (insts_size * 100.0f)/total_size);
- if (stub_size != 0) tty->print_cr(" stub code = %u (%f%%)", stub_size, (stub_size * 100.0f)/total_size);
- if (oops_size != 0) tty->print_cr(" oops = %u (%f%%)", oops_size, (oops_size * 100.0f)/total_size);
- if (metadata_size != 0) tty->print_cr(" metadata = %u (%f%%)", metadata_size, (metadata_size * 100.0f)/total_size);
- if (scopes_data_size != 0) tty->print_cr(" scopes data = %u (%f%%)", scopes_data_size, (scopes_data_size * 100.0f)/total_size);
- if (scopes_pcs_size != 0) tty->print_cr(" scopes pcs = %u (%f%%)", scopes_pcs_size, (scopes_pcs_size * 100.0f)/total_size);
- if (dependencies_size != 0) tty->print_cr(" dependencies = %u (%f%%)", dependencies_size, (dependencies_size * 100.0f)/total_size);
- if (handler_table_size != 0) tty->print_cr(" handler table = %u (%f%%)", handler_table_size, (handler_table_size * 100.0f)/total_size);
- if (nul_chk_table_size != 0) tty->print_cr(" nul chk table = %u (%f%%)", nul_chk_table_size, (nul_chk_table_size * 100.0f)/total_size);
+ if (nmethod_count != 0) {
+ tty->print_cr(" header = %u (%f%%)", header_size, (header_size * 100.0f)/total_nm_size);
+ }
+ if (relocation_size != 0) {
+ tty->print_cr(" relocation = %u (%f%%)", relocation_size, (relocation_size * 100.0f)/total_nm_size);
+ }
+ if (consts_size != 0) {
+ tty->print_cr(" constants = %u (%f%%)", consts_size, (consts_size * 100.0f)/total_nm_size);
+ }
+ if (insts_size != 0) {
+ tty->print_cr(" main code = %u (%f%%)", insts_size, (insts_size * 100.0f)/total_nm_size);
+ }
+ if (inline_insts_size != 0) {
+ tty->print_cr(" inline code = %u (%f%%)", inline_insts_size, (inline_insts_size * 100.0f)/total_nm_size);
+ }
+ if (stub_size != 0) {
+ tty->print_cr(" stub code = %u (%f%%)", stub_size, (stub_size * 100.0f)/total_nm_size);
+ }
+ if (oops_size != 0) {
+ tty->print_cr(" oops = %u (%f%%)", oops_size, (oops_size * 100.0f)/total_nm_size);
+ }
+ if (metadata_size != 0) {
+ tty->print_cr(" metadata = %u (%f%%)", metadata_size, (metadata_size * 100.0f)/total_nm_size);
+ }
#if INCLUDE_JVMCI
- if (speculations_size != 0) tty->print_cr(" speculations = %u (%f%%)", speculations_size, (speculations_size * 100.0f)/total_size);
- if (jvmci_data_size != 0) tty->print_cr(" JVMCI data = %u (%f%%)", jvmci_data_size, (jvmci_data_size * 100.0f)/total_size);
+ if (jvmci_data_size != 0) {
+ tty->print_cr(" JVMCI data = %u (%f%%)", jvmci_data_size, (jvmci_data_size * 100.0f)/total_nm_size);
+ }
+#endif
+ if (total_immut_size != 0) {
+ tty->print_cr(" immutable data = %u (%f%%)", total_immut_size, (total_immut_size * 100.0f)/total_size);
+ }
+ if (dependencies_size != 0) {
+ tty->print_cr(" dependencies = %u (%f%%)", dependencies_size, (dependencies_size * 100.0f)/total_immut_size);
+ }
+ if (nul_chk_table_size != 0) {
+ tty->print_cr(" nul chk table = %u (%f%%)", nul_chk_table_size, (nul_chk_table_size * 100.0f)/total_immut_size);
+ }
+ if (handler_table_size != 0) {
+ tty->print_cr(" handler table = %u (%f%%)", handler_table_size, (handler_table_size * 100.0f)/total_immut_size);
+ }
+ if (scopes_pcs_size != 0) {
+ tty->print_cr(" scopes pcs = %u (%f%%)", scopes_pcs_size, (scopes_pcs_size * 100.0f)/total_immut_size);
+ }
+ if (scopes_data_size != 0) {
+ tty->print_cr(" scopes data = %u (%f%%)", scopes_data_size, (scopes_data_size * 100.0f)/total_immut_size);
+ }
+#if INCLUDE_JVMCI
+ if (speculations_size != 0) {
+ tty->print_cr(" speculations = %u (%f%%)", speculations_size, (speculations_size * 100.0f)/total_immut_size);
+ }
#endif
- if (size_gt_32k != 0) tty->print_cr(" size > 32k = %u", size_gt_32k);
- if (size_max != 0) tty->print_cr(" max size = %d", size_max);
}
};
@@ -215,7 +257,7 @@ struct native_nmethod_stats_struct {
};
struct pc_nmethod_stats_struct {
- uint pc_desc_resets; // number of resets (= number of caches)
+ uint pc_desc_init; // number of initialization of cache (= number of caches)
uint pc_desc_queries; // queries to nmethod::find_pc_desc
uint pc_desc_approx; // number of those which have approximate true
uint pc_desc_repeats; // number of _pc_descs[0] hits
@@ -230,7 +272,7 @@ struct pc_nmethod_stats_struct {
(double)(pc_desc_tests + pc_desc_searches)
/ pc_desc_queries);
tty->print_cr(" caches=%d queries=%u/%u, hits=%u+%u, tests=%u+%u, adds=%u",
- pc_desc_resets,
+ pc_desc_init,
pc_desc_queries, pc_desc_approx,
pc_desc_repeats, pc_desc_hits,
pc_desc_tests, pc_desc_searches, pc_desc_adds);
@@ -348,28 +390,24 @@ void ExceptionCache::set_next(ExceptionCache *ec) {
// Helper used by both find_pc_desc methods.
static inline bool match_desc(PcDesc* pc, int pc_offset, bool approximate) {
NOT_PRODUCT(++pc_nmethod_stats.pc_desc_tests);
- if (!approximate)
+ if (!approximate) {
return pc->pc_offset() == pc_offset;
- else
+ } else {
return (pc-1)->pc_offset() < pc_offset && pc_offset <= pc->pc_offset();
+ }
}
-void PcDescCache::reset_to(PcDesc* initial_pc_desc) {
- if (initial_pc_desc == nullptr) {
- _pc_descs[0] = nullptr; // native method; no PcDescs at all
- return;
- }
- NOT_PRODUCT(++pc_nmethod_stats.pc_desc_resets);
- // reset the cache by filling it with benign (non-null) values
- assert(initial_pc_desc->pc_offset() < 0, "must be sentinel");
- for (int i = 0; i < cache_size; i++)
+void PcDescCache::init_to(PcDesc* initial_pc_desc) {
+ NOT_PRODUCT(++pc_nmethod_stats.pc_desc_init);
+ // initialize the cache by filling it with benign (non-null) values
+ assert(initial_pc_desc != nullptr && initial_pc_desc->pc_offset() == PcDesc::lower_offset_limit,
+ "must start with a sentinel");
+ for (int i = 0; i < cache_size; i++) {
_pc_descs[i] = initial_pc_desc;
+ }
}
PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {
- NOT_PRODUCT(++pc_nmethod_stats.pc_desc_queries);
- NOT_PRODUCT(if (approximate) ++pc_nmethod_stats.pc_desc_approx);
-
// Note: one might think that caching the most recently
// read value separately would be a win, but one would be
// wrong. When many threads are updating it, the cache
@@ -382,8 +420,10 @@ PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {
// Step one: Check the most recently added value.
res = _pc_descs[0];
- if (res == nullptr) return nullptr; // native method; no PcDescs at all
- if (match_desc(res, pc_offset, approximate)) {
+ assert(res != nullptr, "PcDesc cache should be initialized already");
+
+ // Approximate only here since PcDescContainer::find_pc_desc() checked for exact case.
+ if (approximate && match_desc(res, pc_offset, approximate)) {
NOT_PRODUCT(++pc_nmethod_stats.pc_desc_repeats);
return res;
}
@@ -403,7 +443,6 @@ PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {
}
void PcDescCache::add_pc_desc(PcDesc* pc_desc) {
- MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, Thread::current());)
NOT_PRODUCT(++pc_nmethod_stats.pc_desc_adds);
// Update the LRU cache by shifting pc_desc forward.
for (int i = 0; i < cache_size; i++) {
@@ -1004,27 +1043,6 @@ const char* nmethod::compiler_name() const {
return compilertype2name(_compiler_type);
}
-// Fill in default values for various flag fields
-void nmethod::init_defaults() {
- // avoid uninitialized fields, even for short time periods
- _exception_cache = nullptr;
-
- _has_unsafe_access = 0;
- _has_method_handle_invokes = 0;
- _has_wide_vectors = 0;
- _has_monitors = 0;
-
- _state = not_installed;
- _has_flushed_dependencies = 0;
- _load_reported = false; // jvmti state
-
- _oops_do_mark_link = nullptr;
- _osr_link = nullptr;
-#if INCLUDE_RTM_OPT
- _rtm_state = NoRTM;
-#endif
-}
-
#ifdef ASSERT
class CheckForOopsClosure : public OopClosure {
bool _found_oop = false;
@@ -1132,31 +1150,40 @@ nmethod* nmethod::new_nmethod(const methodHandle& method,
code_buffer->finalize_oop_references(method);
// create nmethod
nmethod* nm = nullptr;
+ int nmethod_size = CodeBlob::allocation_size(code_buffer, sizeof(nmethod));
#if INCLUDE_JVMCI
- int jvmci_data_size = compiler->is_jvmci() ? jvmci_data->size() : 0;
+ if (compiler->is_jvmci()) {
+ nmethod_size += align_up(jvmci_data->size(), oopSize);
+ }
#endif
- int nmethod_size =
- CodeBlob::allocation_size(code_buffer, sizeof(nmethod))
- + adjust_pcs_size(debug_info->pcs_size())
+
+ int immutable_data_size =
+ adjust_pcs_size(debug_info->pcs_size())
+ align_up((int)dependencies->size_in_bytes(), oopSize)
+ align_up(handler_table->size_in_bytes() , oopSize)
+ align_up(nul_chk_table->size_in_bytes() , oopSize)
#if INCLUDE_JVMCI
+ align_up(speculations_len , oopSize)
- + align_up(jvmci_data_size , oopSize)
#endif
+ align_up(debug_info->data_size() , oopSize);
+
+ // First, allocate space for immutable data in C heap.
+ address immutable_data = nullptr;
+ if (immutable_data_size > 0) {
+ immutable_data = (address)os::malloc(immutable_data_size, mtCode);
+ if (immutable_data == nullptr) {
+ vm_exit_out_of_memory(immutable_data_size, OOM_MALLOC_ERROR, "nmethod: no space for immutable data");
+ return nullptr;
+ }
+ }
{
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
nm = new (nmethod_size, comp_level)
- nmethod(method(), compiler->type(), nmethod_size, compile_id, entry_bci, offsets,
- orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
- oop_maps,
- handler_table,
- nul_chk_table,
- compiler,
- comp_level
+ nmethod(method(), compiler->type(), nmethod_size, immutable_data_size,
+ compile_id, entry_bci, immutable_data, offsets, orig_pc_offset,
+ debug_info, dependencies, code_buffer, frame_size, oop_maps,
+ handler_table, nul_chk_table, compiler, comp_level
#if INCLUDE_JVMCI
, speculations,
speculations_len,
@@ -1199,6 +1226,61 @@ nmethod* nmethod::new_nmethod(const methodHandle& method,
return nm;
}
+// Fill in default values for various fields
+void nmethod::init_defaults(CodeBuffer *code_buffer, CodeOffsets* offsets) {
+ // avoid uninitialized fields, even for short time periods
+ _exception_cache = nullptr;
+ _gc_data = nullptr;
+ _oops_do_mark_link = nullptr;
+ _compiled_ic_data = nullptr;
+
+#if INCLUDE_RTM_OPT
+ _rtm_state = NoRTM;
+#endif
+ _is_unloading_state = 0;
+ _state = not_installed;
+
+ _has_unsafe_access = 0;
+ _has_method_handle_invokes = 0;
+ _has_wide_vectors = 0;
+ _has_monitors = 0;
+ _has_flushed_dependencies = 0;
+ _is_unlinked = 0;
+ _load_reported = 0; // jvmti state
+
+ _deoptimization_status = not_marked;
+
+ // SECT_CONSTS is first in code buffer so the offset should be 0.
+ int consts_offset = code_buffer->total_offset_of(code_buffer->consts());
+ assert(consts_offset == 0, "const_offset: %d", consts_offset);
+
+ _stub_offset = content_offset() + code_buffer->total_offset_of(code_buffer->stubs());
+
+ CHECKED_CAST(_entry_offset, uint16_t, (offsets->value(CodeOffsets::Entry)));
+ CHECKED_CAST(_verified_entry_offset, uint16_t, (offsets->value(CodeOffsets::Verified_Entry)));
+
+ int size = code_buffer->main_code_size();
+ assert(size >= 0, "should be initialized");
+ // Use instructions section size if it is 0 (e.g. native wrapper)
+ if (size == 0) size = code_size(); // requires _stub_offset to be set
+ assert(size <= code_size(), "incorrect size: %d > %d", size, code_size());
+ _inline_insts_size = size - _verified_entry_offset
+ - code_buffer->total_skipped_instructions_size();
+ assert(_inline_insts_size >= 0, "sanity");
+}
+
+// Post initialization
+void nmethod::post_init() {
+ clear_unloading_state();
+
+ finalize_relocations();
+
+ Universe::heap()->register_nmethod(this);
+ debug_only(Universe::heap()->verify_nmethod(this));
+
+ CodeCache::commit(this);
+}
+
// For native wrappers
nmethod::nmethod(
Method* method,
@@ -1214,68 +1296,62 @@ nmethod::nmethod(
: CodeBlob("native nmethod", CodeBlobKind::Nmethod, code_buffer, nmethod_size, sizeof(nmethod),
offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
_deoptimization_generation(0),
+ _gc_epoch(CodeCache::gc_epoch()),
_method(method),
- _gc_data(nullptr),
- _compiled_ic_data(nullptr),
- _is_unlinked(false),
_native_receiver_sp_offset(basic_lock_owner_sp_offset),
- _native_basic_lock_sp_offset(basic_lock_sp_offset),
- _is_unloading_state(0),
- _deoptimization_status(not_marked)
+ _native_basic_lock_sp_offset(basic_lock_sp_offset)
{
{
debug_only(NoSafepointVerifier nsv;)
assert_locked_or_safepoint(CodeCache_lock);
- init_defaults();
- _comp_level = CompLevel_none;
+ init_defaults(code_buffer, offsets);
+
+ _osr_entry_point = nullptr;
+ _pc_desc_container = nullptr;
_entry_bci = InvocationEntryBci;
- // We have no exception handler or deopt handler make the
- // values something that will never match a pc like the nmethod vtable entry
- _exception_offset = 0;
+ _compile_id = compile_id;
+ _comp_level = CompLevel_none;
+ _compiler_type = type;
_orig_pc_offset = 0;
+ _num_stack_arg_slots = _method->constMethod()->num_stack_arg_slots();
+
+ if (offsets->value(CodeOffsets::Exceptions) != -1) {
+ // Continuation enter intrinsic
+ _exception_offset = code_offset() + offsets->value(CodeOffsets::Exceptions);
+ } else {
+ _exception_offset = 0;
+ }
+ // Native wrappers do not have deopt handlers. Make the values
+ // something that will never match a pc like the nmethod vtable entry
_deopt_handler_offset = 0;
_deopt_mh_handler_offset = 0;
- _gc_epoch = CodeCache::gc_epoch();
+ _unwind_handler_offset = 0;
- _consts_offset = content_offset() + code_buffer->total_offset_of(code_buffer->consts());
- _stub_offset = content_offset() + code_buffer->total_offset_of(code_buffer->stubs());
- _oops_offset = data_offset();
- _metadata_offset = _oops_offset + align_up(code_buffer->total_oop_size(), oopSize);
- _scopes_data_offset = _metadata_offset + align_up(code_buffer->total_metadata_size(), wordSize);
- _scopes_pcs_offset = _scopes_data_offset;
- _dependencies_offset = _scopes_pcs_offset;
- _handler_table_offset = _dependencies_offset;
- _nul_chk_table_offset = _handler_table_offset;
- _skipped_instructions_size = code_buffer->total_skipped_instructions_size();
+ CHECKED_CAST(_metadata_offset, uint16_t, (align_up(code_buffer->total_oop_size(), oopSize)));
+ int data_end_offset = _metadata_offset + align_up(code_buffer->total_metadata_size(), wordSize);
#if INCLUDE_JVMCI
- _speculations_offset = _nul_chk_table_offset;
- _jvmci_data_offset = _speculations_offset;
- _nmethod_end_offset = _jvmci_data_offset;
-#else
- _nmethod_end_offset = _nul_chk_table_offset;
+ // jvmci_data_size is 0 in native wrapper but we need to set offset
+ // to correctly calculate metadata_end address
+ CHECKED_CAST(_jvmci_data_offset, uint16_t, data_end_offset);
#endif
- _compile_id = compile_id;
- _compiler_type = type;
- _entry_point = code_begin() + offsets->value(CodeOffsets::Entry);
- _verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry);
- _osr_entry_point = nullptr;
- _exception_cache = nullptr;
- _pc_desc_container.reset_to(nullptr);
+ assert((data_offset() + data_end_offset) <= nmethod_size, "wrong nmethod's size: %d < %d", nmethod_size, (data_offset() + data_end_offset));
- _exception_offset = code_offset() + offsets->value(CodeOffsets::Exceptions);
+ // native wrapper does not have read-only data but we need unique not null address
+ _immutable_data = data_end();
+ _immutable_data_size = 0;
+ _nul_chk_table_offset = 0;
+ _handler_table_offset = 0;
+ _scopes_pcs_offset = 0;
+ _scopes_data_offset = 0;
+#if INCLUDE_JVMCI
+ _speculations_offset = 0;
+#endif
code_buffer->copy_code_and_locs_to(this);
code_buffer->copy_values_to(this);
- clear_unloading_state();
-
- finalize_relocations();
-
- Universe::heap()->register_nmethod(this);
- debug_only(Universe::heap()->verify_nmethod(this));
-
- CodeCache::commit(this);
+ post_init();
}
if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
@@ -1333,12 +1409,15 @@ void* nmethod::operator new(size_t size, int nmethod_size, bool allow_NonNMethod
return CodeCache::allocate(nmethod_size, CodeBlobType::NonNMethod);
}
+// For normal JIT compiled code
nmethod::nmethod(
Method* method,
CompilerType type,
int nmethod_size,
+ int immutable_data_size,
int compile_id,
int entry_bci,
+ address immutable_data,
CodeOffsets* offsets,
int orig_pc_offset,
DebugInformationRecorder* debug_info,
@@ -1359,33 +1438,27 @@ nmethod::nmethod(
: CodeBlob("nmethod", CodeBlobKind::Nmethod, code_buffer, nmethod_size, sizeof(nmethod),
offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
_deoptimization_generation(0),
+ _gc_epoch(CodeCache::gc_epoch()),
_method(method),
- _gc_data(nullptr),
- _compiled_ic_data(nullptr),
- _is_unlinked(false),
- _native_receiver_sp_offset(in_ByteSize(-1)),
- _native_basic_lock_sp_offset(in_ByteSize(-1)),
- _is_unloading_state(0),
- _deoptimization_status(not_marked)
+ _osr_link(nullptr)
{
assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
{
debug_only(NoSafepointVerifier nsv;)
assert_locked_or_safepoint(CodeCache_lock);
- init_defaults();
- _entry_bci = entry_bci;
- _compile_id = compile_id;
- _compiler_type = type;
- _comp_level = comp_level;
- _orig_pc_offset = orig_pc_offset;
- _gc_epoch = CodeCache::gc_epoch();
+ init_defaults(code_buffer, offsets);
- // Section offsets
- _consts_offset = content_offset() + code_buffer->total_offset_of(code_buffer->consts());
- _stub_offset = content_offset() + code_buffer->total_offset_of(code_buffer->stubs());
- set_ctable_begin(header_begin() + _consts_offset);
- _skipped_instructions_size = code_buffer->total_skipped_instructions_size();
+ _osr_entry_point = code_begin() + offsets->value(CodeOffsets::OSR_Entry);
+ _entry_bci = entry_bci;
+ _compile_id = compile_id;
+ _comp_level = comp_level;
+ _compiler_type = type;
+ _orig_pc_offset = orig_pc_offset;
+
+ _num_stack_arg_slots = entry_bci != InvocationEntryBci ? 0 : _method->constMethod()->num_stack_arg_slots();
+
+ set_ctable_begin(header_begin() + content_offset());
#if INCLUDE_JVMCI
if (compiler->is_jvmci()) {
@@ -1421,40 +1494,59 @@ nmethod::nmethod(
}
}
if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
- _unwind_handler_offset = code_offset() + offsets->value(CodeOffsets::UnwindHandler);
+ // C1 generates UnwindHandler at the end of instructions section.
+ // Calculate positive offset as distance between the start of stubs section
+ // (which is also the end of instructions section) and the start of the handler.
+ int unwind_handler_offset = code_offset() + offsets->value(CodeOffsets::UnwindHandler);
+ CHECKED_CAST(_unwind_handler_offset, int16_t, (_stub_offset - unwind_handler_offset));
} else {
_unwind_handler_offset = -1;
}
+ CHECKED_CAST(_metadata_offset, uint16_t, (align_up(code_buffer->total_oop_size(), oopSize)));
+ int metadata_end_offset = _metadata_offset + align_up(code_buffer->total_metadata_size(), wordSize);
- _oops_offset = data_offset();
- _metadata_offset = _oops_offset + align_up(code_buffer->total_oop_size(), oopSize);
- _scopes_data_offset = _metadata_offset + align_up(code_buffer->total_metadata_size(), wordSize);
-
- _scopes_pcs_offset = _scopes_data_offset + align_up(debug_info->data_size (), oopSize);
- _dependencies_offset = _scopes_pcs_offset + adjust_pcs_size(debug_info->pcs_size());
- _handler_table_offset = _dependencies_offset + align_up((int)dependencies->size_in_bytes(), oopSize);
- _nul_chk_table_offset = _handler_table_offset + align_up(handler_table->size_in_bytes(), oopSize);
#if INCLUDE_JVMCI
- _speculations_offset = _nul_chk_table_offset + align_up(nul_chk_table->size_in_bytes(), oopSize);
- _jvmci_data_offset = _speculations_offset + align_up(speculations_len, oopSize);
- int jvmci_data_size = compiler->is_jvmci() ? jvmci_data->size() : 0;
- _nmethod_end_offset = _jvmci_data_offset + align_up(jvmci_data_size, oopSize);
+ CHECKED_CAST(_jvmci_data_offset, uint16_t, metadata_end_offset);
+ int jvmci_data_size = compiler->is_jvmci() ? jvmci_data->size() : 0;
+ DEBUG_ONLY( int data_end_offset = _jvmci_data_offset + align_up(jvmci_data_size, oopSize); )
#else
- _nmethod_end_offset = _nul_chk_table_offset + align_up(nul_chk_table->size_in_bytes(), oopSize);
+ DEBUG_ONLY( int data_end_offset = metadata_end_offset; )
#endif
- _entry_point = code_begin() + offsets->value(CodeOffsets::Entry);
- _verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry);
- _osr_entry_point = code_begin() + offsets->value(CodeOffsets::OSR_Entry);
- _exception_cache = nullptr;
+ assert((data_offset() + data_end_offset) <= nmethod_size, "wrong nmethod's size: %d > %d",
+ (data_offset() + data_end_offset), nmethod_size);
- _pc_desc_container.reset_to(scopes_pcs_begin());
+ _immutable_data_size = immutable_data_size;
+ if (immutable_data_size > 0) {
+ assert(immutable_data != nullptr, "required");
+ _immutable_data = immutable_data;
+ } else {
+ // We need unique not null address
+ _immutable_data = data_end();
+ }
+ CHECKED_CAST(_nul_chk_table_offset, uint16_t, (align_up((int)dependencies->size_in_bytes(), oopSize)));
+ CHECKED_CAST(_handler_table_offset, uint16_t, (_nul_chk_table_offset + align_up(nul_chk_table->size_in_bytes(), oopSize)));
+ _scopes_pcs_offset = _handler_table_offset + align_up(handler_table->size_in_bytes(), oopSize);
+ _scopes_data_offset = _scopes_pcs_offset + adjust_pcs_size(debug_info->pcs_size());
+#if INCLUDE_JVMCI
+ _speculations_offset = _scopes_data_offset + align_up(debug_info->data_size(), oopSize);
+ DEBUG_ONLY( int immutable_data_end_offset = _speculations_offset + align_up(speculations_len, oopSize); )
+#else
+ DEBUG_ONLY( int immutable_data_end_offset = _scopes_data_offset + align_up(debug_info->data_size(), oopSize); )
+#endif
+ assert(immutable_data_end_offset <= immutable_data_size, "wrong read-only data size: %d > %d",
+ immutable_data_end_offset, immutable_data_size);
+
+ // Copy code and relocation info
code_buffer->copy_code_and_locs_to(this);
- // Copy contents of ScopeDescRecorder to nmethod
+ // Copy oops and metadata
code_buffer->copy_values_to(this);
- debug_info->copy_to(this);
dependencies->copy_to(this);
- clear_unloading_state();
+ // Copy PcDesc and ScopeDesc data
+ debug_info->copy_to(this);
+
+ // Create cache after PcDesc data is copied - it will be used to initialize cache
+ _pc_desc_container = new PcDescContainer(scopes_pcs_begin());
#if INCLUDE_JVMCI
if (compiler->is_jvmci()) {
@@ -1463,13 +1555,6 @@ nmethod::nmethod(
}
#endif
- finalize_relocations();
-
- Universe::heap()->register_nmethod(this);
- debug_only(Universe::heap()->verify_nmethod(this));
-
- CodeCache::commit(this);
-
// Copy contents of ExceptionHandlerTable to nmethod
handler_table->copy_to(this);
nul_chk_table->copy_to(this);
@@ -1481,10 +1566,12 @@ nmethod::nmethod(
}
#endif
+ post_init();
+
// we use the information of entry points to find out if a method is
// static or non static
assert(compiler->is_c2() || compiler->is_jvmci() ||
- _method->is_static() == (entry_point() == _verified_entry_point),
+ _method->is_static() == (entry_point() == verified_entry_point()),
" entry points must be same for static methods and vice versa");
}
}
@@ -1613,15 +1700,15 @@ void nmethod::print_nmethod(bool printmethod) {
#if defined(SUPPORT_DATA_STRUCTS)
if (AbstractDisassembler::show_structs()) {
methodHandle mh(Thread::current(), _method);
- if (printmethod || PrintDebugInfo || CompilerOracle::has_option(mh, CompileCommand::PrintDebugInfo)) {
+ if (printmethod || PrintDebugInfo || CompilerOracle::has_option(mh, CompileCommandEnum::PrintDebugInfo)) {
print_scopes();
tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
}
- if (printmethod || PrintRelocations || CompilerOracle::has_option(mh, CompileCommand::PrintRelocations)) {
+ if (printmethod || PrintRelocations || CompilerOracle::has_option(mh, CompileCommandEnum::PrintRelocations)) {
print_relocations();
tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
}
- if (printmethod || PrintDependencies || CompilerOracle::has_option(mh, CompileCommand::PrintDependencies)) {
+ if (printmethod || PrintDependencies || CompilerOracle::has_option(mh, CompileCommandEnum::PrintDependencies)) {
print_dependencies_on(tty);
tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
}
@@ -1997,7 +2084,7 @@ bool nmethod::make_not_entrant() {
// For concurrent GCs, there must be a handshake between unlink and flush
void nmethod::unlink() {
- if (_is_unlinked) {
+ if (is_unlinked()) {
// Already unlinked.
return;
}
@@ -2031,13 +2118,12 @@ void nmethod::unlink() {
ClassUnloadingContext::context()->register_unlinked_nmethod(this);
}
-void nmethod::purge(bool free_code_cache_data, bool unregister_nmethod) {
- assert(!free_code_cache_data, "must only call not freeing code cache data");
+void nmethod::purge(bool unregister_nmethod) {
MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);
// completely deallocate this method
- Events::log(Thread::current(), "flushing nmethod " INTPTR_FORMAT, p2i(this));
+ Events::log_nmethod_flush(Thread::current(), "flushing %s nmethod " INTPTR_FORMAT, is_osr_method() ? "osr" : "", p2i(this));
log_debug(codecache)("*flushing %s nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT
"/Free CodeCache:" SIZE_FORMAT "Kb",
is_osr_method() ? "osr" : "",_compile_id, p2i(this), CodeCache::blob_count(),
@@ -2052,15 +2138,21 @@ void nmethod::purge(bool free_code_cache_data, bool unregister_nmethod) {
delete ec;
ec = next;
}
-
+ if (_pc_desc_container != nullptr) {
+ delete _pc_desc_container;
+ }
delete[] _compiled_ic_data;
+ if (_immutable_data != data_end()) {
+ os::free(_immutable_data);
+ _immutable_data = data_end(); // Valid not null address
+ }
if (unregister_nmethod) {
Universe::heap()->unregister_nmethod(this);
}
CodeCache::unregister_old_nmethod(this);
- CodeBlob::purge(free_code_cache_data, unregister_nmethod);
+ CodeBlob::purge();
}
oop nmethod::oop_at(int index) const {
@@ -2627,18 +2719,19 @@ void nmethod::copy_scopes_data(u_char* buffer, int size) {
}
#ifdef ASSERT
-static PcDesc* linear_search(const PcDescSearch& search, int pc_offset, bool approximate) {
- PcDesc* lower = search.scopes_pcs_begin();
- PcDesc* upper = search.scopes_pcs_end();
- lower += 1; // exclude initial sentinel
+static PcDesc* linear_search(int pc_offset, bool approximate, PcDesc* lower, PcDesc* upper) {
PcDesc* res = nullptr;
- for (PcDesc* p = lower; p < upper; p++) {
+ assert(lower != nullptr && lower->pc_offset() == PcDesc::lower_offset_limit,
+ "must start with a sentinel");
+ // lower + 1 to exclude initial sentinel
+ for (PcDesc* p = lower + 1; p < upper; p++) {
NOT_PRODUCT(--pc_nmethod_stats.pc_desc_tests); // don't count this call to match_desc
if (match_desc(p, pc_offset, approximate)) {
- if (res == nullptr)
+ if (res == nullptr) {
res = p;
- else
+ } else {
res = (PcDesc*) badAddress;
+ }
}
}
return res;
@@ -2646,20 +2739,39 @@ static PcDesc* linear_search(const PcDescSearch& search, int pc_offset, bool app
#endif
+#ifndef PRODUCT
+// Version of method to collect statistic
+PcDesc* PcDescContainer::find_pc_desc(address pc, bool approximate, address code_begin,
+ PcDesc* lower, PcDesc* upper) {
+ ++pc_nmethod_stats.pc_desc_queries;
+ if (approximate) ++pc_nmethod_stats.pc_desc_approx;
+
+ PcDesc* desc = _pc_desc_cache.last_pc_desc();
+ assert(desc != nullptr, "PcDesc cache should be initialized already");
+ if (desc->pc_offset() == (pc - code_begin)) {
+ // Cached value matched
+ ++pc_nmethod_stats.pc_desc_tests;
+ ++pc_nmethod_stats.pc_desc_repeats;
+ return desc;
+ }
+ return find_pc_desc_internal(pc, approximate, code_begin, lower, upper);
+}
+#endif
+
// Finds a PcDesc with real-pc equal to "pc"
-PcDesc* PcDescContainer::find_pc_desc_internal(address pc, bool approximate, const PcDescSearch& search) {
- address base_address = search.code_begin();
- if ((pc < base_address) ||
- (pc - base_address) >= (ptrdiff_t) PcDesc::upper_offset_limit) {
+PcDesc* PcDescContainer::find_pc_desc_internal(address pc, bool approximate, address code_begin,
+ PcDesc* lower_incl, PcDesc* upper_incl) {
+ if ((pc < code_begin) ||
+ (pc - code_begin) >= (ptrdiff_t) PcDesc::upper_offset_limit) {
return nullptr; // PC is wildly out of range
}
- int pc_offset = (int) (pc - base_address);
+ int pc_offset = (int) (pc - code_begin);
// Check the PcDesc cache if it contains the desired PcDesc
// (This as an almost 100% hit rate.)
PcDesc* res = _pc_desc_cache.find_pc_desc(pc_offset, approximate);
if (res != nullptr) {
- assert(res == linear_search(search, pc_offset, approximate), "cache ok");
+ assert(res == linear_search(pc_offset, approximate, lower_incl, upper_incl), "cache ok");
return res;
}
@@ -2667,10 +2779,9 @@ PcDesc* PcDescContainer::find_pc_desc_internal(address pc, bool approximate, con
// Find the last pc_offset less than the given offset.
// The successor must be the required match, if there is a match at all.
// (Use a fixed radix to avoid expensive affine pointer arithmetic.)
- PcDesc* lower = search.scopes_pcs_begin();
- PcDesc* upper = search.scopes_pcs_end();
- upper -= 1; // exclude final sentinel
- if (lower >= upper) return nullptr; // native method; no PcDescs at all
+ PcDesc* lower = lower_incl; // this is initial sentinel
+ PcDesc* upper = upper_incl - 1; // exclude final sentinel
+ if (lower >= upper) return nullptr; // no PcDescs at all
#define assert_LU_OK \
/* invariant on lower..upper during the following search: */ \
@@ -2719,7 +2830,7 @@ PcDesc* PcDescContainer::find_pc_desc_internal(address pc, bool approximate, con
#undef assert_LU_OK
if (match_desc(upper, pc_offset, approximate)) {
- assert(upper == linear_search(search, pc_offset, approximate), "search ok");
+ assert(upper == linear_search(pc_offset, approximate, lower_incl, upper_incl), "search mismatch");
if (!Thread::current_in_asgct()) {
// we don't want to modify the cache if we're in ASGCT
// which is typically called in a signal handler
@@ -2727,7 +2838,7 @@ PcDesc* PcDescContainer::find_pc_desc_internal(address pc, bool approximate, con
}
return upper;
} else {
- assert(nullptr == linear_search(search, pc_offset, approximate), "search ok");
+ assert(nullptr == linear_search(pc_offset, approximate, lower_incl, upper_incl), "search mismatch");
return nullptr;
}
}
@@ -2991,35 +3102,41 @@ void nmethod::print(outputStream* st) const {
p2i(metadata_begin()),
p2i(metadata_end()),
metadata_size());
- if (scopes_data_size () > 0) st->print_cr(" scopes data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
- p2i(scopes_data_begin()),
- p2i(scopes_data_end()),
- scopes_data_size());
- if (scopes_pcs_size () > 0) st->print_cr(" scopes pcs [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
- p2i(scopes_pcs_begin()),
- p2i(scopes_pcs_end()),
- scopes_pcs_size());
+#if INCLUDE_JVMCI
+ if (jvmci_data_size () > 0) st->print_cr(" JVMCI data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+ p2i(jvmci_data_begin()),
+ p2i(jvmci_data_end()),
+ jvmci_data_size());
+#endif
+ if (immutable_data_size() > 0) st->print_cr(" immutable data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+ p2i(immutable_data_begin()),
+ p2i(immutable_data_end()),
+ immutable_data_size());
if (dependencies_size () > 0) st->print_cr(" dependencies [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
p2i(dependencies_begin()),
p2i(dependencies_end()),
dependencies_size());
- if (handler_table_size() > 0) st->print_cr(" handler table [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
- p2i(handler_table_begin()),
- p2i(handler_table_end()),
- handler_table_size());
if (nul_chk_table_size() > 0) st->print_cr(" nul chk table [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
p2i(nul_chk_table_begin()),
p2i(nul_chk_table_end()),
nul_chk_table_size());
+ if (handler_table_size() > 0) st->print_cr(" handler table [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+ p2i(handler_table_begin()),
+ p2i(handler_table_end()),
+ handler_table_size());
+ if (scopes_pcs_size () > 0) st->print_cr(" scopes pcs [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+ p2i(scopes_pcs_begin()),
+ p2i(scopes_pcs_end()),
+ scopes_pcs_size());
+ if (scopes_data_size () > 0) st->print_cr(" scopes data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
+ p2i(scopes_data_begin()),
+ p2i(scopes_data_end()),
+ scopes_data_size());
#if INCLUDE_JVMCI
if (speculations_size () > 0) st->print_cr(" speculations [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
p2i(speculations_begin()),
p2i(speculations_end()),
speculations_size());
- if (jvmci_data_size () > 0) st->print_cr(" JVMCI data [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d",
- p2i(jvmci_data_begin()),
- p2i(jvmci_data_end()),
- jvmci_data_size());
#endif
}
diff --git a/src/hotspot/share/code/nmethod.hpp b/src/hotspot/share/code/nmethod.hpp
index e4552992f4b..0677aeec14e 100644
--- a/src/hotspot/share/code/nmethod.hpp
+++ b/src/hotspot/share/code/nmethod.hpp
@@ -101,45 +101,34 @@ class PcDescCache {
volatile PcDescPtr _pc_descs[cache_size]; // last cache_size pc_descs found
public:
PcDescCache() { debug_only(_pc_descs[0] = nullptr); }
- void reset_to(PcDesc* initial_pc_desc);
+ void init_to(PcDesc* initial_pc_desc);
PcDesc* find_pc_desc(int pc_offset, bool approximate);
void add_pc_desc(PcDesc* pc_desc);
PcDesc* last_pc_desc() { return _pc_descs[0]; }
};
-class PcDescSearch {
-private:
- address _code_begin;
- PcDesc* _lower;
- PcDesc* _upper;
-public:
- PcDescSearch(address code, PcDesc* lower, PcDesc* upper) :
- _code_begin(code), _lower(lower), _upper(upper)
- {
- }
-
- address code_begin() const { return _code_begin; }
- PcDesc* scopes_pcs_begin() const { return _lower; }
- PcDesc* scopes_pcs_end() const { return _upper; }
-};
-
-class PcDescContainer {
+class PcDescContainer : public CHeapObj {
private:
PcDescCache _pc_desc_cache;
public:
- PcDescContainer() {}
+ PcDescContainer(PcDesc* initial_pc_desc) { _pc_desc_cache.init_to(initial_pc_desc); }
- PcDesc* find_pc_desc_internal(address pc, bool approximate, const PcDescSearch& search);
- void reset_to(PcDesc* initial_pc_desc) { _pc_desc_cache.reset_to(initial_pc_desc); }
+ PcDesc* find_pc_desc_internal(address pc, bool approximate, address code_begin,
+ PcDesc* lower, PcDesc* upper);
- PcDesc* find_pc_desc(address pc, bool approximate, const PcDescSearch& search) {
- address base_address = search.code_begin();
+ PcDesc* find_pc_desc(address pc, bool approximate, address code_begin, PcDesc* lower, PcDesc* upper)
+#ifdef PRODUCT
+ {
PcDesc* desc = _pc_desc_cache.last_pc_desc();
- if (desc != nullptr && desc->pc_offset() == pc - base_address) {
+ assert(desc != nullptr, "PcDesc cache should be initialized already");
+ if (desc->pc_offset() == (pc - code_begin)) {
+ // Cached value matched
return desc;
}
- return find_pc_desc_internal(pc, approximate, search);
+ return find_pc_desc_internal(pc, approximate, code_begin, lower, upper);
}
+#endif
+ ;
};
// nmethods (native methods) are the compiled code versions of Java methods.
@@ -188,10 +177,28 @@ class nmethod : public CodeBlob {
Method* _method;
- // To support simple linked-list chaining of nmethods:
- nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head
+ // To reduce header size union fields which usages do not overlap.
+ union {
+ // To support simple linked-list chaining of nmethods:
+ nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head
+ struct {
+ // These are used for compiled synchronized native methods to
+ // locate the owner and stack slot for the BasicLock. They are
+ // needed because there is no debug information for compiled native
+ // wrappers and the oop maps are insufficient to allow
+ // frame::retrieve_receiver() to work. Currently they are expected
+ // to be byte offsets from the Java stack pointer for maximum code
+ // sharing between platforms. JVMTI's GetLocalInstance() uses these
+ // offsets to find the receiver for non-static native wrapper frames.
+ ByteSize _native_receiver_sp_offset;
+ ByteSize _native_basic_lock_sp_offset;
+ };
+ };
- PcDescContainer _pc_desc_container;
+ // nmethod's read-only data
+ address _immutable_data;
+
+ PcDescContainer* _pc_desc_container;
ExceptionCache* volatile _exception_cache;
void* _gc_data;
@@ -200,52 +207,58 @@ class nmethod : public CodeBlob {
static nmethod* volatile _oops_do_mark_nmethods;
oops_do_mark_link* volatile _oops_do_mark_link;
- // offsets for entry points
- address _entry_point; // entry point with class check
- address _verified_entry_point; // entry point without class check
- address _osr_entry_point; // entry point for on stack replacement
-
CompiledICData* _compiled_ic_data;
- // Shared fields for all nmethod's
- int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method
+ // offsets for entry points
+ address _osr_entry_point; // entry point for on stack replacement
+ uint16_t _entry_offset; // entry point with class check
+ uint16_t _verified_entry_offset; // entry point without class check
+ int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method
+ int _immutable_data_size;
- // Offsets for different nmethod parts
- int _exception_offset;
+ // _consts_offset == _content_offset because SECT_CONSTS is first in code buffer
+
+ int _inline_insts_size;
+
+ int _stub_offset;
+
+ // Offsets for different stubs section parts
+ int _exception_offset;
// All deoptee's will resume execution at this location described by
// this offset.
int _deopt_handler_offset;
// All deoptee's at a MethodHandle call site will resume execution
// at this location described by this offset.
int _deopt_mh_handler_offset;
- // Offset of the unwind handler if it exists
- int _unwind_handler_offset;
+ // Offset (from insts_end) of the unwind handler if it exists
+ int16_t _unwind_handler_offset;
+ // Number of arguments passed on the stack
+ uint16_t _num_stack_arg_slots;
- int _consts_offset;
- int _stub_offset;
- int _oops_offset; // offset to where embedded oop table begins (inside data)
- int _metadata_offset; // embedded meta data table
- int _scopes_data_offset;
- int _scopes_pcs_offset;
- int _dependencies_offset;
- int _handler_table_offset;
- int _nul_chk_table_offset;
+ // Offsets in mutable data section
+ // _oops_offset == _data_offset, offset where embedded oop table begins (inside data)
+ uint16_t _metadata_offset; // embedded meta data table
#if INCLUDE_JVMCI
- int _speculations_offset;
- int _jvmci_data_offset;
+ uint16_t _jvmci_data_offset;
+#endif
+
+ // Offset in immutable data section
+ // _dependencies_offset == 0
+ uint16_t _nul_chk_table_offset;
+ uint16_t _handler_table_offset; // This table could be big in C1 code
+ int _scopes_pcs_offset;
+ int _scopes_data_offset;
+#if INCLUDE_JVMCI
+ int _speculations_offset;
#endif
- int _nmethod_end_offset;
- int _skipped_instructions_size;
// location in frame (offset for sp) that deopt can store the original
// pc during a deopt.
int _orig_pc_offset;
- int _compile_id; // which compilation made this nmethod
-
- CompilerType _compiler_type; // which compiler made this nmethod (u1)
-
- bool _is_unlinked;
+ int _compile_id; // which compilation made this nmethod
+ CompLevel _comp_level; // compilation level (s1)
+ CompilerType _compiler_type; // which compiler made this nmethod (u1)
#if INCLUDE_RTM_OPT
// RTM state at compile time. Used during deoptimization to decide
@@ -253,25 +266,9 @@ class nmethod : public CodeBlob {
RTMState _rtm_state;
#endif
- // These are used for compiled synchronized native methods to
- // locate the owner and stack slot for the BasicLock. They are
- // needed because there is no debug information for compiled native
- // wrappers and the oop maps are insufficient to allow
- // frame::retrieve_receiver() to work. Currently they are expected
- // to be byte offsets from the Java stack pointer for maximum code
- // sharing between platforms. JVMTI's GetLocalInstance() uses these
- // offsets to find the receiver for non-static native wrapper frames.
- ByteSize _native_receiver_sp_offset;
- ByteSize _native_basic_lock_sp_offset;
-
- CompLevel _comp_level; // compilation level (s1)
-
// Local state used to keep track of whether unloading is happening or not
volatile uint8_t _is_unloading_state;
- // used by jvmti to track if an event has been posted for this nmethod.
- bool _load_reported;
-
// Protected by NMethodState_lock
volatile signed char _state; // {not_installed, in_use, not_entrant}
@@ -280,7 +277,9 @@ class nmethod : public CodeBlob {
_has_method_handle_invokes:1,// Has this method MethodHandle invokes?
_has_wide_vectors:1, // Preserve wide vectors at safepoints
_has_monitors:1, // Fastpath monitor detection for continuations
- _has_flushed_dependencies:1; // Used for maintenance of dependencies (under CodeCache_lock)
+ _has_flushed_dependencies:1, // Used for maintenance of dependencies (under CodeCache_lock)
+ _is_unlinked:1, // mark during class unloading
+ _load_reported:1; // used by jvmti to track if an event has been posted for this nmethod
enum DeoptimizationStatus : u1 {
not_marked,
@@ -295,6 +294,12 @@ class nmethod : public CodeBlob {
return Atomic::load(&_deoptimization_status);
}
+ // Initialize fields to their default values
+ void init_defaults(CodeBuffer *code_buffer, CodeOffsets* offsets);
+
+ // Post initialization
+ void post_init();
+
// For native wrappers
nmethod(Method* method,
CompilerType type,
@@ -307,12 +312,14 @@ class nmethod : public CodeBlob {
ByteSize basic_lock_sp_offset, /* synchronized natives only */
OopMapSet* oop_maps);
- // Creation support
+ // For normal JIT compiled code
nmethod(Method* method,
CompilerType type,
int nmethod_size,
+ int immutable_data_size,
int compile_id,
int entry_bci,
+ address immutable_data,
CodeOffsets* offsets,
int orig_pc_offset,
DebugInformationRecorder *recorder,
@@ -351,11 +358,9 @@ class nmethod : public CodeBlob {
// Inform external interfaces that a compiled method has been unloaded
void post_compiled_method_unload();
- // Initialize fields to their default values
- void init_defaults();
-
PcDesc* find_pc_desc(address pc, bool approximate) {
- return _pc_desc_container.find_pc_desc(pc, approximate, PcDescSearch(code_begin(), scopes_pcs_begin(), scopes_pcs_end()));
+ if (_pc_desc_container == nullptr) return nullptr; // native method
+ return _pc_desc_container->find_pc_desc(pc, approximate, code_begin(), scopes_pcs_begin(), scopes_pcs_end());
}
// STW two-phase nmethod root processing helpers.
@@ -521,62 +526,71 @@ public:
const char* compiler_name () const;
// boundaries for different parts
- address consts_begin () const { return header_begin() + _consts_offset ; }
- address consts_end () const { return header_begin() + code_offset() ; }
- address insts_begin () const { return header_begin() + code_offset() ; }
+ address consts_begin () const { return content_begin(); }
+ address consts_end () const { return code_begin() ; }
+ address insts_begin () const { return code_begin() ; }
address insts_end () const { return header_begin() + _stub_offset ; }
address stub_begin () const { return header_begin() + _stub_offset ; }
- address stub_end () const { return header_begin() + _oops_offset ; }
+ address stub_end () const { return data_begin() ; }
address exception_begin () const { return header_begin() + _exception_offset ; }
address deopt_handler_begin () const { return header_begin() + _deopt_handler_offset ; }
address deopt_mh_handler_begin() const { return header_begin() + _deopt_mh_handler_offset ; }
- address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : nullptr; }
- oop* oops_begin () const { return (oop*) (header_begin() + _oops_offset) ; }
- oop* oops_end () const { return (oop*) (header_begin() + _metadata_offset) ; }
+ address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (insts_end() - _unwind_handler_offset) : nullptr; }
- Metadata** metadata_begin () const { return (Metadata**) (header_begin() + _metadata_offset) ; }
- Metadata** metadata_end () const { return (Metadata**) (header_begin() + _scopes_data_offset) ; }
+ // mutable data
+ oop* oops_begin () const { return (oop*) data_begin(); }
+ oop* oops_end () const { return (oop*) (data_begin() + _metadata_offset) ; }
+ Metadata** metadata_begin () const { return (Metadata**) (data_begin() + _metadata_offset) ; }
+#if INCLUDE_JVMCI
+ Metadata** metadata_end () const { return (Metadata**) (data_begin() + _jvmci_data_offset) ; }
+ address jvmci_data_begin () const { return data_begin() + _jvmci_data_offset ; }
+ address jvmci_data_end () const { return data_end(); }
+#else
+ Metadata** metadata_end () const { return (Metadata**) data_end(); }
+#endif
- address scopes_data_begin () const { return header_begin() + _scopes_data_offset ; }
- address scopes_data_end () const { return header_begin() + _scopes_pcs_offset ; }
- PcDesc* scopes_pcs_begin () const { return (PcDesc*)(header_begin() + _scopes_pcs_offset) ; }
- PcDesc* scopes_pcs_end () const { return (PcDesc*)(header_begin() + _dependencies_offset) ; }
- address dependencies_begin () const { return header_begin() + _dependencies_offset ; }
- address dependencies_end () const { return header_begin() + _handler_table_offset ; }
- address handler_table_begin () const { return header_begin() + _handler_table_offset ; }
- address handler_table_end () const { return header_begin() + _nul_chk_table_offset ; }
- address nul_chk_table_begin () const { return header_begin() + _nul_chk_table_offset ; }
+ // immutable data
+ address immutable_data_begin () const { return _immutable_data; }
+ address immutable_data_end () const { return _immutable_data + _immutable_data_size ; }
+ address dependencies_begin () const { return _immutable_data; }
+ address dependencies_end () const { return _immutable_data + _nul_chk_table_offset; }
+ address nul_chk_table_begin () const { return _immutable_data + _nul_chk_table_offset; }
+ address nul_chk_table_end () const { return _immutable_data + _handler_table_offset; }
+ address handler_table_begin () const { return _immutable_data + _handler_table_offset; }
+ address handler_table_end () const { return _immutable_data + _scopes_pcs_offset ; }
+ PcDesc* scopes_pcs_begin () const { return (PcDesc*)(_immutable_data + _scopes_pcs_offset) ; }
+ PcDesc* scopes_pcs_end () const { return (PcDesc*)(_immutable_data + _scopes_data_offset) ; }
+ address scopes_data_begin () const { return _immutable_data + _scopes_data_offset ; }
#if INCLUDE_JVMCI
- address nul_chk_table_end () const { return header_begin() + _speculations_offset ; }
- address speculations_begin () const { return header_begin() + _speculations_offset ; }
- address speculations_end () const { return header_begin() + _jvmci_data_offset ; }
- address jvmci_data_begin () const { return header_begin() + _jvmci_data_offset ; }
- address jvmci_data_end () const { return header_begin() + _nmethod_end_offset ; }
+ address scopes_data_end () const { return _immutable_data + _speculations_offset ; }
+ address speculations_begin () const { return _immutable_data + _speculations_offset ; }
+ address speculations_end () const { return immutable_data_end(); }
#else
- address nul_chk_table_end () const { return header_begin() + _nmethod_end_offset ; }
+ address scopes_data_end () const { return immutable_data_end(); }
#endif
// Sizes
- int consts_size () const { return int( consts_end () - consts_begin ()); }
- int insts_size () const { return int( insts_end () - insts_begin ()); }
- int stub_size () const { return int( stub_end () - stub_begin ()); }
- int oops_size () const { return int((address) oops_end () - (address) oops_begin ()); }
- int metadata_size () const { return int((address) metadata_end () - (address) metadata_begin ()); }
- int scopes_data_size () const { return int( scopes_data_end () - scopes_data_begin ()); }
- int scopes_pcs_size () const { return int((intptr_t)scopes_pcs_end () - (intptr_t)scopes_pcs_begin ()); }
- int dependencies_size () const { return int( dependencies_end () - dependencies_begin ()); }
- int handler_table_size() const { return int( handler_table_end() - handler_table_begin()); }
- int nul_chk_table_size() const { return int( nul_chk_table_end() - nul_chk_table_begin()); }
+ int immutable_data_size() const { return _immutable_data_size; }
+ int consts_size () const { return int( consts_end () - consts_begin ()); }
+ int insts_size () const { return int( insts_end () - insts_begin ()); }
+ int stub_size () const { return int( stub_end () - stub_begin ()); }
+ int oops_size () const { return int((address) oops_end () - (address) oops_begin ()); }
+ int metadata_size () const { return int((address) metadata_end () - (address) metadata_begin ()); }
+ int scopes_data_size () const { return int( scopes_data_end () - scopes_data_begin ()); }
+ int scopes_pcs_size () const { return int((intptr_t)scopes_pcs_end () - (intptr_t)scopes_pcs_begin ()); }
+ int dependencies_size () const { return int( dependencies_end () - dependencies_begin ()); }
+ int handler_table_size () const { return int( handler_table_end() - handler_table_begin()); }
+ int nul_chk_table_size () const { return int( nul_chk_table_end() - nul_chk_table_begin()); }
#if INCLUDE_JVMCI
- int speculations_size () const { return int( speculations_end () - speculations_begin ()); }
- int jvmci_data_size () const { return int( jvmci_data_end () - jvmci_data_begin ()); }
+ int speculations_size () const { return int( speculations_end () - speculations_begin ()); }
+ int jvmci_data_size () const { return int( jvmci_data_end () - jvmci_data_begin ()); }
#endif
int oops_count() const { assert(oops_size() % oopSize == 0, ""); return (oops_size() / oopSize) + 1; }
int metadata_count() const { assert(metadata_size() % wordSize == 0, ""); return (metadata_size() / wordSize) + 1; }
- int skipped_instructions_size () const { return _skipped_instructions_size; }
+ int inline_insts_size() const { return _inline_insts_size; }
int total_size() const;
// Containment
@@ -594,8 +608,8 @@ public:
bool nul_chk_table_contains (address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
// entry points
- address entry_point() const { return _entry_point; } // normal entry point
- address verified_entry_point() const { return _verified_entry_point; } // if klass is correct
+ address entry_point() const { return code_begin() + _entry_offset; } // normal entry point
+ address verified_entry_point() const { return code_begin() + _verified_entry_offset; } // if klass is correct
enum : signed char { not_installed = -1, // in construction, only the owner doing the construction is
// allowed to advance state
@@ -615,9 +629,6 @@ public:
bool is_unloading();
void do_unloading(bool unloading_occurred);
- bool is_unlinked() const { return _is_unlinked; }
- void set_is_unlinked() { assert(!_is_unlinked, "already unlinked"); _is_unlinked = true; }
-
#if INCLUDE_RTM_OPT
// rtm state accessing and manipulating
RTMState rtm_state() const { return _rtm_state; }
@@ -677,6 +688,12 @@ public:
_has_flushed_dependencies = z;
}
+ bool is_unlinked() const { return _is_unlinked; }
+ void set_is_unlinked() {
+ assert(!_is_unlinked, "already unlinked");
+ _is_unlinked = true;
+ }
+
int comp_level() const { return _comp_level; }
// Support for oops in scopes and relocs:
@@ -721,7 +738,6 @@ protected:
// Note: _exception_cache may be read and cleaned concurrently.
ExceptionCache* exception_cache() const { return _exception_cache; }
ExceptionCache* exception_cache_acquire() const;
- void set_exception_cache(ExceptionCache *ec) { _exception_cache = ec; }
public:
address handler_for_exception_and_pc(Handle exception, address pc);
@@ -750,7 +766,7 @@ public:
return (addr >= code_begin() && addr < verified_entry_point());
}
- void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) override;
+ void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f);
// implicit exceptions support
address continuation_for_implicit_div0_exception(address pc) { return continuation_for_implicit_exception(pc, true); }
@@ -786,11 +802,15 @@ public:
void unlink_from_method();
// On-stack replacement support
- int osr_entry_bci() const { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; }
- address osr_entry() const { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; }
- void invalidate_osr_method();
- nmethod* osr_link() const { return _osr_link; }
- void set_osr_link(nmethod *n) { _osr_link = n; }
+ int osr_entry_bci() const { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; }
+ address osr_entry() const { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; }
+ nmethod* osr_link() const { return _osr_link; }
+ void set_osr_link(nmethod *n) { _osr_link = n; }
+ void invalidate_osr_method();
+
+ int num_stack_arg_slots(bool rounded = true) const {
+ return rounded ? align_up(_num_stack_arg_slots, 2) : _num_stack_arg_slots;
+ }
// Verify calls to dead methods have been cleaned.
void verify_clean_inline_caches();
@@ -799,7 +819,7 @@ public:
void unlink();
// Deallocate this nmethod - called by the GC
- void purge(bool free_code_cache_data, bool unregister_nmethod) override;
+ void purge(bool unregister_nmethod);
// See comment at definition of _last_seen_on_stack
void mark_as_maybe_on_stack();
@@ -964,16 +984,17 @@ public:
// JVMTI's GetLocalInstance() support
ByteSize native_receiver_sp_offset() {
+ assert(is_native_method(), "sanity");
return _native_receiver_sp_offset;
}
ByteSize native_basic_lock_sp_offset() {
+ assert(is_native_method(), "sanity");
return _native_basic_lock_sp_offset;
}
// support for code generation
- static ByteSize verified_entry_point_offset() { return byte_offset_of(nmethod, _verified_entry_point); }
- static ByteSize osr_entry_point_offset() { return byte_offset_of(nmethod, _osr_entry_point); }
- static ByteSize state_offset() { return byte_offset_of(nmethod, _state); }
+ static ByteSize osr_entry_point_offset() { return byte_offset_of(nmethod, _osr_entry_point); }
+ static ByteSize state_offset() { return byte_offset_of(nmethod, _state); }
void metadata_do(MetadataClosure* f);
diff --git a/src/hotspot/share/code/relocInfo.cpp b/src/hotspot/share/code/relocInfo.cpp
index 69ff4bc78d6..d0f732edac4 100644
--- a/src/hotspot/share/code/relocInfo.cpp
+++ b/src/hotspot/share/code/relocInfo.cpp
@@ -277,30 +277,6 @@ DEFINE_COPY_INTO_AUX(Relocation)
#undef DEFINE_COPY_INTO_AUX
#undef DEFINE_COPY_INTO
-//////// Methods for RelocationHolder
-
-RelocationHolder RelocationHolder::plus(int offset) const {
- if (offset != 0) {
- switch (type()) {
- case relocInfo::none:
- break;
- case relocInfo::oop_type:
- {
- oop_Relocation* r = (oop_Relocation*)reloc();
- return oop_Relocation::spec(r->oop_index(), r->offset() + offset);
- }
- case relocInfo::metadata_type:
- {
- metadata_Relocation* r = (metadata_Relocation*)reloc();
- return metadata_Relocation::spec(r->metadata_index(), r->offset() + offset);
- }
- default:
- ShouldNotReachHere();
- }
- }
- return (*this);
-}
-
//////// Methods for flyweight Relocation types
// some relocations can compute their own values
@@ -402,24 +378,24 @@ void CallRelocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer
void oop_Relocation::pack_data_to(CodeSection* dest) {
short* p = (short*) dest->locs_end();
- p = pack_2_ints_to(p, _oop_index, _offset);
+ p = pack_1_int_to(p, _oop_index);
dest->set_locs_end((relocInfo*) p);
}
void oop_Relocation::unpack_data() {
- unpack_2_ints(_oop_index, _offset);
+ _oop_index = unpack_1_int();
}
void metadata_Relocation::pack_data_to(CodeSection* dest) {
short* p = (short*) dest->locs_end();
- p = pack_2_ints_to(p, _metadata_index, _offset);
+ p = pack_1_int_to(p, _metadata_index);
dest->set_locs_end((relocInfo*) p);
}
void metadata_Relocation::unpack_data() {
- unpack_2_ints(_metadata_index, _offset);
+ _metadata_index = unpack_1_int();
}
@@ -855,8 +831,8 @@ void RelocIterator::print_current() {
raw_oop = *oop_addr;
oop_value = r->oop_value();
}
- tty->print(" | [oop_addr=" INTPTR_FORMAT " *=" INTPTR_FORMAT " offset=%d]",
- p2i(oop_addr), p2i(raw_oop), r->offset());
+ tty->print(" | [oop_addr=" INTPTR_FORMAT " *=" INTPTR_FORMAT "]",
+ p2i(oop_addr), p2i(raw_oop));
// Do not print the oop by default--we want this routine to
// work even during GC or other inconvenient times.
if (WizardMode && oop_value != nullptr) {
@@ -878,8 +854,8 @@ void RelocIterator::print_current() {
raw_metadata = *metadata_addr;
metadata_value = r->metadata_value();
}
- tty->print(" | [metadata_addr=" INTPTR_FORMAT " *=" INTPTR_FORMAT " offset=%d]",
- p2i(metadata_addr), p2i(raw_metadata), r->offset());
+ tty->print(" | [metadata_addr=" INTPTR_FORMAT " *=" INTPTR_FORMAT "]",
+ p2i(metadata_addr), p2i(raw_metadata));
if (metadata_value != nullptr) {
tty->print("metadata_value=" INTPTR_FORMAT ": ", p2i(metadata_value));
metadata_value->print_value_on(tty);
diff --git a/src/hotspot/share/code/relocInfo.hpp b/src/hotspot/share/code/relocInfo.hpp
index 9f1db9f4684..6d0907d97de 100644
--- a/src/hotspot/share/code/relocInfo.hpp
+++ b/src/hotspot/share/code/relocInfo.hpp
@@ -129,12 +129,7 @@ class nmethod;
// Value: an oop, or else the address (handle) of an oop
// Instruction types: memory (load), set (load address)
// Data: [] an oop stored in 4 bytes of instruction
-// [n] n is the index of an oop in the CodeBlob's oop pool
-// [[N]n l] and l is a byte offset to be applied to the oop
-// [Nn Ll] both index and offset may be 32 bits if necessary
-// Here is a special hack, used only by the old compiler:
-// [[N]n 00] the value is the __address__ of the nth oop in the pool
-// (Note that the offset allows optimal references to class variables.)
+// [[N]n] the index of an oop in the CodeBlob's oop pool
//
// relocInfo::internal_word_type -- an address within the same CodeBlob
// relocInfo::section_word_type -- same, but can refer to another section
@@ -515,9 +510,6 @@ class RelocationHolder {
Relocation* reloc() const { return (Relocation*)_relocbuf; }
inline relocInfo::relocType type() const;
- // Add a constant offset to a relocation. Helper for class Address.
- RelocationHolder plus(int offset) const;
-
// Return a holder containing a relocation of type Reloc, constructed using args.
template
static RelocationHolder construct(const Args&... args) {
@@ -788,8 +780,8 @@ class Relocation {
void const_set_data_value (address x);
void const_verify_data_value (address x);
// platform-dependent utilities for decoding and patching instructions
- void pd_set_data_value (address x, intptr_t off, bool verify_only = false); // a set or mem-ref
- void pd_verify_data_value (address x, intptr_t off) { pd_set_data_value(x, off, true); }
+ void pd_set_data_value (address x, bool verify_only = false); // a set or mem-ref
+ void pd_verify_data_value (address x) { pd_set_data_value(x, true); }
address pd_call_destination (address orig_addr = nullptr);
void pd_set_call_destination (address x);
@@ -895,41 +887,28 @@ relocInfo::relocType RelocationHolder::type() const {
// A DataRelocation always points at a memory or load-constant instruction..
// It is absolute on most machines, and the constant is split on RISCs.
// The specific subtypes are oop, external_word, and internal_word.
-// By convention, the "value" does not include a separately reckoned "offset".
class DataRelocation : public Relocation {
public:
DataRelocation(relocInfo::relocType type) : Relocation(type) {}
- bool is_data() override { return true; }
+ bool is_data() override { return true; }
- // both target and offset must be computed somehow from relocation data
- virtual int offset() { return 0; }
- address value() override = 0;
- void set_value(address x) override { set_value(x, offset()); }
- void set_value(address x, intptr_t o) {
- if (addr_in_const())
+ // target must be computed somehow from relocation data
+ address value() override = 0;
+ void set_value(address x) override {
+ if (addr_in_const()) {
const_set_data_value(x);
- else
- pd_set_data_value(x, o);
+ } else {
+ pd_set_data_value(x);
+ }
}
- void verify_value(address x) {
- if (addr_in_const())
+ void verify_value(address x) {
+ if (addr_in_const()) {
const_verify_data_value(x);
- else
- pd_verify_data_value(x, offset());
+ } else {
+ pd_verify_data_value(x);
+ }
}
-
- // The "o" (displacement) argument is relevant only to split relocations
- // on RISC machines. In some CPUs (SPARC), the set-hi and set-lo ins'ns
- // can encode more than 32 bits between them. This allows compilers to
- // share set-hi instructions between addresses that differ by a small
- // offset (e.g., different static variables in the same class).
- // On such machines, the "x" argument to set_value on all set-lo
- // instructions must be the same as the "x" argument for the
- // corresponding set-hi instructions. The "o" arguments for the
- // set-hi instructions are ignored, and must not affect the high-half
- // immediate constant. The "o" arguments for the set-lo instructions are
- // added into the low-half immediate constant, and must not overflow it.
};
class post_call_nop_Relocation : public Relocation {
@@ -976,40 +955,36 @@ class CallRelocation : public Relocation {
class oop_Relocation : public DataRelocation {
public:
- // encode in one of these formats: [] [n] [n l] [Nn l] [Nn Ll]
- // an oop in the CodeBlob's oop pool
- static RelocationHolder spec(int oop_index, int offset = 0) {
+ // an oop in the CodeBlob's oop pool; encoded as [n] or [Nn]
+ static RelocationHolder spec(int oop_index) {
assert(oop_index > 0, "must be a pool-resident oop");
- return RelocationHolder::construct(oop_index, offset);
+ return RelocationHolder::construct(oop_index);
}
- // an oop in the instruction stream
+ // an oop in the instruction stream; encoded as []
static RelocationHolder spec_for_immediate() {
// If no immediate oops are generated, we can skip some walks over nmethods.
// Assert that they don't get generated accidentally!
assert(relocInfo::mustIterateImmediateOopsInCode(),
"Must return true so we will search for oops as roots etc. in the code.");
const int oop_index = 0;
- const int offset = 0; // if you want an offset, use the oop pool
- return RelocationHolder::construct(oop_index, offset);
+ return RelocationHolder::construct(oop_index);
}
void copy_into(RelocationHolder& holder) const override;
private:
jint _oop_index; // if > 0, index into CodeBlob::oop_at
- jint _offset; // byte offset to apply to the oop itself
- oop_Relocation(int oop_index, int offset)
- : DataRelocation(relocInfo::oop_type), _oop_index(oop_index), _offset(offset) { }
+ oop_Relocation(int oop_index)
+ : DataRelocation(relocInfo::oop_type), _oop_index(oop_index) { }
friend class RelocationHolder;
oop_Relocation() : DataRelocation(relocInfo::oop_type) {}
public:
int oop_index() { return _oop_index; }
- int offset() override { return _offset; }
- // data is packed in "2_ints" format: [i o] or [Ii Oo]
+ // oop_index is packed in "1_int" format: [n] or [Nn]
void pack_data_to(CodeSection* dest) override;
void unpack_data() override;
@@ -1031,27 +1006,24 @@ class oop_Relocation : public DataRelocation {
class metadata_Relocation : public DataRelocation {
public:
- // encode in one of these formats: [] [n] [n l] [Nn l] [Nn Ll]
- // an metadata in the CodeBlob's metadata pool
- static RelocationHolder spec(int metadata_index, int offset = 0) {
+ // a metadata in the CodeBlob's metadata pool; encoded as [n] or [Nn]
+ static RelocationHolder spec(int metadata_index) {
assert(metadata_index > 0, "must be a pool-resident metadata");
- return RelocationHolder::construct(metadata_index, offset);
+ return RelocationHolder::construct(metadata_index);
}
- // an metadata in the instruction stream
+ // a metadata in the instruction stream; encoded as []
static RelocationHolder spec_for_immediate() {
const int metadata_index = 0;
- const int offset = 0; // if you want an offset, use the metadata pool
- return RelocationHolder::construct(metadata_index, offset);
+ return RelocationHolder::construct(metadata_index);
}
void copy_into(RelocationHolder& holder) const override;
private:
jint _metadata_index; // if > 0, index into nmethod::metadata_at
- jint _offset; // byte offset to apply to the metadata itself
- metadata_Relocation(int metadata_index, int offset)
- : DataRelocation(relocInfo::metadata_type), _metadata_index(metadata_index), _offset(offset) { }
+ metadata_Relocation(int metadata_index)
+ : DataRelocation(relocInfo::metadata_type), _metadata_index(metadata_index) { }
friend class RelocationHolder;
metadata_Relocation() : DataRelocation(relocInfo::metadata_type) { }
@@ -1063,9 +1035,8 @@ class metadata_Relocation : public DataRelocation {
public:
int metadata_index() { return _metadata_index; }
- int offset() override { return _offset; }
- // data is packed in "2_ints" format: [i o] or [Ii Oo]
+ // metadata_index is packed in "1_int" format: [n] or [Nn]
void pack_data_to(CodeSection* dest) override;
void unpack_data() override;
diff --git a/src/hotspot/share/code/vmreg.cpp b/src/hotspot/share/code/vmreg.cpp
index 37c3137a102..ba30c114c6c 100644
--- a/src/hotspot/share/code/vmreg.cpp
+++ b/src/hotspot/share/code/vmreg.cpp
@@ -30,7 +30,7 @@
// used by SA and jvmti, but it's a leaky abstraction: SA and jvmti
// "know" that stack0 is an integer masquerading as a pointer. For the
// sake of those clients, we preserve this interface.
-VMReg VMRegImpl::stack0 = (VMReg)(intptr_t)VMRegImpl::stack_0()->value();
+VMReg VMRegImpl::stack0 = (VMReg)(intptr_t)FIRST_STACK;
// VMRegs are 4 bytes wide on all platforms
const int VMRegImpl::stack_slot_size = 4;
diff --git a/src/hotspot/share/code/vmreg.hpp b/src/hotspot/share/code/vmreg.hpp
index 78f52b8d57a..41e40e9a017 100644
--- a/src/hotspot/share/code/vmreg.hpp
+++ b/src/hotspot/share/code/vmreg.hpp
@@ -54,7 +54,8 @@ friend class OptoReg;
// friend class Location;
private:
enum {
- BAD_REG = -1
+ BAD_REG = -1,
+ FIRST_STACK = (ConcreteRegisterImpl::number_of_registers + 7) & ~7
};
// Despite being private, this field is exported to the
@@ -71,7 +72,7 @@ private:
public:
static constexpr VMReg stack_0() {
- return first() + ((ConcreteRegisterImpl::number_of_registers + 7) & ~7);
+ return first() + FIRST_STACK;
}
static VMReg as_VMReg(int val, bool bad_ok = false) {
diff --git a/src/hotspot/share/compiler/compilationMemoryStatistic.cpp b/src/hotspot/share/compiler/compilationMemoryStatistic.cpp
index 7c39024a82a..196a11fd065 100644
--- a/src/hotspot/share/compiler/compilationMemoryStatistic.cpp
+++ b/src/hotspot/share/compiler/compilationMemoryStatistic.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2023, Red Hat, Inc. and/or its affiliates.
+ * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2023, 2024, Red Hat, Inc. and/or its affiliates.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -54,7 +54,7 @@
ArenaStatCounter::ArenaStatCounter() :
_current(0), _start(0), _peak(0),
_na(0), _ra(0),
- _limit(0), _hit_limit(false),
+ _limit(0), _hit_limit(false), _limit_in_process(false),
_na_at_peak(0), _ra_at_peak(0), _live_nodes_at_peak(0)
{}
@@ -167,12 +167,9 @@ public:
ss.put(')');
return buf;
}
-
- bool equals(const FullMethodName& b) const {
+ bool operator== (const FullMethodName& b) const {
return _k == b._k && _m == b._m && _s == b._s;
}
-
- bool operator== (const FullMethodName& other) const { return equals(other); }
};
// Note: not mtCompiler since we don't want to change what we measure
@@ -184,10 +181,16 @@ class MemStatEntry : public CHeapObj {
int _num_recomp;
// Compiling thread. Only for diagnostic purposes. Thread may not be alive anymore.
const Thread* _thread;
+ // active limit for this compilation, if any
+ size_t _limit;
+ // peak usage, bytes, over all arenas
size_t _total;
+ // usage in node arena when total peaked
size_t _na_at_peak;
+ // usage in resource area when total peaked
size_t _ra_at_peak;
+ // number of nodes (c2 only) when total peaked
unsigned _live_nodes_at_peak;
const char* _result;
@@ -195,7 +198,7 @@ public:
MemStatEntry(FullMethodName method)
: _method(method), _comptype(compiler_c1),
- _time(0), _num_recomp(0), _thread(nullptr),
+ _time(0), _num_recomp(0), _thread(nullptr), _limit(0),
_total(0), _na_at_peak(0), _ra_at_peak(0), _live_nodes_at_peak(0),
_result(nullptr) {
}
@@ -203,6 +206,7 @@ public:
void set_comptype(CompilerType comptype) { _comptype = comptype; }
void set_current_time() { _time = os::elapsedTime(); }
void set_current_thread() { _thread = Thread::current(); }
+ void set_limit(size_t limit) { _limit = limit; }
void inc_recompilation() { _num_recomp++; }
void set_total(size_t n) { _total = n; }
@@ -221,6 +225,7 @@ public:
st->print_cr(" RA : ...how much in resource areas");
st->print_cr(" result : Result: 'ok' finished successfully, 'oom' hit memory limit, 'err' compilation failed");
st->print_cr(" #nodes : ...how many nodes (c2 only)");
+ st->print_cr(" limit : memory limit, if set");
st->print_cr(" time : time of last compilation (sec)");
st->print_cr(" type : compiler type");
st->print_cr(" #rc : how often recompiled");
@@ -228,7 +233,7 @@ public:
}
static void print_header(outputStream* st) {
- st->print_cr("total NA RA result #nodes time type #rc thread method");
+ st->print_cr("total NA RA result #nodes limit time type #rc thread method");
}
void print_on(outputStream* st, bool human_readable) const {
@@ -263,7 +268,19 @@ public:
col += 8; st->fill_to(col);
// Number of Nodes when memory peaked
- st->print("%u ", _live_nodes_at_peak);
+ if (_live_nodes_at_peak > 0) {
+ st->print("%u ", _live_nodes_at_peak);
+ } else {
+ st->print("-");
+ }
+ col += 8; st->fill_to(col);
+
+ // Limit
+ if (_limit > 0) {
+ st->print(PROPERFMT " ", PROPERFMTARGS(_limit));
+ } else {
+ st->print("-");
+ }
col += 8; st->fill_to(col);
// TimeStamp
@@ -292,28 +309,47 @@ public:
const size_t x2 = _total;
return x1 < x2 ? -1 : x1 == x2 ? 0 : 1;
}
+};
- bool equals(const FullMethodName& b) const {
- return _method.equals(b);
+// The MemStatTable contains records of memory usage of all compilations. It is printed,
+// as memory summary, either with jcmd Compiler.memory, or - if the "print" suboption has
+// been given with the MemStat compile command - as summary printout at VM exit.
+// For any given compiled method, we only keep the memory statistics of the most recent
+// compilation, but on a per-compiler basis. If one needs statistics of prior compilations,
+// one needs to look into the log produced by the "print" suboption.
+
+class MemStatTableKey {
+ const FullMethodName _fmn;
+ const CompilerType _comptype;
+public:
+ MemStatTableKey(FullMethodName fmn, CompilerType comptype) :
+ _fmn(fmn), _comptype(comptype) {}
+ MemStatTableKey(const MemStatTableKey& o) :
+ _fmn(o._fmn), _comptype(o._comptype) {}
+ bool operator== (const MemStatTableKey& other) const {
+ return _fmn == other._fmn && _comptype == other._comptype;
+ }
+ static unsigned compute_hash(const MemStatTableKey& n) {
+ return FullMethodName::compute_hash(n._fmn) + (unsigned)n._comptype;
}
};
class MemStatTable :
- public ResourceHashtable
+ public ResourceHashtable
{
public:
void add(const FullMethodName& fmn, CompilerType comptype,
size_t total, size_t na_at_peak, size_t ra_at_peak,
- unsigned live_nodes_at_peak, const char* result) {
+ unsigned live_nodes_at_peak, size_t limit, const char* result) {
assert_lock_strong(NMTCompilationCostHistory_lock);
-
- MemStatEntry** pe = get(fmn);
+ MemStatTableKey key(fmn, comptype);
+ MemStatEntry** pe = get(key);
MemStatEntry* e = nullptr;
if (pe == nullptr) {
e = new MemStatEntry(fmn);
- put(fmn, e);
+ put(key, e);
} else {
// Update existing entry
e = *pe;
@@ -327,6 +363,7 @@ public:
e->set_na_at_peak(na_at_peak);
e->set_ra_at_peak(ra_at_peak);
e->set_live_nodes_at_peak(live_nodes_at_peak);
+ e->set_limit(limit);
e->set_result(result);
}
@@ -338,7 +375,7 @@ public:
const int num_all = number_of_entries();
MemStatEntry** flat = NEW_C_HEAP_ARRAY(MemStatEntry*, num_all, mtInternal);
int i = 0;
- auto do_f = [&] (const FullMethodName& ignored, MemStatEntry* e) {
+ auto do_f = [&] (const MemStatTableKey& ignored, MemStatEntry* e) {
if (e->total() >= min_size) {
flat[i] = e;
assert(i < num_all, "Sanity");
@@ -378,23 +415,19 @@ void CompilationMemoryStatistic::on_end_compilation() {
ResourceMark rm;
CompilerThread* const th = Thread::current()->as_Compiler_thread();
ArenaStatCounter* const arena_stat = th->arena_stat();
- const CompilerType ct = th->task()->compiler()->type();
+ CompileTask* const task = th->task();
+ const CompilerType ct = task->compiler()->type();
const Method* const m = th->task()->method();
FullMethodName fmn(m);
fmn.make_permanent();
const DirectiveSet* directive = th->task()->directive();
- assert(directive->should_collect_memstat(), "Only call if memstat is enabled");
+ assert(directive->should_collect_memstat(), "Should only be called if memstat is enabled for this method");
const bool print = directive->should_print_memstat();
- if (print) {
- char buf[1024];
- fmn.as_C_string(buf, sizeof(buf));
- tty->print("%s Arena usage %s: ", compilertype2name(ct), buf);
- arena_stat->print_on(tty);
- tty->cr();
- }
+ // Store memory used in task, for later processing by JFR
+ task->set_arena_bytes(arena_stat->peak_since_start());
// Store result
// For this to work, we must call on_end_compilation() at a point where
@@ -418,8 +451,16 @@ void CompilationMemoryStatistic::on_end_compilation() {
arena_stat->na_at_peak(),
arena_stat->ra_at_peak(),
arena_stat->live_nodes_at_peak(),
+ arena_stat->limit(),
result);
}
+ if (print) {
+ char buf[1024];
+ fmn.as_C_string(buf, sizeof(buf));
+ tty->print("%s Arena usage %s: ", compilertype2name(ct), buf);
+ arena_stat->print_on(tty);
+ tty->cr();
+ }
arena_stat->end(); // reset things
}
@@ -464,20 +505,25 @@ void CompilationMemoryStatistic::on_arena_change(ssize_t diff, const Arena* aren
CompilerThread* const th = Thread::current()->as_Compiler_thread();
ArenaStatCounter* const arena_stat = th->arena_stat();
+ if (arena_stat->limit_in_process()) {
+ return; // avoid recursion on limit hit
+ }
+
bool hit_limit_before = arena_stat->hit_limit();
if (arena_stat->account(diff, (int)arena->get_tag())) { // new peak?
// Limit handling
if (arena_stat->hit_limit()) {
-
char name[1024] = "";
bool print = false;
bool crash = false;
CompilerType ct = compiler_none;
+ arena_stat->set_limit_in_process(true); // prevent recursive limit hits
+
// get some more info
- const CompileTask* task = th->task();
+ const CompileTask* const task = th->task();
if (task != nullptr) {
ct = task->compiler()->type();
const DirectiveSet* directive = task->directive();
@@ -497,8 +543,8 @@ void CompilationMemoryStatistic::on_arena_change(ssize_t diff, const Arena* aren
if (ct != compiler_none && name[0] != '\0') {
ss.print("%s %s: ", compilertype2name(ct), name);
}
- ss.print("Hit MemLimit %s (limit: %zu now: %zu)",
- (hit_limit_before ? "again" : ""),
+ ss.print("Hit MemLimit %s(limit: %zu now: %zu)",
+ (hit_limit_before ? "again " : ""),
arena_stat->limit(), arena_stat->peak_since_start());
}
@@ -514,6 +560,8 @@ void CompilationMemoryStatistic::on_arena_change(ssize_t diff, const Arena* aren
} else {
inform_compilation_about_oom(ct);
}
+
+ arena_stat->set_limit_in_process(false);
}
}
}
@@ -523,6 +571,10 @@ static inline ssize_t diff_entries_by_size(const MemStatEntry* e1, const MemStat
}
void CompilationMemoryStatistic::print_all_by_size(outputStream* st, bool human_readable, size_t min_size) {
+
+ MutexLocker ml(NMTCompilationCostHistory_lock, Mutex::_no_safepoint_check_flag);
+
+ st->cr();
st->print_cr("Compilation memory statistics");
if (!enabled()) {
@@ -543,29 +595,27 @@ void CompilationMemoryStatistic::print_all_by_size(outputStream* st, bool human_
MemStatEntry::print_header(st);
MemStatEntry** filtered = nullptr;
- {
- MutexLocker ml(NMTCompilationCostHistory_lock, Mutex::_no_safepoint_check_flag);
- if (_the_table != nullptr) {
- // We sort with quicksort
- int num = 0;
- filtered = _the_table->calc_flat_array(num, min_size);
- if (min_size > 0) {
- st->print_cr("(%d/%d)", num, _the_table->number_of_entries());
- }
- if (num > 0) {
- QuickSort::sort(filtered, num, diff_entries_by_size, false);
- // Now print. Has to happen under lock protection too, since entries may be changed.
- for (int i = 0; i < num; i ++) {
- filtered[i]->print_on(st, human_readable);
- }
- } else {
- st->print_cr("No entries.");
+ if (_the_table != nullptr) {
+ // We sort with quicksort
+ int num = 0;
+ filtered = _the_table->calc_flat_array(num, min_size);
+ if (min_size > 0) {
+ st->print_cr("(%d/%d)", num, _the_table->number_of_entries());
+ }
+ if (num > 0) {
+ QuickSort::sort(filtered, num, diff_entries_by_size, false);
+ // Now print. Has to happen under lock protection too, since entries may be changed.
+ for (int i = 0; i < num; i ++) {
+ filtered[i]->print_on(st, human_readable);
}
} else {
- st->print_cr("Not initialized.");
+ st->print_cr("No entries.");
}
- } // locked
+ } else {
+ st->print_cr("Not initialized.");
+ }
+ st->cr();
FREE_C_HEAP_ARRAY(Entry, filtered);
}
diff --git a/src/hotspot/share/compiler/compilationMemoryStatistic.hpp b/src/hotspot/share/compiler/compilationMemoryStatistic.hpp
index 625875f05f4..d7cccad17fd 100644
--- a/src/hotspot/share/compiler/compilationMemoryStatistic.hpp
+++ b/src/hotspot/share/compiler/compilationMemoryStatistic.hpp
@@ -50,6 +50,7 @@ class ArenaStatCounter : public CHeapObj {
// MemLimit handling
size_t _limit;
bool _hit_limit;
+ bool _limit_in_process;
// Peak composition:
// Size of node arena when total peaked (c2 only)
@@ -86,6 +87,9 @@ public:
size_t limit() const { return _limit; }
bool hit_limit() const { return _hit_limit; }
+ bool limit_in_process() const { return _limit_in_process; }
+ void set_limit_in_process(bool v) { _limit_in_process = v; }
+
};
class CompilationMemoryStatistic : public AllStatic {
diff --git a/src/hotspot/share/compiler/compilationPolicy.cpp b/src/hotspot/share/compiler/compilationPolicy.cpp
index 4fcd9b5bde4..0e6d8f6a6c9 100644
--- a/src/hotspot/share/compiler/compilationPolicy.cpp
+++ b/src/hotspot/share/compiler/compilationPolicy.cpp
@@ -229,7 +229,7 @@ class LoopPredicate : AllStatic {
public:
static bool apply_scaled(const methodHandle& method, CompLevel cur_level, int i, int b, double scale) {
double threshold_scaling;
- if (CompilerOracle::has_option_value(method, CompileCommand::CompileThresholdScaling, threshold_scaling)) {
+ if (CompilerOracle::has_option_value(method, CompileCommandEnum::CompileThresholdScaling, threshold_scaling)) {
scale *= threshold_scaling;
}
switch(cur_level) {
@@ -267,7 +267,7 @@ class CallPredicate : AllStatic {
public:
static bool apply_scaled(const methodHandle& method, CompLevel cur_level, int i, int b, double scale) {
double threshold_scaling;
- if (CompilerOracle::has_option_value(method, CompileCommand::CompileThresholdScaling, threshold_scaling)) {
+ if (CompilerOracle::has_option_value(method, CompileCommandEnum::CompileThresholdScaling, threshold_scaling)) {
scale *= threshold_scaling;
}
switch(cur_level) {
diff --git a/src/hotspot/share/compiler/compileBroker.cpp b/src/hotspot/share/compiler/compileBroker.cpp
index a41718d5831..45b0b331e25 100644
--- a/src/hotspot/share/compiler/compileBroker.cpp
+++ b/src/hotspot/share/compiler/compileBroker.cpp
@@ -38,6 +38,7 @@
#include "compiler/compilerEvent.hpp"
#include "compiler/compilerOracle.hpp"
#include "compiler/directivesParser.hpp"
+#include "gc/shared/memAllocator.hpp"
#include "interpreter/linkResolver.hpp"
#include "jvm.h"
#include "jfr/jfrEvents.hpp"
@@ -1171,13 +1172,11 @@ void CompileBroker::compile_method_base(const methodHandle& method,
tty->cr();
}
- if (compile_reason != CompileTask::Reason_DirectivesChanged) {
- // A request has been made for compilation. Before we do any
- // real work, check to see if the method has been compiled
- // in the meantime with a definitive result.
- if (compilation_is_complete(method, osr_bci, comp_level)) {
- return;
- }
+ // A request has been made for compilation. Before we do any
+ // real work, check to see if the method has been compiled
+ // in the meantime with a definitive result.
+ if (compilation_is_complete(method, osr_bci, comp_level)) {
+ return;
}
#ifndef PRODUCT
@@ -1222,13 +1221,11 @@ void CompileBroker::compile_method_base(const methodHandle& method,
return;
}
- if (compile_reason != CompileTask::Reason_DirectivesChanged) {
- // We need to check again to see if the compilation has
- // completed. A previous compilation may have registered
- // some result.
- if (compilation_is_complete(method, osr_bci, comp_level)) {
- return;
- }
+ // We need to check again to see if the compilation has
+ // completed. A previous compilation may have registered
+ // some result.
+ if (compilation_is_complete(method, osr_bci, comp_level)) {
+ return;
}
// We now know that this compilation is not pending, complete,
@@ -1377,7 +1374,7 @@ nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci,
if (osr_bci == InvocationEntryBci) {
// standard compilation
nmethod* method_code = method->code();
- if (method_code != nullptr && (compile_reason != CompileTask::Reason_DirectivesChanged)) {
+ if (method_code != nullptr) {
if (compilation_is_complete(method, osr_bci, comp_level)) {
return method_code;
}
@@ -1396,6 +1393,7 @@ nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci,
assert(!HAS_PENDING_EXCEPTION, "No exception should be present");
// some prerequisites that are compiler specific
if (comp->is_c2() || comp->is_jvmci()) {
+ InternalOOMEMark iom(THREAD);
method->constants()->resolve_string_constants(CHECK_AND_CLEAR_NONASYNC_NULL);
// Resolve all classes seen in the signature of the method
// we are compiling.
@@ -1549,7 +1547,7 @@ bool CompileBroker::compilation_is_prohibited(const methodHandle& method, int os
// The method may be explicitly excluded by the user.
double scale;
- if (excluded || (CompilerOracle::has_option_value(method, CompileCommand::CompileThresholdScaling, scale) && scale == 0)) {
+ if (excluded || (CompilerOracle::has_option_value(method, CompileCommandEnum::CompileThresholdScaling, scale) && scale == 0)) {
bool quietly = CompilerOracle::be_quiet();
if (PrintCompilation && !quietly) {
// This does not happen quietly...
@@ -1793,7 +1791,7 @@ bool CompileBroker::init_compiler_runtime() {
void CompileBroker::free_buffer_blob_if_allocated(CompilerThread* thread) {
BufferBlob* blob = thread->get_buffer_blob();
if (blob != nullptr) {
- blob->purge(true /* free_code_cache_data */, true /* unregister_nmethod */);
+ blob->purge();
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
CodeCache::free(blob);
}
@@ -2126,7 +2124,8 @@ static void post_compilation_event(EventCompilation& event, CompileTask* task) {
task->is_success(),
task->osr_bci() != CompileBroker::standard_entry_bci,
task->nm_total_size(),
- task->num_inlined_bytecodes());
+ task->num_inlined_bytecodes(),
+ task->arena_bytes());
}
int DirectivesStack::_depth = 0;
diff --git a/src/hotspot/share/compiler/compileTask.cpp b/src/hotspot/share/compiler/compileTask.cpp
index 4690cd5c134..1a2af572166 100644
--- a/src/hotspot/share/compiler/compileTask.cpp
+++ b/src/hotspot/share/compiler/compileTask.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -123,6 +123,7 @@ void CompileTask::initialize(int compile_id,
_nm_total_size = 0;
_failure_reason = nullptr;
_failure_reason_on_C_heap = false;
+ _arena_bytes = 0;
if (LogCompilation) {
if (hot_method.not_null()) {
diff --git a/src/hotspot/share/compiler/compileTask.hpp b/src/hotspot/share/compiler/compileTask.hpp
index fabfb8e0487..37459bd0ff5 100644
--- a/src/hotspot/share/compiler/compileTask.hpp
+++ b/src/hotspot/share/compiler/compileTask.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -62,7 +62,6 @@ class CompileTask : public CHeapObj {
Reason_Whitebox, // Whitebox API
Reason_MustBeCompiled, // Used for -Xcomp or AlwaysCompileLoopMethods (see CompilationPolicy::must_be_compiled())
Reason_Bootstrap, // JVMCI bootstrap
- Reason_DirectivesChanged, // Changed CompilerDirectivesStack
Reason_Count
};
@@ -75,8 +74,7 @@ class CompileTask : public CHeapObj {
"replay",
"whitebox",
"must_be_compiled",
- "bootstrap",
- "directives_changed"
+ "bootstrap"
};
return reason_names[compile_reason];
}
@@ -114,6 +112,7 @@ class CompileTask : public CHeapObj {
const char* _failure_reason;
// Specifies if _failure_reason is on the C heap.
bool _failure_reason_on_C_heap;
+ size_t _arena_bytes; // peak size of temporary memory during compilation (e.g. node arenas)
public:
CompileTask() : _failure_reason(nullptr), _failure_reason_on_C_heap(false) {
@@ -200,6 +199,9 @@ class CompileTask : public CHeapObj