8355617: Remove historical debug_only macro in favor of DEBUG_ONLY

Reviewed-by: stefank, kbarrett, jwaters
Aleksey Shipilev 2025-04-28 08:43:14 +00:00
parent 3140de411b
commit db6fa5923c
119 changed files with 368 additions and 371 deletions
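
Both spellings expand to their argument in debug (ASSERT) builds and to nothing in product builds, so the patch is a mechanical one-for-one rename with no behavioral change. A minimal sketch of the surviving macro, assuming the conventional HotSpot definition (the header path and comments below are illustrative, not quoted from this patch):

// Simplified sketch, assumed from hotspot/share/utilities/macros.hpp:
#ifdef ASSERT
#define DEBUG_ONLY(code) code      // debug builds keep the statement
#else
#define DEBUG_ONLY(code)           // product builds compile it away
#endif

// The retired debug_only(code) expanded the same way, so each hunk below is a rename, e.g.:
//   debug_only(__ should_not_reach_here());   // before
//   DEBUG_ONLY(__ should_not_reach_here());   // after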


@ -69,7 +69,7 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
__ far_call(RuntimeAddress(a));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
DEBUG_ONLY(__ should_not_reach_here());
return;
}
@ -90,7 +90,7 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
__ blr(lr);
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
DEBUG_ONLY(__ should_not_reach_here());
}
PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
@ -103,7 +103,7 @@ void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
__ far_call(RuntimeAddress(a));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
DEBUG_ONLY(__ should_not_reach_here());
}
void DivByZeroStub::emit_code(LIR_Assembler* ce) {
@ -274,7 +274,7 @@ void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
__ far_call(RuntimeAddress(a));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
DEBUG_ONLY(__ should_not_reach_here());
}
@ -289,7 +289,7 @@ void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
}
__ far_call(RuntimeAddress(Runtime1::entry_for(_stub)), rscratch2);
ce->add_call_info_here(_info);
debug_only(__ should_not_reach_here());
DEBUG_ONLY(__ should_not_reach_here());
}


@ -1238,11 +1238,11 @@ encode %{
enc_class save_last_PC %{
// preserve mark
address mark = __ inst_mark();
debug_only(int off0 = __ offset());
DEBUG_ONLY(int off0 = __ offset());
int ret_addr_offset = as_MachCall()->ret_addr_offset();
__ adr(LR, mark + ret_addr_offset);
__ str(LR, Address(Rthread, JavaThread::last_Java_pc_offset()));
debug_only(int off1 = __ offset());
DEBUG_ONLY(int off1 = __ offset());
assert(off1 - off0 == 2 * Assembler::InstructionSize, "correct size prediction");
// restore mark
__ set_inst_mark(mark);
@ -1251,11 +1251,11 @@ encode %{
enc_class preserve_SP %{
// preserve mark
address mark = __ inst_mark();
debug_only(int off0 = __ offset());
DEBUG_ONLY(int off0 = __ offset());
// FP is preserved across all calls, even compiled calls.
// Use it to preserve SP in places where the callee might change the SP.
__ mov(Rmh_SP_save, SP);
debug_only(int off1 = __ offset());
DEBUG_ONLY(int off1 = __ offset());
assert(off1 - off0 == 4, "correct size prediction");
// restore mark
__ set_inst_mark(mark);


@ -59,7 +59,7 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
__ call(Runtime1::entry_for(C1StubId::predicate_failed_trap_id), relocInfo::runtime_call_type);
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
DEBUG_ONLY(__ should_not_reach_here());
return;
}
// Pass the array index on stack because all registers must be preserved
@ -91,7 +91,7 @@ void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
__ call(Runtime1::entry_for(C1StubId::predicate_failed_trap_id), relocInfo::runtime_call_type);
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
DEBUG_ONLY(__ should_not_reach_here());
}
void DivByZeroStub::emit_code(LIR_Assembler* ce) {


@ -72,7 +72,7 @@ void NativeNMethodBarrier::verify() const {
static NativeNMethodBarrier* native_nmethod_barrier(nmethod* nm) {
address barrier_address = nm->code_begin() + nm->frame_complete_offset() - entry_barrier_bytes;
NativeNMethodBarrier* barrier = reinterpret_cast<NativeNMethodBarrier*>(barrier_address);
debug_only(barrier->verify());
DEBUG_ONLY(barrier->verify());
return barrier;
}


@ -74,7 +74,7 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
__ bctrl();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ illtrap());
DEBUG_ONLY(__ illtrap());
return;
}
@ -98,7 +98,7 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
__ bctrl();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ illtrap());
DEBUG_ONLY(__ illtrap());
}
@ -115,7 +115,7 @@ void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
__ bctrl();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ illtrap());
DEBUG_ONLY(__ illtrap());
}
@ -156,7 +156,7 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) {
__ bctrl();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ illtrap());
DEBUG_ONLY(__ illtrap());
}
@ -179,7 +179,7 @@ void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
__ bctrl();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ illtrap());
DEBUG_ONLY(__ illtrap());
}
@ -193,7 +193,7 @@ void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
__ mtctr(R0);
__ bctrl();
ce->add_call_info_here(_info);
debug_only( __ illtrap(); )
DEBUG_ONLY( __ illtrap(); )
}
@ -441,7 +441,7 @@ void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
__ load_const_optimized(R0, _trap_request); // Pass trap request in R0.
__ bctrl();
ce->add_call_info_here(_info);
debug_only(__ illtrap());
DEBUG_ONLY(__ illtrap());
}


@ -189,7 +189,7 @@ LIR_Opr FrameMap::_caller_save_fpu_regs[] = {};
FloatRegister FrameMap::nr2floatreg (int rnr) {
assert(_init_done, "tables not initialized");
debug_only(fpu_range_check(rnr);)
DEBUG_ONLY(fpu_range_check(rnr);)
return _fpu_regs[rnr];
}


@ -108,7 +108,7 @@ static NativeNMethodBarrier* get_nmethod_barrier(nmethod* nm) {
}
auto barrier = reinterpret_cast<NativeNMethodBarrier*>(barrier_address);
debug_only(barrier->verify());
DEBUG_ONLY(barrier->verify());
return barrier;
}


@ -70,7 +70,7 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
__ far_call(RuntimeAddress(a));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
DEBUG_ONLY(__ should_not_reach_here());
return;
}
@ -92,7 +92,7 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
__ rt_call(Runtime1::entry_for(stub_id), ra);
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
DEBUG_ONLY(__ should_not_reach_here());
}
PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
@ -105,7 +105,7 @@ void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
__ far_call(RuntimeAddress(a));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
DEBUG_ONLY(__ should_not_reach_here());
}
void DivByZeroStub::emit_code(LIR_Assembler* ce) {
@ -258,7 +258,7 @@ void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
__ far_call(RuntimeAddress(a));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
DEBUG_ONLY(__ should_not_reach_here());
}
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
@ -272,7 +272,7 @@ void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
}
__ far_call(RuntimeAddress(Runtime1::entry_for(_stub)));
ce->add_call_info_here(_info);
debug_only(__ should_not_reach_here());
DEBUG_ONLY(__ should_not_reach_here());
}
void ArrayCopyStub::emit_code(LIR_Assembler* ce) {


@ -300,7 +300,7 @@ public:
inline NativeGeneralJump* nativeGeneralJump_at(address addr) {
assert_cond(addr != nullptr);
NativeGeneralJump* jump = (NativeGeneralJump*)(addr);
debug_only(jump->verify();)
DEBUG_ONLY(jump->verify();)
return jump;
}


@ -52,7 +52,7 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
CHECK_BAILOUT();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
DEBUG_ONLY(__ should_not_reach_here());
return;
}
@ -74,7 +74,7 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
CHECK_BAILOUT();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
DEBUG_ONLY(__ should_not_reach_here());
}
PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
@ -88,7 +88,7 @@ void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
CHECK_BAILOUT();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
DEBUG_ONLY(__ should_not_reach_here());
}
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
@ -116,7 +116,7 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) {
ce->emit_call_c(Runtime1::entry_for (C1StubId::throw_div0_exception_id));
CHECK_BAILOUT();
ce->add_call_info_here(_info);
debug_only(__ should_not_reach_here());
DEBUG_ONLY(__ should_not_reach_here());
}
void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
@ -134,7 +134,7 @@ void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
CHECK_BAILOUT();
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
DEBUG_ONLY(__ should_not_reach_here());
}
// Note: pass object in Z_R1_scratch
@ -147,7 +147,7 @@ void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
ce->emit_call_c(a);
CHECK_BAILOUT();
ce->add_call_info_here(_info);
debug_only(__ should_not_reach_here());
DEBUG_ONLY(__ should_not_reach_here());
}
NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, C1StubId stub_id) {


@ -144,13 +144,13 @@ LIR_Opr FrameMap::_caller_save_fpu_regs[] = {};
// c1 rnr -> FloatRegister
FloatRegister FrameMap::nr2floatreg (int rnr) {
assert(_init_done, "tables not initialized");
debug_only(fpu_range_check(rnr);)
DEBUG_ONLY(fpu_range_check(rnr);)
return _fpu_rnr2reg[rnr];
}
void FrameMap::map_float_register(int rnr, FloatRegister reg) {
debug_only(fpu_range_check(rnr);)
debug_only(fpu_range_check(reg->encoding());)
DEBUG_ONLY(fpu_range_check(rnr);)
DEBUG_ONLY(fpu_range_check(reg->encoding());)
_fpu_rnr2reg[rnr] = reg; // mapping c1 regnr. -> FloatRegister
_fpu_reg2rnr[reg->encoding()] = rnr; // mapping assembler encoding -> c1 regnr.
}


@ -107,7 +107,7 @@
static int fpu_reg2rnr (FloatRegister reg) {
assert(_init_done, "tables not initialized");
int c1rnr = _fpu_reg2rnr[reg->encoding()];
debug_only(fpu_range_check(c1rnr);)
DEBUG_ONLY(fpu_range_check(c1rnr);)
return c1rnr;
}


@ -40,7 +40,7 @@ class NativeMethodBarrier: public NativeInstruction {
address get_patchable_data_address() const {
address inst_addr = get_barrier_start_address() + PATCHABLE_INSTRUCTION_OFFSET;
debug_only(Assembler::is_z_cfi(*((long*)inst_addr)));
DEBUG_ONLY(Assembler::is_z_cfi(*((long*)inst_addr)));
return inst_addr + 2;
}
@ -91,7 +91,7 @@ static NativeMethodBarrier* get_nmethod_barrier(nmethod* nm) {
address barrier_address = nm->code_begin() + nm->frame_complete_offset() - NativeMethodBarrier::BARRIER_TOTAL_LENGTH;
auto barrier = reinterpret_cast<NativeMethodBarrier*>(barrier_address);
debug_only(barrier->verify());
DEBUG_ONLY(barrier->verify());
return barrier;
}


@ -444,7 +444,7 @@ void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
// Useful if consumed previously by access via stackTop().
void InterpreterMacroAssembler::popx(int len) {
add2reg(Z_esp, len*Interpreter::stackElementSize);
debug_only(verify_esp(Z_esp, Z_R1_scratch));
DEBUG_ONLY(verify_esp(Z_esp, Z_R1_scratch));
}
// Get Address object of stack top. No checks. No pop.
@ -458,38 +458,38 @@ void InterpreterMacroAssembler::pop_i(Register r) {
z_l(r, Interpreter::expr_offset_in_bytes(0), Z_esp);
add2reg(Z_esp, Interpreter::stackElementSize);
assert_different_registers(r, Z_R1_scratch);
debug_only(verify_esp(Z_esp, Z_R1_scratch));
DEBUG_ONLY(verify_esp(Z_esp, Z_R1_scratch));
}
void InterpreterMacroAssembler::pop_ptr(Register r) {
z_lg(r, Interpreter::expr_offset_in_bytes(0), Z_esp);
add2reg(Z_esp, Interpreter::stackElementSize);
assert_different_registers(r, Z_R1_scratch);
debug_only(verify_esp(Z_esp, Z_R1_scratch));
DEBUG_ONLY(verify_esp(Z_esp, Z_R1_scratch));
}
void InterpreterMacroAssembler::pop_l(Register r) {
z_lg(r, Interpreter::expr_offset_in_bytes(0), Z_esp);
add2reg(Z_esp, 2*Interpreter::stackElementSize);
assert_different_registers(r, Z_R1_scratch);
debug_only(verify_esp(Z_esp, Z_R1_scratch));
DEBUG_ONLY(verify_esp(Z_esp, Z_R1_scratch));
}
void InterpreterMacroAssembler::pop_f(FloatRegister f) {
mem2freg_opt(f, Address(Z_esp, Interpreter::expr_offset_in_bytes(0)), false);
add2reg(Z_esp, Interpreter::stackElementSize);
debug_only(verify_esp(Z_esp, Z_R1_scratch));
DEBUG_ONLY(verify_esp(Z_esp, Z_R1_scratch));
}
void InterpreterMacroAssembler::pop_d(FloatRegister f) {
mem2freg_opt(f, Address(Z_esp, Interpreter::expr_offset_in_bytes(0)), true);
add2reg(Z_esp, 2*Interpreter::stackElementSize);
debug_only(verify_esp(Z_esp, Z_R1_scratch));
DEBUG_ONLY(verify_esp(Z_esp, Z_R1_scratch));
}
void InterpreterMacroAssembler::push_i(Register r) {
assert_different_registers(r, Z_R1_scratch);
debug_only(verify_esp(Z_esp, Z_R1_scratch));
DEBUG_ONLY(verify_esp(Z_esp, Z_R1_scratch));
z_st(r, Address(Z_esp));
add2reg(Z_esp, -Interpreter::stackElementSize);
}
@ -501,7 +501,7 @@ void InterpreterMacroAssembler::push_ptr(Register r) {
void InterpreterMacroAssembler::push_l(Register r) {
assert_different_registers(r, Z_R1_scratch);
debug_only(verify_esp(Z_esp, Z_R1_scratch));
DEBUG_ONLY(verify_esp(Z_esp, Z_R1_scratch));
int offset = -Interpreter::stackElementSize;
z_stg(r, Address(Z_esp, offset));
clear_mem(Address(Z_esp), Interpreter::stackElementSize);
@ -509,13 +509,13 @@ void InterpreterMacroAssembler::push_l(Register r) {
}
void InterpreterMacroAssembler::push_f(FloatRegister f) {
debug_only(verify_esp(Z_esp, Z_R1_scratch));
DEBUG_ONLY(verify_esp(Z_esp, Z_R1_scratch));
freg2mem_opt(f, Address(Z_esp), false);
add2reg(Z_esp, -Interpreter::stackElementSize);
}
void InterpreterMacroAssembler::push_d(FloatRegister d) {
debug_only(verify_esp(Z_esp, Z_R1_scratch));
DEBUG_ONLY(verify_esp(Z_esp, Z_R1_scratch));
int offset = -Interpreter::stackElementSize;
freg2mem_opt(d, Address(Z_esp, offset));
add2reg(Z_esp, 2 * offset);


@ -801,7 +801,7 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
address ip = inst;
bool is_64bit = false;
debug_only(bool has_disp32 = false);
DEBUG_ONLY(bool has_disp32 = false);
int tail_size = 0; // other random bytes (#32, #16, etc.) at end of insn
again_after_prefix:
@ -859,7 +859,7 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
case 0x8A: // movb r, a
case 0x8B: // movl r, a
case 0x8F: // popl a
debug_only(has_disp32 = true);
DEBUG_ONLY(has_disp32 = true);
break;
case 0x68: // pushq #32
@ -898,10 +898,10 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
case 0x8B: // movw r, a
case 0x89: // movw a, r
debug_only(has_disp32 = true);
DEBUG_ONLY(has_disp32 = true);
break;
case 0xC7: // movw a, #16
debug_only(has_disp32 = true);
DEBUG_ONLY(has_disp32 = true);
tail_size = 2; // the imm16
break;
case 0x0F: // several SSE/SSE2 variants
@ -923,7 +923,7 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
case 0x69: // imul r, a, #32
case 0xC7: // movl a, #32(oop?)
tail_size = 4;
debug_only(has_disp32 = true); // has both kinds of operands!
DEBUG_ONLY(has_disp32 = true); // has both kinds of operands!
break;
case 0x0F: // movx..., etc.
@ -932,11 +932,11 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
tail_size = 1;
case 0x38: // ptest, pmovzxbw
ip++; // skip opcode
debug_only(has_disp32 = true); // has both kinds of operands!
DEBUG_ONLY(has_disp32 = true); // has both kinds of operands!
break;
case 0x70: // pshufd r, r/a, #8
debug_only(has_disp32 = true); // has both kinds of operands!
DEBUG_ONLY(has_disp32 = true); // has both kinds of operands!
case 0x73: // psrldq r, #8
tail_size = 1;
break;
@ -961,7 +961,7 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
case 0xAE: // ldmxcsr, stmxcsr, fxrstor, fxsave, clflush
case 0xD6: // movq
case 0xFE: // paddd
debug_only(has_disp32 = true);
DEBUG_ONLY(has_disp32 = true);
break;
case 0xAD: // shrd r, a, %cl
@ -976,18 +976,18 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
case 0xC1: // xaddl
case 0xC7: // cmpxchg8
case REP16(0x90): // setcc a
debug_only(has_disp32 = true);
DEBUG_ONLY(has_disp32 = true);
// fall out of the switch to decode the address
break;
case 0xC4: // pinsrw r, a, #8
debug_only(has_disp32 = true);
DEBUG_ONLY(has_disp32 = true);
case 0xC5: // pextrw r, r, #8
tail_size = 1; // the imm8
break;
case 0xAC: // shrd r, a, #8
debug_only(has_disp32 = true);
DEBUG_ONLY(has_disp32 = true);
tail_size = 1; // the imm8
break;
@ -1004,12 +1004,12 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
// also: orl, adcl, sbbl, andl, subl, xorl, cmpl
// on 32bit in the case of cmpl, the imm might be an oop
tail_size = 4;
debug_only(has_disp32 = true); // has both kinds of operands!
DEBUG_ONLY(has_disp32 = true); // has both kinds of operands!
break;
case 0x83: // addl a, #8; addl r, #8
// also: orl, adcl, sbbl, andl, subl, xorl, cmpl
debug_only(has_disp32 = true); // has both kinds of operands!
DEBUG_ONLY(has_disp32 = true); // has both kinds of operands!
tail_size = 1;
break;
@ -1026,7 +1026,7 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
case 0x9B:
switch (0xFF & *ip++) {
case 0xD9: // fnstcw a
debug_only(has_disp32 = true);
DEBUG_ONLY(has_disp32 = true);
break;
default:
ShouldNotReachHere();
@ -1045,7 +1045,7 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
case 0x87: // xchg r, a
case REP4(0x38): // cmp...
case 0x85: // test r, a
debug_only(has_disp32 = true); // has both kinds of operands!
DEBUG_ONLY(has_disp32 = true); // has both kinds of operands!
break;
case 0xA8: // testb rax, #8
@ -1057,7 +1057,7 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
case 0xC6: // movb a, #8
case 0x80: // cmpb a, #8
case 0x6B: // imul r, a, #8
debug_only(has_disp32 = true); // has both kinds of operands!
DEBUG_ONLY(has_disp32 = true); // has both kinds of operands!
tail_size = 1; // the imm8
break;
@ -1109,7 +1109,7 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
break;
}
ip++; // skip opcode
debug_only(has_disp32 = true); // has both kinds of operands!
DEBUG_ONLY(has_disp32 = true); // has both kinds of operands!
break;
case 0x62: // EVEX_4bytes
@ -1135,7 +1135,7 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
break;
}
ip++; // skip opcode
debug_only(has_disp32 = true); // has both kinds of operands!
DEBUG_ONLY(has_disp32 = true); // has both kinds of operands!
break;
case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1
@ -1147,7 +1147,7 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
case 0xD8: // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a
case 0xDC: // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a
case 0xDE: // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a
debug_only(has_disp32 = true);
DEBUG_ONLY(has_disp32 = true);
break;
case 0xE8: // call rdisp32
@ -1184,7 +1184,7 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
default:
ip++;
}
debug_only(has_disp32 = true); // has both kinds of operands!
DEBUG_ONLY(has_disp32 = true); // has both kinds of operands!
break;
default:


@ -68,7 +68,7 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
__ call(RuntimeAddress(a));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
DEBUG_ONLY(__ should_not_reach_here());
return;
}
@ -88,7 +88,7 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
__ call(RuntimeAddress(Runtime1::entry_for(stub_id)));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
DEBUG_ONLY(__ should_not_reach_here());
}
PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
@ -101,7 +101,7 @@ void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
__ call(RuntimeAddress(a));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
DEBUG_ONLY(__ should_not_reach_here());
}
void DivByZeroStub::emit_code(LIR_Assembler* ce) {
@ -111,7 +111,7 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) {
__ bind(_entry);
__ call(RuntimeAddress(Runtime1::entry_for(C1StubId::throw_div0_exception_id)));
ce->add_call_info_here(_info);
debug_only(__ should_not_reach_here());
DEBUG_ONLY(__ should_not_reach_here());
}
@ -399,7 +399,7 @@ void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
__ call(RuntimeAddress(a));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
DEBUG_ONLY(__ should_not_reach_here());
}
@ -413,7 +413,7 @@ void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
}
__ call(RuntimeAddress(Runtime1::entry_for(_stub)));
ce->add_call_info_here(_info);
debug_only(__ should_not_reach_here());
DEBUG_ONLY(__ should_not_reach_here());
}


@ -140,7 +140,7 @@ bool NativeCall::is_displacement_aligned() {
// Used in the runtime linkage of calls; see class CompiledIC.
// (Cf. 4506997 and 4479829, where threads witnessed garbage displacements.)
void NativeCall::set_destination_mt_safe(address dest) {
debug_only(verify());
DEBUG_ONLY(verify());
// Make sure patching code is locked. No two threads can patch at the same
// time but one may be executing this code.
assert(CodeCache_lock->is_locked() || SafepointSynchronize::is_at_safepoint() ||


@ -503,7 +503,7 @@ class NativeGeneralJump: public NativeInstruction {
inline NativeGeneralJump* nativeGeneralJump_at(address address) {
NativeGeneralJump* jump = (NativeGeneralJump*)(address);
debug_only(jump->verify();)
DEBUG_ONLY(jump->verify();)
return jump;
}


@ -1847,14 +1847,14 @@ encode %{
%}
enc_class clear_avx %{
debug_only(int off0 = __ offset());
DEBUG_ONLY(int off0 = __ offset());
if (generate_vzeroupper(Compile::current())) {
// Clear upper bits of YMM registers to avoid AVX <-> SSE transition penalty
// Clear upper bits of YMM registers when current compiled code uses
// wide vectors to avoid AVX <-> SSE transition penalty during call.
__ vzeroupper();
}
debug_only(int off1 = __ offset());
DEBUG_ONLY(int off1 = __ offset());
assert(off1 - off0 == clear_avx_size(), "correct size prediction");
%}


@ -1662,7 +1662,7 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
}
ThreadInVMfromNative tiv(jt);
debug_only(VMNativeEntryWrapper vew;)
DEBUG_ONLY(VMNativeEntryWrapper vew;)
VM_LinuxDllLoad op(filename, ebuf, ebuflen);
VMThread::execute(&op);


@ -147,7 +147,7 @@ public:
};
debug_only(static bool signal_sets_initialized = false);
DEBUG_ONLY(static bool signal_sets_initialized = false);
static sigset_t unblocked_sigs, vm_sigs, preinstalled_sigs;
// Our own signal handlers should never ever get replaced by a third party one.
@ -1547,7 +1547,7 @@ static void signal_sets_init() {
if (!ReduceSignalUsage) {
sigaddset(&vm_sigs, BREAK_SIGNAL);
}
debug_only(signal_sets_initialized = true);
DEBUG_ONLY(signal_sets_initialized = true);
}
// These are signals that are unblocked while a thread is running Java.


@ -73,7 +73,7 @@ class Label;
*/
class Label {
private:
enum { PatchCacheSize = 4 debug_only( +4 ) };
enum { PatchCacheSize = 4 DEBUG_ONLY( +4 ) };
// _loc encodes both the binding state (via its sign)
// and the binding locator (via its value) of a label.


@ -92,7 +92,7 @@ CodeBuffer::CodeBuffer(CodeBlob* blob) DEBUG_ONLY(: Scrubber(this, sizeof(*this)
// Provide code buffer with meaningful name
initialize_misc(blob->name());
initialize(blob->content_begin(), blob->content_size());
debug_only(verify_section_allocation();)
DEBUG_ONLY(verify_section_allocation();)
}
void CodeBuffer::initialize(csize_t code_size, csize_t locs_size) {
@ -120,7 +120,7 @@ void CodeBuffer::initialize(csize_t code_size, csize_t locs_size) {
_insts.initialize_locs(locs_size / sizeof(relocInfo));
}
debug_only(verify_section_allocation();)
DEBUG_ONLY(verify_section_allocation();)
}
@ -494,7 +494,7 @@ void CodeBuffer::compute_final_layout(CodeBuffer* dest) const {
prev_cs = cs;
}
debug_only(dest_cs->_start = nullptr); // defeat double-initialization assert
DEBUG_ONLY(dest_cs->_start = nullptr); // defeat double-initialization assert
dest_cs->initialize(buf+buf_offset, csize);
dest_cs->set_end(buf+buf_offset+csize);
assert(dest_cs->is_allocated(), "must always be allocated");
@ -505,7 +505,7 @@ void CodeBuffer::compute_final_layout(CodeBuffer* dest) const {
// Done calculating sections; did it come out to the right end?
assert(buf_offset == total_content_size(), "sanity");
debug_only(dest->verify_section_allocation();)
DEBUG_ONLY(dest->verify_section_allocation();)
}
// Append an oop reference that keeps the class alive.
@ -939,11 +939,11 @@ void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) {
cb.set_blob(nullptr);
// Zap the old code buffer contents, to avoid mistakenly using them.
debug_only(Copy::fill_to_bytes(bxp->_total_start, bxp->_total_size,
DEBUG_ONLY(Copy::fill_to_bytes(bxp->_total_start, bxp->_total_size,
badCodeHeapFreeVal);)
// Make certain that the new sections are all snugly inside the new blob.
debug_only(verify_section_allocation();)
DEBUG_ONLY(verify_section_allocation();)
#ifndef PRODUCT
_decode_begin = nullptr; // sanity


@ -121,8 +121,8 @@ class CodeSection {
_locs_own = false;
_scratch_emit = false;
_skipped_instructions_size = 0;
debug_only(_index = -1);
debug_only(_outer = (CodeBuffer*)badAddress);
DEBUG_ONLY(_index = -1);
DEBUG_ONLY(_outer = (CodeBuffer*)badAddress);
}
void initialize_outer(CodeBuffer* outer, int8_t index) {
@ -535,7 +535,7 @@ class CodeBuffer: public StackObj DEBUG_ONLY(COMMA private Scrubber) {
assert(code_start != nullptr, "sanity");
initialize_misc("static buffer");
initialize(code_start, code_size);
debug_only(verify_section_allocation();)
DEBUG_ONLY(verify_section_allocation();)
}
// (2) CodeBuffer referring to pre-allocated CodeBlob.


@ -109,19 +109,19 @@ class FrameMap : public CompilationResourceObj {
static Register cpu_rnr2reg (int rnr) {
assert(_init_done, "tables not initialized");
debug_only(cpu_range_check(rnr);)
DEBUG_ONLY(cpu_range_check(rnr);)
return _cpu_rnr2reg[rnr];
}
static int cpu_reg2rnr (Register reg) {
assert(_init_done, "tables not initialized");
debug_only(cpu_range_check(reg->encoding());)
DEBUG_ONLY(cpu_range_check(reg->encoding());)
return _cpu_reg2rnr[reg->encoding()];
}
static void map_register(int rnr, Register reg) {
debug_only(cpu_range_check(rnr);)
debug_only(cpu_range_check(reg->encoding());)
DEBUG_ONLY(cpu_range_check(rnr);)
DEBUG_ONLY(cpu_range_check(reg->encoding());)
_cpu_rnr2reg[rnr] = reg;
_cpu_reg2rnr[reg->encoding()] = rnr;
}


@ -1363,7 +1363,7 @@ int Runtime1::move_klass_patching(JavaThread* current) {
//
// NOTE: we are still in Java
//
debug_only(NoHandleMark nhm;)
DEBUG_ONLY(NoHandleMark nhm;)
{
// Enter VM mode
ResetNoHandleMark rnhm;
@ -1380,7 +1380,7 @@ int Runtime1::move_mirror_patching(JavaThread* current) {
//
// NOTE: we are still in Java
//
debug_only(NoHandleMark nhm;)
DEBUG_ONLY(NoHandleMark nhm;)
{
// Enter VM mode
ResetNoHandleMark rnhm;
@ -1397,7 +1397,7 @@ int Runtime1::move_appendix_patching(JavaThread* current) {
//
// NOTE: we are still in Java
//
debug_only(NoHandleMark nhm;)
DEBUG_ONLY(NoHandleMark nhm;)
{
// Enter VM mode
ResetNoHandleMark rnhm;


@ -2924,7 +2924,7 @@ void ciTypeFlow::flow_types() {
// Continue flow analysis until fixed point reached
debug_only(int max_block = _next_pre_order;)
DEBUG_ONLY(int max_block = _next_pre_order;)
while (!work_list_empty()) {
Block* blk = work_list_next();


@ -253,7 +253,7 @@ public:
set_type_at_tos(type);
}
void pop() {
debug_only(set_type_at_tos(bottom_type()));
DEBUG_ONLY(set_type_at_tos(bottom_type()));
_stack_size--;
}
ciType* pop_value() {


@ -37,7 +37,7 @@
ThreadInVMfromNative __tiv(thread); \
HandleMarkCleaner __hm(thread); \
JavaThread* THREAD = thread; /* For exception macros. */ \
debug_only(VMNativeEntryWrapper __vew;)
DEBUG_ONLY(VMNativeEntryWrapper __vew;)
@ -49,10 +49,10 @@
* [TODO] The NoHandleMark line does nothing but declare a function prototype \
* The NoHandleMark constructor is NOT executed. If the ()'s are \
* removed, causes the NoHandleMark assert to trigger. \
* debug_only(NoHandleMark __hm();) \
* DEBUG_ONLY(NoHandleMark __hm();) \
*/ \
JavaThread* THREAD = thread; /* For exception macros. */ \
debug_only(VMNativeEntryWrapper __vew;)
DEBUG_ONLY(VMNativeEntryWrapper __vew;)
#define EXCEPTION_CONTEXT \


@ -176,7 +176,7 @@ void ClassFileParser::parse_constant_pool_entries(const ClassFileStream* const s
const ClassFileStream cfs1 = *stream;
const ClassFileStream* const cfs = &cfs1;
debug_only(const u1* const old_current = stream->current();)
DEBUG_ONLY(const u1* const old_current = stream->current();)
// Used for batching symbol allocations.
const char* names[SymbolTable::symbol_alloc_batch_size];
@ -5243,7 +5243,7 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik,
// it's official
set_klass(ik);
debug_only(ik->verify();)
DEBUG_ONLY(ik->verify();)
}
void ClassFileParser::update_class_name(Symbol* new_class_name) {


@ -4821,7 +4821,7 @@ bool java_lang_ClassLoader::isAncestor(oop loader, oop cl) {
assert(is_instance(loader), "loader must be oop");
assert(cl == nullptr || is_instance(cl), "cl argument must be oop");
oop acl = loader;
debug_only(jint loop_count = 0);
DEBUG_ONLY(jint loop_count = 0);
// This loop taken verbatim from ClassLoader.java:
do {
acl = parent(acl);


@ -140,7 +140,7 @@ DebugInformationRecorder::DebugInformationRecorder(OopRecorder* oop_recorder)
add_new_pc_offset(PcDesc::lower_offset_limit); // sentinel record
debug_only(_recording_state = rs_null);
DEBUG_ONLY(_recording_state = rs_null);
}
@ -159,7 +159,7 @@ void DebugInformationRecorder::add_safepoint(int pc_offset, OopMap* map) {
add_new_pc_offset(pc_offset);
assert(_recording_state == rs_null, "nesting of recording calls");
debug_only(_recording_state = rs_safepoint);
DEBUG_ONLY(_recording_state = rs_safepoint);
}
void DebugInformationRecorder::add_non_safepoint(int pc_offset) {
@ -169,7 +169,7 @@ void DebugInformationRecorder::add_non_safepoint(int pc_offset) {
add_new_pc_offset(pc_offset);
assert(_recording_state == rs_null, "nesting of recording calls");
debug_only(_recording_state = rs_non_safepoint);
DEBUG_ONLY(_recording_state = rs_non_safepoint);
}
void DebugInformationRecorder::add_new_pc_offset(int pc_offset) {
@ -360,7 +360,7 @@ void DebugInformationRecorder::dump_object_pool(GrowableArray<ScopeValue*>* obje
void DebugInformationRecorder::end_scopes(int pc_offset, bool is_safepoint) {
assert(_recording_state == (is_safepoint? rs_safepoint: rs_non_safepoint),
"nesting of recording calls");
debug_only(_recording_state = rs_null);
DEBUG_ONLY(_recording_state = rs_null);
// Try to compress away an equivalent non-safepoint predecessor.
// (This only works because we have previously recognized redundant
@ -413,13 +413,13 @@ DebugToken* DebugInformationRecorder::create_monitor_values(GrowableArray<Monito
int DebugInformationRecorder::data_size() {
debug_only(mark_recorders_frozen()); // mark it "frozen" for asserts
DEBUG_ONLY(mark_recorders_frozen()); // mark it "frozen" for asserts
return _stream->position();
}
int DebugInformationRecorder::pcs_size() {
debug_only(mark_recorders_frozen()); // mark it "frozen" for asserts
DEBUG_ONLY(mark_recorders_frozen()); // mark it "frozen" for asserts
if (last_pc()->pc_offset() != PcDesc::upper_offset_limit)
add_new_pc_offset(PcDesc::upper_offset_limit);
return _pcs_length * sizeof(PcDesc);


@ -1122,7 +1122,7 @@ nmethod* nmethod::new_native_nmethod(const methodHandle& method,
if (nm != nullptr) {
// verify nmethod
debug_only(nm->verify();) // might block
DEBUG_ONLY(nm->verify();) // might block
nm->log_new_nmethod();
}
@ -1269,7 +1269,7 @@ void nmethod::post_init() {
finalize_relocations();
Universe::heap()->register_nmethod(this);
debug_only(Universe::heap()->verify_nmethod(this));
DEBUG_ONLY(Universe::heap()->verify_nmethod(this));
CodeCache::commit(this);
}
@ -1296,7 +1296,7 @@ nmethod::nmethod(
_native_basic_lock_sp_offset(basic_lock_sp_offset)
{
{
debug_only(NoSafepointVerifier nsv;)
DEBUG_ONLY(NoSafepointVerifier nsv;)
assert_locked_or_safepoint(CodeCache_lock);
init_defaults(code_buffer, offsets);
@ -1437,7 +1437,7 @@ nmethod::nmethod(
{
assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
{
debug_only(NoSafepointVerifier nsv;)
DEBUG_ONLY(NoSafepointVerifier nsv;)
assert_locked_or_safepoint(CodeCache_lock);
init_defaults(code_buffer, offsets);
@ -2802,7 +2802,7 @@ PcDesc* PcDescContainer::find_pc_desc_internal(address pc, bool approximate, add
}
// Take giant steps at first (4096, then 256, then 16, then 1)
const int LOG2_RADIX = 4 /*smaller steps in debug mode:*/ debug_only(-1);
const int LOG2_RADIX = 4 /*smaller steps in debug mode:*/ DEBUG_ONLY(-1);
const int RADIX = (1 << LOG2_RADIX);
for (int step = (1 << (LOG2_RADIX*3)); step > 1; step >>= LOG2_RADIX) {
while ((mid = lower + step) < upper) {


@ -100,7 +100,7 @@ class PcDescCache {
typedef PcDesc* PcDescPtr;
volatile PcDescPtr _pc_descs[cache_size]; // last cache_size pc_descs found
public:
PcDescCache() { debug_only(_pc_descs[0] = nullptr); }
PcDescCache() { DEBUG_ONLY(_pc_descs[0] = nullptr); }
void init_to(PcDesc* initial_pc_desc);
PcDesc* find_pc_desc(int pc_offset, bool approximate);
void add_pc_desc(PcDesc* pc_desc);


@ -122,7 +122,7 @@ template <class T> int ValueRecorder<T>::add_handle(T h, bool make_findable) {
template <class T> int ValueRecorder<T>::maybe_find_index(T h) {
debug_only(_find_index_calls++);
DEBUG_ONLY(_find_index_calls++);
assert(!_complete, "cannot allocate more elements after size query");
maybe_initialize();
if (h == nullptr) return null_index;
@ -134,7 +134,7 @@ template <class T> int ValueRecorder<T>::maybe_find_index(T h) {
return -1; // We know this handle is completely new.
}
if (cindex >= first_index && _handles->at(cindex - first_index) == h) {
debug_only(_hit_indexes++);
DEBUG_ONLY(_hit_indexes++);
return cindex;
}
if (!_indexes->cache_location_collision(cloc)) {
@ -151,7 +151,7 @@ template <class T> int ValueRecorder<T>::maybe_find_index(T h) {
if (cloc != nullptr) {
_indexes->set_cache_location_index(cloc, findex);
}
debug_only(_missed_indexes++);
DEBUG_ONLY(_missed_indexes++);
return findex;
}
}


@ -78,7 +78,7 @@ relocInfo* relocInfo::finish_prefix(short* prefix_limit) {
assert(prefix_limit >= p, "must be a valid span of data");
int plen = checked_cast<int>(prefix_limit - p);
if (plen == 0) {
debug_only(_value = 0xFFFF);
DEBUG_ONLY(_value = 0xFFFF);
return this; // no data: remove self completely
}
if (plen == 1 && fits_into_immediate(p[0])) {
@ -342,7 +342,7 @@ address Relocation::old_addr_for(address newa,
address Relocation::new_addr_for(address olda,
const CodeBuffer* src, CodeBuffer* dest) {
debug_only(const CodeBuffer* src0 = src);
DEBUG_ONLY(const CodeBuffer* src0 = src);
int sect = CodeBuffer::SECT_NONE;
// Look for olda in the source buffer, and all previous incarnations
// if the source buffer has been expanded.


@ -574,7 +574,7 @@ class RelocIterator : public StackObj {
void set_has_current(bool b) {
_datalen = !b ? -1 : 0;
debug_only(_data = nullptr);
DEBUG_ONLY(_data = nullptr);
}
void set_current(relocInfo& ri) {
_current = &ri;


@ -176,14 +176,14 @@ void StubQueue::commit(int committed_code_size) {
_queue_end += committed_size;
_number_of_stubs++;
if (_mutex != nullptr) _mutex->unlock();
debug_only(stub_verify(s);)
DEBUG_ONLY(stub_verify(s);)
}
void StubQueue::remove_first() {
if (number_of_stubs() == 0) return;
Stub* s = first();
debug_only(stub_verify(s);)
DEBUG_ONLY(stub_verify(s);)
stub_finalize(s);
_queue_begin += stub_size(s);
assert(_queue_begin <= _buffer_limit, "sanity check");
@ -210,7 +210,7 @@ void StubQueue::remove_first(int n) {
void StubQueue::remove_all(){
debug_only(verify();)
DEBUG_ONLY(verify();)
remove_first(number_of_stubs());
assert(number_of_stubs() == 0, "sanity check");
}


@ -326,7 +326,7 @@ void OopMap::set_xxx(VMReg reg, OopMapValue::oop_types x, VMReg optional) {
assert(reg->value() < _locs_length, "too big reg value for stack size");
assert( _locs_used[reg->value()] == OopMapValue::unused_value, "cannot insert twice" );
debug_only( _locs_used[reg->value()] = x; )
DEBUG_ONLY( _locs_used[reg->value()] = x; )
OopMapValue o(reg, x, optional);
o.write_on(write_stream());
@ -511,7 +511,7 @@ void ImmutableOopMap::update_register_map(const frame *fr, RegisterMap *reg_map)
// Any reg might be saved by a safepoint handler (see generate_handler_blob).
assert( reg_map->_update_for_id == nullptr || fr->is_older(reg_map->_update_for_id),
"already updated this map; do not 'update' it twice!" );
debug_only(reg_map->_update_for_id = fr->id());
DEBUG_ONLY(reg_map->_update_for_id = fr->id());
// Check if caller must update oop argument
assert((reg_map->include_argument_oops() ||


@ -162,7 +162,7 @@ class OopMap: public ResourceObj {
bool _has_derived_oops;
CompressedWriteStream* _write_stream;
debug_only( OopMapValue::oop_types* _locs_used; int _locs_length;)
DEBUG_ONLY( OopMapValue::oop_types* _locs_used; int _locs_length;)
// Accessors
int omv_count() const { return _omv_count; }


@ -121,7 +121,7 @@ void ObjectStartArray::update_for_block_work(HeapWord* blk_start,
assert(start_entry_for_region > end_entry, "Sanity check");
}
debug_only(verify_for_block(blk_start, blk_end);)
DEBUG_ONLY(verify_for_block(blk_start, blk_end);)
}
void ObjectStartArray::verify_for_block(HeapWord* blk_start, HeapWord* blk_end) const {


@ -1629,7 +1629,7 @@ void PSParallelCompact::forward_to_new_addr() {
} task(nworkers);
ParallelScavengeHeap::heap()->workers().run_task(&task);
debug_only(verify_forward();)
DEBUG_ONLY(verify_forward();)
}
#ifdef ASSERT


@ -45,7 +45,7 @@ void PSPromotionLAB::initialize(MemRegion lab) {
// We can be initialized to a zero size!
if (free() > 0) {
if (ZapUnusedHeapArea) {
debug_only(Copy::fill_to_words(top(), free()/HeapWordSize, badHeapWord));
DEBUG_ONLY(Copy::fill_to_words(top(), free()/HeapWordSize, badHeapWord));
}
// NOTE! We need to allow space for a filler object.


@ -55,7 +55,7 @@ class PSPromotionLAB : public CHeapObj<mtGC> {
void set_end(HeapWord* value) { _end = value; }
// The shared initialize code invokes this.
debug_only(virtual bool lab_is_valid(MemRegion lab) { return false; });
DEBUG_ONLY(virtual bool lab_is_valid(MemRegion lab) { return false; });
PSPromotionLAB() : _top(nullptr), _bottom(nullptr), _end(nullptr), _state(zero_size) { }
@ -95,7 +95,7 @@ class PSYoungPromotionLAB : public PSPromotionLAB {
// Not MT safe
inline HeapWord* allocate(size_t size);
debug_only(virtual bool lab_is_valid(MemRegion lab);)
DEBUG_ONLY(virtual bool lab_is_valid(MemRegion lab);)
};
class PSOldPromotionLAB : public PSPromotionLAB {
@ -127,7 +127,7 @@ class PSOldPromotionLAB : public PSPromotionLAB {
return nullptr;
}
debug_only(virtual bool lab_is_valid(MemRegion lab));
DEBUG_ONLY(virtual bool lab_is_valid(MemRegion lab));
};
#endif // SHARE_GC_PARALLEL_PSPROMOTIONLAB_HPP


@ -155,7 +155,7 @@ void SerialBlockOffsetTable::update_for_block_work(HeapWord* blk_start,
assert(start_card_for_region > end_card, "Sanity check");
}
debug_only(verify_for_block(blk_start, blk_end);)
DEBUG_ONLY(verify_for_block(blk_start, blk_end);)
}
HeapWord* SerialBlockOffsetTable::block_start_reaching_into_card(const void* addr) const {


@ -179,7 +179,7 @@ protected:
virtual void trace_heap(GCWhen::Type when, const GCTracer* tracer);
// Verification functions
debug_only(static void check_for_valid_allocation_state();)
DEBUG_ONLY(static void check_for_valid_allocation_state();)
public:
enum Name {


@ -82,7 +82,7 @@ void HSpaceCounters::update_all(size_t capacity, size_t used) {
update_used(used);
}
debug_only(
DEBUG_ONLY(
// for security reasons, we do not allow arbitrary reads from
// the counters as they may live in shared memory.
jlong HSpaceCounters::used() {


@ -56,7 +56,7 @@ class HSpaceCounters: public CHeapObj<mtGC> {
void update_all(size_t capacity, size_t used);
debug_only(
DEBUG_ONLY(
// for security reasons, we do not allow arbitrary reads from
// the counters as they may live in shared memory.
jlong used();


@ -145,7 +145,7 @@ void MemAllocator::Allocation::verify_before() {
// not take out a lock if from tlab, so clear here.
JavaThread* THREAD = _thread; // For exception macros.
assert(!HAS_PENDING_EXCEPTION, "Should not allocate with exception pending");
debug_only(check_for_valid_allocation_state());
DEBUG_ONLY(check_for_valid_allocation_state());
assert(!Universe::heap()->is_stw_gc_active(), "Allocation during GC pause not allowed");
}
@ -327,7 +327,7 @@ HeapWord* MemAllocator::mem_allocate(Allocation& allocation) const {
}
// Allocation of an oop can always invoke a safepoint.
debug_only(allocation._thread->check_for_valid_safepoint_state());
DEBUG_ONLY(allocation._thread->check_for_valid_safepoint_state());
if (UseTLAB) {
// Try refilling the TLAB and allocating the object in it.


@ -131,13 +131,13 @@ bool ScavengableNMethods::has_scavengable_oops(nmethod* nm) {
void ScavengableNMethods::nmethods_do_and_prune(NMethodToOopClosure* cl) {
assert_locked_or_safepoint(CodeCache_lock);
debug_only(mark_on_list_nmethods());
DEBUG_ONLY(mark_on_list_nmethods());
nmethod* prev = nullptr;
nmethod* cur = _head;
while (cur != nullptr) {
ScavengableNMethodsData data = gc_data(cur);
debug_only(data.clear_marked());
DEBUG_ONLY(data.clear_marked());
assert(data.on_list(), "else shouldn't be on this list");
if (cl != nullptr) {
@ -156,7 +156,7 @@ void ScavengableNMethods::nmethods_do_and_prune(NMethodToOopClosure* cl) {
}
// Check for stray marks.
debug_only(verify_nmethods());
DEBUG_ONLY(verify_nmethods());
}
void ScavengableNMethods::prune_nmethods_not_into_young() {
@ -166,13 +166,13 @@ void ScavengableNMethods::prune_nmethods_not_into_young() {
void ScavengableNMethods::prune_unlinked_nmethods() {
assert_locked_or_safepoint(CodeCache_lock);
debug_only(mark_on_list_nmethods());
DEBUG_ONLY(mark_on_list_nmethods());
nmethod* prev = nullptr;
nmethod* cur = _head;
while (cur != nullptr) {
ScavengableNMethodsData data = gc_data(cur);
debug_only(data.clear_marked());
DEBUG_ONLY(data.clear_marked());
assert(data.on_list(), "else shouldn't be on this list");
nmethod* const next = data.next();
@ -187,7 +187,7 @@ void ScavengableNMethods::prune_unlinked_nmethods() {
}
// Check for stray marks.
debug_only(verify_nmethods());
DEBUG_ONLY(verify_nmethods());
}
// Walk the list of methods which might contain oops to the java heap.


@ -896,7 +896,7 @@ void ShenandoahBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCo
uint gc_state_idx = Compile::AliasIdxRaw;
const TypePtr* gc_state_adr_type = nullptr; // debug-mode-only argument
debug_only(gc_state_adr_type = phase->C->get_adr_type(gc_state_idx));
DEBUG_ONLY(gc_state_adr_type = phase->C->get_adr_type(gc_state_idx));
Node* gc_state = phase->transform_later(new LoadBNode(ctrl, mem, gc_state_addr, gc_state_adr_type, TypeInt::BYTE, MemNode::unordered));
Node* stable_and = phase->transform_later(new AndINode(gc_state, phase->igvn().intcon(ShenandoahHeap::HAS_FORWARDED)));


@ -190,11 +190,11 @@ void ShenandoahCollectionSet::print_on(outputStream* out) const {
byte_size_in_proper_unit(live()), proper_unit_for_byte_size(live()),
byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used()));
debug_only(size_t regions = 0;)
DEBUG_ONLY(size_t regions = 0;)
for (size_t index = 0; index < _heap->num_regions(); index ++) {
if (is_in(index)) {
_heap->get_region(index)->print_on(out);
debug_only(regions ++;)
DEBUG_ONLY(regions ++;)
}
}
assert(regions == count(), "Must match");


@ -309,14 +309,14 @@ private:
volatile jint _claimed_index;
shenandoah_padding(1);
debug_only(uint _reserved; )
DEBUG_ONLY(uint _reserved; )
public:
using GenericTaskQueueSet<T, MT>::size;
public:
ParallelClaimableQueueSet(int n) : GenericTaskQueueSet<T, MT>(n), _claimed_index(0) {
debug_only(_reserved = 0; )
DEBUG_ONLY(_reserved = 0; )
}
void clear_claimed() { _claimed_index = 0; }
@ -326,10 +326,10 @@ public:
void reserve(uint n) {
assert(n <= size(), "Sanity");
_claimed_index = (jint)n;
debug_only(_reserved = n;)
DEBUG_ONLY(_reserved = n;)
}
debug_only(uint get_reserved() const { return (uint)_reserved; })
DEBUG_ONLY(uint get_reserved() const { return (uint)_reserved; })
};
template <class T, MemTag MT>


@ -311,7 +311,7 @@ void OopMapCacheEntry::deallocate_bit_mask() {
assert(!Thread::current()->resource_area()->contains((void*)_bit_mask[0]),
"This bit mask should not be in the resource area");
FREE_C_HEAP_ARRAY(uintptr_t, _bit_mask[0]);
debug_only(_bit_mask[0] = 0;)
DEBUG_ONLY(_bit_mask[0] = 0;)
}
}


@ -504,7 +504,7 @@ void JfrCheckpointManager::begin_epoch_shift() {
void JfrCheckpointManager::end_epoch_shift() {
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
debug_only(const u1 current_epoch = JfrTraceIdEpoch::current();)
DEBUG_ONLY(const u1 current_epoch = JfrTraceIdEpoch::current();)
JfrTraceIdEpoch::end_epoch_shift();
assert(current_epoch != JfrTraceIdEpoch::current(), "invariant");
JfrStringPool::on_epoch_shift();


@ -398,7 +398,7 @@ static void assert_flush_large_precondition(ConstBufferPtr cur, const u1* const
#endif // ASSERT
BufferPtr JfrStorage::flush(BufferPtr cur, size_t used, size_t req, bool native, Thread* t) {
debug_only(assert_flush_precondition(cur, used, native, t);)
DEBUG_ONLY(assert_flush_precondition(cur, used, native, t);)
const u1* const cur_pos = cur->pos();
req += used;
// requested size now encompass the outstanding used size
@ -407,7 +407,7 @@ BufferPtr JfrStorage::flush(BufferPtr cur, size_t used, size_t req, bool native,
}
BufferPtr JfrStorage::flush_regular(BufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
debug_only(assert_flush_regular_precondition(cur, cur_pos, used, req, t);)
DEBUG_ONLY(assert_flush_regular_precondition(cur, cur_pos, used, req, t);)
// A flush is needed before memmove since a non-large buffer is thread stable
// (thread local). The flush will not modify memory in addresses above pos()
// which is where the "used / uncommitted" data resides. It is therefore both
@ -450,7 +450,7 @@ static BufferPtr restore_shelved_buffer(bool native, Thread* t) {
}
BufferPtr JfrStorage::flush_large(BufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
debug_only(assert_flush_large_precondition(cur, cur_pos, used, req, native, t);)
DEBUG_ONLY(assert_flush_large_precondition(cur, cur_pos, used, req, native, t);)
// Can the "regular" buffer (now shelved) accommodate the requested size?
BufferPtr shelved = t->jfr_thread_local()->shelved_buffer();
assert(shelved != nullptr, "invariant");
@ -480,7 +480,7 @@ static BufferPtr large_fail(BufferPtr cur, bool native, JfrStorage& storage_inst
// even though it might be smaller than the requested size.
// Caller needs to ensure if the size was successfully accommodated.
BufferPtr JfrStorage::provision_large(BufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
debug_only(assert_provision_large_precondition(cur, used, req, t);)
DEBUG_ONLY(assert_provision_large_precondition(cur, used, req, t);)
assert(t->jfr_thread_local()->shelved_buffer() != nullptr, "invariant");
BufferPtr const buffer = acquire_large(req, t);
if (buffer == nullptr) {


@ -83,7 +83,7 @@ static void hook_memory_allocation(const char* allocation, size_t alloc_size) {
vm_exit_out_of_memory(alloc_size, OOM_MALLOC_ERROR, "AllocateHeap");
}
}
debug_only(add(alloc_size));
DEBUG_ONLY(add(alloc_size));
}
void JfrCHeapObj::on_memory_allocation(const void* allocation, size_t size) {
@ -111,12 +111,12 @@ void* JfrCHeapObj::operator new [](size_t size, const std::nothrow_t& nothrow_c
}
void JfrCHeapObj::operator delete(void* p, size_t size) {
debug_only(hook_memory_deallocation(size);)
DEBUG_ONLY(hook_memory_deallocation(size);)
CHeapObj<mtTracing>::operator delete(p);
}
void JfrCHeapObj::operator delete[](void* p, size_t size) {
debug_only(hook_memory_deallocation(size);)
DEBUG_ONLY(hook_memory_deallocation(size);)
CHeapObj<mtTracing>::operator delete[](p);
}
@ -127,7 +127,7 @@ char* JfrCHeapObj::realloc_array(char* old, size_t size) {
}
void JfrCHeapObj::free(void* p, size_t size) {
debug_only(hook_memory_deallocation(size);)
DEBUG_ONLY(hook_memory_deallocation(size);)
FreeHeap(p);
}


@ -205,7 +205,7 @@ void JfrDoublyLinkedList<T>::append_list(T* const head_node, T* const tail_node,
}
*lt = tail_node;
const T* node = head_node;
debug_only(validate_count_param(node, count);)
DEBUG_ONLY(validate_count_param(node, count);)
_count += count;
assert(tail() == tail_node, "invariant");
assert(in_list(tail_node), "not in list error");


@ -45,7 +45,7 @@ class ExclusiveAccessAssert {
template <typename Adapter, typename AP, typename AccessAssert = ExclusiveAccessAssert>
class MemoryWriterHost : public StorageHost<Adapter, AP> {
debug_only(AccessAssert _access;)
DEBUG_ONLY(AccessAssert _access;)
public:
typedef typename Adapter::StorageType StorageType;
protected:
@ -53,7 +53,7 @@ class MemoryWriterHost : public StorageHost<Adapter, AP> {
MemoryWriterHost(StorageType* storage, Thread* thread);
MemoryWriterHost(StorageType* storage, size_t size);
MemoryWriterHost(Thread* thread);
debug_only(bool is_acquired() const;)
DEBUG_ONLY(bool is_acquired() const;)
public:
void acquire();
void release();


@ -52,18 +52,18 @@ inline MemoryWriterHost<Adapter, AP, AccessAssert>::MemoryWriterHost(Thread* thr
template <typename Adapter, typename AP, typename AccessAssert>
inline void MemoryWriterHost<Adapter, AP, AccessAssert>::acquire() {
debug_only(_access.acquire();)
DEBUG_ONLY(_access.acquire();)
if (!this->is_valid()) {
this->flush();
}
debug_only(is_acquired();)
DEBUG_ONLY(is_acquired();)
}
template <typename Adapter, typename AP, typename AccessAssert>
inline void MemoryWriterHost<Adapter, AP, AccessAssert>::release() {
debug_only(is_acquired();)
DEBUG_ONLY(is_acquired();)
StorageHost<Adapter, AP>::release();
debug_only(_access.release();)
DEBUG_ONLY(_access.release();)
}
#ifdef ASSERT


@ -173,7 +173,7 @@ Handle JavaArgumentUnboxer::next_arg(BasicType expectedType) {
ThreadInVMfromNative __tiv(thread); \
HandleMarkCleaner __hm(thread); \
JavaThread* THREAD = thread; \
debug_only(VMNativeEntryWrapper __vew;)
DEBUG_ONLY(VMNativeEntryWrapper __vew;)
// Native method block that transitions current thread to '_thread_in_vm'.
// Note: CompilerThreadCanCallJava must precede JVMCIENV_FROM_JNI so that


@ -59,17 +59,17 @@ class ConfigurationLock : public StackObj {
private:
// Semaphore used as lock
static Semaphore _semaphore;
debug_only(static intx _locking_thread_id;)
DEBUG_ONLY(static intx _locking_thread_id;)
public:
ConfigurationLock() {
_semaphore.wait();
debug_only(_locking_thread_id = os::current_thread_id());
DEBUG_ONLY(_locking_thread_id = os::current_thread_id());
}
~ConfigurationLock() {
debug_only(_locking_thread_id = -1);
DEBUG_ONLY(_locking_thread_id = -1);
_semaphore.signal();
}
debug_only(static bool current_thread_has_lock();)
DEBUG_ONLY(static bool current_thread_has_lock();)
};
Semaphore ConfigurationLock::_semaphore(1);


@ -165,8 +165,8 @@ uintx Universe::_the_array_interfaces_bitmap = 0;
uintx Universe::_the_empty_klass_bitmap = 0;
// These variables are guarded by FullGCALot_lock.
debug_only(OopHandle Universe::_fullgc_alot_dummy_array;)
debug_only(int Universe::_fullgc_alot_dummy_next = 0;)
DEBUG_ONLY(OopHandle Universe::_fullgc_alot_dummy_array;)
DEBUG_ONLY(int Universe::_fullgc_alot_dummy_next = 0;)
// Heap
int Universe::_verify_count = 0;


@ -117,8 +117,8 @@ class Universe: AllStatic {
static intptr_t _non_oop_bits;
// array of dummy objects used with +FullGCAlot
debug_only(static OopHandle _fullgc_alot_dummy_array;)
debug_only(static int _fullgc_alot_dummy_next;)
DEBUG_ONLY(static OopHandle _fullgc_alot_dummy_array;)
DEBUG_ONLY(static int _fullgc_alot_dummy_next;)
// Compiler/dispatch support
static int _base_vtable_size; // Java vtbl size of klass Object (in words)
@ -357,7 +357,7 @@ class Universe: AllStatic {
// Change the number of dummy objects kept reachable by the full gc dummy
// array; this should trigger relocation in a sliding compaction collector.
debug_only(static bool release_fullgc_alot_dummy();)
DEBUG_ONLY(static bool release_fullgc_alot_dummy();)
// The non-oop pattern (see compiledIC.hpp, etc)
static void* non_oop_word();
static bool contains_non_oop_word(void* p);


@ -200,7 +200,7 @@ static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre
return true;
}
debug_only(warning(
DEBUG_ONLY(warning(
"INFO: os::commit_memory(" PTR_FORMAT ", " PTR_FORMAT
" size=%zu, executable=%d) failed",
p2i(start), p2i(start + size), size, executable);)
@ -371,7 +371,7 @@ void VirtualSpace::shrink_by(size_t size) {
aligned_upper_new_high + upper_needs <= upper_high_boundary(),
"must not shrink beyond region");
if (!os::uncommit_memory(aligned_upper_new_high, upper_needs, _executable)) {
debug_only(warning("os::uncommit_memory failed"));
DEBUG_ONLY(warning("os::uncommit_memory failed"));
return;
} else {
_upper_high -= upper_needs;
@ -382,7 +382,7 @@ void VirtualSpace::shrink_by(size_t size) {
aligned_middle_new_high + middle_needs <= middle_high_boundary(),
"must not shrink beyond region");
if (!os::uncommit_memory(aligned_middle_new_high, middle_needs, _executable)) {
debug_only(warning("os::uncommit_memory failed"));
DEBUG_ONLY(warning("os::uncommit_memory failed"));
return;
} else {
_middle_high -= middle_needs;
@ -393,7 +393,7 @@ void VirtualSpace::shrink_by(size_t size) {
aligned_lower_new_high + lower_needs <= lower_high_boundary(),
"must not shrink beyond region");
if (!os::uncommit_memory(aligned_lower_new_high, lower_needs, _executable)) {
debug_only(warning("os::uncommit_memory failed"));
DEBUG_ONLY(warning("os::uncommit_memory failed"));
return;
} else {
_lower_high -= lower_needs;


@ -76,7 +76,7 @@ class ConstantPoolCache: public MetaspaceObj {
Array<ResolvedMethodEntry>* _resolved_method_entries;
// Sizing
debug_only(friend class ClassVerifier;)
DEBUG_ONLY(friend class ClassVerifier;)
public:
// specific but definitions for ldc


@ -2027,7 +2027,7 @@ void GenerateOopMap::ret_jump_targets_do(BytecodeStream *bcs, jmpFct_t jmpFct, i
int target_bci = rtEnt->jsrs(i);
// Make sure a jrtRet does not set the changed bit for dead basicblock.
BasicBlock* jsr_bb = get_basic_block_containing(target_bci - 1);
debug_only(BasicBlock* target_bb = &jsr_bb[1];)
DEBUG_ONLY(BasicBlock* target_bb = &jsr_bb[1];)
assert(target_bb == get_basic_block_at(target_bci), "wrong calc. of successor basicblock");
bool alive = jsr_bb->is_alive();
if (TraceNewOopMapGeneration) {


@ -1321,7 +1321,7 @@ void InstanceKlass::initialize_impl(TRAPS) {
// Step 9
if (!HAS_PENDING_EXCEPTION) {
set_initialization_state_and_notify(fully_initialized, CHECK);
debug_only(vtable().verify(tty, true);)
DEBUG_ONLY(vtable().verify(tty, true);)
}
else {
// Step 10 and 11
@ -4191,7 +4191,7 @@ JNIid::JNIid(Klass* holder, int offset, JNIid* next) {
_holder = holder;
_offset = offset;
_next = next;
debug_only(_is_static_field_id = false;)
DEBUG_ONLY(_is_static_field_id = false;)
}


@ -71,10 +71,10 @@ void InstanceRefKlass::update_nonstatic_oop_maps(Klass* k) {
InstanceKlass* ik = InstanceKlass::cast(k);
// Check that we have the right class
debug_only(static bool first_time = true);
DEBUG_ONLY(static bool first_time = true);
assert(k == vmClasses::Reference_klass() && first_time,
"Invalid update of maps");
debug_only(first_time = false);
DEBUG_ONLY(first_time = false);
assert(ik->nonstatic_oop_map_count() == 1, "just checking");
OopMapBlock* map = ik->start_of_nonstatic_oop_maps();


@ -678,7 +678,7 @@ void Klass::append_to_sibling_list() {
if (Universe::is_fully_initialized()) {
assert_locked_or_safepoint(Compile_lock);
}
debug_only(verify();)
DEBUG_ONLY(verify();)
// add ourselves to superklass' subklass list
InstanceKlass* super = superklass();
if (super == nullptr) return; // special case: class Object
@ -703,7 +703,7 @@ void Klass::append_to_sibling_list() {
return;
}
}
debug_only(verify();)
DEBUG_ONLY(verify();)
}
void Klass::clean_subklass() {


@ -1256,7 +1256,7 @@ address Method::make_adapters(const methodHandle& mh, TRAPS) {
// or adapter that it points to is still live and valid.
// This function must not hit a safepoint!
address Method::verified_code_entry() {
debug_only(NoSafepointVerifier nsv;)
DEBUG_ONLY(NoSafepointVerifier nsv;)
assert(_from_compiled_entry != nullptr, "must be set");
return _from_compiled_entry;
}

View File

@ -42,7 +42,7 @@ void Block_Array::grow( uint i ) {
if (i < Max()) {
return; // No need to grow
}
debug_only(_limit = i+1);
DEBUG_ONLY(_limit = i+1);
if( i < _size ) return;
if( !_size ) {
_size = 1;

View File

@ -48,7 +48,7 @@ struct Tarjan;
// allocation I do not need a destructor to reclaim storage.
class Block_Array : public ArenaObj {
uint _size; // allocated size, as opposed to formal limit
debug_only(uint _limit;) // limit to formal domain
DEBUG_ONLY(uint _limit;) // limit to formal domain
Arena *_arena; // Arena to allocate in
ReallocMark _nesting; // Safety checks for arena reallocation
protected:
@ -57,7 +57,7 @@ protected:
public:
Block_Array(Arena *a) : _size(OptoBlockListSize), _arena(a) {
debug_only(_limit=0);
DEBUG_ONLY(_limit=0);
_blocks = NEW_ARENA_ARRAY( a, Block *, OptoBlockListSize );
for( int i = 0; i < OptoBlockListSize; i++ ) {
_blocks[i] = nullptr;
@ -69,7 +69,7 @@ public:
{ assert( i < Max(), "oob" ); return _blocks[i]; }
// Extend the mapping: index i maps to Block *n.
void map( uint i, Block *n ) { grow(i); _blocks[i] = n; }
uint Max() const { debug_only(return _limit); return _size; }
uint Max() const { DEBUG_ONLY(return _limit); return _size; }
};

View File

@ -191,7 +191,7 @@ void OopFlow::clone( OopFlow *flow, int max_size ) {
OopFlow *OopFlow::make( Arena *A, int max_size, Compile* C ) {
short *callees = NEW_ARENA_ARRAY(A,short,max_size+1);
Node **defs = NEW_ARENA_ARRAY(A,Node*,max_size+1);
debug_only( memset(defs,0,(max_size+1)*sizeof(Node*)) );
DEBUG_ONLY( memset(defs,0,(max_size+1)*sizeof(Node*)) );
OopFlow *flow = new (A) OopFlow(callees+1, defs+1, C);
assert( &flow->_callees[OptoReg::Bad] == callees, "Ok to index at OptoReg::Bad" );
assert( &flow->_defs [OptoReg::Bad] == defs , "Ok to index at OptoReg::Bad" );
@ -209,7 +209,7 @@ static void clr_live_bit( int *live, int reg ) {
OopMap *OopFlow::build_oop_map( Node *n, int max_reg, PhaseRegAlloc *regalloc, int* live ) {
int framesize = regalloc->_framesize;
int max_inarg_slot = OptoReg::reg2stack(regalloc->_matcher._new_SP);
debug_only( char *dup_check = NEW_RESOURCE_ARRAY(char,OptoReg::stack0());
DEBUG_ONLY( char *dup_check = NEW_RESOURCE_ARRAY(char,OptoReg::stack0());
memset(dup_check,0,OptoReg::stack0()) );
OopMap *omap = new OopMap( framesize, max_inarg_slot );
@ -351,7 +351,7 @@ OopMap *OopFlow::build_oop_map( Node *n, int max_reg, PhaseRegAlloc *regalloc, i
} else if( OptoReg::is_valid(_callees[reg])) { // callee-save?
// It's a callee-save value
assert( dup_check[_callees[reg]]==0, "trying to callee save same reg twice" );
debug_only( dup_check[_callees[reg]]=1; )
DEBUG_ONLY( dup_check[_callees[reg]]=1; )
VMReg callee = OptoReg::as_VMReg(OptoReg::Name(_callees[reg]));
omap->set_callee_saved(r, callee);

View File

@ -266,8 +266,8 @@ JVMState::JVMState(ciMethod* method, JVMState* caller) :
assert(method != nullptr, "must be valid call site");
_bci = InvocationEntryBci;
_reexecute = Reexecute_Undefined;
debug_only(_bci = -99); // random garbage value
debug_only(_map = (SafePointNode*)-1);
DEBUG_ONLY(_bci = -99); // random garbage value
DEBUG_ONLY(_map = (SafePointNode*)-1);
_caller = caller;
_depth = 1 + (caller == nullptr ? 0 : caller->depth());
_locoff = TypeFunc::Parms;
@ -281,7 +281,7 @@ JVMState::JVMState(int stack_size) :
_method(nullptr) {
_bci = InvocationEntryBci;
_reexecute = Reexecute_Undefined;
debug_only(_map = (SafePointNode*)-1);
DEBUG_ONLY(_map = (SafePointNode*)-1);
_caller = nullptr;
_depth = 1;
_locoff = TypeFunc::Parms;
@ -323,14 +323,14 @@ bool JVMState::same_calls_as(const JVMState* that) const {
//------------------------------debug_start------------------------------------
uint JVMState::debug_start() const {
debug_only(JVMState* jvmroot = of_depth(1));
DEBUG_ONLY(JVMState* jvmroot = of_depth(1));
assert(jvmroot->locoff() <= this->locoff(), "youngest JVMState must be last");
return of_depth(1)->locoff();
}
//-------------------------------debug_end-------------------------------------
uint JVMState::debug_end() const {
debug_only(JVMState* jvmroot = of_depth(1));
DEBUG_ONLY(JVMState* jvmroot = of_depth(1));
assert(jvmroot->endoff() <= this->endoff(), "youngest JVMState must be last");
return endoff();
}
@ -1465,7 +1465,7 @@ void SafePointNode::push_monitor(const FastLockNode *lock) {
void SafePointNode::pop_monitor() {
// Delete last monitor from debug info
debug_only(int num_before_pop = jvms()->nof_monitors());
DEBUG_ONLY(int num_before_pop = jvms()->nof_monitors());
const int MonitorEdges = 2;
assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
int scloff = jvms()->scloff();

View File

@ -2233,7 +2233,7 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
}
// One unique input.
debug_only(Node* ident = Identity(phase));
DEBUG_ONLY(Node* ident = Identity(phase));
// The unique input must eventually be detected by the Identity call.
#ifdef ASSERT
if (ident != uin && !ident->is_top() && !must_wait_for_region_in_irreducible_loop(phase)) {

View File

@ -1323,7 +1323,7 @@ void PhaseChaitin::Simplify( ) {
bool bound = lrgs(lo_score)._is_bound;
// Find cheapest guy
debug_only( int lo_no_simplify=0; );
DEBUG_ONLY( int lo_no_simplify=0; );
for (uint i = _hi_degree; i; i = lrgs(i)._next) {
assert(!_ifg->_yanked->test(i), "");
// It's just vaguely possible to move hi-degree to lo-degree without
@ -1335,7 +1335,7 @@ void PhaseChaitin::Simplify( ) {
lo_score = i;
break;
}
debug_only( if( lrgs(i)._was_lo ) lo_no_simplify=i; );
DEBUG_ONLY( if( lrgs(i)._was_lo ) lo_no_simplify=i; );
double iscore = lrgs(i).score();
double iarea = lrgs(i)._area;
double icost = lrgs(i)._cost;
@ -1577,7 +1577,7 @@ uint PhaseChaitin::Select( ) {
// Remove neighbor colors
IndexSet *s = _ifg->neighbors(lidx);
debug_only(RegMask orig_mask = lrg->mask();)
DEBUG_ONLY(RegMask orig_mask = lrg->mask();)
if (!s->is_empty()) {
IndexSetIterator elements(s);
@ -1706,8 +1706,8 @@ uint PhaseChaitin::Select( ) {
ttyLocker ttyl;
tty->print("L%d spilling with neighbors: ", lidx);
s->dump();
debug_only(tty->print(" original mask: "));
debug_only(orig_mask.dump());
DEBUG_ONLY(tty->print(" original mask: "));
DEBUG_ONLY(orig_mask.dump());
dump_lrg(lidx);
}
#endif

View File

@ -82,11 +82,11 @@ public:
// set makes it not valid.
void set_degree( uint degree ) {
_eff_degree = degree;
debug_only(_degree_valid = 1;)
DEBUG_ONLY(_degree_valid = 1;)
assert(!_mask.is_AllStack() || (_mask.is_AllStack() && lo_degree()), "_eff_degree can't be bigger than AllStack_size - _num_regs if the mask supports stack registers");
}
// Made a change that hammered degree
void invalid_degree() { debug_only(_degree_valid=0;) }
void invalid_degree() { DEBUG_ONLY(_degree_valid=0;) }
// Incrementally modify degree. If it was correct, it should remain correct
void inc_degree( uint mod ) {
_eff_degree += mod;
@ -128,15 +128,15 @@ public:
// count of bits in the current mask.
int get_invalid_mask_size() const { return _mask_size; }
const RegMask &mask() const { return _mask; }
void set_mask( const RegMask &rm ) { _mask = rm; debug_only(_msize_valid=0;)}
void AND( const RegMask &rm ) { _mask.AND(rm); debug_only(_msize_valid=0;)}
void SUBTRACT( const RegMask &rm ) { _mask.SUBTRACT(rm); debug_only(_msize_valid=0;)}
void Clear() { _mask.Clear() ; debug_only(_msize_valid=1); _mask_size = 0; }
void Set_All() { _mask.Set_All(); debug_only(_msize_valid=1); _mask_size = RegMask::CHUNK_SIZE; }
void set_mask( const RegMask &rm ) { _mask = rm; DEBUG_ONLY(_msize_valid=0;)}
void AND( const RegMask &rm ) { _mask.AND(rm); DEBUG_ONLY(_msize_valid=0;)}
void SUBTRACT( const RegMask &rm ) { _mask.SUBTRACT(rm); DEBUG_ONLY(_msize_valid=0;)}
void Clear() { _mask.Clear() ; DEBUG_ONLY(_msize_valid=1); _mask_size = 0; }
void Set_All() { _mask.Set_All(); DEBUG_ONLY(_msize_valid=1); _mask_size = RegMask::CHUNK_SIZE; }
void Insert( OptoReg::Name reg ) { _mask.Insert(reg); debug_only(_msize_valid=0;) }
void Remove( OptoReg::Name reg ) { _mask.Remove(reg); debug_only(_msize_valid=0;) }
void clear_to_sets() { _mask.clear_to_sets(_num_regs); debug_only(_msize_valid=0;) }
void Insert( OptoReg::Name reg ) { _mask.Insert(reg); DEBUG_ONLY(_msize_valid=0;) }
void Remove( OptoReg::Name reg ) { _mask.Remove(reg); DEBUG_ONLY(_msize_valid=0;) }
void clear_to_sets() { _mask.clear_to_sets(_num_regs); DEBUG_ONLY(_msize_valid=0;) }
private:
// Number of registers this live range uses when it colors

View File

@ -474,7 +474,7 @@ void Compile::disconnect_useless_nodes(Unique_Node_List& useful, Unique_Node_Lis
remove_useless_late_inlines( &_string_late_inlines, useful);
remove_useless_late_inlines( &_boxing_late_inlines, useful);
remove_useless_late_inlines(&_vector_reboxing_late_inlines, useful);
debug_only(verify_graph_edges(true /*check for no_dead_code*/, root_and_safepoints);)
DEBUG_ONLY(verify_graph_edges(true /*check for no_dead_code*/, root_and_safepoints);)
}
// ============================================================================

View File

@ -4567,7 +4567,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist,
}
}
} else {
debug_only(n->dump();)
DEBUG_ONLY(n->dump();)
assert(false, "EA: unexpected node");
continue;
}

View File

@ -72,8 +72,8 @@ GraphKit::GraphKit()
{
_exceptions = nullptr;
set_map(nullptr);
debug_only(_sp = -99);
debug_only(set_bci(-99));
DEBUG_ONLY(_sp = -99);
DEBUG_ONLY(set_bci(-99));
}
@ -196,7 +196,7 @@ bool GraphKit::has_exception_handler() {
void GraphKit::set_saved_ex_oop(SafePointNode* ex_map, Node* ex_oop) {
assert(!has_saved_ex_oop(ex_map), "clear ex-oop before setting again");
ex_map->add_req(ex_oop);
debug_only(verify_exception_state(ex_map));
DEBUG_ONLY(verify_exception_state(ex_map));
}
inline static Node* common_saved_ex_oop(SafePointNode* ex_map, bool clear_it) {
@ -296,7 +296,7 @@ JVMState* GraphKit::transfer_exceptions_into_jvms() {
_map = clone_map();
_map->set_next_exception(nullptr);
clear_saved_ex_oop(_map);
debug_only(verify_map());
DEBUG_ONLY(verify_map());
} else {
// ...or created from scratch
JVMState* jvms = new (C) JVMState(_method, nullptr);
@ -672,7 +672,7 @@ ciInstance* GraphKit::builtin_throw_exception(Deoptimization::DeoptReason reason
//----------------------------PreserveJVMState---------------------------------
PreserveJVMState::PreserveJVMState(GraphKit* kit, bool clone_map) {
debug_only(kit->verify_map());
DEBUG_ONLY(kit->verify_map());
_kit = kit;
_map = kit->map(); // preserve the map
_sp = kit->sp();
@ -780,7 +780,7 @@ void GraphKit::set_map_clone(SafePointNode* m) {
_map = m;
_map = clone_map();
_map->set_next_exception(nullptr);
debug_only(verify_map());
DEBUG_ONLY(verify_map());
}
@ -1537,7 +1537,7 @@ Node* GraphKit::memory(uint alias_idx) {
Node* GraphKit::reset_memory() {
Node* mem = map()->memory();
// do not use this node for any more parsing!
debug_only( map()->set_memory((Node*)nullptr) );
DEBUG_ONLY( map()->set_memory((Node*)nullptr) );
return _gvn.transform( mem );
}
@ -1574,7 +1574,7 @@ Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
int adr_idx = C->get_alias_index(_gvn.type(adr)->isa_ptr());
assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
const TypePtr* adr_type = nullptr; // debug-mode-only argument
debug_only(adr_type = C->get_adr_type(adr_idx));
DEBUG_ONLY(adr_type = C->get_adr_type(adr_idx));
Node* mem = memory(adr_idx);
Node* ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, require_atomic_access, unaligned, mismatched, unsafe, barrier_data);
ld = _gvn.transform(ld);
@ -1602,7 +1602,7 @@ Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
int adr_idx = C->get_alias_index(_gvn.type(adr)->isa_ptr());
assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
const TypePtr* adr_type = nullptr;
debug_only(adr_type = C->get_adr_type(adr_idx));
DEBUG_ONLY(adr_type = C->get_adr_type(adr_idx));
Node *mem = memory(adr_idx);
Node* st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo, require_atomic_access);
if (unaligned) {

View File

@ -145,7 +145,7 @@ class GraphKit : public Phase {
_sp = jvms->sp();
_bci = jvms->bci();
_method = jvms->has_method() ? jvms->method() : nullptr; }
void set_map(SafePointNode* m) { _map = m; debug_only(verify_map()); }
void set_map(SafePointNode* m) { _map = m; DEBUG_ONLY(verify_map()); }
void set_sp(int sp) { assert(sp >= 0, "sp must be non-negative: %d", sp); _sp = sp; }
void clean_stack(int from_sp); // clear garbage beyond from_sp to top
@ -226,14 +226,14 @@ class GraphKit : public Phase {
if (ex_map != nullptr) {
_exceptions = ex_map->next_exception();
ex_map->set_next_exception(nullptr);
debug_only(verify_exception_state(ex_map));
DEBUG_ONLY(verify_exception_state(ex_map));
}
return ex_map;
}
// Add an exception, using the given JVM state, without commoning.
void push_exception_state(SafePointNode* ex_map) {
debug_only(verify_exception_state(ex_map));
DEBUG_ONLY(verify_exception_state(ex_map));
ex_map->set_next_exception(_exceptions);
_exceptions = ex_map;
}

View File

@ -354,7 +354,7 @@ Node* IdealKit::load(Node* ctl,
assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
const TypePtr* adr_type = nullptr; // debug-mode-only argument
debug_only(adr_type = C->get_adr_type(adr_idx));
DEBUG_ONLY(adr_type = C->get_adr_type(adr_idx));
Node* mem = memory(adr_idx);
Node* ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, require_atomic_access);
return transform(ld);
@ -366,7 +366,7 @@ Node* IdealKit::store(Node* ctl, Node* adr, Node *val, BasicType bt,
bool mismatched) {
assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory");
const TypePtr* adr_type = nullptr;
debug_only(adr_type = C->get_adr_type(adr_idx));
DEBUG_ONLY(adr_type = C->get_adr_type(adr_idx));
Node *mem = memory(adr_idx);
Node* st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo, require_atomic_access);
if (mismatched) {

View File

@ -121,7 +121,7 @@ IndexSet::BitBlock *IndexSet::alloc_block_containing(uint element) {
// Add a BitBlock to the free list.
void IndexSet::free_block(uint i) {
debug_only(check_watch("free block", i));
DEBUG_ONLY(check_watch("free block", i));
assert(i < _max_blocks, "block index too large");
BitBlock *block = _blocks[i];
assert(block != &_empty_block, "cannot free the empty block");

View File

@ -359,7 +359,7 @@ void PhaseIdealLoop::insert_loop_limit_check_predicate(ParsePredicateSuccessProj
// for this loop
if (TraceLoopLimitCheck) {
tty->print_cr("Counted Loop Limit Check generated:");
debug_only( bol->dump(2); )
DEBUG_ONLY( bol->dump(2); )
}
#endif
}

View File

@ -225,7 +225,7 @@ static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_me
if (!ClearArrayNode::step_through(&mem, alloc->_idx, phase)) {
// Can not bypass initialization of the instance
// we are looking.
debug_only(intptr_t offset;)
DEBUG_ONLY(intptr_t offset;)
assert(alloc == AllocateNode::Ideal_allocation(mem->in(3), phase, offset), "sanity");
InitializeNode* init = alloc->as_Allocate()->initialization();
// We are looking for stored value, return Initialize node
@ -1328,7 +1328,7 @@ void PhaseMacroExpand::expand_allocate_common(
// No initial test, just fall into next case
assert(allocation_has_use || !expand_fast_path, "Should already have been handled");
toobig_false = ctrl;
debug_only(slow_region = NodeSentinel);
DEBUG_ONLY(slow_region = NodeSentinel);
}
// If we are here there are several possibilities

View File

@ -128,7 +128,7 @@ Matcher::Matcher()
idealreg2mhdebugmask[Op_RegFlags] = nullptr;
idealreg2mhdebugmask[Op_RegVectMask] = nullptr;
debug_only(_mem_node = nullptr;) // Ideal memory node consumed by mach node
DEBUG_ONLY(_mem_node = nullptr;) // Ideal memory node consumed by mach node
}
//------------------------------warp_incoming_stk_arg------------------------
@ -1184,7 +1184,7 @@ Node *Matcher::xform( Node *n, int max_stack ) {
n->_idx);
C->set_node_notes_at(m->_idx, nn);
}
debug_only(match_alias_type(C, n, m));
DEBUG_ONLY(match_alias_type(C, n, m));
}
n = m; // n is now a new-space node
mstack.set_node(n);
@ -1591,7 +1591,7 @@ MachNode *Matcher::match_tree( const Node *n ) {
}
}
debug_only( _mem_node = save_mem_node; )
DEBUG_ONLY( _mem_node = save_mem_node; )
return m;
}
@ -1965,9 +1965,9 @@ void Matcher::ReduceInst_Chain_Rule(State* s, int rule, Node* &mem, MachNode* ma
assert(newrule >= _LAST_MACH_OPER, "Do NOT chain from internal operand");
mach->_opnds[1] = s->MachOperGenerator(_reduceOp[catch_op]);
Node *mem1 = (Node*)1;
debug_only(Node *save_mem_node = _mem_node;)
DEBUG_ONLY(Node *save_mem_node = _mem_node;)
mach->add_req( ReduceInst(s, newrule, mem1) );
debug_only(_mem_node = save_mem_node;)
DEBUG_ONLY(_mem_node = save_mem_node;)
}
return;
}
@ -1979,7 +1979,7 @@ uint Matcher::ReduceInst_Interior( State *s, int rule, Node *&mem, MachNode *mac
if( s->_leaf->is_Load() ) {
Node *mem2 = s->_leaf->in(MemNode::Memory);
assert( mem == (Node*)1 || mem == mem2, "multiple Memories being matched at once?" );
debug_only( if( mem == (Node*)1 ) _mem_node = s->_leaf;)
DEBUG_ONLY( if( mem == (Node*)1 ) _mem_node = s->_leaf;)
mem = mem2;
}
if( s->_leaf->in(0) != nullptr && s->_leaf->req() > 1) {
@ -2023,9 +2023,9 @@ uint Matcher::ReduceInst_Interior( State *s, int rule, Node *&mem, MachNode *mac
// --> ReduceInst( newrule )
mach->_opnds[num_opnds++] = s->MachOperGenerator(_reduceOp[catch_op]);
Node *mem1 = (Node*)1;
debug_only(Node *save_mem_node = _mem_node;)
DEBUG_ONLY(Node *save_mem_node = _mem_node;)
mach->add_req( ReduceInst( newstate, newrule, mem1 ) );
debug_only(_mem_node = save_mem_node;)
DEBUG_ONLY(_mem_node = save_mem_node;)
}
}
assert( mach->_opnds[num_opnds-1], "" );
@ -2056,7 +2056,7 @@ void Matcher::ReduceOper( State *s, int rule, Node *&mem, MachNode *mach ) {
if( s->_leaf->is_Load() ) {
assert( mem == (Node*)1, "multiple Memories being matched at once?" );
mem = s->_leaf->in(MemNode::Memory);
debug_only(_mem_node = s->_leaf;)
DEBUG_ONLY(_mem_node = s->_leaf;)
}
handle_precedence_edges(s->_leaf, mach);
@ -2085,9 +2085,9 @@ void Matcher::ReduceOper( State *s, int rule, Node *&mem, MachNode *mach ) {
// Reduce the instruction, and add a direct pointer from this
// machine instruction to the newly reduced one.
Node *mem1 = (Node*)1;
debug_only(Node *save_mem_node = _mem_node;)
DEBUG_ONLY(Node *save_mem_node = _mem_node;)
mach->add_req( ReduceInst( kid, newrule, mem1 ) );
debug_only(_mem_node = save_mem_node;)
DEBUG_ONLY(_mem_node = save_mem_node;)
}
}
}

View File

@ -71,7 +71,7 @@ protected:
_unsafe_access(false),
_barrier_data(0) {
init_class_id(Class_Mem);
debug_only(_adr_type=at; adr_type();)
DEBUG_ONLY(_adr_type=at; adr_type();)
}
MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 ) :
Node(c0,c1,c2,c3),
@ -80,7 +80,7 @@ protected:
_unsafe_access(false),
_barrier_data(0) {
init_class_id(Class_Mem);
debug_only(_adr_type=at; adr_type();)
DEBUG_ONLY(_adr_type=at; adr_type();)
}
MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4) :
Node(c0,c1,c2,c3,c4),
@ -89,7 +89,7 @@ protected:
_unsafe_access(false),
_barrier_data(0) {
init_class_id(Class_Mem);
debug_only(_adr_type=at; adr_type();)
DEBUG_ONLY(_adr_type=at; adr_type();)
}
virtual Node* find_previous_arraycopy(PhaseValues* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const { return nullptr; }
@ -273,7 +273,7 @@ public:
// Following method is copied from TypeNode:
void set_type(const Type* t) {
assert(t != nullptr, "sanity");
debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
DEBUG_ONLY(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
*(const Type**)&_type = t; // cast away const-ness
// If this node is in the hash table, make sure it doesn't need a rehash.
assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
@ -1497,7 +1497,7 @@ class MergeMemStream : public StackObj {
MergeMemStream(MergeMemNode* mm) {
mm->iteration_setup();
init(mm);
debug_only(_cnt2 = 999);
DEBUG_ONLY(_cnt2 = 999);
}
// iterate in parallel over two merges
// only iterates through non-empty elements of mm2

View File

@ -71,7 +71,7 @@ public:
// Optimistic setting. Need additional checks in Node::is_dead_loop_safe().
if (con != TypeFunc::Memory || src->is_Start())
init_flags(Flag_is_dead_loop_safe);
debug_only(check_con());
DEBUG_ONLY(check_con());
}
const uint _con; // The field in the tuple we are projecting
const bool _is_io_use; // Used to distinguish between the projections

View File

@ -322,7 +322,7 @@ Node::Node(uint req)
#endif
{
assert( req < Compile::current()->max_node_limit() - NodeLimitFudgeFactor, "Input limit exceeded" );
debug_only( verify_construction() );
DEBUG_ONLY( verify_construction() );
NOT_PRODUCT(nodes_created++);
if (req == 0) {
_in = nullptr;
@ -341,7 +341,7 @@ Node::Node(Node *n0)
, _parse_idx(_idx)
#endif
{
debug_only( verify_construction() );
DEBUG_ONLY( verify_construction() );
NOT_PRODUCT(nodes_created++);
assert( is_not_dead(n0), "can not use dead node");
_in[0] = n0; if (n0 != nullptr) n0->add_out((Node *)this);
@ -354,7 +354,7 @@ Node::Node(Node *n0, Node *n1)
, _parse_idx(_idx)
#endif
{
debug_only( verify_construction() );
DEBUG_ONLY( verify_construction() );
NOT_PRODUCT(nodes_created++);
assert( is_not_dead(n0), "can not use dead node");
assert( is_not_dead(n1), "can not use dead node");
@ -369,7 +369,7 @@ Node::Node(Node *n0, Node *n1, Node *n2)
, _parse_idx(_idx)
#endif
{
debug_only( verify_construction() );
DEBUG_ONLY( verify_construction() );
NOT_PRODUCT(nodes_created++);
assert( is_not_dead(n0), "can not use dead node");
assert( is_not_dead(n1), "can not use dead node");
@ -386,7 +386,7 @@ Node::Node(Node *n0, Node *n1, Node *n2, Node *n3)
, _parse_idx(_idx)
#endif
{
debug_only( verify_construction() );
DEBUG_ONLY( verify_construction() );
NOT_PRODUCT(nodes_created++);
assert( is_not_dead(n0), "can not use dead node");
assert( is_not_dead(n1), "can not use dead node");
@ -405,7 +405,7 @@ Node::Node(Node *n0, Node *n1, Node *n2, Node *n3, Node *n4)
, _parse_idx(_idx)
#endif
{
debug_only( verify_construction() );
DEBUG_ONLY( verify_construction() );
NOT_PRODUCT(nodes_created++);
assert( is_not_dead(n0), "can not use dead node");
assert( is_not_dead(n1), "can not use dead node");
@ -427,7 +427,7 @@ Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
, _parse_idx(_idx)
#endif
{
debug_only( verify_construction() );
DEBUG_ONLY( verify_construction() );
NOT_PRODUCT(nodes_created++);
assert( is_not_dead(n0), "can not use dead node");
assert( is_not_dead(n1), "can not use dead node");
@ -451,7 +451,7 @@ Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
, _parse_idx(_idx)
#endif
{
debug_only( verify_construction() );
DEBUG_ONLY( verify_construction() );
NOT_PRODUCT(nodes_created++);
assert( is_not_dead(n0), "can not use dead node");
assert( is_not_dead(n1), "can not use dead node");
@ -489,7 +489,7 @@ Node *Node::clone() const {
n->_outcnt = 0;
n->_outmax = 0;
// Unlock this guy, since he is not in any hash table.
debug_only(n->_hash_lock = 0);
DEBUG_ONLY(n->_hash_lock = 0);
// Walk the old node's input list to duplicate its edges
uint i;
for( i = 0; i < len(); i++ ) {
@ -525,11 +525,11 @@ Node *Node::clone() const {
n->set_idx(C->next_unique()); // Get new unique index as well
NOT_PRODUCT(n->_igv_idx = C->next_igv_idx());
debug_only( n->verify_construction() );
DEBUG_ONLY( n->verify_construction() );
NOT_PRODUCT(nodes_created++);
// Do not patch over the debug_idx of a clone, because it makes it
// impossible to break on the clone's moment of creation.
//debug_only( n->set_debug_idx( debug_idx() ) );
//DEBUG_ONLY( n->set_debug_idx( debug_idx() ) );
C->copy_node_notes_to(n, (Node*) this);
@ -942,7 +942,7 @@ void Node::disconnect_inputs(Compile* C) {
#endif
// Node::destruct requires all out edges be deleted first
// debug_only(destruct();) // no reuse benefit expected
// DEBUG_ONLY(destruct();) // no reuse benefit expected
C->record_dead_node(_idx);
}

View File

@ -427,11 +427,11 @@ public:
assert(_outcnt > 0,"oob");
#if OPTO_DU_ITERATOR_ASSERT
// Record that a change happened here.
debug_only(_last_del = _out[i]; ++_del_tick);
DEBUG_ONLY(_last_del = _out[i]; ++_del_tick);
#endif
_out[i] = _out[--_outcnt];
// Smash the old edge so it can't be used accidentally.
debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
DEBUG_ONLY(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
}
#ifdef ASSERT
@ -533,10 +533,10 @@ private:
} while (*--outp != n);
*outp = _out[--_outcnt];
// Smash the old edge so it can't be used accidentally.
debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
DEBUG_ONLY(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
// Record that a change happened here.
#if OPTO_DU_ITERATOR_ASSERT
debug_only(_last_del = n; ++_del_tick);
DEBUG_ONLY(_last_del = n; ++_del_tick);
#endif
}
// Close gap after removing edge.
@ -593,7 +593,7 @@ public:
}
// Swap input edge order. (Edge indexes i1 and i2 are usually 1 and 2.)
void swap_edges(uint i1, uint i2) {
debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
DEBUG_ONLY(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
// Def-Use info is unchanged
Node* n1 = in(i1);
Node* n2 = in(i2);
@ -1431,15 +1431,15 @@ class DUIterator : public DUIterator_Common {
#endif
DUIterator(const Node* node, int dummy_to_avoid_conversion)
{ _idx = 0; debug_only(sample(node)); }
{ _idx = 0; DEBUG_ONLY(sample(node)); }
public:
// initialize to garbage; clear _vdui to disable asserts
DUIterator()
{ /*initialize to garbage*/ debug_only(_vdui = false); }
{ /*initialize to garbage*/ DEBUG_ONLY(_vdui = false); }
DUIterator(const DUIterator& that)
{ _idx = that._idx; debug_only(_vdui = false; reset(that)); }
{ _idx = that._idx; DEBUG_ONLY(_vdui = false; reset(that)); }
void operator++(int dummy_to_specify_postfix_op)
{ _idx++; VDUI_ONLY(verify_increment()); }
@ -1451,7 +1451,7 @@ class DUIterator : public DUIterator_Common {
{ VDUI_ONLY(verify_finish()); }
void operator=(const DUIterator& that)
{ _idx = that._idx; debug_only(reset(that)); }
{ _idx = that._idx; DEBUG_ONLY(reset(that)); }
};
DUIterator Node::outs() const
@ -1461,7 +1461,7 @@ DUIterator& Node::refresh_out_pos(DUIterator& i) const
bool Node::has_out(DUIterator& i) const
{ I_VDUI_ONLY(i, i.verify(this,true));return i._idx < _outcnt; }
Node* Node::out(DUIterator& i) const
{ I_VDUI_ONLY(i, i.verify(this)); return debug_only(i._last=) _out[i._idx]; }
{ I_VDUI_ONLY(i, i.verify(this)); return DEBUG_ONLY(i._last=) _out[i._idx]; }
// Faster DU iterator. Disallows insertions into the out array.
@ -1496,15 +1496,15 @@ class DUIterator_Fast : public DUIterator_Common {
// Note: offset must be signed, since -1 is sometimes passed
DUIterator_Fast(const Node* node, ptrdiff_t offset)
{ _outp = node->_out + offset; debug_only(sample(node)); }
{ _outp = node->_out + offset; DEBUG_ONLY(sample(node)); }
public:
// initialize to garbage; clear _vdui to disable asserts
DUIterator_Fast()
{ /*initialize to garbage*/ debug_only(_vdui = false); }
{ /*initialize to garbage*/ DEBUG_ONLY(_vdui = false); }
DUIterator_Fast(const DUIterator_Fast& that)
{ _outp = that._outp; debug_only(_vdui = false; reset(that)); }
{ _outp = that._outp; DEBUG_ONLY(_vdui = false; reset(that)); }
void operator++(int dummy_to_specify_postfix_op)
{ _outp++; VDUI_ONLY(verify(_node, true)); }
@ -1522,7 +1522,7 @@ class DUIterator_Fast : public DUIterator_Common {
}
void operator=(const DUIterator_Fast& that)
{ _outp = that._outp; debug_only(reset(that)); }
{ _outp = that._outp; DEBUG_ONLY(reset(that)); }
};
DUIterator_Fast Node::fast_outs(DUIterator_Fast& imax) const {
@ -1533,7 +1533,7 @@ DUIterator_Fast Node::fast_outs(DUIterator_Fast& imax) const {
}
Node* Node::fast_out(DUIterator_Fast& i) const {
I_VDUI_ONLY(i, i.verify(this));
return debug_only(i._last=) *i._outp;
return DEBUG_ONLY(i._last=) *i._outp;
}
@ -1591,7 +1591,7 @@ DUIterator_Last Node::last_outs(DUIterator_Last& imin) const {
}
Node* Node::last_out(DUIterator_Last& i) const {
I_VDUI_ONLY(i, i.verify(this));
return debug_only(i._last=) *i._outp;
return DEBUG_ONLY(i._last=) *i._outp;
}
#endif //OPTO_DU_ITERATOR_ASSERT
@ -2035,7 +2035,7 @@ protected:
public:
void set_type(const Type* t) {
assert(t != nullptr, "sanity");
debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
DEBUG_ONLY(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
*(const Type**)&_type = t; // cast away const-ness
// If this node is in the hash table, make sure it doesn't need a rehash.
assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
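
The node.hpp hunks above also show the macro in expression position, e.g. `return DEBUG_ONLY(i._last=) *i._outp;`, where it wraps a partial expression rather than a whole statement. The following hedged sketch shows why that still works after the rename, again assuming the conventional ASSERT-gated expansion; the iterator type here is a simplified stand-in, not the real `DUIterator_Fast`.

```cpp
// Sketch only: simplified stand-in for the DU iterator pattern above.
#ifdef ASSERT
  #define DEBUG_ONLY(code) code
#else
  #define DEBUG_ONLY(code)
#endif

class Node;

struct FastIter {
  Node** _outp;                  // cursor into the out-edge array
#ifdef ASSERT
  Node*  _last;                  // debug-only record of the last value returned
#endif
};

inline Node* fast_out(FastIter& i) {
  // Debug build:   return i._last = *i._outp;   (value is also recorded)
  // Product build: return *i._outp;             (macro expands to nothing)
  return DEBUG_ONLY(i._last =) *i._outp;
}
```

Since the expansion is the bare token sequence in debug builds and empty in product builds, the rename is safe even in these mid-expression uses.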

View File

@ -2971,7 +2971,7 @@ void Scheduling::anti_do_def( Block *b, Node *def, OptoReg::Name def_reg, int is
}
Node *kill = def; // Rename 'def' to more descriptive 'kill'
debug_only( def = (Node*)((intptr_t)0xdeadbeef); )
DEBUG_ONLY( def = (Node*)((intptr_t)0xdeadbeef); )
// After some number of kills there _may_ be a later def
Node *later_def = nullptr;

View File

@ -1835,10 +1835,10 @@ void Parse::merge_common(Parse::Block* target, int pnum) {
// Now _gvn will join that with the meet of current inputs.
// BOTTOM is never permissible here, 'cause pessimistically
// Phis of pointers cannot lose the basic pointer type.
debug_only(const Type* bt1 = phi->bottom_type());
DEBUG_ONLY(const Type* bt1 = phi->bottom_type());
assert(bt1 != Type::BOTTOM, "should not be building conflict phis");
map()->set_req(j, _gvn.transform(phi));
debug_only(const Type* bt2 = phi->bottom_type());
DEBUG_ONLY(const Type* bt2 = phi->bottom_type());
assert(bt2->higher_equal_speculative(bt1), "must be consistent with type-flow");
record_for_igvn(phi);
}
@ -1936,7 +1936,7 @@ void Parse::ensure_phis_everywhere() {
// Ensure a phi on all currently known memories.
for (MergeMemStream mms(merged_memory()); mms.next_non_empty(); ) {
ensure_memory_phi(mms.alias_idx());
debug_only(mms.set_memory()); // keep the iterator happy
DEBUG_ONLY(mms.set_memory()); // keep the iterator happy
}
// Note: This is our only chance to create phis for memory slices.

View File

@ -123,7 +123,7 @@ Node *NodeHash::hash_find_insert( Node *n ) {
if( !k ) { // ?Miss?
NOT_PRODUCT( _lookup_misses++ );
_table[key] = n; // Insert into table!
debug_only(n->enter_hash_lock()); // Lock down the node while in the table.
DEBUG_ONLY(n->enter_hash_lock()); // Lock down the node while in the table.
check_grow(); // Grow table if insert hit limit
return nullptr; // Miss!
}
@ -152,7 +152,7 @@ Node *NodeHash::hash_find_insert( Node *n ) {
NOT_PRODUCT( _lookup_misses++ );
key = (first_sentinel == 0) ? key : first_sentinel; // ?saw sentinel?
_table[key] = n; // Insert into table!
debug_only(n->enter_hash_lock()); // Lock down the node while in the table.
DEBUG_ONLY(n->enter_hash_lock()); // Lock down the node while in the table.
check_grow(); // Grow table if insert hit limit
return nullptr; // Miss!
}
@ -188,7 +188,7 @@ void NodeHash::hash_insert( Node *n ) {
key = (key + stride) & (_max-1); // Stride through table w/ relative prime
}
_table[key] = n; // Insert into table!
debug_only(n->enter_hash_lock()); // Lock down the node while in the table.
DEBUG_ONLY(n->enter_hash_lock()); // Lock down the node while in the table.
// if( conflict ) { n->dump(); }
}
@ -203,9 +203,9 @@ bool NodeHash::hash_delete( const Node *n ) {
}
uint key = hash & (_max-1);
uint stride = key | 0x01;
debug_only( uint counter = 0; );
DEBUG_ONLY( uint counter = 0; );
for( ; /* (k != nullptr) && (k != _sentinel) */; ) {
debug_only( counter++ );
DEBUG_ONLY( counter++ );
NOT_PRODUCT( _delete_probes++ );
k = _table[key]; // Get hashed value
if( !k ) { // Miss?
@ -215,7 +215,7 @@ bool NodeHash::hash_delete( const Node *n ) {
else if( n == k ) {
NOT_PRODUCT( _delete_hits++ );
_table[key] = _sentinel; // Hit! Label as deleted entry
debug_only(((Node*)n)->exit_hash_lock()); // Unlock the node upon removal from table.
DEBUG_ONLY(((Node*)n)->exit_hash_lock()); // Unlock the node upon removal from table.
return true;
}
else {
@ -257,7 +257,7 @@ void NodeHash::grow() {
for( uint i = 0; i < old_max; i++ ) {
Node *m = *old_table++;
if( !m || m == _sentinel ) continue;
debug_only(m->exit_hash_lock()); // Unlock the node upon removal from old table.
DEBUG_ONLY(m->exit_hash_lock()); // Unlock the node upon removal from old table.
hash_insert(m);
}
}
@ -289,7 +289,7 @@ void NodeHash::remove_useless_nodes(VectorSet &useful) {
for( uint i = 0; i < max; ++i ) {
Node *n = at(i);
if(n != nullptr && n != sentinel_node && !useful.test(n->_idx)) {
debug_only(n->exit_hash_lock()); // Unlock the node when removed
DEBUG_ONLY(n->exit_hash_lock()); // Unlock the node when removed
_table[i] = sentinel_node; // Replace with placeholder
}
}

View File

@ -60,12 +60,12 @@ public:
// Get the register associated with the Node
OptoReg::Name get_reg_first( const Node *n ) const {
debug_only( if( n->_idx >= _node_regs_max_index ) n->dump(); );
DEBUG_ONLY( if( n->_idx >= _node_regs_max_index ) n->dump(); );
assert( n->_idx < _node_regs_max_index, "Exceeded _node_regs array");
return _node_regs[n->_idx].first();
}
OptoReg::Name get_reg_second( const Node *n ) const {
debug_only( if( n->_idx >= _node_regs_max_index ) n->dump(); );
DEBUG_ONLY( if( n->_idx >= _node_regs_max_index ) n->dump(); );
assert( n->_idx < _node_regs_max_index, "Exceeded _node_regs array");
return _node_regs[n->_idx].second();
}

View File

@ -1944,7 +1944,7 @@ address OptoRuntime::handle_exception_C(JavaThread* current) {
#ifndef PRODUCT
SharedRuntime::_find_handler_ctr++; // find exception handler
#endif
debug_only(NoHandleMark __hm;)
DEBUG_ONLY(NoHandleMark __hm;)
nmethod* nm = nullptr;
address handler_address = nullptr;
{

View File

@ -756,7 +756,7 @@ void Type::Initialize(Compile* current) {
// delete the current Type and return the existing Type. Otherwise stick the
// current Type in the Type table.
const Type *Type::hashcons(void) {
debug_only(base()); // Check the assertion in Type::base().
DEBUG_ONLY(base()); // Check the assertion in Type::base().
// Look up the Type in the Type dictionary
Dict *tdic = type_dict();
Type* old = (Type*)(tdic->Insert(this, this, false));

View File

@ -215,7 +215,7 @@ intptr_t jfieldIDWorkaround::encode_klass_hash(Klass* k, int offset) {
field_klass = super_klass; // super contains the field also
super_klass = field_klass->super();
}
debug_only(NoSafepointVerifier nosafepoint;)
DEBUG_ONLY(NoSafepointVerifier nosafepoint;)
uintptr_t klass_hash = field_klass->identity_hash();
return ((klass_hash & klass_mask) << klass_shift) | checked_mask_in_place;
} else {
@ -235,7 +235,7 @@ bool jfieldIDWorkaround::klass_hash_ok(Klass* k, jfieldID id) {
uintptr_t as_uint = (uintptr_t) id;
intptr_t klass_hash = (as_uint >> klass_shift) & klass_mask;
do {
debug_only(NoSafepointVerifier nosafepoint;)
DEBUG_ONLY(NoSafepointVerifier nosafepoint;)
// Could use a non-blocking query for identity_hash here...
if ((k->identity_hash() & klass_mask) == klass_hash)
return true;
@ -410,7 +410,7 @@ JNI_ENTRY(jfieldID, jni_FromReflectedField(JNIEnv *env, jobject field))
int offset = InstanceKlass::cast(k1)->field_offset( slot );
JNIid* id = InstanceKlass::cast(k1)->jni_id_for(offset);
assert(id != nullptr, "corrupt Field object");
debug_only(id->set_is_static_field_id();)
DEBUG_ONLY(id->set_is_static_field_id();)
// A jfieldID for a static field is a JNIid specifying the field holder and the offset within the Klass*
ret = jfieldIDWorkaround::to_static_jfieldID(id);
return ret;
@ -472,7 +472,7 @@ JNI_ENTRY(jclass, jni_GetSuperclass(JNIEnv *env, jclass sub))
// return mirror for superclass
Klass* super = k->java_super();
// super2 is the value computed by the compiler's getSuperClass intrinsic:
debug_only(Klass* super2 = ( k->is_array_klass()
DEBUG_ONLY(Klass* super2 = ( k->is_array_klass()
? vmClasses::Object_klass()
: k->super() ) );
assert(super == super2,
@ -906,7 +906,7 @@ static void jni_invoke_nonstatic(JNIEnv *env, JavaValue* result, jobject receive
selected_method = m;
} else if (!m->has_itable_index()) {
// non-interface call -- for that little speed boost, don't handlize
debug_only(NoSafepointVerifier nosafepoint;)
DEBUG_ONLY(NoSafepointVerifier nosafepoint;)
// jni_GetMethodID makes sure class is linked and initialized
// so m should have a valid vtable index.
assert(m->valid_vtable_index(), "no valid vtable index");
@ -1995,9 +1995,9 @@ JNI_ENTRY(jfieldID, jni_GetStaticFieldID(JNIEnv *env, jclass clazz,
// A jfieldID for a static field is a JNIid specifying the field holder and the offset within the Klass*
JNIid* id = fd.field_holder()->jni_id_for(fd.offset());
debug_only(id->set_is_static_field_id();)
DEBUG_ONLY(id->set_is_static_field_id();)
debug_only(id->verify(fd.field_holder()));
DEBUG_ONLY(id->verify(fd.field_holder()));
ret = jfieldIDWorkaround::to_static_jfieldID(id);
return ret;

View File

@ -2320,7 +2320,7 @@ struct JNINativeInterface_* jni_functions_check() {
// make sure the last pointer in the checked table is not null, indicating
// an addition to the JNINativeInterface_ structure without initializing
// it in the checked table.
debug_only(intptr_t *lastPtr = (intptr_t *)((char *)&checked_jni_NativeInterface + \
DEBUG_ONLY(intptr_t *lastPtr = (intptr_t *)((char *)&checked_jni_NativeInterface + \
sizeof(*unchecked_jni_NativeInterface) - sizeof(char *));)
assert(*lastPtr != 0,
"Mismatched JNINativeInterface tables, check for new entries");

View File

@ -444,7 +444,7 @@ struct jvmtiInterface_1_ jvmti</xsl:text>
<xsl:apply-templates select="." mode="functionid"/>
<xsl:text> , current_thread)</xsl:text>
<xsl:value-of select="$space"/>
<xsl:text>debug_only(VMNativeEntryWrapper __vew;)</xsl:text>
<xsl:text>DEBUG_ONLY(VMNativeEntryWrapper __vew;)</xsl:text>
<xsl:if test="count(@callbacksafe)=0 or not(contains(@callbacksafe,'safe'))">
<xsl:value-of select="$space"/>
<xsl:text>PreserveExceptionMark __em(this_thread);</xsl:text>

Some files were not shown because too many files have changed in this diff.