diff --git a/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp index 2334cbdff24..2e53ecb8058 100644 --- a/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp @@ -69,7 +69,7 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) { __ far_call(RuntimeAddress(a)); ce->add_call_info_here(_info); ce->verify_oop_map(_info); - debug_only(__ should_not_reach_here()); + DEBUG_ONLY(__ should_not_reach_here()); return; } @@ -90,7 +90,7 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) { __ blr(lr); ce->add_call_info_here(_info); ce->verify_oop_map(_info); - debug_only(__ should_not_reach_here()); + DEBUG_ONLY(__ should_not_reach_here()); } PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) { @@ -103,7 +103,7 @@ void PredicateFailedStub::emit_code(LIR_Assembler* ce) { __ far_call(RuntimeAddress(a)); ce->add_call_info_here(_info); ce->verify_oop_map(_info); - debug_only(__ should_not_reach_here()); + DEBUG_ONLY(__ should_not_reach_here()); } void DivByZeroStub::emit_code(LIR_Assembler* ce) { @@ -274,7 +274,7 @@ void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) { __ far_call(RuntimeAddress(a)); ce->add_call_info_here(_info); ce->verify_oop_map(_info); - debug_only(__ should_not_reach_here()); + DEBUG_ONLY(__ should_not_reach_here()); } @@ -289,7 +289,7 @@ void SimpleExceptionStub::emit_code(LIR_Assembler* ce) { } __ far_call(RuntimeAddress(Runtime1::entry_for(_stub)), rscratch2); ce->add_call_info_here(_info); - debug_only(__ should_not_reach_here()); + DEBUG_ONLY(__ should_not_reach_here()); } diff --git a/src/hotspot/cpu/arm/arm.ad b/src/hotspot/cpu/arm/arm.ad index f3b97d23ad3..4a0b557968c 100644 --- a/src/hotspot/cpu/arm/arm.ad +++ b/src/hotspot/cpu/arm/arm.ad @@ -1238,11 +1238,11 @@ encode %{ enc_class save_last_PC %{ // preserve mark address mark = __ inst_mark(); - debug_only(int off0 = __ offset()); + DEBUG_ONLY(int off0 = __ offset()); int ret_addr_offset = as_MachCall()->ret_addr_offset(); __ adr(LR, mark + ret_addr_offset); __ str(LR, Address(Rthread, JavaThread::last_Java_pc_offset())); - debug_only(int off1 = __ offset()); + DEBUG_ONLY(int off1 = __ offset()); assert(off1 - off0 == 2 * Assembler::InstructionSize, "correct size prediction"); // restore mark __ set_inst_mark(mark); @@ -1251,11 +1251,11 @@ encode %{ enc_class preserve_SP %{ // preserve mark address mark = __ inst_mark(); - debug_only(int off0 = __ offset()); + DEBUG_ONLY(int off0 = __ offset()); // FP is preserved across all calls, even compiled calls. // Use it to preserve SP in places where the callee might change the SP. 
__ mov(Rmh_SP_save, SP); - debug_only(int off1 = __ offset()); + DEBUG_ONLY(int off1 = __ offset()); assert(off1 - off0 == 4, "correct size prediction"); // restore mark __ set_inst_mark(mark); diff --git a/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp b/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp index bca6c7ca30c..5683bc59d5c 100644 --- a/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp +++ b/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp @@ -59,7 +59,7 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) { __ call(Runtime1::entry_for(C1StubId::predicate_failed_trap_id), relocInfo::runtime_call_type); ce->add_call_info_here(_info); ce->verify_oop_map(_info); - debug_only(__ should_not_reach_here()); + DEBUG_ONLY(__ should_not_reach_here()); return; } // Pass the array index on stack because all registers must be preserved @@ -91,7 +91,7 @@ void PredicateFailedStub::emit_code(LIR_Assembler* ce) { __ call(Runtime1::entry_for(C1StubId::predicate_failed_trap_id), relocInfo::runtime_call_type); ce->add_call_info_here(_info); ce->verify_oop_map(_info); - debug_only(__ should_not_reach_here()); + DEBUG_ONLY(__ should_not_reach_here()); } void DivByZeroStub::emit_code(LIR_Assembler* ce) { diff --git a/src/hotspot/cpu/arm/gc/shared/barrierSetNMethod_arm.cpp b/src/hotspot/cpu/arm/gc/shared/barrierSetNMethod_arm.cpp index c6cc0ce406e..52d71ca65c2 100644 --- a/src/hotspot/cpu/arm/gc/shared/barrierSetNMethod_arm.cpp +++ b/src/hotspot/cpu/arm/gc/shared/barrierSetNMethod_arm.cpp @@ -72,7 +72,7 @@ void NativeNMethodBarrier::verify() const { static NativeNMethodBarrier* native_nmethod_barrier(nmethod* nm) { address barrier_address = nm->code_begin() + nm->frame_complete_offset() - entry_barrier_bytes; NativeNMethodBarrier* barrier = reinterpret_cast<NativeNMethodBarrier*>(barrier_address); - debug_only(barrier->verify()); + DEBUG_ONLY(barrier->verify()); return barrier; } diff --git a/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp b/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp index d4f5faa29a8..a390a6eeed4 100644 --- a/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp +++ b/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp @@ -74,7 +74,7 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) { __ bctrl(); ce->add_call_info_here(_info); ce->verify_oop_map(_info); - debug_only(__ illtrap()); + DEBUG_ONLY(__ illtrap()); return; } @@ -98,7 +98,7 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) { __ bctrl(); ce->add_call_info_here(_info); ce->verify_oop_map(_info); - debug_only(__ illtrap()); + DEBUG_ONLY(__ illtrap()); } @@ -115,7 +115,7 @@ void PredicateFailedStub::emit_code(LIR_Assembler* ce) { __ bctrl(); ce->add_call_info_here(_info); ce->verify_oop_map(_info); - debug_only(__ illtrap()); + DEBUG_ONLY(__ illtrap()); } @@ -156,7 +156,7 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) { __ bctrl(); ce->add_call_info_here(_info); ce->verify_oop_map(_info); - debug_only(__ illtrap()); + DEBUG_ONLY(__ illtrap()); } @@ -179,7 +179,7 @@ void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) { __ bctrl(); ce->add_call_info_here(_info); ce->verify_oop_map(_info); - debug_only(__ illtrap()); + DEBUG_ONLY(__ illtrap()); } @@ -193,7 +193,7 @@ void SimpleExceptionStub::emit_code(LIR_Assembler* ce) { __ mtctr(R0); __ bctrl(); ce->add_call_info_here(_info); - debug_only( __ illtrap(); ) + DEBUG_ONLY( __ illtrap(); ) } @@ -441,7 +441,7 @@ void DeoptimizeStub::emit_code(LIR_Assembler* ce) { __ load_const_optimized(R0, _trap_request); // Pass trap request in R0.
__ bctrl(); ce->add_call_info_here(_info); - debug_only(__ illtrap()); + DEBUG_ONLY(__ illtrap()); } diff --git a/src/hotspot/cpu/ppc/c1_FrameMap_ppc.cpp b/src/hotspot/cpu/ppc/c1_FrameMap_ppc.cpp index e4684613e25..8ce324a570b 100644 --- a/src/hotspot/cpu/ppc/c1_FrameMap_ppc.cpp +++ b/src/hotspot/cpu/ppc/c1_FrameMap_ppc.cpp @@ -189,7 +189,7 @@ LIR_Opr FrameMap::_caller_save_fpu_regs[] = {}; FloatRegister FrameMap::nr2floatreg (int rnr) { assert(_init_done, "tables not initialized"); - debug_only(fpu_range_check(rnr);) + DEBUG_ONLY(fpu_range_check(rnr);) return _fpu_regs[rnr]; } diff --git a/src/hotspot/cpu/ppc/gc/shared/barrierSetNMethod_ppc.cpp b/src/hotspot/cpu/ppc/gc/shared/barrierSetNMethod_ppc.cpp index 1b44a169e67..d3bb9cc3c04 100644 --- a/src/hotspot/cpu/ppc/gc/shared/barrierSetNMethod_ppc.cpp +++ b/src/hotspot/cpu/ppc/gc/shared/barrierSetNMethod_ppc.cpp @@ -108,7 +108,7 @@ static NativeNMethodBarrier* get_nmethod_barrier(nmethod* nm) { } auto barrier = reinterpret_cast<NativeNMethodBarrier*>(barrier_address); - debug_only(barrier->verify()); + DEBUG_ONLY(barrier->verify()); return barrier; } diff --git a/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp b/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp index d55521823ec..ea299181ca7 100644 --- a/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp +++ b/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp @@ -70,7 +70,7 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) { __ far_call(RuntimeAddress(a)); ce->add_call_info_here(_info); ce->verify_oop_map(_info); - debug_only(__ should_not_reach_here()); + DEBUG_ONLY(__ should_not_reach_here()); return; } @@ -92,7 +92,7 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) { __ rt_call(Runtime1::entry_for(stub_id), ra); ce->add_call_info_here(_info); ce->verify_oop_map(_info); - debug_only(__ should_not_reach_here()); + DEBUG_ONLY(__ should_not_reach_here()); } PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) { @@ -105,7 +105,7 @@ void PredicateFailedStub::emit_code(LIR_Assembler* ce) { __ far_call(RuntimeAddress(a)); ce->add_call_info_here(_info); ce->verify_oop_map(_info); - debug_only(__ should_not_reach_here()); + DEBUG_ONLY(__ should_not_reach_here()); } void DivByZeroStub::emit_code(LIR_Assembler* ce) { @@ -258,7 +258,7 @@ void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) { __ far_call(RuntimeAddress(a)); ce->add_call_info_here(_info); ce->verify_oop_map(_info); - debug_only(__ should_not_reach_here()); + DEBUG_ONLY(__ should_not_reach_here()); } void SimpleExceptionStub::emit_code(LIR_Assembler* ce) { @@ -272,7 +272,7 @@ void SimpleExceptionStub::emit_code(LIR_Assembler* ce) { } __ far_call(RuntimeAddress(Runtime1::entry_for(_stub))); ce->add_call_info_here(_info); - debug_only(__ should_not_reach_here()); + DEBUG_ONLY(__ should_not_reach_here()); } void ArrayCopyStub::emit_code(LIR_Assembler* ce) { diff --git a/src/hotspot/cpu/riscv/nativeInst_riscv.hpp b/src/hotspot/cpu/riscv/nativeInst_riscv.hpp index 295e92bbc1b..d8f5fa57816 100644 --- a/src/hotspot/cpu/riscv/nativeInst_riscv.hpp +++ b/src/hotspot/cpu/riscv/nativeInst_riscv.hpp @@ -300,7 +300,7 @@ public: inline NativeGeneralJump* nativeGeneralJump_at(address addr) { assert_cond(addr != nullptr); NativeGeneralJump* jump = (NativeGeneralJump*)(addr); - debug_only(jump->verify();) + DEBUG_ONLY(jump->verify();) return jump; } diff --git a/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp b/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp index c858a4b8cb1..430928a66ed 100644 --- a/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp +++ 
b/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp @@ -52,7 +52,7 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) { CHECK_BAILOUT(); ce->add_call_info_here(_info); ce->verify_oop_map(_info); - debug_only(__ should_not_reach_here()); + DEBUG_ONLY(__ should_not_reach_here()); return; } @@ -74,7 +74,7 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) { CHECK_BAILOUT(); ce->add_call_info_here(_info); ce->verify_oop_map(_info); - debug_only(__ should_not_reach_here()); + DEBUG_ONLY(__ should_not_reach_here()); } PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) { @@ -88,7 +88,7 @@ void PredicateFailedStub::emit_code(LIR_Assembler* ce) { CHECK_BAILOUT(); ce->add_call_info_here(_info); ce->verify_oop_map(_info); - debug_only(__ should_not_reach_here()); + DEBUG_ONLY(__ should_not_reach_here()); } void CounterOverflowStub::emit_code(LIR_Assembler* ce) { @@ -116,7 +116,7 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) { ce->emit_call_c(Runtime1::entry_for (C1StubId::throw_div0_exception_id)); CHECK_BAILOUT(); ce->add_call_info_here(_info); - debug_only(__ should_not_reach_here()); + DEBUG_ONLY(__ should_not_reach_here()); } void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) { @@ -134,7 +134,7 @@ void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) { CHECK_BAILOUT(); ce->add_call_info_here(_info); ce->verify_oop_map(_info); - debug_only(__ should_not_reach_here()); + DEBUG_ONLY(__ should_not_reach_here()); } // Note: pass object in Z_R1_scratch @@ -147,7 +147,7 @@ void SimpleExceptionStub::emit_code(LIR_Assembler* ce) { ce->emit_call_c(a); CHECK_BAILOUT(); ce->add_call_info_here(_info); - debug_only(__ should_not_reach_here()); + DEBUG_ONLY(__ should_not_reach_here()); } NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, C1StubId stub_id) { diff --git a/src/hotspot/cpu/s390/c1_FrameMap_s390.cpp b/src/hotspot/cpu/s390/c1_FrameMap_s390.cpp index 9fa6da8341f..ddba445154a 100644 --- a/src/hotspot/cpu/s390/c1_FrameMap_s390.cpp +++ b/src/hotspot/cpu/s390/c1_FrameMap_s390.cpp @@ -144,13 +144,13 @@ LIR_Opr FrameMap::_caller_save_fpu_regs[] = {}; // c1 rnr -> FloatRegister FloatRegister FrameMap::nr2floatreg (int rnr) { assert(_init_done, "tables not initialized"); - debug_only(fpu_range_check(rnr);) + DEBUG_ONLY(fpu_range_check(rnr);) return _fpu_rnr2reg[rnr]; } void FrameMap::map_float_register(int rnr, FloatRegister reg) { - debug_only(fpu_range_check(rnr);) - debug_only(fpu_range_check(reg->encoding());) + DEBUG_ONLY(fpu_range_check(rnr);) + DEBUG_ONLY(fpu_range_check(reg->encoding());) _fpu_rnr2reg[rnr] = reg; // mapping c1 regnr. -> FloatRegister _fpu_reg2rnr[reg->encoding()] = rnr; // mapping assembler encoding -> c1 regnr. 
} diff --git a/src/hotspot/cpu/s390/c1_FrameMap_s390.hpp b/src/hotspot/cpu/s390/c1_FrameMap_s390.hpp index 66ccc8de876..721995f41fe 100644 --- a/src/hotspot/cpu/s390/c1_FrameMap_s390.hpp +++ b/src/hotspot/cpu/s390/c1_FrameMap_s390.hpp @@ -107,7 +107,7 @@ static int fpu_reg2rnr (FloatRegister reg) { assert(_init_done, "tables not initialized"); int c1rnr = _fpu_reg2rnr[reg->encoding()]; - debug_only(fpu_range_check(c1rnr);) + DEBUG_ONLY(fpu_range_check(c1rnr);) return c1rnr; } diff --git a/src/hotspot/cpu/s390/gc/shared/barrierSetNMethod_s390.cpp b/src/hotspot/cpu/s390/gc/shared/barrierSetNMethod_s390.cpp index 85dcc0a4e73..88b3199e4e1 100644 --- a/src/hotspot/cpu/s390/gc/shared/barrierSetNMethod_s390.cpp +++ b/src/hotspot/cpu/s390/gc/shared/barrierSetNMethod_s390.cpp @@ -40,7 +40,7 @@ class NativeMethodBarrier: public NativeInstruction { address get_patchable_data_address() const { address inst_addr = get_barrier_start_address() + PATCHABLE_INSTRUCTION_OFFSET; - debug_only(Assembler::is_z_cfi(*((long*)inst_addr))); + DEBUG_ONLY(Assembler::is_z_cfi(*((long*)inst_addr))); return inst_addr + 2; } @@ -91,7 +91,7 @@ static NativeMethodBarrier* get_nmethod_barrier(nmethod* nm) { address barrier_address = nm->code_begin() + nm->frame_complete_offset() - NativeMethodBarrier::BARRIER_TOTAL_LENGTH; auto barrier = reinterpret_cast<NativeMethodBarrier*>(barrier_address); - debug_only(barrier->verify()); + DEBUG_ONLY(barrier->verify()); return barrier; } diff --git a/src/hotspot/cpu/s390/interp_masm_s390.cpp b/src/hotspot/cpu/s390/interp_masm_s390.cpp index 4ba99eb9e88..09995334330 100644 --- a/src/hotspot/cpu/s390/interp_masm_s390.cpp +++ b/src/hotspot/cpu/s390/interp_masm_s390.cpp @@ -444,7 +444,7 @@ void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass, // Useful if consumed previously by access via stackTop(). void InterpreterMacroAssembler::popx(int len) { add2reg(Z_esp, len*Interpreter::stackElementSize); - debug_only(verify_esp(Z_esp, Z_R1_scratch)); + DEBUG_ONLY(verify_esp(Z_esp, Z_R1_scratch)); } // Get Address object of stack top. No checks. No pop.
@@ -458,38 +458,38 @@ void InterpreterMacroAssembler::pop_i(Register r) { z_l(r, Interpreter::expr_offset_in_bytes(0), Z_esp); add2reg(Z_esp, Interpreter::stackElementSize); assert_different_registers(r, Z_R1_scratch); - debug_only(verify_esp(Z_esp, Z_R1_scratch)); + DEBUG_ONLY(verify_esp(Z_esp, Z_R1_scratch)); } void InterpreterMacroAssembler::pop_ptr(Register r) { z_lg(r, Interpreter::expr_offset_in_bytes(0), Z_esp); add2reg(Z_esp, Interpreter::stackElementSize); assert_different_registers(r, Z_R1_scratch); - debug_only(verify_esp(Z_esp, Z_R1_scratch)); + DEBUG_ONLY(verify_esp(Z_esp, Z_R1_scratch)); } void InterpreterMacroAssembler::pop_l(Register r) { z_lg(r, Interpreter::expr_offset_in_bytes(0), Z_esp); add2reg(Z_esp, 2*Interpreter::stackElementSize); assert_different_registers(r, Z_R1_scratch); - debug_only(verify_esp(Z_esp, Z_R1_scratch)); + DEBUG_ONLY(verify_esp(Z_esp, Z_R1_scratch)); } void InterpreterMacroAssembler::pop_f(FloatRegister f) { mem2freg_opt(f, Address(Z_esp, Interpreter::expr_offset_in_bytes(0)), false); add2reg(Z_esp, Interpreter::stackElementSize); - debug_only(verify_esp(Z_esp, Z_R1_scratch)); + DEBUG_ONLY(verify_esp(Z_esp, Z_R1_scratch)); } void InterpreterMacroAssembler::pop_d(FloatRegister f) { mem2freg_opt(f, Address(Z_esp, Interpreter::expr_offset_in_bytes(0)), true); add2reg(Z_esp, 2*Interpreter::stackElementSize); - debug_only(verify_esp(Z_esp, Z_R1_scratch)); + DEBUG_ONLY(verify_esp(Z_esp, Z_R1_scratch)); } void InterpreterMacroAssembler::push_i(Register r) { assert_different_registers(r, Z_R1_scratch); - debug_only(verify_esp(Z_esp, Z_R1_scratch)); + DEBUG_ONLY(verify_esp(Z_esp, Z_R1_scratch)); z_st(r, Address(Z_esp)); add2reg(Z_esp, -Interpreter::stackElementSize); } @@ -501,7 +501,7 @@ void InterpreterMacroAssembler::push_ptr(Register r) { void InterpreterMacroAssembler::push_l(Register r) { assert_different_registers(r, Z_R1_scratch); - debug_only(verify_esp(Z_esp, Z_R1_scratch)); + DEBUG_ONLY(verify_esp(Z_esp, Z_R1_scratch)); int offset = -Interpreter::stackElementSize; z_stg(r, Address(Z_esp, offset)); clear_mem(Address(Z_esp), Interpreter::stackElementSize); @@ -509,13 +509,13 @@ void InterpreterMacroAssembler::push_l(Register r) { } void InterpreterMacroAssembler::push_f(FloatRegister f) { - debug_only(verify_esp(Z_esp, Z_R1_scratch)); + DEBUG_ONLY(verify_esp(Z_esp, Z_R1_scratch)); freg2mem_opt(f, Address(Z_esp), false); add2reg(Z_esp, -Interpreter::stackElementSize); } void InterpreterMacroAssembler::push_d(FloatRegister d) { - debug_only(verify_esp(Z_esp, Z_R1_scratch)); + DEBUG_ONLY(verify_esp(Z_esp, Z_R1_scratch)); int offset = -Interpreter::stackElementSize; freg2mem_opt(d, Address(Z_esp, offset)); add2reg(Z_esp, 2 * offset); diff --git a/src/hotspot/cpu/x86/assembler_x86.cpp b/src/hotspot/cpu/x86/assembler_x86.cpp index 3ea00681ada..7a4d7c6d6f3 100644 --- a/src/hotspot/cpu/x86/assembler_x86.cpp +++ b/src/hotspot/cpu/x86/assembler_x86.cpp @@ -801,7 +801,7 @@ address Assembler::locate_operand(address inst, WhichOperand which) { address ip = inst; bool is_64bit = false; - debug_only(bool has_disp32 = false); + DEBUG_ONLY(bool has_disp32 = false); int tail_size = 0; // other random bytes (#32, #16, etc.) 
at end of insn again_after_prefix: @@ -859,7 +859,7 @@ address Assembler::locate_operand(address inst, WhichOperand which) { case 0x8A: // movb r, a case 0x8B: // movl r, a case 0x8F: // popl a - debug_only(has_disp32 = true); + DEBUG_ONLY(has_disp32 = true); break; case 0x68: // pushq #32 @@ -898,10 +898,10 @@ address Assembler::locate_operand(address inst, WhichOperand which) { case 0x8B: // movw r, a case 0x89: // movw a, r - debug_only(has_disp32 = true); + DEBUG_ONLY(has_disp32 = true); break; case 0xC7: // movw a, #16 - debug_only(has_disp32 = true); + DEBUG_ONLY(has_disp32 = true); tail_size = 2; // the imm16 break; case 0x0F: // several SSE/SSE2 variants @@ -923,7 +923,7 @@ address Assembler::locate_operand(address inst, WhichOperand which) { case 0x69: // imul r, a, #32 case 0xC7: // movl a, #32(oop?) tail_size = 4; - debug_only(has_disp32 = true); // has both kinds of operands! + DEBUG_ONLY(has_disp32 = true); // has both kinds of operands! break; case 0x0F: // movx..., etc. @@ -932,11 +932,11 @@ address Assembler::locate_operand(address inst, WhichOperand which) { tail_size = 1; case 0x38: // ptest, pmovzxbw ip++; // skip opcode - debug_only(has_disp32 = true); // has both kinds of operands! + DEBUG_ONLY(has_disp32 = true); // has both kinds of operands! break; case 0x70: // pshufd r, r/a, #8 - debug_only(has_disp32 = true); // has both kinds of operands! + DEBUG_ONLY(has_disp32 = true); // has both kinds of operands! case 0x73: // psrldq r, #8 tail_size = 1; break; @@ -961,7 +961,7 @@ address Assembler::locate_operand(address inst, WhichOperand which) { case 0xAE: // ldmxcsr, stmxcsr, fxrstor, fxsave, clflush case 0xD6: // movq case 0xFE: // paddd - debug_only(has_disp32 = true); + DEBUG_ONLY(has_disp32 = true); break; case 0xAD: // shrd r, a, %cl @@ -976,18 +976,18 @@ address Assembler::locate_operand(address inst, WhichOperand which) { case 0xC1: // xaddl case 0xC7: // cmpxchg8 case REP16(0x90): // setcc a - debug_only(has_disp32 = true); + DEBUG_ONLY(has_disp32 = true); // fall out of the switch to decode the address break; case 0xC4: // pinsrw r, a, #8 - debug_only(has_disp32 = true); + DEBUG_ONLY(has_disp32 = true); case 0xC5: // pextrw r, r, #8 tail_size = 1; // the imm8 break; case 0xAC: // shrd r, a, #8 - debug_only(has_disp32 = true); + DEBUG_ONLY(has_disp32 = true); tail_size = 1; // the imm8 break; @@ -1004,12 +1004,12 @@ address Assembler::locate_operand(address inst, WhichOperand which) { // also: orl, adcl, sbbl, andl, subl, xorl, cmpl // on 32bit in the case of cmpl, the imm might be an oop tail_size = 4; - debug_only(has_disp32 = true); // has both kinds of operands! + DEBUG_ONLY(has_disp32 = true); // has both kinds of operands! break; case 0x83: // addl a, #8; addl r, #8 // also: orl, adcl, sbbl, andl, subl, xorl, cmpl - debug_only(has_disp32 = true); // has both kinds of operands! + DEBUG_ONLY(has_disp32 = true); // has both kinds of operands! tail_size = 1; break; @@ -1026,7 +1026,7 @@ address Assembler::locate_operand(address inst, WhichOperand which) { case 0x9B: switch (0xFF & *ip++) { case 0xD9: // fnstcw a - debug_only(has_disp32 = true); + DEBUG_ONLY(has_disp32 = true); break; default: ShouldNotReachHere(); @@ -1045,7 +1045,7 @@ address Assembler::locate_operand(address inst, WhichOperand which) { case 0x87: // xchg r, a case REP4(0x38): // cmp... case 0x85: // test r, a - debug_only(has_disp32 = true); // has both kinds of operands! + DEBUG_ONLY(has_disp32 = true); // has both kinds of operands! 
break; case 0xA8: // testb rax, #8 @@ -1057,7 +1057,7 @@ address Assembler::locate_operand(address inst, WhichOperand which) { case 0xC6: // movb a, #8 case 0x80: // cmpb a, #8 case 0x6B: // imul r, a, #8 - debug_only(has_disp32 = true); // has both kinds of operands! + DEBUG_ONLY(has_disp32 = true); // has both kinds of operands! tail_size = 1; // the imm8 break; @@ -1109,7 +1109,7 @@ address Assembler::locate_operand(address inst, WhichOperand which) { break; } ip++; // skip opcode - debug_only(has_disp32 = true); // has both kinds of operands! + DEBUG_ONLY(has_disp32 = true); // has both kinds of operands! break; case 0x62: // EVEX_4bytes @@ -1135,7 +1135,7 @@ address Assembler::locate_operand(address inst, WhichOperand which) { break; } ip++; // skip opcode - debug_only(has_disp32 = true); // has both kinds of operands! + DEBUG_ONLY(has_disp32 = true); // has both kinds of operands! break; case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1 @@ -1147,7 +1147,7 @@ address Assembler::locate_operand(address inst, WhichOperand which) { case 0xD8: // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a case 0xDC: // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a case 0xDE: // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a - debug_only(has_disp32 = true); + DEBUG_ONLY(has_disp32 = true); break; case 0xE8: // call rdisp32 @@ -1184,7 +1184,7 @@ address Assembler::locate_operand(address inst, WhichOperand which) { default: ip++; } - debug_only(has_disp32 = true); // has both kinds of operands! + DEBUG_ONLY(has_disp32 = true); // has both kinds of operands! break; default: diff --git a/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp b/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp index 73262b21365..7c0d3ff624d 100644 --- a/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp +++ b/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp @@ -68,7 +68,7 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) { __ call(RuntimeAddress(a)); ce->add_call_info_here(_info); ce->verify_oop_map(_info); - debug_only(__ should_not_reach_here()); + DEBUG_ONLY(__ should_not_reach_here()); return; } @@ -88,7 +88,7 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) { __ call(RuntimeAddress(Runtime1::entry_for(stub_id))); ce->add_call_info_here(_info); ce->verify_oop_map(_info); - debug_only(__ should_not_reach_here()); + DEBUG_ONLY(__ should_not_reach_here()); } PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) { @@ -101,7 +101,7 @@ void PredicateFailedStub::emit_code(LIR_Assembler* ce) { __ call(RuntimeAddress(a)); ce->add_call_info_here(_info); ce->verify_oop_map(_info); - debug_only(__ should_not_reach_here()); + DEBUG_ONLY(__ should_not_reach_here()); } void DivByZeroStub::emit_code(LIR_Assembler* ce) { @@ -111,7 +111,7 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::throw_div0_exception_id))); ce->add_call_info_here(_info); - debug_only(__ should_not_reach_here()); + DEBUG_ONLY(__ should_not_reach_here()); } @@ -399,7 +399,7 @@ void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) { __ call(RuntimeAddress(a)); ce->add_call_info_here(_info); ce->verify_oop_map(_info); - debug_only(__ should_not_reach_here()); + DEBUG_ONLY(__ should_not_reach_here()); } @@ -413,7 +413,7 @@ void SimpleExceptionStub::emit_code(LIR_Assembler* ce) { } __ call(RuntimeAddress(Runtime1::entry_for(_stub))); ce->add_call_info_here(_info); - debug_only(__ should_not_reach_here()); + DEBUG_ONLY(__ should_not_reach_here()); } diff --git 
a/src/hotspot/cpu/x86/nativeInst_x86.cpp b/src/hotspot/cpu/x86/nativeInst_x86.cpp index 4ee741077dc..6e0e078b36e 100644 --- a/src/hotspot/cpu/x86/nativeInst_x86.cpp +++ b/src/hotspot/cpu/x86/nativeInst_x86.cpp @@ -140,7 +140,7 @@ bool NativeCall::is_displacement_aligned() { // Used in the runtime linkage of calls; see class CompiledIC. // (Cf. 4506997 and 4479829, where threads witnessed garbage displacements.) void NativeCall::set_destination_mt_safe(address dest) { - debug_only(verify()); + DEBUG_ONLY(verify()); // Make sure patching code is locked. No two threads can patch at the same // time but one may be executing this code. assert(CodeCache_lock->is_locked() || SafepointSynchronize::is_at_safepoint() || diff --git a/src/hotspot/cpu/x86/nativeInst_x86.hpp b/src/hotspot/cpu/x86/nativeInst_x86.hpp index d02387aa9ff..f6abadbeb5c 100644 --- a/src/hotspot/cpu/x86/nativeInst_x86.hpp +++ b/src/hotspot/cpu/x86/nativeInst_x86.hpp @@ -503,7 +503,7 @@ class NativeGeneralJump: public NativeInstruction { inline NativeGeneralJump* nativeGeneralJump_at(address address) { NativeGeneralJump* jump = (NativeGeneralJump*)(address); - debug_only(jump->verify();) + DEBUG_ONLY(jump->verify();) return jump; } diff --git a/src/hotspot/cpu/x86/x86_64.ad b/src/hotspot/cpu/x86/x86_64.ad index 954a53307c2..25cee7a3094 100644 --- a/src/hotspot/cpu/x86/x86_64.ad +++ b/src/hotspot/cpu/x86/x86_64.ad @@ -1847,14 +1847,14 @@ encode %{ %} enc_class clear_avx %{ - debug_only(int off0 = __ offset()); + DEBUG_ONLY(int off0 = __ offset()); if (generate_vzeroupper(Compile::current())) { // Clear upper bits of YMM registers to avoid AVX <-> SSE transition penalty // Clear upper bits of YMM registers when current compiled code uses // wide vectors to avoid AVX <-> SSE transition penalty during call. __ vzeroupper(); } - debug_only(int off1 = __ offset()); + DEBUG_ONLY(int off1 = __ offset()); assert(off1 - off0 == clear_avx_size(), "correct size prediction"); %} diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp index 22d3e1862b5..7061e5ac9d8 100644 --- a/src/hotspot/os/linux/os_linux.cpp +++ b/src/hotspot/os/linux/os_linux.cpp @@ -1662,7 +1662,7 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen) { } ThreadInVMfromNative tiv(jt); - debug_only(VMNativeEntryWrapper vew;) + DEBUG_ONLY(VMNativeEntryWrapper vew;) VM_LinuxDllLoad op(filename, ebuf, ebuflen); VMThread::execute(&op); diff --git a/src/hotspot/os/posix/signals_posix.cpp b/src/hotspot/os/posix/signals_posix.cpp index 555ac832aae..e900d5695ae 100644 --- a/src/hotspot/os/posix/signals_posix.cpp +++ b/src/hotspot/os/posix/signals_posix.cpp @@ -147,7 +147,7 @@ public: }; -debug_only(static bool signal_sets_initialized = false); +DEBUG_ONLY(static bool signal_sets_initialized = false); static sigset_t unblocked_sigs, vm_sigs, preinstalled_sigs; // Our own signal handlers should never ever get replaced by a third party one. @@ -1547,7 +1547,7 @@ static void signal_sets_init() { if (!ReduceSignalUsage) { sigaddset(&vm_sigs, BREAK_SIGNAL); } - debug_only(signal_sets_initialized = true); + DEBUG_ONLY(signal_sets_initialized = true); } // These are signals that are unblocked while a thread is running Java. 
diff --git a/src/hotspot/share/asm/assembler.hpp b/src/hotspot/share/asm/assembler.hpp index 9abd3eb7171..961b5fab700 100644 --- a/src/hotspot/share/asm/assembler.hpp +++ b/src/hotspot/share/asm/assembler.hpp @@ -73,7 +73,7 @@ class Label; */ class Label { private: - enum { PatchCacheSize = 4 debug_only( +4 ) }; + enum { PatchCacheSize = 4 DEBUG_ONLY( +4 ) }; // _loc encodes both the binding state (via its sign) // and the binding locator (via its value) of a label. diff --git a/src/hotspot/share/asm/codeBuffer.cpp b/src/hotspot/share/asm/codeBuffer.cpp index 917569e2be6..2a9b6215614 100644 --- a/src/hotspot/share/asm/codeBuffer.cpp +++ b/src/hotspot/share/asm/codeBuffer.cpp @@ -92,7 +92,7 @@ CodeBuffer::CodeBuffer(CodeBlob* blob) DEBUG_ONLY(: Scrubber(this, sizeof(*this) // Provide code buffer with meaningful name initialize_misc(blob->name()); initialize(blob->content_begin(), blob->content_size()); - debug_only(verify_section_allocation();) + DEBUG_ONLY(verify_section_allocation();) } void CodeBuffer::initialize(csize_t code_size, csize_t locs_size) { @@ -120,7 +120,7 @@ void CodeBuffer::initialize(csize_t code_size, csize_t locs_size) { _insts.initialize_locs(locs_size / sizeof(relocInfo)); } - debug_only(verify_section_allocation();) + DEBUG_ONLY(verify_section_allocation();) } @@ -494,7 +494,7 @@ void CodeBuffer::compute_final_layout(CodeBuffer* dest) const { prev_cs = cs; } - debug_only(dest_cs->_start = nullptr); // defeat double-initialization assert + DEBUG_ONLY(dest_cs->_start = nullptr); // defeat double-initialization assert dest_cs->initialize(buf+buf_offset, csize); dest_cs->set_end(buf+buf_offset+csize); assert(dest_cs->is_allocated(), "must always be allocated"); @@ -505,7 +505,7 @@ void CodeBuffer::compute_final_layout(CodeBuffer* dest) const { // Done calculating sections; did it come out to the right end? assert(buf_offset == total_content_size(), "sanity"); - debug_only(dest->verify_section_allocation();) + DEBUG_ONLY(dest->verify_section_allocation();) } // Append an oop reference that keeps the class alive. @@ -939,11 +939,11 @@ void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) { cb.set_blob(nullptr); // Zap the old code buffer contents, to avoid mistakenly using them. - debug_only(Copy::fill_to_bytes(bxp->_total_start, bxp->_total_size, + DEBUG_ONLY(Copy::fill_to_bytes(bxp->_total_start, bxp->_total_size, badCodeHeapFreeVal);) // Make certain that the new sections are all snugly inside the new blob. - debug_only(verify_section_allocation();) + DEBUG_ONLY(verify_section_allocation();) #ifndef PRODUCT _decode_begin = nullptr; // sanity diff --git a/src/hotspot/share/asm/codeBuffer.hpp b/src/hotspot/share/asm/codeBuffer.hpp index f855d41b181..b38cc74cc3b 100644 --- a/src/hotspot/share/asm/codeBuffer.hpp +++ b/src/hotspot/share/asm/codeBuffer.hpp @@ -121,8 +121,8 @@ class CodeSection { _locs_own = false; _scratch_emit = false; _skipped_instructions_size = 0; - debug_only(_index = -1); - debug_only(_outer = (CodeBuffer*)badAddress); + DEBUG_ONLY(_index = -1); + DEBUG_ONLY(_outer = (CodeBuffer*)badAddress); } void initialize_outer(CodeBuffer* outer, int8_t index) { @@ -535,7 +535,7 @@ class CodeBuffer: public StackObj DEBUG_ONLY(COMMA private Scrubber) { assert(code_start != nullptr, "sanity"); initialize_misc("static buffer"); initialize(code_start, code_size); - debug_only(verify_section_allocation();) + DEBUG_ONLY(verify_section_allocation();) } // (2) CodeBuffer referring to pre-allocated CodeBlob. 
diff --git a/src/hotspot/share/c1/c1_FrameMap.hpp b/src/hotspot/share/c1/c1_FrameMap.hpp index 4e4fde0cb4a..f10c4d3f226 100644 --- a/src/hotspot/share/c1/c1_FrameMap.hpp +++ b/src/hotspot/share/c1/c1_FrameMap.hpp @@ -109,19 +109,19 @@ class FrameMap : public CompilationResourceObj { static Register cpu_rnr2reg (int rnr) { assert(_init_done, "tables not initialized"); - debug_only(cpu_range_check(rnr);) + DEBUG_ONLY(cpu_range_check(rnr);) return _cpu_rnr2reg[rnr]; } static int cpu_reg2rnr (Register reg) { assert(_init_done, "tables not initialized"); - debug_only(cpu_range_check(reg->encoding());) + DEBUG_ONLY(cpu_range_check(reg->encoding());) return _cpu_reg2rnr[reg->encoding()]; } static void map_register(int rnr, Register reg) { - debug_only(cpu_range_check(rnr);) - debug_only(cpu_range_check(reg->encoding());) + DEBUG_ONLY(cpu_range_check(rnr);) + DEBUG_ONLY(cpu_range_check(reg->encoding());) _cpu_rnr2reg[rnr] = reg; _cpu_reg2rnr[reg->encoding()] = rnr; } diff --git a/src/hotspot/share/c1/c1_Runtime1.cpp b/src/hotspot/share/c1/c1_Runtime1.cpp index 9d4b35024ed..0f87a90a417 100644 --- a/src/hotspot/share/c1/c1_Runtime1.cpp +++ b/src/hotspot/share/c1/c1_Runtime1.cpp @@ -1363,7 +1363,7 @@ int Runtime1::move_klass_patching(JavaThread* current) { // // NOTE: we are still in Java // - debug_only(NoHandleMark nhm;) + DEBUG_ONLY(NoHandleMark nhm;) { // Enter VM mode ResetNoHandleMark rnhm; @@ -1380,7 +1380,7 @@ int Runtime1::move_mirror_patching(JavaThread* current) { // // NOTE: we are still in Java // - debug_only(NoHandleMark nhm;) + DEBUG_ONLY(NoHandleMark nhm;) { // Enter VM mode ResetNoHandleMark rnhm; @@ -1397,7 +1397,7 @@ int Runtime1::move_appendix_patching(JavaThread* current) { // // NOTE: we are still in Java // - debug_only(NoHandleMark nhm;) + DEBUG_ONLY(NoHandleMark nhm;) { // Enter VM mode ResetNoHandleMark rnhm; diff --git a/src/hotspot/share/ci/ciTypeFlow.cpp b/src/hotspot/share/ci/ciTypeFlow.cpp index 234b4611ea1..6df090a7ce5 100644 --- a/src/hotspot/share/ci/ciTypeFlow.cpp +++ b/src/hotspot/share/ci/ciTypeFlow.cpp @@ -2924,7 +2924,7 @@ void ciTypeFlow::flow_types() { // Continue flow analysis until fixed point reached - debug_only(int max_block = _next_pre_order;) + DEBUG_ONLY(int max_block = _next_pre_order;) while (!work_list_empty()) { Block* blk = work_list_next(); diff --git a/src/hotspot/share/ci/ciTypeFlow.hpp b/src/hotspot/share/ci/ciTypeFlow.hpp index 92db6253aa0..adfb85dc17f 100644 --- a/src/hotspot/share/ci/ciTypeFlow.hpp +++ b/src/hotspot/share/ci/ciTypeFlow.hpp @@ -253,7 +253,7 @@ public: set_type_at_tos(type); } void pop() { - debug_only(set_type_at_tos(bottom_type())); + DEBUG_ONLY(set_type_at_tos(bottom_type())); _stack_size--; } ciType* pop_value() { diff --git a/src/hotspot/share/ci/ciUtilities.inline.hpp b/src/hotspot/share/ci/ciUtilities.inline.hpp index 05e73d8ca64..91f848368ac 100644 --- a/src/hotspot/share/ci/ciUtilities.inline.hpp +++ b/src/hotspot/share/ci/ciUtilities.inline.hpp @@ -37,7 +37,7 @@ ThreadInVMfromNative __tiv(thread); \ HandleMarkCleaner __hm(thread); \ JavaThread* THREAD = thread; /* For exception macros. */ \ - debug_only(VMNativeEntryWrapper __vew;) + DEBUG_ONLY(VMNativeEntryWrapper __vew;) @@ -49,10 +49,10 @@ * [TODO] The NoHandleMark line does nothing but declare a function prototype \ * The NoHandkeMark constructor is NOT executed. If the ()'s are \ * removed, causes the NoHandleMark assert to trigger. 
\ - * debug_only(NoHandleMark __hm();) \ + * DEBUG_ONLY(NoHandleMark __hm();) \ */ \ JavaThread* THREAD = thread; /* For exception macros. */ \ - debug_only(VMNativeEntryWrapper __vew;) + DEBUG_ONLY(VMNativeEntryWrapper __vew;) #define EXCEPTION_CONTEXT \ diff --git a/src/hotspot/share/classfile/classFileParser.cpp b/src/hotspot/share/classfile/classFileParser.cpp index 3e6246d4aee..757f0e72c8c 100644 --- a/src/hotspot/share/classfile/classFileParser.cpp +++ b/src/hotspot/share/classfile/classFileParser.cpp @@ -176,7 +176,7 @@ void ClassFileParser::parse_constant_pool_entries(const ClassFileStream* const s const ClassFileStream cfs1 = *stream; const ClassFileStream* const cfs = &cfs1; - debug_only(const u1* const old_current = stream->current();) + DEBUG_ONLY(const u1* const old_current = stream->current();) // Used for batching symbol allocations. const char* names[SymbolTable::symbol_alloc_batch_size]; @@ -5243,7 +5243,7 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik, // it's official set_klass(ik); - debug_only(ik->verify();) + DEBUG_ONLY(ik->verify();) } void ClassFileParser::update_class_name(Symbol* new_class_name) { diff --git a/src/hotspot/share/classfile/javaClasses.cpp b/src/hotspot/share/classfile/javaClasses.cpp index a2ad1dce5e4..a39164232ae 100644 --- a/src/hotspot/share/classfile/javaClasses.cpp +++ b/src/hotspot/share/classfile/javaClasses.cpp @@ -4821,7 +4821,7 @@ bool java_lang_ClassLoader::isAncestor(oop loader, oop cl) { assert(is_instance(loader), "loader must be oop"); assert(cl == nullptr || is_instance(cl), "cl argument must be oop"); oop acl = loader; - debug_only(jint loop_count = 0); + DEBUG_ONLY(jint loop_count = 0); // This loop taken verbatim from ClassLoader.java: do { acl = parent(acl); diff --git a/src/hotspot/share/code/debugInfoRec.cpp b/src/hotspot/share/code/debugInfoRec.cpp index 02cd23407bf..8449f5d6929 100644 --- a/src/hotspot/share/code/debugInfoRec.cpp +++ b/src/hotspot/share/code/debugInfoRec.cpp @@ -140,7 +140,7 @@ DebugInformationRecorder::DebugInformationRecorder(OopRecorder* oop_recorder) add_new_pc_offset(PcDesc::lower_offset_limit); // sentinel record - debug_only(_recording_state = rs_null); + DEBUG_ONLY(_recording_state = rs_null); } @@ -159,7 +159,7 @@ void DebugInformationRecorder::add_safepoint(int pc_offset, OopMap* map) { add_new_pc_offset(pc_offset); assert(_recording_state == rs_null, "nesting of recording calls"); - debug_only(_recording_state = rs_safepoint); + DEBUG_ONLY(_recording_state = rs_safepoint); } void DebugInformationRecorder::add_non_safepoint(int pc_offset) { @@ -169,7 +169,7 @@ void DebugInformationRecorder::add_non_safepoint(int pc_offset) { add_new_pc_offset(pc_offset); assert(_recording_state == rs_null, "nesting of recording calls"); - debug_only(_recording_state = rs_non_safepoint); + DEBUG_ONLY(_recording_state = rs_non_safepoint); } void DebugInformationRecorder::add_new_pc_offset(int pc_offset) { @@ -360,7 +360,7 @@ void DebugInformationRecorder::dump_object_pool(GrowableArray* obje void DebugInformationRecorder::end_scopes(int pc_offset, bool is_safepoint) { assert(_recording_state == (is_safepoint? rs_safepoint: rs_non_safepoint), "nesting of recording calls"); - debug_only(_recording_state = rs_null); + DEBUG_ONLY(_recording_state = rs_null); // Try to compress away an equivalent non-safepoint predecessor. 
// (This only works because we have previously recognized redundant @@ -413,13 +413,13 @@ DebugToken* DebugInformationRecorder::create_monitor_values(GrowableArray<MonitorValue*>* monitors) { int DebugInformationRecorder::data_size() { - debug_only(mark_recorders_frozen()); // mark it "frozen" for asserts + DEBUG_ONLY(mark_recorders_frozen()); // mark it "frozen" for asserts return _stream->position(); } int DebugInformationRecorder::pcs_size() { - debug_only(mark_recorders_frozen()); // mark it "frozen" for asserts + DEBUG_ONLY(mark_recorders_frozen()); // mark it "frozen" for asserts if (last_pc()->pc_offset() != PcDesc::upper_offset_limit) add_new_pc_offset(PcDesc::upper_offset_limit); return _pcs_length * sizeof(PcDesc); diff --git a/src/hotspot/share/code/nmethod.cpp b/src/hotspot/share/code/nmethod.cpp index 56ba76a806e..4576a348a4f 100644 --- a/src/hotspot/share/code/nmethod.cpp +++ b/src/hotspot/share/code/nmethod.cpp @@ -1122,7 +1122,7 @@ nmethod* nmethod::new_native_nmethod(const methodHandle& method, if (nm != nullptr) { // verify nmethod - debug_only(nm->verify();) // might block + DEBUG_ONLY(nm->verify();) // might block nm->log_new_nmethod(); } @@ -1269,7 +1269,7 @@ void nmethod::post_init() { finalize_relocations(); Universe::heap()->register_nmethod(this); - debug_only(Universe::heap()->verify_nmethod(this)); + DEBUG_ONLY(Universe::heap()->verify_nmethod(this)); CodeCache::commit(this); } @@ -1296,7 +1296,7 @@ nmethod::nmethod( _native_basic_lock_sp_offset(basic_lock_sp_offset) { { - debug_only(NoSafepointVerifier nsv;) + DEBUG_ONLY(NoSafepointVerifier nsv;) assert_locked_or_safepoint(CodeCache_lock); init_defaults(code_buffer, offsets); @@ -1437,7 +1437,7 @@ nmethod::nmethod( { assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR"); { - debug_only(NoSafepointVerifier nsv;) + DEBUG_ONLY(NoSafepointVerifier nsv;) assert_locked_or_safepoint(CodeCache_lock); init_defaults(code_buffer, offsets); @@ -2802,7 +2802,7 @@ PcDesc* PcDescContainer::find_pc_desc_internal(address pc, bool approximate, add } // Take giant steps at first (4096, then 256, then 16, then 1) - const int LOG2_RADIX = 4 /*smaller steps in debug mode:*/ debug_only(-1); + const int LOG2_RADIX = 4 /*smaller steps in debug mode:*/ DEBUG_ONLY(-1); const int RADIX = (1 << LOG2_RADIX); for (int step = (1 << (LOG2_RADIX*3)); step > 1; step >>= LOG2_RADIX) { while ((mid = lower + step) < upper) { diff --git a/src/hotspot/share/code/nmethod.hpp b/src/hotspot/share/code/nmethod.hpp index 7b19cf75a76..1c536a5d7e1 100644 --- a/src/hotspot/share/code/nmethod.hpp +++ b/src/hotspot/share/code/nmethod.hpp @@ -100,7 +100,7 @@ class PcDescCache { typedef PcDesc* PcDescPtr; volatile PcDescPtr _pc_descs[cache_size]; // last cache_size pc_descs found public: - PcDescCache() { debug_only(_pc_descs[0] = nullptr); } + PcDescCache() { DEBUG_ONLY(_pc_descs[0] = nullptr); } void init_to(PcDesc* initial_pc_desc); PcDesc* find_pc_desc(int pc_offset, bool approximate); void add_pc_desc(PcDesc* pc_desc); diff --git a/src/hotspot/share/code/oopRecorder.cpp b/src/hotspot/share/code/oopRecorder.cpp index af23bf12b43..c37651892cc 100644 --- a/src/hotspot/share/code/oopRecorder.cpp +++ b/src/hotspot/share/code/oopRecorder.cpp @@ -122,7 +122,7 @@ template <class T> int ValueRecorder<T>::add_handle(T h, bool make_findable) { template <class T> int ValueRecorder<T>::maybe_find_index(T h) { - debug_only(_find_index_calls++); + DEBUG_ONLY(_find_index_calls++); assert(!_complete, "cannot allocate more elements after size query"); maybe_initialize(); if (h == nullptr) return null_index; @@ -134,7 +134,7 @@ template <class T> int ValueRecorder<T>::maybe_find_index(T h) { return -1; // We know this handle is completely new.
} if (cindex >= first_index && _handles->at(cindex - first_index) == h) { - debug_only(_hit_indexes++); + DEBUG_ONLY(_hit_indexes++); return cindex; } if (!_indexes->cache_location_collision(cloc)) { @@ -151,7 +151,7 @@ template <class T> int ValueRecorder<T>::maybe_find_index(T h) { if (cloc != nullptr) { _indexes->set_cache_location_index(cloc, findex); } - debug_only(_missed_indexes++); + DEBUG_ONLY(_missed_indexes++); return findex; } } diff --git a/src/hotspot/share/code/relocInfo.cpp b/src/hotspot/share/code/relocInfo.cpp index ad194c71bb2..a828a8356aa 100644 --- a/src/hotspot/share/code/relocInfo.cpp +++ b/src/hotspot/share/code/relocInfo.cpp @@ -78,7 +78,7 @@ relocInfo* relocInfo::finish_prefix(short* prefix_limit) { assert(prefix_limit >= p, "must be a valid span of data"); int plen = checked_cast<int>(prefix_limit - p); if (plen == 0) { - debug_only(_value = 0xFFFF); + DEBUG_ONLY(_value = 0xFFFF); return this; // no data: remove self completely } if (plen == 1 && fits_into_immediate(p[0])) { @@ -342,7 +342,7 @@ address Relocation::old_addr_for(address newa, address Relocation::new_addr_for(address olda, const CodeBuffer* src, CodeBuffer* dest) { - debug_only(const CodeBuffer* src0 = src); + DEBUG_ONLY(const CodeBuffer* src0 = src); int sect = CodeBuffer::SECT_NONE; // Look for olda in the source buffer, and all previous incarnations // if the source buffer has been expanded. diff --git a/src/hotspot/share/code/relocInfo.hpp b/src/hotspot/share/code/relocInfo.hpp index 25cca49e50b..a10f653bbb1 100644 --- a/src/hotspot/share/code/relocInfo.hpp +++ b/src/hotspot/share/code/relocInfo.hpp @@ -574,7 +574,7 @@ class RelocIterator : public StackObj { void set_has_current(bool b) { _datalen = !b ? -1 : 0; - debug_only(_data = nullptr); + DEBUG_ONLY(_data = nullptr); } void set_current(relocInfo& ri) { _current = &ri; diff --git a/src/hotspot/share/code/stubs.cpp b/src/hotspot/share/code/stubs.cpp index 074241ff611..6ae71f93709 100644 --- a/src/hotspot/share/code/stubs.cpp +++ b/src/hotspot/share/code/stubs.cpp @@ -176,14 +176,14 @@ void StubQueue::commit(int committed_code_size) { _queue_end += committed_size; _number_of_stubs++; if (_mutex != nullptr) _mutex->unlock(); - debug_only(stub_verify(s);) + DEBUG_ONLY(stub_verify(s);) } void StubQueue::remove_first() { if (number_of_stubs() == 0) return; Stub* s = first(); - debug_only(stub_verify(s);) + DEBUG_ONLY(stub_verify(s);) stub_finalize(s); _queue_begin += stub_size(s); assert(_queue_begin <= _buffer_limit, "sanity check"); @@ -210,7 +210,7 @@ void StubQueue::remove_first(int n) { void StubQueue::remove_all(){ - debug_only(verify();) + DEBUG_ONLY(verify();) remove_first(number_of_stubs()); assert(number_of_stubs() == 0, "sanity check"); } diff --git a/src/hotspot/share/compiler/oopMap.cpp b/src/hotspot/share/compiler/oopMap.cpp index ea2c770d66f..88249fc8555 100644 --- a/src/hotspot/share/compiler/oopMap.cpp +++ b/src/hotspot/share/compiler/oopMap.cpp @@ -326,7 +326,7 @@ void OopMap::set_xxx(VMReg reg, OopMapValue::oop_types x, VMReg optional) { assert(reg->value() < _locs_length, "too big reg value for stack size"); assert( _locs_used[reg->value()] == OopMapValue::unused_value, "cannot insert twice" ); - debug_only( _locs_used[reg->value()] = x; ) + DEBUG_ONLY( _locs_used[reg->value()] = x; ) OopMapValue o(reg, x, optional); o.write_on(write_stream()); @@ -511,7 +511,7 @@ void ImmutableOopMap::update_register_map(const frame *fr, RegisterMap *reg_map) // Any reg might be saved by a safepoint handler (see generate_handler_blob).
assert( reg_map->_update_for_id == nullptr || fr->is_older(reg_map->_update_for_id), "already updated this map; do not 'update' it twice!" ); - debug_only(reg_map->_update_for_id = fr->id()); + DEBUG_ONLY(reg_map->_update_for_id = fr->id()); // Check if caller must update oop argument assert((reg_map->include_argument_oops() || diff --git a/src/hotspot/share/compiler/oopMap.hpp b/src/hotspot/share/compiler/oopMap.hpp index 634fb8b0bfa..191996f0f05 100644 --- a/src/hotspot/share/compiler/oopMap.hpp +++ b/src/hotspot/share/compiler/oopMap.hpp @@ -162,7 +162,7 @@ class OopMap: public ResourceObj { bool _has_derived_oops; CompressedWriteStream* _write_stream; - debug_only( OopMapValue::oop_types* _locs_used; int _locs_length;) + DEBUG_ONLY( OopMapValue::oop_types* _locs_used; int _locs_length;) // Accessors int omv_count() const { return _omv_count; } diff --git a/src/hotspot/share/gc/parallel/objectStartArray.cpp b/src/hotspot/share/gc/parallel/objectStartArray.cpp index e8d94b28d18..d2c91b302b5 100644 --- a/src/hotspot/share/gc/parallel/objectStartArray.cpp +++ b/src/hotspot/share/gc/parallel/objectStartArray.cpp @@ -121,7 +121,7 @@ void ObjectStartArray::update_for_block_work(HeapWord* blk_start, assert(start_entry_for_region > end_entry, "Sanity check"); } - debug_only(verify_for_block(blk_start, blk_end);) + DEBUG_ONLY(verify_for_block(blk_start, blk_end);) } void ObjectStartArray::verify_for_block(HeapWord* blk_start, HeapWord* blk_end) const { diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.cpp b/src/hotspot/share/gc/parallel/psParallelCompact.cpp index e63ff686312..911384c23bf 100644 --- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp +++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp @@ -1629,7 +1629,7 @@ void PSParallelCompact::forward_to_new_addr() { } task(nworkers); ParallelScavengeHeap::heap()->workers().run_task(&task); - debug_only(verify_forward();) + DEBUG_ONLY(verify_forward();) } #ifdef ASSERT diff --git a/src/hotspot/share/gc/parallel/psPromotionLAB.cpp b/src/hotspot/share/gc/parallel/psPromotionLAB.cpp index e3ed819ceb1..a6612d5cd2d 100644 --- a/src/hotspot/share/gc/parallel/psPromotionLAB.cpp +++ b/src/hotspot/share/gc/parallel/psPromotionLAB.cpp @@ -45,7 +45,7 @@ void PSPromotionLAB::initialize(MemRegion lab) { // We can be initialized to a zero size! if (free() > 0) { if (ZapUnusedHeapArea) { - debug_only(Copy::fill_to_words(top(), free()/HeapWordSize, badHeapWord)); + DEBUG_ONLY(Copy::fill_to_words(top(), free()/HeapWordSize, badHeapWord)); } // NOTE! We need to allow space for a filler object. diff --git a/src/hotspot/share/gc/parallel/psPromotionLAB.hpp b/src/hotspot/share/gc/parallel/psPromotionLAB.hpp index e8e42d3754b..60bd4ec250a 100644 --- a/src/hotspot/share/gc/parallel/psPromotionLAB.hpp +++ b/src/hotspot/share/gc/parallel/psPromotionLAB.hpp @@ -55,7 +55,7 @@ class PSPromotionLAB : public CHeapObj<mtGC> { void set_end(HeapWord* value) { _end = value; } // The shared initialize code invokes this.
- debug_only(virtual bool lab_is_valid(MemRegion lab) { return false; }); + DEBUG_ONLY(virtual bool lab_is_valid(MemRegion lab) { return false; }); PSPromotionLAB() : _top(nullptr), _bottom(nullptr), _end(nullptr), _state(zero_size) { } @@ -95,7 +95,7 @@ class PSYoungPromotionLAB : public PSPromotionLAB { // Not MT safe inline HeapWord* allocate(size_t size); - debug_only(virtual bool lab_is_valid(MemRegion lab);) + DEBUG_ONLY(virtual bool lab_is_valid(MemRegion lab);) }; class PSOldPromotionLAB : public PSPromotionLAB { @@ -127,7 +127,7 @@ class PSOldPromotionLAB : public PSPromotionLAB { return nullptr; } - debug_only(virtual bool lab_is_valid(MemRegion lab)); + DEBUG_ONLY(virtual bool lab_is_valid(MemRegion lab)); }; #endif // SHARE_GC_PARALLEL_PSPROMOTIONLAB_HPP diff --git a/src/hotspot/share/gc/serial/serialBlockOffsetTable.cpp b/src/hotspot/share/gc/serial/serialBlockOffsetTable.cpp index 5baad7f995a..afaf1aba538 100644 --- a/src/hotspot/share/gc/serial/serialBlockOffsetTable.cpp +++ b/src/hotspot/share/gc/serial/serialBlockOffsetTable.cpp @@ -155,7 +155,7 @@ void SerialBlockOffsetTable::update_for_block_work(HeapWord* blk_start, assert(start_card_for_region > end_card, "Sanity check"); } - debug_only(verify_for_block(blk_start, blk_end);) + DEBUG_ONLY(verify_for_block(blk_start, blk_end);) } HeapWord* SerialBlockOffsetTable::block_start_reaching_into_card(const void* addr) const { diff --git a/src/hotspot/share/gc/shared/collectedHeap.hpp b/src/hotspot/share/gc/shared/collectedHeap.hpp index 439563a4b62..8bf47d6b0bb 100644 --- a/src/hotspot/share/gc/shared/collectedHeap.hpp +++ b/src/hotspot/share/gc/shared/collectedHeap.hpp @@ -179,7 +179,7 @@ protected: virtual void trace_heap(GCWhen::Type when, const GCTracer* tracer); // Verification functions - debug_only(static void check_for_valid_allocation_state();) + DEBUG_ONLY(static void check_for_valid_allocation_state();) public: enum Name { diff --git a/src/hotspot/share/gc/shared/hSpaceCounters.cpp b/src/hotspot/share/gc/shared/hSpaceCounters.cpp index de5dd2912a5..818d7422fba 100644 --- a/src/hotspot/share/gc/shared/hSpaceCounters.cpp +++ b/src/hotspot/share/gc/shared/hSpaceCounters.cpp @@ -82,7 +82,7 @@ void HSpaceCounters::update_all(size_t capacity, size_t used) { update_used(used); } -debug_only( +DEBUG_ONLY( // for security reasons, we do not allow arbitrary reads from // the counters as they may live in shared memory. jlong HSpaceCounters::used() { diff --git a/src/hotspot/share/gc/shared/hSpaceCounters.hpp b/src/hotspot/share/gc/shared/hSpaceCounters.hpp index 63aabf1479b..01310e456f6 100644 --- a/src/hotspot/share/gc/shared/hSpaceCounters.hpp +++ b/src/hotspot/share/gc/shared/hSpaceCounters.hpp @@ -56,7 +56,7 @@ class HSpaceCounters: public CHeapObj<mtGC> { void update_all(size_t capacity, size_t used); - debug_only( + DEBUG_ONLY( // for security reasons, we do not allow arbitrary reads from // the counters as they may live in shared memory. jlong used(); diff --git a/src/hotspot/share/gc/shared/memAllocator.cpp b/src/hotspot/share/gc/shared/memAllocator.cpp index 74ef1f66184..09ac5d25f86 100644 --- a/src/hotspot/share/gc/shared/memAllocator.cpp +++ b/src/hotspot/share/gc/shared/memAllocator.cpp @@ -145,7 +145,7 @@ void MemAllocator::Allocation::verify_before() { // not take out a lock if from tlab, so clear here. JavaThread* THREAD = _thread; // For exception macros.
assert(!HAS_PENDING_EXCEPTION, "Should not allocate with exception pending"); - debug_only(check_for_valid_allocation_state()); + DEBUG_ONLY(check_for_valid_allocation_state()); assert(!Universe::heap()->is_stw_gc_active(), "Allocation during GC pause not allowed"); } @@ -327,7 +327,7 @@ HeapWord* MemAllocator::mem_allocate(Allocation& allocation) const { } // Allocation of an oop can always invoke a safepoint. - debug_only(allocation._thread->check_for_valid_safepoint_state()); + DEBUG_ONLY(allocation._thread->check_for_valid_safepoint_state()); if (UseTLAB) { // Try refilling the TLAB and allocating the object in it. diff --git a/src/hotspot/share/gc/shared/scavengableNMethods.cpp b/src/hotspot/share/gc/shared/scavengableNMethods.cpp index 0dff5526911..887ac5f43a2 100644 --- a/src/hotspot/share/gc/shared/scavengableNMethods.cpp +++ b/src/hotspot/share/gc/shared/scavengableNMethods.cpp @@ -131,13 +131,13 @@ bool ScavengableNMethods::has_scavengable_oops(nmethod* nm) { void ScavengableNMethods::nmethods_do_and_prune(NMethodToOopClosure* cl) { assert_locked_or_safepoint(CodeCache_lock); - debug_only(mark_on_list_nmethods()); + DEBUG_ONLY(mark_on_list_nmethods()); nmethod* prev = nullptr; nmethod* cur = _head; while (cur != nullptr) { ScavengableNMethodsData data = gc_data(cur); - debug_only(data.clear_marked()); + DEBUG_ONLY(data.clear_marked()); assert(data.on_list(), "else shouldn't be on this list"); if (cl != nullptr) { @@ -156,7 +156,7 @@ void ScavengableNMethods::nmethods_do_and_prune(NMethodToOopClosure* cl) { } // Check for stray marks. - debug_only(verify_nmethods()); + DEBUG_ONLY(verify_nmethods()); } void ScavengableNMethods::prune_nmethods_not_into_young() { @@ -166,13 +166,13 @@ void ScavengableNMethods::prune_nmethods_not_into_young() { void ScavengableNMethods::prune_unlinked_nmethods() { assert_locked_or_safepoint(CodeCache_lock); - debug_only(mark_on_list_nmethods()); + DEBUG_ONLY(mark_on_list_nmethods()); nmethod* prev = nullptr; nmethod* cur = _head; while (cur != nullptr) { ScavengableNMethodsData data = gc_data(cur); - debug_only(data.clear_marked()); + DEBUG_ONLY(data.clear_marked()); assert(data.on_list(), "else shouldn't be on this list"); nmethod* const next = data.next(); @@ -187,7 +187,7 @@ void ScavengableNMethods::prune_unlinked_nmethods() { } // Check for stray marks. - debug_only(verify_nmethods()); + DEBUG_ONLY(verify_nmethods()); } // Walk the list of methods which might contain oops to the java heap. 
diff --git a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp index d71e84d33b0..f12b3dc5fa8 100644 --- a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp +++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp @@ -896,7 +896,7 @@ void ShenandoahBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCo uint gc_state_idx = Compile::AliasIdxRaw; const TypePtr* gc_state_adr_type = nullptr; // debug-mode-only argument - debug_only(gc_state_adr_type = phase->C->get_adr_type(gc_state_idx)); + DEBUG_ONLY(gc_state_adr_type = phase->C->get_adr_type(gc_state_idx)); Node* gc_state = phase->transform_later(new LoadBNode(ctrl, mem, gc_state_addr, gc_state_adr_type, TypeInt::BYTE, MemNode::unordered)); Node* stable_and = phase->transform_later(new AndINode(gc_state, phase->igvn().intcon(ShenandoahHeap::HAS_FORWARDED))); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp index 1d353163463..25b900f8d77 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp @@ -190,11 +190,11 @@ void ShenandoahCollectionSet::print_on(outputStream* out) const { byte_size_in_proper_unit(live()), proper_unit_for_byte_size(live()), byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used())); - debug_only(size_t regions = 0;) + DEBUG_ONLY(size_t regions = 0;) for (size_t index = 0; index < _heap->num_regions(); index ++) { if (is_in(index)) { _heap->get_region(index)->print_on(out); - debug_only(regions ++;) + DEBUG_ONLY(regions ++;) } } assert(regions == count(), "Must match"); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp b/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp index 342b599caf5..af661fd1dc4 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp @@ -309,14 +309,14 @@ private: volatile jint _claimed_index; shenandoah_padding(1); - debug_only(uint _reserved; ) + DEBUG_ONLY(uint _reserved; ) public: using GenericTaskQueueSet<T, MT>::size; public: ParallelClaimableQueueSet(int n) : GenericTaskQueueSet<T, MT>(n), _claimed_index(0) { - debug_only(_reserved = 0; ) + DEBUG_ONLY(_reserved = 0; ) } void clear_claimed() { _claimed_index = 0; } @@ -326,10 +326,10 @@ public: void reserve(uint n) { assert(n <= size(), "Sanity"); _claimed_index = (jint)n; - debug_only(_reserved = n;) + DEBUG_ONLY(_reserved = n;) } - debug_only(uint get_reserved() const { return (uint)_reserved; }) + DEBUG_ONLY(uint get_reserved() const { return (uint)_reserved; }) }; template <class T, MemTag MT> diff --git a/src/hotspot/share/interpreter/oopMapCache.cpp b/src/hotspot/share/interpreter/oopMapCache.cpp index 62ce860594b..e577ce42c1e 100644 --- a/src/hotspot/share/interpreter/oopMapCache.cpp +++ b/src/hotspot/share/interpreter/oopMapCache.cpp @@ -311,7 +311,7 @@ void OopMapCacheEntry::deallocate_bit_mask() { assert(!Thread::current()->resource_area()->contains((void*)_bit_mask[0]), "This bit mask should not be in the resource area"); FREE_C_HEAP_ARRAY(uintptr_t, _bit_mask[0]); - debug_only(_bit_mask[0] = 0;) + DEBUG_ONLY(_bit_mask[0] = 0;) } } diff --git a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp b/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp index 8dbc3d4fcea..7736d3f4565 100644 --- 
a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp +++ b/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp @@ -504,7 +504,7 @@ void JfrCheckpointManager::begin_epoch_shift() { void JfrCheckpointManager::end_epoch_shift() { assert(SafepointSynchronize::is_at_safepoint(), "invariant"); - debug_only(const u1 current_epoch = JfrTraceIdEpoch::current();) + DEBUG_ONLY(const u1 current_epoch = JfrTraceIdEpoch::current();) JfrTraceIdEpoch::end_epoch_shift(); assert(current_epoch != JfrTraceIdEpoch::current(), "invariant"); JfrStringPool::on_epoch_shift(); diff --git a/src/hotspot/share/jfr/recorder/storage/jfrStorage.cpp b/src/hotspot/share/jfr/recorder/storage/jfrStorage.cpp index a7a21f65944..ca54b297b38 100644 --- a/src/hotspot/share/jfr/recorder/storage/jfrStorage.cpp +++ b/src/hotspot/share/jfr/recorder/storage/jfrStorage.cpp @@ -398,7 +398,7 @@ static void assert_flush_large_precondition(ConstBufferPtr cur, const u1* const #endif // ASSERT BufferPtr JfrStorage::flush(BufferPtr cur, size_t used, size_t req, bool native, Thread* t) { - debug_only(assert_flush_precondition(cur, used, native, t);) + DEBUG_ONLY(assert_flush_precondition(cur, used, native, t);) const u1* const cur_pos = cur->pos(); req += used; // requested size now encompass the outstanding used size @@ -407,7 +407,7 @@ BufferPtr JfrStorage::flush(BufferPtr cur, size_t used, size_t req, bool native, } BufferPtr JfrStorage::flush_regular(BufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) { - debug_only(assert_flush_regular_precondition(cur, cur_pos, used, req, t);) + DEBUG_ONLY(assert_flush_regular_precondition(cur, cur_pos, used, req, t);) // A flush is needed before memmove since a non-large buffer is thread stable // (thread local). The flush will not modify memory in addresses above pos() // which is where the "used / uncommitted" data resides. It is therefore both @@ -450,7 +450,7 @@ static BufferPtr restore_shelved_buffer(bool native, Thread* t) { } BufferPtr JfrStorage::flush_large(BufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) { - debug_only(assert_flush_large_precondition(cur, cur_pos, used, req, native, t);) + DEBUG_ONLY(assert_flush_large_precondition(cur, cur_pos, used, req, native, t);) // Can the "regular" buffer (now shelved) accommodate the requested size? BufferPtr shelved = t->jfr_thread_local()->shelved_buffer(); assert(shelved != nullptr, "invariant"); @@ -480,7 +480,7 @@ static BufferPtr large_fail(BufferPtr cur, bool native, JfrStorage& storage_inst // even though it might be smaller than the requested size. // Caller needs to ensure if the size was successfully accommodated. 
BufferPtr JfrStorage::provision_large(BufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) { - debug_only(assert_provision_large_precondition(cur, used, req, t);) + DEBUG_ONLY(assert_provision_large_precondition(cur, used, req, t);) assert(t->jfr_thread_local()->shelved_buffer() != nullptr, "invariant"); BufferPtr const buffer = acquire_large(req, t); if (buffer == nullptr) { diff --git a/src/hotspot/share/jfr/utilities/jfrAllocation.cpp b/src/hotspot/share/jfr/utilities/jfrAllocation.cpp index f97838c7869..faf0baf2bcb 100644 --- a/src/hotspot/share/jfr/utilities/jfrAllocation.cpp +++ b/src/hotspot/share/jfr/utilities/jfrAllocation.cpp @@ -83,7 +83,7 @@ static void hook_memory_allocation(const char* allocation, size_t alloc_size) { vm_exit_out_of_memory(alloc_size, OOM_MALLOC_ERROR, "AllocateHeap"); } } - debug_only(add(alloc_size)); + DEBUG_ONLY(add(alloc_size)); } void JfrCHeapObj::on_memory_allocation(const void* allocation, size_t size) { @@ -111,12 +111,12 @@ void* JfrCHeapObj::operator new [](size_t size, const std::nothrow_t& nothrow_c } void JfrCHeapObj::operator delete(void* p, size_t size) { - debug_only(hook_memory_deallocation(size);) + DEBUG_ONLY(hook_memory_deallocation(size);) CHeapObj<mtTracing>::operator delete(p); } void JfrCHeapObj::operator delete[](void* p, size_t size) { - debug_only(hook_memory_deallocation(size);) + DEBUG_ONLY(hook_memory_deallocation(size);) CHeapObj<mtTracing>::operator delete[](p); } @@ -127,7 +127,7 @@ char* JfrCHeapObj::realloc_array(char* old, size_t size) { } void JfrCHeapObj::free(void* p, size_t size) { - debug_only(hook_memory_deallocation(size);) + DEBUG_ONLY(hook_memory_deallocation(size);) FreeHeap(p); } diff --git a/src/hotspot/share/jfr/utilities/jfrDoublyLinkedList.hpp b/src/hotspot/share/jfr/utilities/jfrDoublyLinkedList.hpp index 4e66af7f478..814f15017e0 100644 --- a/src/hotspot/share/jfr/utilities/jfrDoublyLinkedList.hpp +++ b/src/hotspot/share/jfr/utilities/jfrDoublyLinkedList.hpp @@ -205,7 +205,7 @@ void JfrDoublyLinkedList<T>::append_list(T* const head_node, T* const tail_node, } *lt = tail_node; const T* node = head_node; - debug_only(validate_count_param(node, count);) + DEBUG_ONLY(validate_count_param(node, count);) _count += count; assert(tail() == tail_node, "invariant"); assert(in_list(tail_node), "not in list error"); diff --git a/src/hotspot/share/jfr/writers/jfrMemoryWriterHost.hpp b/src/hotspot/share/jfr/writers/jfrMemoryWriterHost.hpp index ad47c954fd7..af87f2bc5fb 100644 --- a/src/hotspot/share/jfr/writers/jfrMemoryWriterHost.hpp +++ b/src/hotspot/share/jfr/writers/jfrMemoryWriterHost.hpp @@ -45,7 +45,7 @@ class ExclusiveAccessAssert { template <typename Adapter, typename AccessAssert = ExclusiveAccessAssert> class MemoryWriterHost : public StorageHost<Adapter> { - debug_only(AccessAssert _access;) + DEBUG_ONLY(AccessAssert _access;) public: typedef typename Adapter::StorageType StorageType; protected: @@ -53,7 +53,7 @@ class MemoryWriterHost : public StorageHost<Adapter> { MemoryWriterHost(StorageType* storage, Thread* thread); MemoryWriterHost(StorageType* storage, size_t size); MemoryWriterHost(Thread* thread); - debug_only(bool is_acquired() const;) + DEBUG_ONLY(bool is_acquired() const;) public: void acquire(); void release(); diff --git a/src/hotspot/share/jfr/writers/jfrMemoryWriterHost.inline.hpp b/src/hotspot/share/jfr/writers/jfrMemoryWriterHost.inline.hpp index 55229e5d732..ad16b7abde3 100644 --- a/src/hotspot/share/jfr/writers/jfrMemoryWriterHost.inline.hpp +++ b/src/hotspot/share/jfr/writers/jfrMemoryWriterHost.inline.hpp @@ -52,18 +52,18 @@ inline
MemoryWriterHost<Adapter, AccessAssert>::MemoryWriterHost(Thread* thr template <typename Adapter, typename AccessAssert> inline void MemoryWriterHost<Adapter, AccessAssert>::acquire() { - debug_only(_access.acquire();) + DEBUG_ONLY(_access.acquire();) if (!this->is_valid()) { this->flush(); } - debug_only(is_acquired();) + DEBUG_ONLY(is_acquired();) } template <typename Adapter, typename AccessAssert> inline void MemoryWriterHost<Adapter, AccessAssert>::release() { - debug_only(is_acquired();) + DEBUG_ONLY(is_acquired();) StorageHost<Adapter>::release(); - debug_only(_access.release();) + DEBUG_ONLY(_access.release();) } #ifdef ASSERT diff --git a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp index cffac62c1c8..a4a8f3bb1d0 100644 --- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp +++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp @@ -173,7 +173,7 @@ Handle JavaArgumentUnboxer::next_arg(BasicType expectedType) { ThreadInVMfromNative __tiv(thread); \ HandleMarkCleaner __hm(thread); \ JavaThread* THREAD = thread; \ - debug_only(VMNativeEntryWrapper __vew;) + DEBUG_ONLY(VMNativeEntryWrapper __vew;) // Native method block that transitions current thread to '_thread_in_vm'. // Note: CompilerThreadCanCallJava must precede JVMCIENV_FROM_JNI so that diff --git a/src/hotspot/share/logging/logConfiguration.cpp b/src/hotspot/share/logging/logConfiguration.cpp index 662c1f1c2bb..b883dbccacf 100644 --- a/src/hotspot/share/logging/logConfiguration.cpp +++ b/src/hotspot/share/logging/logConfiguration.cpp @@ -59,17 +59,17 @@ class ConfigurationLock : public StackObj { private: // Semaphore used as lock static Semaphore _semaphore; - debug_only(static intx _locking_thread_id;) + DEBUG_ONLY(static intx _locking_thread_id;) public: ConfigurationLock() { _semaphore.wait(); - debug_only(_locking_thread_id = os::current_thread_id()); + DEBUG_ONLY(_locking_thread_id = os::current_thread_id()); } ~ConfigurationLock() { - debug_only(_locking_thread_id = -1); + DEBUG_ONLY(_locking_thread_id = -1); _semaphore.signal(); } - debug_only(static bool current_thread_has_lock();) + DEBUG_ONLY(static bool current_thread_has_lock();) }; Semaphore ConfigurationLock::_semaphore(1); diff --git a/src/hotspot/share/memory/universe.cpp b/src/hotspot/share/memory/universe.cpp index e846eb3ddde..156c96d621f 100644 --- a/src/hotspot/share/memory/universe.cpp +++ b/src/hotspot/share/memory/universe.cpp @@ -165,8 +165,8 @@ uintx Universe::_the_array_interfaces_bitmap = 0; uintx Universe::_the_empty_klass_bitmap = 0; // These variables are guarded by FullGCALot_lock.
-debug_only(OopHandle Universe::_fullgc_alot_dummy_array;) -debug_only(int Universe::_fullgc_alot_dummy_next = 0;) +DEBUG_ONLY(OopHandle Universe::_fullgc_alot_dummy_array;) +DEBUG_ONLY(int Universe::_fullgc_alot_dummy_next = 0;) // Heap int Universe::_verify_count = 0; diff --git a/src/hotspot/share/memory/universe.hpp b/src/hotspot/share/memory/universe.hpp index 69f8642d6da..35c31330f08 100644 --- a/src/hotspot/share/memory/universe.hpp +++ b/src/hotspot/share/memory/universe.hpp @@ -117,8 +117,8 @@ class Universe: AllStatic { static intptr_t _non_oop_bits; // array of dummy objects used with +FullGCAlot - debug_only(static OopHandle _fullgc_alot_dummy_array;) - debug_only(static int _fullgc_alot_dummy_next;) + DEBUG_ONLY(static OopHandle _fullgc_alot_dummy_array;) + DEBUG_ONLY(static int _fullgc_alot_dummy_next;) // Compiler/dispatch support static int _base_vtable_size; // Java vtbl size of klass Object (in words) @@ -357,7 +357,7 @@ class Universe: AllStatic { // Change the number of dummy objects kept reachable by the full gc dummy // array; this should trigger relocation in a sliding compaction collector. - debug_only(static bool release_fullgc_alot_dummy();) + DEBUG_ONLY(static bool release_fullgc_alot_dummy();) // The non-oop pattern (see compiledIC.hpp, etc) static void* non_oop_word(); static bool contains_non_oop_word(void* p); diff --git a/src/hotspot/share/memory/virtualspace.cpp b/src/hotspot/share/memory/virtualspace.cpp index fa1de208804..2c2d629f032 100644 --- a/src/hotspot/share/memory/virtualspace.cpp +++ b/src/hotspot/share/memory/virtualspace.cpp @@ -200,7 +200,7 @@ static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre return true; } - debug_only(warning( + DEBUG_ONLY(warning( "INFO: os::commit_memory(" PTR_FORMAT ", " PTR_FORMAT " size=%zu, executable=%d) failed", p2i(start), p2i(start + size), size, executable);) @@ -371,7 +371,7 @@ void VirtualSpace::shrink_by(size_t size) { aligned_upper_new_high + upper_needs <= upper_high_boundary(), "must not shrink beyond region"); if (!os::uncommit_memory(aligned_upper_new_high, upper_needs, _executable)) { - debug_only(warning("os::uncommit_memory failed")); + DEBUG_ONLY(warning("os::uncommit_memory failed")); return; } else { _upper_high -= upper_needs; @@ -382,7 +382,7 @@ void VirtualSpace::shrink_by(size_t size) { aligned_middle_new_high + middle_needs <= middle_high_boundary(), "must not shrink beyond region"); if (!os::uncommit_memory(aligned_middle_new_high, middle_needs, _executable)) { - debug_only(warning("os::uncommit_memory failed")); + DEBUG_ONLY(warning("os::uncommit_memory failed")); return; } else { _middle_high -= middle_needs; @@ -393,7 +393,7 @@ void VirtualSpace::shrink_by(size_t size) { aligned_lower_new_high + lower_needs <= lower_high_boundary(), "must not shrink beyond region"); if (!os::uncommit_memory(aligned_lower_new_high, lower_needs, _executable)) { - debug_only(warning("os::uncommit_memory failed")); + DEBUG_ONLY(warning("os::uncommit_memory failed")); return; } else { _lower_high -= lower_needs; diff --git a/src/hotspot/share/oops/cpCache.hpp b/src/hotspot/share/oops/cpCache.hpp index 6490a012e9a..83af4b88e32 100644 --- a/src/hotspot/share/oops/cpCache.hpp +++ b/src/hotspot/share/oops/cpCache.hpp @@ -76,7 +76,7 @@ class ConstantPoolCache: public MetaspaceObj { Array<ResolvedMethodEntry>* _resolved_method_entries; // Sizing - debug_only(friend class ClassVerifier;) + DEBUG_ONLY(friend class ClassVerifier;) public: // specific but defiinitions for ldc diff --git
a/src/hotspot/share/oops/generateOopMap.cpp b/src/hotspot/share/oops/generateOopMap.cpp index a3db976046d..a17d1ca4e37 100644 --- a/src/hotspot/share/oops/generateOopMap.cpp +++ b/src/hotspot/share/oops/generateOopMap.cpp @@ -2027,7 +2027,7 @@ void GenerateOopMap::ret_jump_targets_do(BytecodeStream *bcs, jmpFct_t jmpFct, i int target_bci = rtEnt->jsrs(i); // Make sure a jrtRet does not set the changed bit for dead basicblock. BasicBlock* jsr_bb = get_basic_block_containing(target_bci - 1); - debug_only(BasicBlock* target_bb = &jsr_bb[1];) + DEBUG_ONLY(BasicBlock* target_bb = &jsr_bb[1];) assert(target_bb == get_basic_block_at(target_bci), "wrong calc. of successor basicblock"); bool alive = jsr_bb->is_alive(); if (TraceNewOopMapGeneration) { diff --git a/src/hotspot/share/oops/instanceKlass.cpp b/src/hotspot/share/oops/instanceKlass.cpp index 715c8f473d0..32445759491 100644 --- a/src/hotspot/share/oops/instanceKlass.cpp +++ b/src/hotspot/share/oops/instanceKlass.cpp @@ -1321,7 +1321,7 @@ void InstanceKlass::initialize_impl(TRAPS) { // Step 9 if (!HAS_PENDING_EXCEPTION) { set_initialization_state_and_notify(fully_initialized, CHECK); - debug_only(vtable().verify(tty, true);) + DEBUG_ONLY(vtable().verify(tty, true);) } else { // Step 10 and 11 @@ -4191,7 +4191,7 @@ JNIid::JNIid(Klass* holder, int offset, JNIid* next) { _holder = holder; _offset = offset; _next = next; - debug_only(_is_static_field_id = false;) + DEBUG_ONLY(_is_static_field_id = false;) } diff --git a/src/hotspot/share/oops/instanceRefKlass.cpp b/src/hotspot/share/oops/instanceRefKlass.cpp index eb507495cf5..b8327492493 100644 --- a/src/hotspot/share/oops/instanceRefKlass.cpp +++ b/src/hotspot/share/oops/instanceRefKlass.cpp @@ -71,10 +71,10 @@ void InstanceRefKlass::update_nonstatic_oop_maps(Klass* k) { InstanceKlass* ik = InstanceKlass::cast(k); // Check that we have the right class - debug_only(static bool first_time = true); + DEBUG_ONLY(static bool first_time = true); assert(k == vmClasses::Reference_klass() && first_time, "Invalid update of maps"); - debug_only(first_time = false); + DEBUG_ONLY(first_time = false); assert(ik->nonstatic_oop_map_count() == 1, "just checking"); OopMapBlock* map = ik->start_of_nonstatic_oop_maps(); diff --git a/src/hotspot/share/oops/klass.cpp b/src/hotspot/share/oops/klass.cpp index ac0e125ab4c..b0d2e84335e 100644 --- a/src/hotspot/share/oops/klass.cpp +++ b/src/hotspot/share/oops/klass.cpp @@ -678,7 +678,7 @@ void Klass::append_to_sibling_list() { if (Universe::is_fully_initialized()) { assert_locked_or_safepoint(Compile_lock); } - debug_only(verify();) + DEBUG_ONLY(verify();) // add ourselves to superklass' subklass list InstanceKlass* super = superklass(); if (super == nullptr) return; // special case: class Object @@ -703,7 +703,7 @@ void Klass::append_to_sibling_list() { return; } } - debug_only(verify();) + DEBUG_ONLY(verify();) } void Klass::clean_subklass() { diff --git a/src/hotspot/share/oops/method.cpp b/src/hotspot/share/oops/method.cpp index 0c4430b44c3..07f2c559604 100644 --- a/src/hotspot/share/oops/method.cpp +++ b/src/hotspot/share/oops/method.cpp @@ -1256,7 +1256,7 @@ address Method::make_adapters(const methodHandle& mh, TRAPS) { // or adapter that it points to is still live and valid. // This function must not hit a safepoint! 
address Method::verified_code_entry() { - debug_only(NoSafepointVerifier nsv;) + DEBUG_ONLY(NoSafepointVerifier nsv;) assert(_from_compiled_entry != nullptr, "must be set"); return _from_compiled_entry; } diff --git a/src/hotspot/share/opto/block.cpp b/src/hotspot/share/opto/block.cpp index 5d4ac471303..c33f656047d 100644 --- a/src/hotspot/share/opto/block.cpp +++ b/src/hotspot/share/opto/block.cpp @@ -42,7 +42,7 @@ void Block_Array::grow( uint i ) { if (i < Max()) { return; // No need to grow } - debug_only(_limit = i+1); + DEBUG_ONLY(_limit = i+1); if( i < _size ) return; if( !_size ) { _size = 1; diff --git a/src/hotspot/share/opto/block.hpp b/src/hotspot/share/opto/block.hpp index 4ac6399d3a0..5baa72dfffb 100644 --- a/src/hotspot/share/opto/block.hpp +++ b/src/hotspot/share/opto/block.hpp @@ -48,7 +48,7 @@ struct Tarjan; // allocation I do not need a destructor to reclaim storage. class Block_Array : public ArenaObj { uint _size; // allocated size, as opposed to formal limit - debug_only(uint _limit;) // limit to formal domain + DEBUG_ONLY(uint _limit;) // limit to formal domain Arena *_arena; // Arena to allocate in ReallocMark _nesting; // Safety checks for arena reallocation protected: @@ -57,7 +57,7 @@ protected: public: Block_Array(Arena *a) : _size(OptoBlockListSize), _arena(a) { - debug_only(_limit=0); + DEBUG_ONLY(_limit=0); _blocks = NEW_ARENA_ARRAY( a, Block *, OptoBlockListSize ); for( int i = 0; i < OptoBlockListSize; i++ ) { _blocks[i] = nullptr; @@ -69,7 +69,7 @@ public: { assert( i < Max(), "oob" ); return _blocks[i]; } // Extend the mapping: index i maps to Block *n. void map( uint i, Block *n ) { grow(i); _blocks[i] = n; } - uint Max() const { debug_only(return _limit); return _size; } + uint Max() const { DEBUG_ONLY(return _limit); return _size; } }; diff --git a/src/hotspot/share/opto/buildOopMap.cpp b/src/hotspot/share/opto/buildOopMap.cpp index f135df21114..675113163e8 100644 --- a/src/hotspot/share/opto/buildOopMap.cpp +++ b/src/hotspot/share/opto/buildOopMap.cpp @@ -191,7 +191,7 @@ void OopFlow::clone( OopFlow *flow, int max_size ) { OopFlow *OopFlow::make( Arena *A, int max_size, Compile* C ) { short *callees = NEW_ARENA_ARRAY(A,short,max_size+1); Node **defs = NEW_ARENA_ARRAY(A,Node*,max_size+1); - debug_only( memset(defs,0,(max_size+1)*sizeof(Node*)) ); + DEBUG_ONLY( memset(defs,0,(max_size+1)*sizeof(Node*)) ); OopFlow *flow = new (A) OopFlow(callees+1, defs+1, C); assert( &flow->_callees[OptoReg::Bad] == callees, "Ok to index at OptoReg::Bad" ); assert( &flow->_defs [OptoReg::Bad] == defs , "Ok to index at OptoReg::Bad" ); @@ -209,7 +209,7 @@ static void clr_live_bit( int *live, int reg ) { OopMap *OopFlow::build_oop_map( Node *n, int max_reg, PhaseRegAlloc *regalloc, int* live ) { int framesize = regalloc->_framesize; int max_inarg_slot = OptoReg::reg2stack(regalloc->_matcher._new_SP); - debug_only( char *dup_check = NEW_RESOURCE_ARRAY(char,OptoReg::stack0()); + DEBUG_ONLY( char *dup_check = NEW_RESOURCE_ARRAY(char,OptoReg::stack0()); memset(dup_check,0,OptoReg::stack0()) ); OopMap *omap = new OopMap( framesize, max_inarg_slot ); @@ -351,7 +351,7 @@ OopMap *OopFlow::build_oop_map( Node *n, int max_reg, PhaseRegAlloc *regalloc, i } else if( OptoReg::is_valid(_callees[reg])) { // callee-save? 
// It's a callee-save value assert( dup_check[_callees[reg]]==0, "trying to callee save same reg twice" ); - debug_only( dup_check[_callees[reg]]=1; ) + DEBUG_ONLY( dup_check[_callees[reg]]=1; ) VMReg callee = OptoReg::as_VMReg(OptoReg::Name(_callees[reg])); omap->set_callee_saved(r, callee); diff --git a/src/hotspot/share/opto/callnode.cpp b/src/hotspot/share/opto/callnode.cpp index bb30dcb2976..3527e46c24b 100644 --- a/src/hotspot/share/opto/callnode.cpp +++ b/src/hotspot/share/opto/callnode.cpp @@ -266,8 +266,8 @@ JVMState::JVMState(ciMethod* method, JVMState* caller) : assert(method != nullptr, "must be valid call site"); _bci = InvocationEntryBci; _reexecute = Reexecute_Undefined; - debug_only(_bci = -99); // random garbage value - debug_only(_map = (SafePointNode*)-1); + DEBUG_ONLY(_bci = -99); // random garbage value + DEBUG_ONLY(_map = (SafePointNode*)-1); _caller = caller; _depth = 1 + (caller == nullptr ? 0 : caller->depth()); _locoff = TypeFunc::Parms; @@ -281,7 +281,7 @@ JVMState::JVMState(int stack_size) : _method(nullptr) { _bci = InvocationEntryBci; _reexecute = Reexecute_Undefined; - debug_only(_map = (SafePointNode*)-1); + DEBUG_ONLY(_map = (SafePointNode*)-1); _caller = nullptr; _depth = 1; _locoff = TypeFunc::Parms; @@ -323,14 +323,14 @@ bool JVMState::same_calls_as(const JVMState* that) const { //------------------------------debug_start------------------------------------ uint JVMState::debug_start() const { - debug_only(JVMState* jvmroot = of_depth(1)); + DEBUG_ONLY(JVMState* jvmroot = of_depth(1)); assert(jvmroot->locoff() <= this->locoff(), "youngest JVMState must be last"); return of_depth(1)->locoff(); } //-------------------------------debug_end------------------------------------- uint JVMState::debug_end() const { - debug_only(JVMState* jvmroot = of_depth(1)); + DEBUG_ONLY(JVMState* jvmroot = of_depth(1)); assert(jvmroot->endoff() <= this->endoff(), "youngest JVMState must be last"); return endoff(); } @@ -1465,7 +1465,7 @@ void SafePointNode::push_monitor(const FastLockNode *lock) { void SafePointNode::pop_monitor() { // Delete last monitor from debug info - debug_only(int num_before_pop = jvms()->nof_monitors()); + DEBUG_ONLY(int num_before_pop = jvms()->nof_monitors()); const int MonitorEdges = 2; assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges"); int scloff = jvms()->scloff(); diff --git a/src/hotspot/share/opto/cfgnode.cpp b/src/hotspot/share/opto/cfgnode.cpp index 4885d44a13d..92f6c938dba 100644 --- a/src/hotspot/share/opto/cfgnode.cpp +++ b/src/hotspot/share/opto/cfgnode.cpp @@ -2233,7 +2233,7 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) { } // One unique input. - debug_only(Node* ident = Identity(phase)); + DEBUG_ONLY(Node* ident = Identity(phase)); // The unique input must eventually be detected by the Identity call. 
#ifdef ASSERT if (ident != uin && !ident->is_top() && !must_wait_for_region_in_irreducible_loop(phase)) { diff --git a/src/hotspot/share/opto/chaitin.cpp b/src/hotspot/share/opto/chaitin.cpp index 11e1797d034..7832cbd42f8 100644 --- a/src/hotspot/share/opto/chaitin.cpp +++ b/src/hotspot/share/opto/chaitin.cpp @@ -1323,7 +1323,7 @@ void PhaseChaitin::Simplify( ) { bool bound = lrgs(lo_score)._is_bound; // Find cheapest guy - debug_only( int lo_no_simplify=0; ); + DEBUG_ONLY( int lo_no_simplify=0; ); for (uint i = _hi_degree; i; i = lrgs(i)._next) { assert(!_ifg->_yanked->test(i), ""); // It's just vaguely possible to move hi-degree to lo-degree without @@ -1335,7 +1335,7 @@ void PhaseChaitin::Simplify( ) { lo_score = i; break; } - debug_only( if( lrgs(i)._was_lo ) lo_no_simplify=i; ); + DEBUG_ONLY( if( lrgs(i)._was_lo ) lo_no_simplify=i; ); double iscore = lrgs(i).score(); double iarea = lrgs(i)._area; double icost = lrgs(i)._cost; @@ -1577,7 +1577,7 @@ uint PhaseChaitin::Select( ) { // Remove neighbor colors IndexSet *s = _ifg->neighbors(lidx); - debug_only(RegMask orig_mask = lrg->mask();) + DEBUG_ONLY(RegMask orig_mask = lrg->mask();) if (!s->is_empty()) { IndexSetIterator elements(s); @@ -1706,8 +1706,8 @@ uint PhaseChaitin::Select( ) { ttyLocker ttyl; tty->print("L%d spilling with neighbors: ", lidx); s->dump(); - debug_only(tty->print(" original mask: ")); - debug_only(orig_mask.dump()); + DEBUG_ONLY(tty->print(" original mask: ")); + DEBUG_ONLY(orig_mask.dump()); dump_lrg(lidx); } #endif diff --git a/src/hotspot/share/opto/chaitin.hpp b/src/hotspot/share/opto/chaitin.hpp index 4b74420f996..cc3d3479c81 100644 --- a/src/hotspot/share/opto/chaitin.hpp +++ b/src/hotspot/share/opto/chaitin.hpp @@ -82,11 +82,11 @@ public: // set makes it not valid. void set_degree( uint degree ) { _eff_degree = degree; - debug_only(_degree_valid = 1;) + DEBUG_ONLY(_degree_valid = 1;) assert(!_mask.is_AllStack() || (_mask.is_AllStack() && lo_degree()), "_eff_degree can't be bigger than AllStack_size - _num_regs if the mask supports stack registers"); } // Made a change that hammered degree - void invalid_degree() { debug_only(_degree_valid=0;) } + void invalid_degree() { DEBUG_ONLY(_degree_valid=0;) } // Incrementally modify degree. If it was correct, it should remain correct void inc_degree( uint mod ) { _eff_degree += mod; @@ -128,15 +128,15 @@ public: // count of bits in the current mask. 
int get_invalid_mask_size() const { return _mask_size; } const RegMask &mask() const { return _mask; } - void set_mask( const RegMask &rm ) { _mask = rm; debug_only(_msize_valid=0;)} - void AND( const RegMask &rm ) { _mask.AND(rm); debug_only(_msize_valid=0;)} - void SUBTRACT( const RegMask &rm ) { _mask.SUBTRACT(rm); debug_only(_msize_valid=0;)} - void Clear() { _mask.Clear() ; debug_only(_msize_valid=1); _mask_size = 0; } - void Set_All() { _mask.Set_All(); debug_only(_msize_valid=1); _mask_size = RegMask::CHUNK_SIZE; } + void set_mask( const RegMask &rm ) { _mask = rm; DEBUG_ONLY(_msize_valid=0;)} + void AND( const RegMask &rm ) { _mask.AND(rm); DEBUG_ONLY(_msize_valid=0;)} + void SUBTRACT( const RegMask &rm ) { _mask.SUBTRACT(rm); DEBUG_ONLY(_msize_valid=0;)} + void Clear() { _mask.Clear() ; DEBUG_ONLY(_msize_valid=1); _mask_size = 0; } + void Set_All() { _mask.Set_All(); DEBUG_ONLY(_msize_valid=1); _mask_size = RegMask::CHUNK_SIZE; } - void Insert( OptoReg::Name reg ) { _mask.Insert(reg); debug_only(_msize_valid=0;) } - void Remove( OptoReg::Name reg ) { _mask.Remove(reg); debug_only(_msize_valid=0;) } - void clear_to_sets() { _mask.clear_to_sets(_num_regs); debug_only(_msize_valid=0;) } + void Insert( OptoReg::Name reg ) { _mask.Insert(reg); DEBUG_ONLY(_msize_valid=0;) } + void Remove( OptoReg::Name reg ) { _mask.Remove(reg); DEBUG_ONLY(_msize_valid=0;) } + void clear_to_sets() { _mask.clear_to_sets(_num_regs); DEBUG_ONLY(_msize_valid=0;) } private: // Number of registers this live range uses when it colors diff --git a/src/hotspot/share/opto/compile.cpp b/src/hotspot/share/opto/compile.cpp index e4e82cbddab..effc7e21354 100644 --- a/src/hotspot/share/opto/compile.cpp +++ b/src/hotspot/share/opto/compile.cpp @@ -474,7 +474,7 @@ void Compile::disconnect_useless_nodes(Unique_Node_List& useful, Unique_Node_Lis remove_useless_late_inlines( &_string_late_inlines, useful); remove_useless_late_inlines( &_boxing_late_inlines, useful); remove_useless_late_inlines(&_vector_reboxing_late_inlines, useful); - debug_only(verify_graph_edges(true /*check for no_dead_code*/, root_and_safepoints);) + DEBUG_ONLY(verify_graph_edges(true /*check for no_dead_code*/, root_and_safepoints);) } // ============================================================================ diff --git a/src/hotspot/share/opto/escape.cpp b/src/hotspot/share/opto/escape.cpp index 1fb34e799b2..3a6a81f656e 100644 --- a/src/hotspot/share/opto/escape.cpp +++ b/src/hotspot/share/opto/escape.cpp @@ -4567,7 +4567,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist, } } } else { - debug_only(n->dump();) + DEBUG_ONLY(n->dump();) assert(false, "EA: unexpected node"); continue; } diff --git a/src/hotspot/share/opto/graphKit.cpp b/src/hotspot/share/opto/graphKit.cpp index df9177a4938..20feca26ede 100644 --- a/src/hotspot/share/opto/graphKit.cpp +++ b/src/hotspot/share/opto/graphKit.cpp @@ -72,8 +72,8 @@ GraphKit::GraphKit() { _exceptions = nullptr; set_map(nullptr); - debug_only(_sp = -99); - debug_only(set_bci(-99)); + DEBUG_ONLY(_sp = -99); + DEBUG_ONLY(set_bci(-99)); } @@ -196,7 +196,7 @@ bool GraphKit::has_exception_handler() { void GraphKit::set_saved_ex_oop(SafePointNode* ex_map, Node* ex_oop) { assert(!has_saved_ex_oop(ex_map), "clear ex-oop before setting again"); ex_map->add_req(ex_oop); - debug_only(verify_exception_state(ex_map)); + DEBUG_ONLY(verify_exception_state(ex_map)); } inline static Node* common_saved_ex_oop(SafePointNode* ex_map, bool clear_it) { @@ -296,7 +296,7 @@ JVMState*
GraphKit::transfer_exceptions_into_jvms() { _map = clone_map(); _map->set_next_exception(nullptr); clear_saved_ex_oop(_map); - debug_only(verify_map()); + DEBUG_ONLY(verify_map()); } else { // ...or created from scratch JVMState* jvms = new (C) JVMState(_method, nullptr); @@ -672,7 +672,7 @@ ciInstance* GraphKit::builtin_throw_exception(Deoptimization::DeoptReason reason //----------------------------PreserveJVMState--------------------------------- PreserveJVMState::PreserveJVMState(GraphKit* kit, bool clone_map) { - debug_only(kit->verify_map()); + DEBUG_ONLY(kit->verify_map()); _kit = kit; _map = kit->map(); // preserve the map _sp = kit->sp(); @@ -780,7 +780,7 @@ void GraphKit::set_map_clone(SafePointNode* m) { _map = m; _map = clone_map(); _map->set_next_exception(nullptr); - debug_only(verify_map()); + DEBUG_ONLY(verify_map()); } @@ -1537,7 +1537,7 @@ Node* GraphKit::memory(uint alias_idx) { Node* GraphKit::reset_memory() { Node* mem = map()->memory(); // do not use this node for any more parsing! - debug_only( map()->set_memory((Node*)nullptr) ); + DEBUG_ONLY( map()->set_memory((Node*)nullptr) ); return _gvn.transform( mem ); } @@ -1574,7 +1574,7 @@ Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx = C->get_alias_index(_gvn.type(adr)->isa_ptr()); assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" ); const TypePtr* adr_type = nullptr; // debug-mode-only argument - debug_only(adr_type = C->get_adr_type(adr_idx)); + DEBUG_ONLY(adr_type = C->get_adr_type(adr_idx)); Node* mem = memory(adr_idx); Node* ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, require_atomic_access, unaligned, mismatched, unsafe, barrier_data); ld = _gvn.transform(ld); @@ -1602,7 +1602,7 @@ Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt, int adr_idx = C->get_alias_index(_gvn.type(adr)->isa_ptr()); assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" ); const TypePtr* adr_type = nullptr; - debug_only(adr_type = C->get_adr_type(adr_idx)); + DEBUG_ONLY(adr_type = C->get_adr_type(adr_idx)); Node *mem = memory(adr_idx); Node* st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo, require_atomic_access); if (unaligned) { diff --git a/src/hotspot/share/opto/graphKit.hpp b/src/hotspot/share/opto/graphKit.hpp index d1b58526a6d..28773d75333 100644 --- a/src/hotspot/share/opto/graphKit.hpp +++ b/src/hotspot/share/opto/graphKit.hpp @@ -145,7 +145,7 @@ class GraphKit : public Phase { _sp = jvms->sp(); _bci = jvms->bci(); _method = jvms->has_method() ? jvms->method() : nullptr; } - void set_map(SafePointNode* m) { _map = m; debug_only(verify_map()); } + void set_map(SafePointNode* m) { _map = m; DEBUG_ONLY(verify_map()); } void set_sp(int sp) { assert(sp >= 0, "sp must be non-negative: %d", sp); _sp = sp; } void clean_stack(int from_sp); // clear garbage beyond from_sp to top @@ -226,14 +226,14 @@ class GraphKit : public Phase { if (ex_map != nullptr) { _exceptions = ex_map->next_exception(); ex_map->set_next_exception(nullptr); - debug_only(verify_exception_state(ex_map)); + DEBUG_ONLY(verify_exception_state(ex_map)); } return ex_map; } // Add an exception, using the given JVM state, without commoning. 
void push_exception_state(SafePointNode* ex_map) { - debug_only(verify_exception_state(ex_map)); + DEBUG_ONLY(verify_exception_state(ex_map)); ex_map->set_next_exception(_exceptions); _exceptions = ex_map; } diff --git a/src/hotspot/share/opto/idealKit.cpp b/src/hotspot/share/opto/idealKit.cpp index 8c26e1cc39d..dd7e9ae52b7 100644 --- a/src/hotspot/share/opto/idealKit.cpp +++ b/src/hotspot/share/opto/idealKit.cpp @@ -354,7 +354,7 @@ Node* IdealKit::load(Node* ctl, assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" ); const TypePtr* adr_type = nullptr; // debug-mode-only argument - debug_only(adr_type = C->get_adr_type(adr_idx)); + DEBUG_ONLY(adr_type = C->get_adr_type(adr_idx)); Node* mem = memory(adr_idx); Node* ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, require_atomic_access); return transform(ld); @@ -366,7 +366,7 @@ Node* IdealKit::store(Node* ctl, Node* adr, Node *val, BasicType bt, bool mismatched) { assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory"); const TypePtr* adr_type = nullptr; - debug_only(adr_type = C->get_adr_type(adr_idx)); + DEBUG_ONLY(adr_type = C->get_adr_type(adr_idx)); Node *mem = memory(adr_idx); Node* st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo, require_atomic_access); if (mismatched) { diff --git a/src/hotspot/share/opto/indexSet.cpp b/src/hotspot/share/opto/indexSet.cpp index 7f02f01c83f..367f5b78af2 100644 --- a/src/hotspot/share/opto/indexSet.cpp +++ b/src/hotspot/share/opto/indexSet.cpp @@ -121,7 +121,7 @@ IndexSet::BitBlock *IndexSet::alloc_block_containing(uint element) { // Add a BitBlock to the free list. void IndexSet::free_block(uint i) { - debug_only(check_watch("free block", i)); + DEBUG_ONLY(check_watch("free block", i)); assert(i < _max_blocks, "block index too large"); BitBlock *block = _blocks[i]; assert(block != &_empty_block, "cannot free the empty block"); diff --git a/src/hotspot/share/opto/loopnode.cpp b/src/hotspot/share/opto/loopnode.cpp index c3f411eea94..195d48b37c6 100644 --- a/src/hotspot/share/opto/loopnode.cpp +++ b/src/hotspot/share/opto/loopnode.cpp @@ -359,7 +359,7 @@ void PhaseIdealLoop::insert_loop_limit_check_predicate(ParsePredicateSuccessProj // for this loop if (TraceLoopLimitCheck) { tty->print_cr("Counted Loop Limit Check generated:"); - debug_only( bol->dump(2); ) + DEBUG_ONLY( bol->dump(2); ) } #endif } diff --git a/src/hotspot/share/opto/macro.cpp b/src/hotspot/share/opto/macro.cpp index 1bd35f9dcc9..1fe59c9156b 100644 --- a/src/hotspot/share/opto/macro.cpp +++ b/src/hotspot/share/opto/macro.cpp @@ -225,7 +225,7 @@ static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_me if (!ClearArrayNode::step_through(&mem, alloc->_idx, phase)) { // Can not bypass initialization of the instance // we are looking. 
- debug_only(intptr_t offset;) + DEBUG_ONLY(intptr_t offset;) assert(alloc == AllocateNode::Ideal_allocation(mem->in(3), phase, offset), "sanity"); InitializeNode* init = alloc->as_Allocate()->initialization(); // We are looking for stored value, return Initialize node @@ -1328,7 +1328,7 @@ void PhaseMacroExpand::expand_allocate_common( // No initial test, just fall into next case assert(allocation_has_use || !expand_fast_path, "Should already have been handled"); toobig_false = ctrl; - debug_only(slow_region = NodeSentinel); + DEBUG_ONLY(slow_region = NodeSentinel); } // If we are here there are several possibilities diff --git a/src/hotspot/share/opto/matcher.cpp b/src/hotspot/share/opto/matcher.cpp index e34a43cc1e2..0849b40ad7e 100644 --- a/src/hotspot/share/opto/matcher.cpp +++ b/src/hotspot/share/opto/matcher.cpp @@ -128,7 +128,7 @@ Matcher::Matcher() idealreg2mhdebugmask[Op_RegFlags] = nullptr; idealreg2mhdebugmask[Op_RegVectMask] = nullptr; - debug_only(_mem_node = nullptr;) // Ideal memory node consumed by mach node + DEBUG_ONLY(_mem_node = nullptr;) // Ideal memory node consumed by mach node } //------------------------------warp_incoming_stk_arg------------------------ @@ -1184,7 +1184,7 @@ Node *Matcher::xform( Node *n, int max_stack ) { n->_idx); C->set_node_notes_at(m->_idx, nn); } - debug_only(match_alias_type(C, n, m)); + DEBUG_ONLY(match_alias_type(C, n, m)); } n = m; // n is now a new-space node mstack.set_node(n); @@ -1591,7 +1591,7 @@ MachNode *Matcher::match_tree( const Node *n ) { } } - debug_only( _mem_node = save_mem_node; ) + DEBUG_ONLY( _mem_node = save_mem_node; ) return m; } @@ -1965,9 +1965,9 @@ void Matcher::ReduceInst_Chain_Rule(State* s, int rule, Node* &mem, MachNode* ma assert(newrule >= _LAST_MACH_OPER, "Do NOT chain from internal operand"); mach->_opnds[1] = s->MachOperGenerator(_reduceOp[catch_op]); Node *mem1 = (Node*)1; - debug_only(Node *save_mem_node = _mem_node;) + DEBUG_ONLY(Node *save_mem_node = _mem_node;) mach->add_req( ReduceInst(s, newrule, mem1) ); - debug_only(_mem_node = save_mem_node;) + DEBUG_ONLY(_mem_node = save_mem_node;) } return; } @@ -1979,7 +1979,7 @@ uint Matcher::ReduceInst_Interior( State *s, int rule, Node *&mem, MachNode *mac if( s->_leaf->is_Load() ) { Node *mem2 = s->_leaf->in(MemNode::Memory); assert( mem == (Node*)1 || mem == mem2, "multiple Memories being matched at once?" ); - debug_only( if( mem == (Node*)1 ) _mem_node = s->_leaf;) + DEBUG_ONLY( if( mem == (Node*)1 ) _mem_node = s->_leaf;) mem = mem2; } if( s->_leaf->in(0) != nullptr && s->_leaf->req() > 1) { @@ -2023,9 +2023,9 @@ uint Matcher::ReduceInst_Interior( State *s, int rule, Node *&mem, MachNode *mac // --> ReduceInst( newrule ) mach->_opnds[num_opnds++] = s->MachOperGenerator(_reduceOp[catch_op]); Node *mem1 = (Node*)1; - debug_only(Node *save_mem_node = _mem_node;) + DEBUG_ONLY(Node *save_mem_node = _mem_node;) mach->add_req( ReduceInst( newstate, newrule, mem1 ) ); - debug_only(_mem_node = save_mem_node;) + DEBUG_ONLY(_mem_node = save_mem_node;) } } assert( mach->_opnds[num_opnds-1], "" ); @@ -2056,7 +2056,7 @@ void Matcher::ReduceOper( State *s, int rule, Node *&mem, MachNode *mach ) { if( s->_leaf->is_Load() ) { assert( mem == (Node*)1, "multiple Memories being matched at once?" 
); mem = s->_leaf->in(MemNode::Memory); - debug_only(_mem_node = s->_leaf;) + DEBUG_ONLY(_mem_node = s->_leaf;) } handle_precedence_edges(s->_leaf, mach); @@ -2085,9 +2085,9 @@ void Matcher::ReduceOper( State *s, int rule, Node *&mem, MachNode *mach ) { // Reduce the instruction, and add a direct pointer from this // machine instruction to the newly reduced one. Node *mem1 = (Node*)1; - debug_only(Node *save_mem_node = _mem_node;) + DEBUG_ONLY(Node *save_mem_node = _mem_node;) mach->add_req( ReduceInst( kid, newrule, mem1 ) ); - debug_only(_mem_node = save_mem_node;) + DEBUG_ONLY(_mem_node = save_mem_node;) } } } diff --git a/src/hotspot/share/opto/memnode.hpp b/src/hotspot/share/opto/memnode.hpp index 157cc1866a0..a751022f752 100644 --- a/src/hotspot/share/opto/memnode.hpp +++ b/src/hotspot/share/opto/memnode.hpp @@ -71,7 +71,7 @@ protected: _unsafe_access(false), _barrier_data(0) { init_class_id(Class_Mem); - debug_only(_adr_type=at; adr_type();) + DEBUG_ONLY(_adr_type=at; adr_type();) } MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 ) : Node(c0,c1,c2,c3), @@ -80,7 +80,7 @@ protected: _unsafe_access(false), _barrier_data(0) { init_class_id(Class_Mem); - debug_only(_adr_type=at; adr_type();) + DEBUG_ONLY(_adr_type=at; adr_type();) } MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4) : Node(c0,c1,c2,c3,c4), @@ -89,7 +89,7 @@ protected: _unsafe_access(false), _barrier_data(0) { init_class_id(Class_Mem); - debug_only(_adr_type=at; adr_type();) + DEBUG_ONLY(_adr_type=at; adr_type();) } virtual Node* find_previous_arraycopy(PhaseValues* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const { return nullptr; } @@ -273,7 +273,7 @@ public: // Following method is copied from TypeNode: void set_type(const Type* t) { assert(t != nullptr, "sanity"); - debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH); + DEBUG_ONLY(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH); *(const Type**)&_type = t; // cast away const-ness // If this node is in the hash table, make sure it doesn't need a rehash. assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code"); @@ -1497,7 +1497,7 @@ class MergeMemStream : public StackObj { MergeMemStream(MergeMemNode* mm) { mm->iteration_setup(); init(mm); - debug_only(_cnt2 = 999); + DEBUG_ONLY(_cnt2 = 999); } // iterate in parallel over two merges // only iterates through non-empty elements of mm2 diff --git a/src/hotspot/share/opto/multnode.hpp b/src/hotspot/share/opto/multnode.hpp index 25dad70a50a..dff2caed38d 100644 --- a/src/hotspot/share/opto/multnode.hpp +++ b/src/hotspot/share/opto/multnode.hpp @@ -71,7 +71,7 @@ public: // Optimistic setting. Need additional checks in Node::is_dead_loop_safe(). 
if (con != TypeFunc::Memory || src->is_Start()) init_flags(Flag_is_dead_loop_safe); - debug_only(check_con()); + DEBUG_ONLY(check_con()); } const uint _con; // The field in the tuple we are projecting const bool _is_io_use; // Used to distinguish between the projections diff --git a/src/hotspot/share/opto/node.cpp b/src/hotspot/share/opto/node.cpp index 76e19360910..a635abebec1 100644 --- a/src/hotspot/share/opto/node.cpp +++ b/src/hotspot/share/opto/node.cpp @@ -322,7 +322,7 @@ Node::Node(uint req) #endif { assert( req < Compile::current()->max_node_limit() - NodeLimitFudgeFactor, "Input limit exceeded" ); - debug_only( verify_construction() ); + DEBUG_ONLY( verify_construction() ); NOT_PRODUCT(nodes_created++); if (req == 0) { _in = nullptr; @@ -341,7 +341,7 @@ Node::Node(Node *n0) , _parse_idx(_idx) #endif { - debug_only( verify_construction() ); + DEBUG_ONLY( verify_construction() ); NOT_PRODUCT(nodes_created++); assert( is_not_dead(n0), "can not use dead node"); _in[0] = n0; if (n0 != nullptr) n0->add_out((Node *)this); @@ -354,7 +354,7 @@ Node::Node(Node *n0, Node *n1) , _parse_idx(_idx) #endif { - debug_only( verify_construction() ); + DEBUG_ONLY( verify_construction() ); NOT_PRODUCT(nodes_created++); assert( is_not_dead(n0), "can not use dead node"); assert( is_not_dead(n1), "can not use dead node"); @@ -369,7 +369,7 @@ Node::Node(Node *n0, Node *n1, Node *n2) , _parse_idx(_idx) #endif { - debug_only( verify_construction() ); + DEBUG_ONLY( verify_construction() ); NOT_PRODUCT(nodes_created++); assert( is_not_dead(n0), "can not use dead node"); assert( is_not_dead(n1), "can not use dead node"); @@ -386,7 +386,7 @@ Node::Node(Node *n0, Node *n1, Node *n2, Node *n3) , _parse_idx(_idx) #endif { - debug_only( verify_construction() ); + DEBUG_ONLY( verify_construction() ); NOT_PRODUCT(nodes_created++); assert( is_not_dead(n0), "can not use dead node"); assert( is_not_dead(n1), "can not use dead node"); @@ -405,7 +405,7 @@ Node::Node(Node *n0, Node *n1, Node *n2, Node *n3, Node *n4) , _parse_idx(_idx) #endif { - debug_only( verify_construction() ); + DEBUG_ONLY( verify_construction() ); NOT_PRODUCT(nodes_created++); assert( is_not_dead(n0), "can not use dead node"); assert( is_not_dead(n1), "can not use dead node"); @@ -427,7 +427,7 @@ Node::Node(Node *n0, Node *n1, Node *n2, Node *n3, , _parse_idx(_idx) #endif { - debug_only( verify_construction() ); + DEBUG_ONLY( verify_construction() ); NOT_PRODUCT(nodes_created++); assert( is_not_dead(n0), "can not use dead node"); assert( is_not_dead(n1), "can not use dead node"); @@ -451,7 +451,7 @@ Node::Node(Node *n0, Node *n1, Node *n2, Node *n3, , _parse_idx(_idx) #endif { - debug_only( verify_construction() ); + DEBUG_ONLY( verify_construction() ); NOT_PRODUCT(nodes_created++); assert( is_not_dead(n0), "can not use dead node"); assert( is_not_dead(n1), "can not use dead node"); @@ -489,7 +489,7 @@ Node *Node::clone() const { n->_outcnt = 0; n->_outmax = 0; // Unlock this guy, since he is not in any hash table. 
- debug_only(n->_hash_lock = 0); + DEBUG_ONLY(n->_hash_lock = 0); // Walk the old node's input list to duplicate its edges uint i; for( i = 0; i < len(); i++ ) { @@ -525,11 +525,11 @@ Node *Node::clone() const { n->set_idx(C->next_unique()); // Get new unique index as well NOT_PRODUCT(n->_igv_idx = C->next_igv_idx()); - debug_only( n->verify_construction() ); + DEBUG_ONLY( n->verify_construction() ); NOT_PRODUCT(nodes_created++); // Do not patch over the debug_idx of a clone, because it makes it // impossible to break on the clone's moment of creation. - //debug_only( n->set_debug_idx( debug_idx() ) ); + //DEBUG_ONLY( n->set_debug_idx( debug_idx() ) ); C->copy_node_notes_to(n, (Node*) this); @@ -942,7 +942,7 @@ void Node::disconnect_inputs(Compile* C) { #endif // Node::destruct requires all out edges be deleted first - // debug_only(destruct();) // no reuse benefit expected + // DEBUG_ONLY(destruct();) // no reuse benefit expected C->record_dead_node(_idx); } diff --git a/src/hotspot/share/opto/node.hpp b/src/hotspot/share/opto/node.hpp index 1cb9009ef27..e1a4ee22661 100644 --- a/src/hotspot/share/opto/node.hpp +++ b/src/hotspot/share/opto/node.hpp @@ -427,11 +427,11 @@ public: assert(_outcnt > 0,"oob"); #if OPTO_DU_ITERATOR_ASSERT // Record that a change happened here. - debug_only(_last_del = _out[i]; ++_del_tick); + DEBUG_ONLY(_last_del = _out[i]; ++_del_tick); #endif _out[i] = _out[--_outcnt]; // Smash the old edge so it can't be used accidentally. - debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef); + DEBUG_ONLY(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef); } #ifdef ASSERT @@ -533,10 +533,10 @@ private: } while (*--outp != n); *outp = _out[--_outcnt]; // Smash the old edge so it can't be used accidentally. - debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef); + DEBUG_ONLY(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef); // Record that a change happened here. #if OPTO_DU_ITERATOR_ASSERT - debug_only(_last_del = n; ++_del_tick); + DEBUG_ONLY(_last_del = n; ++_del_tick); #endif } // Close gap after removing edge. @@ -593,7 +593,7 @@ public: } // Swap input edge order. (Edge indexes i1 and i2 are usually 1 and 2.) void swap_edges(uint i1, uint i2) { - debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH); + DEBUG_ONLY(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? 
hash() : NO_HASH); // Def-Use info is unchanged Node* n1 = in(i1); Node* n2 = in(i2); @@ -1431,15 +1431,15 @@ class DUIterator : public DUIterator_Common { #endif DUIterator(const Node* node, int dummy_to_avoid_conversion) - { _idx = 0; debug_only(sample(node)); } + { _idx = 0; DEBUG_ONLY(sample(node)); } public: // initialize to garbage; clear _vdui to disable asserts DUIterator() - { /*initialize to garbage*/ debug_only(_vdui = false); } + { /*initialize to garbage*/ DEBUG_ONLY(_vdui = false); } DUIterator(const DUIterator& that) - { _idx = that._idx; debug_only(_vdui = false; reset(that)); } + { _idx = that._idx; DEBUG_ONLY(_vdui = false; reset(that)); } void operator++(int dummy_to_specify_postfix_op) { _idx++; VDUI_ONLY(verify_increment()); } @@ -1451,7 +1451,7 @@ class DUIterator : public DUIterator_Common { { VDUI_ONLY(verify_finish()); } void operator=(const DUIterator& that) - { _idx = that._idx; debug_only(reset(that)); } + { _idx = that._idx; DEBUG_ONLY(reset(that)); } }; DUIterator Node::outs() const @@ -1461,7 +1461,7 @@ DUIterator& Node::refresh_out_pos(DUIterator& i) const bool Node::has_out(DUIterator& i) const { I_VDUI_ONLY(i, i.verify(this,true));return i._idx < _outcnt; } Node* Node::out(DUIterator& i) const - { I_VDUI_ONLY(i, i.verify(this)); return debug_only(i._last=) _out[i._idx]; } + { I_VDUI_ONLY(i, i.verify(this)); return DEBUG_ONLY(i._last=) _out[i._idx]; } // Faster DU iterator. Disallows insertions into the out array. @@ -1496,15 +1496,15 @@ class DUIterator_Fast : public DUIterator_Common { // Note: offset must be signed, since -1 is sometimes passed DUIterator_Fast(const Node* node, ptrdiff_t offset) - { _outp = node->_out + offset; debug_only(sample(node)); } + { _outp = node->_out + offset; DEBUG_ONLY(sample(node)); } public: // initialize to garbage; clear _vdui to disable asserts DUIterator_Fast() - { /*initialize to garbage*/ debug_only(_vdui = false); } + { /*initialize to garbage*/ DEBUG_ONLY(_vdui = false); } DUIterator_Fast(const DUIterator_Fast& that) - { _outp = that._outp; debug_only(_vdui = false; reset(that)); } + { _outp = that._outp; DEBUG_ONLY(_vdui = false; reset(that)); } void operator++(int dummy_to_specify_postfix_op) { _outp++; VDUI_ONLY(verify(_node, true)); } @@ -1522,7 +1522,7 @@ class DUIterator_Fast : public DUIterator_Common { } void operator=(const DUIterator_Fast& that) - { _outp = that._outp; debug_only(reset(that)); } + { _outp = that._outp; DEBUG_ONLY(reset(that)); } }; DUIterator_Fast Node::fast_outs(DUIterator_Fast& imax) const { @@ -1533,7 +1533,7 @@ DUIterator_Fast Node::fast_outs(DUIterator_Fast& imax) const { } Node* Node::fast_out(DUIterator_Fast& i) const { I_VDUI_ONLY(i, i.verify(this)); - return debug_only(i._last=) *i._outp; + return DEBUG_ONLY(i._last=) *i._outp; } @@ -1591,7 +1591,7 @@ DUIterator_Last Node::last_outs(DUIterator_Last& imin) const { } Node* Node::last_out(DUIterator_Last& i) const { I_VDUI_ONLY(i, i.verify(this)); - return debug_only(i._last=) *i._outp; + return DEBUG_ONLY(i._last=) *i._outp; } #endif //OPTO_DU_ITERATOR_ASSERT @@ -2035,7 +2035,7 @@ protected: public: void set_type(const Type* t) { assert(t != nullptr, "sanity"); - debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH); + DEBUG_ONLY(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH); *(const Type**)&_type = t; // cast away const-ness // If this node is in the hash table, make sure it doesn't need a rehash. 
assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code"); diff --git a/src/hotspot/share/opto/output.cpp b/src/hotspot/share/opto/output.cpp index 9fe5ad562b0..1cd6ebabc4b 100644 --- a/src/hotspot/share/opto/output.cpp +++ b/src/hotspot/share/opto/output.cpp @@ -2971,7 +2971,7 @@ void Scheduling::anti_do_def( Block *b, Node *def, OptoReg::Name def_reg, int is } Node *kill = def; // Rename 'def' to more descriptive 'kill' - debug_only( def = (Node*)((intptr_t)0xdeadbeef); ) + DEBUG_ONLY( def = (Node*)((intptr_t)0xdeadbeef); ) // After some number of kills there _may_ be a later def Node *later_def = nullptr; diff --git a/src/hotspot/share/opto/parse1.cpp b/src/hotspot/share/opto/parse1.cpp index f7330d10df3..6fa0b0f497d 100644 --- a/src/hotspot/share/opto/parse1.cpp +++ b/src/hotspot/share/opto/parse1.cpp @@ -1835,10 +1835,10 @@ void Parse::merge_common(Parse::Block* target, int pnum) { // Now _gvn will join that with the meet of current inputs. // BOTTOM is never permissible here, 'cause pessimistically // Phis of pointers cannot lose the basic pointer type. - debug_only(const Type* bt1 = phi->bottom_type()); + DEBUG_ONLY(const Type* bt1 = phi->bottom_type()); assert(bt1 != Type::BOTTOM, "should not be building conflict phis"); map()->set_req(j, _gvn.transform(phi)); - debug_only(const Type* bt2 = phi->bottom_type()); + DEBUG_ONLY(const Type* bt2 = phi->bottom_type()); assert(bt2->higher_equal_speculative(bt1), "must be consistent with type-flow"); record_for_igvn(phi); } @@ -1936,7 +1936,7 @@ void Parse::ensure_phis_everywhere() { // Ensure a phi on all currently known memories. for (MergeMemStream mms(merged_memory()); mms.next_non_empty(); ) { ensure_memory_phi(mms.alias_idx()); - debug_only(mms.set_memory()); // keep the iterator happy + DEBUG_ONLY(mms.set_memory()); // keep the iterator happy } // Note: This is our only chance to create phis for memory slices. diff --git a/src/hotspot/share/opto/phaseX.cpp b/src/hotspot/share/opto/phaseX.cpp index 9be15b153b3..82e5b3f9d85 100644 --- a/src/hotspot/share/opto/phaseX.cpp +++ b/src/hotspot/share/opto/phaseX.cpp @@ -123,7 +123,7 @@ Node *NodeHash::hash_find_insert( Node *n ) { if( !k ) { // ?Miss? NOT_PRODUCT( _lookup_misses++ ); _table[key] = n; // Insert into table! - debug_only(n->enter_hash_lock()); // Lock down the node while in the table. + DEBUG_ONLY(n->enter_hash_lock()); // Lock down the node while in the table. check_grow(); // Grow table if insert hit limit return nullptr; // Miss! } @@ -152,7 +152,7 @@ Node *NodeHash::hash_find_insert( Node *n ) { NOT_PRODUCT( _lookup_misses++ ); key = (first_sentinel == 0) ? key : first_sentinel; // ?saw sentinel? _table[key] = n; // Insert into table! - debug_only(n->enter_hash_lock()); // Lock down the node while in the table. + DEBUG_ONLY(n->enter_hash_lock()); // Lock down the node while in the table. check_grow(); // Grow table if insert hit limit return nullptr; // Miss! } @@ -188,7 +188,7 @@ void NodeHash::hash_insert( Node *n ) { key = (key + stride) & (_max-1); // Stride through table w/ relative prime } _table[key] = n; // Insert into table! - debug_only(n->enter_hash_lock()); // Lock down the node while in the table. + DEBUG_ONLY(n->enter_hash_lock()); // Lock down the node while in the table. 
// if( conflict ) { n->dump(); } } @@ -203,9 +203,9 @@ bool NodeHash::hash_delete( const Node *n ) { } uint key = hash & (_max-1); uint stride = key | 0x01; - debug_only( uint counter = 0; ); + DEBUG_ONLY( uint counter = 0; ); for( ; /* (k != nullptr) && (k != _sentinel) */; ) { - debug_only( counter++ ); + DEBUG_ONLY( counter++ ); NOT_PRODUCT( _delete_probes++ ); k = _table[key]; // Get hashed value if( !k ) { // Miss? @@ -215,7 +215,7 @@ bool NodeHash::hash_delete( const Node *n ) { else if( n == k ) { NOT_PRODUCT( _delete_hits++ ); _table[key] = _sentinel; // Hit! Label as deleted entry - debug_only(((Node*)n)->exit_hash_lock()); // Unlock the node upon removal from table. + DEBUG_ONLY(((Node*)n)->exit_hash_lock()); // Unlock the node upon removal from table. return true; } else { @@ -257,7 +257,7 @@ void NodeHash::grow() { for( uint i = 0; i < old_max; i++ ) { Node *m = *old_table++; if( !m || m == _sentinel ) continue; - debug_only(m->exit_hash_lock()); // Unlock the node upon removal from old table. + DEBUG_ONLY(m->exit_hash_lock()); // Unlock the node upon removal from old table. hash_insert(m); } } @@ -289,7 +289,7 @@ void NodeHash::remove_useless_nodes(VectorSet &useful) { for( uint i = 0; i < max; ++i ) { Node *n = at(i); if(n != nullptr && n != sentinel_node && !useful.test(n->_idx)) { - debug_only(n->exit_hash_lock()); // Unlock the node when removed + DEBUG_ONLY(n->exit_hash_lock()); // Unlock the node when removed _table[i] = sentinel_node; // Replace with placeholder } } diff --git a/src/hotspot/share/opto/regalloc.hpp b/src/hotspot/share/opto/regalloc.hpp index 86877e18325..c724406a168 100644 --- a/src/hotspot/share/opto/regalloc.hpp +++ b/src/hotspot/share/opto/regalloc.hpp @@ -60,12 +60,12 @@ public: // Get the register associated with the Node OptoReg::Name get_reg_first( const Node *n ) const { - debug_only( if( n->_idx >= _node_regs_max_index ) n->dump(); ); + DEBUG_ONLY( if( n->_idx >= _node_regs_max_index ) n->dump(); ); assert( n->_idx < _node_regs_max_index, "Exceeded _node_regs array"); return _node_regs[n->_idx].first(); } OptoReg::Name get_reg_second( const Node *n ) const { - debug_only( if( n->_idx >= _node_regs_max_index ) n->dump(); ); + DEBUG_ONLY( if( n->_idx >= _node_regs_max_index ) n->dump(); ); assert( n->_idx < _node_regs_max_index, "Exceeded _node_regs array"); return _node_regs[n->_idx].second(); } diff --git a/src/hotspot/share/opto/runtime.cpp b/src/hotspot/share/opto/runtime.cpp index 6d24fb26cd2..fcb0ac38ace 100644 --- a/src/hotspot/share/opto/runtime.cpp +++ b/src/hotspot/share/opto/runtime.cpp @@ -1944,7 +1944,7 @@ address OptoRuntime::handle_exception_C(JavaThread* current) { #ifndef PRODUCT SharedRuntime::_find_handler_ctr++; // find exception handler #endif - debug_only(NoHandleMark __hm;) + DEBUG_ONLY(NoHandleMark __hm;) nmethod* nm = nullptr; address handler_address = nullptr; { diff --git a/src/hotspot/share/opto/type.cpp b/src/hotspot/share/opto/type.cpp index 4556555acea..163f94ee959 100644 --- a/src/hotspot/share/opto/type.cpp +++ b/src/hotspot/share/opto/type.cpp @@ -756,7 +756,7 @@ void Type::Initialize(Compile* current) { // delete the current Type and return the existing Type. Otherwise stick the // current Type in the Type table. const Type *Type::hashcons(void) { - debug_only(base()); // Check the assertion in Type::base(). + DEBUG_ONLY(base()); // Check the assertion in Type::base(). 
// Look up the Type in the Type dictionary Dict *tdic = type_dict(); Type* old = (Type*)(tdic->Insert(this, this, false)); diff --git a/src/hotspot/share/prims/jni.cpp b/src/hotspot/share/prims/jni.cpp index d2916fad185..430d6997e77 100644 --- a/src/hotspot/share/prims/jni.cpp +++ b/src/hotspot/share/prims/jni.cpp @@ -215,7 +215,7 @@ intptr_t jfieldIDWorkaround::encode_klass_hash(Klass* k, int offset) { field_klass = super_klass; // super contains the field also super_klass = field_klass->super(); } - debug_only(NoSafepointVerifier nosafepoint;) + DEBUG_ONLY(NoSafepointVerifier nosafepoint;) uintptr_t klass_hash = field_klass->identity_hash(); return ((klass_hash & klass_mask) << klass_shift) | checked_mask_in_place; } else { @@ -235,7 +235,7 @@ bool jfieldIDWorkaround::klass_hash_ok(Klass* k, jfieldID id) { uintptr_t as_uint = (uintptr_t) id; intptr_t klass_hash = (as_uint >> klass_shift) & klass_mask; do { - debug_only(NoSafepointVerifier nosafepoint;) + DEBUG_ONLY(NoSafepointVerifier nosafepoint;) // Could use a non-blocking query for identity_hash here... if ((k->identity_hash() & klass_mask) == klass_hash) return true; @@ -410,7 +410,7 @@ JNI_ENTRY(jfieldID, jni_FromReflectedField(JNIEnv *env, jobject field)) int offset = InstanceKlass::cast(k1)->field_offset( slot ); JNIid* id = InstanceKlass::cast(k1)->jni_id_for(offset); assert(id != nullptr, "corrupt Field object"); - debug_only(id->set_is_static_field_id();) + DEBUG_ONLY(id->set_is_static_field_id();) // A jfieldID for a static field is a JNIid specifying the field holder and the offset within the Klass* ret = jfieldIDWorkaround::to_static_jfieldID(id); return ret; @@ -472,7 +472,7 @@ JNI_ENTRY(jclass, jni_GetSuperclass(JNIEnv *env, jclass sub)) // return mirror for superclass Klass* super = k->java_super(); // super2 is the value computed by the compiler's getSuperClass intrinsic: - debug_only(Klass* super2 = ( k->is_array_klass() + DEBUG_ONLY(Klass* super2 = ( k->is_array_klass() ? vmClasses::Object_klass() : k->super() ) ); assert(super == super2, @@ -906,7 +906,7 @@ static void jni_invoke_nonstatic(JNIEnv *env, JavaValue* result, jobject receive selected_method = m; } else if (!m->has_itable_index()) { // non-interface call -- for that little speed boost, don't handlize - debug_only(NoSafepointVerifier nosafepoint;) + DEBUG_ONLY(NoSafepointVerifier nosafepoint;) // jni_GetMethodID makes sure class is linked and initialized // so m should have a valid vtable index. assert(m->valid_vtable_index(), "no valid vtable index"); @@ -1995,9 +1995,9 @@ JNI_ENTRY(jfieldID, jni_GetStaticFieldID(JNIEnv *env, jclass clazz, // A jfieldID for a static field is a JNIid specifying the field holder and the offset within the Klass* JNIid* id = fd.field_holder()->jni_id_for(fd.offset()); - debug_only(id->set_is_static_field_id();) + DEBUG_ONLY(id->set_is_static_field_id();) - debug_only(id->verify(fd.field_holder())); + DEBUG_ONLY(id->verify(fd.field_holder())); ret = jfieldIDWorkaround::to_static_jfieldID(id); return ret; diff --git a/src/hotspot/share/prims/jniCheck.cpp b/src/hotspot/share/prims/jniCheck.cpp index aa158490eab..14d9c36c9fd 100644 --- a/src/hotspot/share/prims/jniCheck.cpp +++ b/src/hotspot/share/prims/jniCheck.cpp @@ -2320,7 +2320,7 @@ struct JNINativeInterface_* jni_functions_check() { // make sure the last pointer in the checked table is not null, indicating // an addition to the JNINativeInterface_ structure without initializing // it in the checked table. 
-  debug_only(intptr_t *lastPtr = (intptr_t *)((char *)&checked_jni_NativeInterface + \
+  DEBUG_ONLY(intptr_t *lastPtr = (intptr_t *)((char *)&checked_jni_NativeInterface + \
             sizeof(*unchecked_jni_NativeInterface) - sizeof(char *));)
   assert(*lastPtr != 0,
          "Mismatched JNINativeInterface tables, check for new entries");
diff --git a/src/hotspot/share/prims/jvmtiEnter.xsl b/src/hotspot/share/prims/jvmtiEnter.xsl
index bbca1ccc6e1..d1274158be4 100644
--- a/src/hotspot/share/prims/jvmtiEnter.xsl
+++ b/src/hotspot/share/prims/jvmtiEnter.xsl
@@ -444,7 +444,7 @@ struct jvmtiInterface_1_ jvmti
 , current_thread)
-    debug_only(VMNativeEntryWrapper __vew;)
+    DEBUG_ONLY(VMNativeEntryWrapper __vew;)
     PreserveExceptionMark __em(this_thread);
diff --git a/src/hotspot/share/prims/jvmtiEnv.cpp b/src/hotspot/share/prims/jvmtiEnv.cpp
index 7a0c9a428c5..b2af12b08a6 100644
--- a/src/hotspot/share/prims/jvmtiEnv.cpp
+++ b/src/hotspot/share/prims/jvmtiEnv.cpp
@@ -199,7 +199,7 @@ JvmtiEnv::GetThreadLocalStorage(jthread thread, void** data_ptr) {
     MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, current_thread));
     ThreadInVMfromNative __tiv(current_thread);
     VM_ENTRY_BASE(jvmtiError, JvmtiEnv::GetThreadLocalStorage , current_thread)
-    debug_only(VMNativeEntryWrapper __vew;)
+    DEBUG_ONLY(VMNativeEntryWrapper __vew;)
     JvmtiVTMSTransitionDisabler disabler(thread);
     ThreadsListHandle tlh(current_thread);
diff --git a/src/hotspot/share/prims/jvmtiExport.cpp b/src/hotspot/share/prims/jvmtiExport.cpp
index 32dd6fc13e7..13822f73f77 100644
--- a/src/hotspot/share/prims/jvmtiExport.cpp
+++ b/src/hotspot/share/prims/jvmtiExport.cpp
@@ -384,7 +384,7 @@ JvmtiExport::get_jvmti_interface(JavaVM *jvm, void **penv, jint version) {
     MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, current_thread));
     ThreadInVMfromNative __tiv(current_thread);
     VM_ENTRY_BASE(jvmtiEnv*, JvmtiExport::get_jvmti_interface, current_thread)
-    debug_only(VMNativeEntryWrapper __vew;)
+    DEBUG_ONLY(VMNativeEntryWrapper __vew;)
     JvmtiEnv *jvmti_env = JvmtiEnv::create_a_jvmti(version);
     *penv = jvmti_env->jvmti_external();  // actual type is jvmtiEnv* -- not to be confused with JvmtiEnv*
diff --git a/src/hotspot/share/prims/jvmtiThreadState.cpp b/src/hotspot/share/prims/jvmtiThreadState.cpp
index e9d496fc696..b4119825bd3 100644
--- a/src/hotspot/share/prims/jvmtiThreadState.cpp
+++ b/src/hotspot/share/prims/jvmtiThreadState.cpp
@@ -102,7 +102,7 @@ JvmtiThreadState::JvmtiThreadState(JavaThread* thread, oop thread_oop)
   {
     // The thread state list manipulation code must not have safepoints.
     // See periodic_clean_up().
-    debug_only(NoSafepointVerifier nosafepoint;)
+    DEBUG_ONLY(NoSafepointVerifier nosafepoint;)

     _prev = nullptr;
     _next = _head;
@@ -154,7 +154,7 @@ JvmtiThreadState::~JvmtiThreadState() {
   {
     // The thread state list manipulation code must not have safepoints.
     // See periodic_clean_up().
-    debug_only(NoSafepointVerifier nosafepoint;)
+    DEBUG_ONLY(NoSafepointVerifier nosafepoint;)

     if (_prev == nullptr) {
       assert(_head == this, "sanity check");
@@ -759,7 +759,7 @@ void JvmtiThreadState::add_env(JvmtiEnvBase *env) {
   // add this environment thread state to the end of the list (order is important)
   {
     // list deallocation (which occurs at a safepoint) cannot occur simultaneously
-    debug_only(NoSafepointVerifier nosafepoint;)
+    DEBUG_ONLY(NoSafepointVerifier nosafepoint;)

     JvmtiEnvThreadStateIterator it(this);
     JvmtiEnvThreadState* previous_ets = nullptr;
diff --git a/src/hotspot/share/prims/perf.cpp b/src/hotspot/share/prims/perf.cpp
index f5f91e4614f..0b051bbc60f 100644
--- a/src/hotspot/share/prims/perf.cpp
+++ b/src/hotspot/share/prims/perf.cpp
@@ -111,7 +111,7 @@ PERF_ENTRY(jobject, Perf_CreateLong(JNIEnv *env, jobject perf, jstring name,
   char* name_utf = nullptr;

   if (units <= 0 || units > PerfData::U_Last) {
-    debug_only(warning("unexpected units argument, units = %d", units));
+    DEBUG_ONLY(warning("unexpected units argument, units = %d", units));
     THROW_NULL(vmSymbols::java_lang_IllegalArgumentException());
   }
@@ -150,7 +150,7 @@ PERF_ENTRY(jobject, Perf_CreateLong(JNIEnv *env, jobject perf, jstring name,
     break;

   default: /* Illegal Argument */
-    debug_only(warning("unexpected variability value: %d", variability));
+    DEBUG_ONLY(warning("unexpected variability value: %d", variability));
     THROW_NULL(vmSymbols::java_lang_IllegalArgumentException());
     break;
   }
@@ -179,14 +179,14 @@ PERF_ENTRY(jobject, Perf_CreateByteArray(JNIEnv *env, jobject perf,

   // check for valid variability classification
   if (variability != PerfData::V_Constant && variability != PerfData::V_Variable) {
-    debug_only(warning("unexpected variability value: %d", variability));
+    DEBUG_ONLY(warning("unexpected variability value: %d", variability));
     THROW_NULL(vmSymbols::java_lang_IllegalArgumentException());
   }

   // check for valid units
   if (units != PerfData::U_String) {
     // only String based ByteArray objects are currently supported
-    debug_only(warning("unexpected units value: %d", variability));
+    DEBUG_ONLY(warning("unexpected units value: %d", variability));
     THROW_NULL(vmSymbols::java_lang_IllegalArgumentException());
   }
diff --git a/src/hotspot/share/prims/upcallLinker.cpp b/src/hotspot/share/prims/upcallLinker.cpp
index 911f0f3ad2c..bc6a56dab05 100644
--- a/src/hotspot/share/prims/upcallLinker.cpp
+++ b/src/hotspot/share/prims/upcallLinker.cpp
@@ -104,7 +104,7 @@ JavaThread* UpcallLinker::on_entry(UpcallStub::FrameData* context) {
   context->jfa.copy(thread->frame_anchor());
   thread->frame_anchor()->clear();

-  debug_only(thread->inc_java_call_counter());
+  DEBUG_ONLY(thread->inc_java_call_counter());

   thread->set_active_handles(context->new_handles); // install new handle block and reset Java frame linkage
   return thread;
@@ -118,7 +118,7 @@ void UpcallLinker::on_exit(UpcallStub::FrameData* context) {
   // restore previous handle block
   thread->set_active_handles(context->old_handles);

-  debug_only(thread->dec_java_call_counter());
+  DEBUG_ONLY(thread->dec_java_call_counter());

   thread->frame_anchor()->copy(&context->jfa);
diff --git a/src/hotspot/share/runtime/handles.cpp b/src/hotspot/share/runtime/handles.cpp
index f747b4dc76e..cd3bd3eb3e0 100644
--- a/src/hotspot/share/runtime/handles.cpp
+++ b/src/hotspot/share/runtime/handles.cpp
@@ -133,7 +133,7 @@ void HandleMark::initialize(Thread* thread) {
   _hwm = _area->_hwm;
   _max = _area->_max;
   _size_in_bytes = _area->_size_in_bytes;
-  debug_only(_area->_handle_mark_nesting++);
+  DEBUG_ONLY(_area->_handle_mark_nesting++);
   assert(_area->_handle_mark_nesting > 0, "must stack allocate HandleMarks");

   // Link this in the thread
diff --git a/src/hotspot/share/runtime/handles.hpp b/src/hotspot/share/runtime/handles.hpp
index 8ed16d33a2f..d2020e34121 100644
--- a/src/hotspot/share/runtime/handles.hpp
+++ b/src/hotspot/share/runtime/handles.hpp
@@ -188,8 +188,8 @@ class HandleArea: public Arena {
  public:
   // Constructor
   HandleArea(MemTag mem_tag, HandleArea* prev) : Arena(mem_tag, Tag::tag_ha, Chunk::tiny_size) {
-    debug_only(_handle_mark_nesting = 0);
-    debug_only(_no_handle_mark_nesting = 0);
+    DEBUG_ONLY(_handle_mark_nesting = 0);
+    DEBUG_ONLY(_no_handle_mark_nesting = 0);
     _prev = prev;
   }
@@ -212,7 +212,7 @@ class HandleArea: public Arena {
   // Garbage collection support
   void oops_do(OopClosure* f);

-  debug_only(bool no_handle_mark_active() { return _no_handle_mark_nesting > 0; })
+  DEBUG_ONLY(bool no_handle_mark_active() { return _no_handle_mark_nesting > 0; })
 };
diff --git a/src/hotspot/share/runtime/handles.inline.hpp b/src/hotspot/share/runtime/handles.inline.hpp
index 669a940eca9..4d3dd527c0d 100644
--- a/src/hotspot/share/runtime/handles.inline.hpp
+++ b/src/hotspot/share/runtime/handles.inline.hpp
@@ -80,7 +80,7 @@ inline void HandleMark::push() {
   // This is intentionally a NOP. pop_and_restore will reset
   // values to the HandleMark further down the stack, typically
   // in JavaCalls::call_helper.
-  debug_only(_area->_handle_mark_nesting++);
+  DEBUG_ONLY(_area->_handle_mark_nesting++);
 }

 inline void HandleMark::pop_and_restore() {
@@ -95,7 +95,7 @@ inline void HandleMark::pop_and_restore() {
   _area->_chunk = _chunk;
   _area->_hwm = _hwm;
   _area->_max = _max;
-  debug_only(_area->_handle_mark_nesting--);
+  DEBUG_ONLY(_area->_handle_mark_nesting--);
 }

 inline HandleMarkCleaner::HandleMarkCleaner(Thread* thread) {
diff --git a/src/hotspot/share/runtime/interfaceSupport.inline.hpp b/src/hotspot/share/runtime/interfaceSupport.inline.hpp
index 403ff1d9ea2..c52c2664faa 100644
--- a/src/hotspot/share/runtime/interfaceSupport.inline.hpp
+++ b/src/hotspot/share/runtime/interfaceSupport.inline.hpp
@@ -259,12 +259,12 @@ class VMNativeEntryWrapper {
 // in the codecache.

 #define VM_LEAF_BASE(result_type, header) \
-  debug_only(NoHandleMark __hm;) \
+  DEBUG_ONLY(NoHandleMark __hm;) \
   os::verify_stack_alignment(); \
   /* begin of body */

 #define VM_ENTRY_BASE_FROM_LEAF(result_type, header, thread) \
-  debug_only(ResetNoHandleMark __rnhm;) \
+  DEBUG_ONLY(ResetNoHandleMark __rnhm;) \
   HandleMarkCleaner __hm(thread); \
   JavaThread* THREAD = thread; /* For exception macros. */ \
   os::verify_stack_alignment(); \
@@ -286,7 +286,7 @@ class VMNativeEntryWrapper {
     MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, current)); \
     ThreadInVMfromJava __tiv(current); \
     VM_ENTRY_BASE(result_type, header, current) \
-    debug_only(VMEntryWrapper __vew;)
+    DEBUG_ONLY(VMEntryWrapper __vew;)

 // JRT_LEAF currently can be called from either _thread_in_Java or
 // _thread_in_native mode.
@@ -305,7 +305,7 @@ class VMNativeEntryWrapper {
 #define JRT_LEAF(result_type, header) \
   result_type header { \
   VM_LEAF_BASE(result_type, header) \
-  debug_only(NoSafepointVerifier __nsv;)
+  DEBUG_ONLY(NoSafepointVerifier __nsv;)


 #define JRT_ENTRY_NO_ASYNC(result_type, header) \
@@ -314,7 +314,7 @@ class VMNativeEntryWrapper {
     MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, current)); \
     ThreadInVMfromJava __tiv(current, false /* check asyncs */); \
     VM_ENTRY_BASE(result_type, header, current) \
-    debug_only(VMEntryWrapper __vew;)
+    DEBUG_ONLY(VMEntryWrapper __vew;)

 // Same as JRT Entry but allows for return value after the safepoint
 // to get back into Java from the VM
@@ -329,14 +329,14 @@ class VMNativeEntryWrapper {
     assert(current == JavaThread::current(), "Must be"); \
     ThreadInVMfromJava __tiv(current); \
     JavaThread* THREAD = current; /* For exception macros. */ \
-    debug_only(VMEntryWrapper __vew;)
+    DEBUG_ONLY(VMEntryWrapper __vew;)

 #define JRT_BLOCK_NO_ASYNC \
     { \
     assert(current == JavaThread::current(), "Must be"); \
     ThreadInVMfromJava __tiv(current, false /* check asyncs */); \
     JavaThread* THREAD = current; /* For exception macros. */ \
-    debug_only(VMEntryWrapper __vew;)
+    DEBUG_ONLY(VMEntryWrapper __vew;)

 #define JRT_BLOCK_END }
@@ -360,7 +360,7 @@ extern "C" { \
   assert(thread == Thread::current(), "JNIEnv is only valid in same thread"); \
   MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, thread)); \
   ThreadInVMfromNative __tiv(thread); \
-  debug_only(VMNativeEntryWrapper __vew;) \
+  DEBUG_ONLY(VMNativeEntryWrapper __vew;) \
   VM_ENTRY_BASE(result_type, header, thread)
@@ -385,7 +385,7 @@ extern "C" { \
   JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
   MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, thread)); \
   ThreadInVMfromNative __tiv(thread); \
-  debug_only(VMNativeEntryWrapper __vew;) \
+  DEBUG_ONLY(VMNativeEntryWrapper __vew;) \
   VM_ENTRY_BASE(result_type, header, thread)
@@ -395,7 +395,7 @@ extern "C" { \
   JavaThread* thread = JavaThread::current(); \
   MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, thread)); \
   ThreadInVMfromNative __tiv(thread); \
-  debug_only(VMNativeEntryWrapper __vew;) \
+  DEBUG_ONLY(VMNativeEntryWrapper __vew;) \
   VM_ENTRY_BASE(result_type, header, thread)
diff --git a/src/hotspot/share/runtime/javaCalls.cpp b/src/hotspot/share/runtime/javaCalls.cpp
index 012cea19bb0..04bd0871073 100644
--- a/src/hotspot/share/runtime/javaCalls.cpp
+++ b/src/hotspot/share/runtime/javaCalls.cpp
@@ -91,7 +91,7 @@ JavaCallWrapper::JavaCallWrapper(const methodHandle& callee_method, Handle recei
   _anchor.copy(_thread->frame_anchor());
   _thread->frame_anchor()->clear();

-  debug_only(_thread->inc_java_call_counter());
+  DEBUG_ONLY(_thread->inc_java_call_counter());

   _thread->set_active_handles(new_handles); // install new handle block and reset Java frame linkage

   MACOS_AARCH64_ONLY(_thread->enable_wx(WXExec));
@@ -109,7 +109,7 @@ JavaCallWrapper::~JavaCallWrapper() {

   _thread->frame_anchor()->zap();

-  debug_only(_thread->dec_java_call_counter());
+  DEBUG_ONLY(_thread->dec_java_call_counter());

   // Old thread-local info. has been restored. We are now back in the VM.
   ThreadStateTransition::transition_from_java(_thread, _thread_in_vm);
diff --git a/src/hotspot/share/runtime/javaThread.cpp b/src/hotspot/share/runtime/javaThread.cpp
index e4f377a1217..8a6da52d762 100644
--- a/src/hotspot/share/runtime/javaThread.cpp
+++ b/src/hotspot/share/runtime/javaThread.cpp
@@ -553,7 +553,7 @@ void JavaThread::interrupt() {
   // All callers should have 'this' thread protected by a
   // ThreadsListHandle so that it cannot terminate and deallocate
   // itself.
-  debug_only(check_for_dangling_thread_pointer(this);)
+  DEBUG_ONLY(check_for_dangling_thread_pointer(this);)

   // For Windows _interrupt_event
   WINDOWS_ONLY(osthread()->set_interrupted(true);)
@@ -569,7 +569,7 @@ void JavaThread::interrupt() {
 }

 bool JavaThread::is_interrupted(bool clear_interrupted) {
-  debug_only(check_for_dangling_thread_pointer(this);)
+  DEBUG_ONLY(check_for_dangling_thread_pointer(this);)

   if (_threadObj.peek() == nullptr) {
     // If there is no j.l.Thread then it is impossible to have
diff --git a/src/hotspot/share/runtime/jfieldIDWorkaround.hpp b/src/hotspot/share/runtime/jfieldIDWorkaround.hpp
index e44fb064813..68db2e36d45 100644
--- a/src/hotspot/share/runtime/jfieldIDWorkaround.hpp
+++ b/src/hotspot/share/runtime/jfieldIDWorkaround.hpp
@@ -157,7 +157,7 @@ class jfieldIDWorkaround: AllStatic {
   static jfieldID to_jfieldID(InstanceKlass* k, int offset, bool is_static) {
     if (is_static) {
       JNIid *id = k->jni_id_for(offset);
-      debug_only(id->set_is_static_field_id());
+      DEBUG_ONLY(id->set_is_static_field_id());
       return jfieldIDWorkaround::to_static_jfieldID(id);
     } else {
       return jfieldIDWorkaround::to_instance_jfieldID(k, offset);
diff --git a/src/hotspot/share/runtime/jniHandles.cpp b/src/hotspot/share/runtime/jniHandles.cpp
index b4fc32947dd..e7564467a81 100644
--- a/src/hotspot/share/runtime/jniHandles.cpp
+++ b/src/hotspot/share/runtime/jniHandles.cpp
@@ -342,9 +342,9 @@ JNIHandleBlock* JNIHandleBlock::allocate_block(JavaThread* thread, AllocFailType
   block->_next = nullptr;
   block->_pop_frame_link = nullptr;
   // _last, _free_list & _allocate_before_rebuild initialized in allocate_handle
-  debug_only(block->_last = nullptr);
-  debug_only(block->_free_list = nullptr);
-  debug_only(block->_allocate_before_rebuild = -1);
+  DEBUG_ONLY(block->_last = nullptr);
+  DEBUG_ONLY(block->_free_list = nullptr);
+  DEBUG_ONLY(block->_allocate_before_rebuild = -1);
   return block;
 }
diff --git a/src/hotspot/share/runtime/os.cpp b/src/hotspot/share/runtime/os.cpp
index a58be403827..496ad372c38 100644
--- a/src/hotspot/share/runtime/os.cpp
+++ b/src/hotspot/share/runtime/os.cpp
@@ -243,7 +243,7 @@ char* os::iso8601_time(jlong milliseconds_since_19700101, char* buffer, size_t b
 }

 OSReturn os::set_priority(Thread* thread, ThreadPriority p) {
-  debug_only(Thread::check_for_dangling_thread_pointer(thread);)
+  DEBUG_ONLY(Thread::check_for_dangling_thread_pointer(thread);)

   if ((p >= MinPriority && p <= MaxPriority) ||
       (p == CriticalPriority && thread->is_ConcurrentGC_thread())) {
diff --git a/src/hotspot/share/runtime/thread.cpp b/src/hotspot/share/runtime/thread.cpp
index a42576b562d..183ecea03ca 100644
--- a/src/hotspot/share/runtime/thread.cpp
+++ b/src/hotspot/share/runtime/thread.cpp
@@ -92,7 +92,7 @@ Thread::Thread(MemTag mem_tag) {
   new HandleMark(this);

   // plain initialization
-  debug_only(_owned_locks = nullptr;)
+  DEBUG_ONLY(_owned_locks = nullptr;)
   NOT_PRODUCT(_skip_gcalot = false;)
   _jvmti_env_iteration_count = 0;
   set_allocated_bytes(0);
@@ -379,7 +379,7 @@ bool Thread::is_JavaThread_protected_by_TLH(const JavaThread* target) {
 }

 void Thread::set_priority(Thread* thread, ThreadPriority priority) {
-  debug_only(check_for_dangling_thread_pointer(thread);)
+  DEBUG_ONLY(check_for_dangling_thread_pointer(thread);)
   // Can return an error!
   (void)os::set_priority(thread, priority);
 }
@@ -488,7 +488,7 @@ void Thread::print_on(outputStream* st, bool print_extended_info) const {
   }
   ThreadsSMRSupport::print_info_on(this, st);
   st->print(" ");
-  debug_only(if (WizardMode) print_owned_locks_on(st);)
+  DEBUG_ONLY(if (WizardMode) print_owned_locks_on(st);)
 }

 void Thread::print() const { print_on(tty); }
diff --git a/src/hotspot/share/services/heapDumper.cpp b/src/hotspot/share/services/heapDumper.cpp
index 7a2c8d52969..a042a390925 100644
--- a/src/hotspot/share/services/heapDumper.cpp
+++ b/src/hotspot/share/services/heapDumper.cpp
@@ -455,7 +455,7 @@ class AbstractDumpWriter : public CHeapObj<mtInternal> {
 void AbstractDumpWriter::write_fast(const void* s, size_t len) {
   assert(!_in_dump_segment || (_sub_record_left >= len), "sub-record too large");
   assert(buffer_size() - position() >= len, "Must fit");
-  debug_only(_sub_record_left -= len);
+  DEBUG_ONLY(_sub_record_left -= len);
   memcpy(buffer() + position(), s, len);
   set_position(position() + len);
 }
@@ -467,7 +467,7 @@ bool AbstractDumpWriter::can_write_fast(size_t len) {
 // write raw bytes
 void AbstractDumpWriter::write_raw(const void* s, size_t len) {
   assert(!_in_dump_segment || (_sub_record_left >= len), "sub-record too large");
-  debug_only(_sub_record_left -= len);
+  DEBUG_ONLY(_sub_record_left -= len);

   // flush buffer to make room.
   while (len > buffer_size() - position()) {
@@ -591,8 +591,8 @@ void AbstractDumpWriter::start_sub_record(u1 tag, u4 len) {
     return;
   }

-  debug_only(_sub_record_left = len);
-  debug_only(_sub_record_ended = false);
+  DEBUG_ONLY(_sub_record_left = len);
+  DEBUG_ONLY(_sub_record_ended = false);
   write_u1(tag);
 }
@@ -601,7 +601,7 @@ void AbstractDumpWriter::end_sub_record() {
   assert(_in_dump_segment, "must be in dump segment");
   assert(_sub_record_left == 0, "sub-record not written completely");
   assert(!_sub_record_ended, "Must not have ended yet");
-  debug_only(_sub_record_ended = true);
+  DEBUG_ONLY(_sub_record_ended = true);
 }

 // Supports I/O operations for a dump
diff --git a/src/hotspot/share/services/threadService.cpp b/src/hotspot/share/services/threadService.cpp
index 89ee69242c9..d320e17fafb 100644
--- a/src/hotspot/share/services/threadService.cpp
+++ b/src/hotspot/share/services/threadService.cpp
@@ -228,7 +228,7 @@ void ThreadService::current_thread_exiting(JavaThread* jt, bool daemon) {
 // FIXME: JVMTI should call this function
 Handle ThreadService::get_current_contended_monitor(JavaThread* thread) {
   assert(thread != nullptr, "should be non-null");
-  debug_only(Thread::check_for_dangling_thread_pointer(thread);)
+  DEBUG_ONLY(Thread::check_for_dangling_thread_pointer(thread);)

   // This function can be called on a target JavaThread that is not
   // the caller and we are not at a safepoint. So it is possible for
diff --git a/src/hotspot/share/utilities/growableArray.hpp b/src/hotspot/share/utilities/growableArray.hpp
index 31e797fc192..86b7ed5f917 100644
--- a/src/hotspot/share/utilities/growableArray.hpp
+++ b/src/hotspot/share/utilities/growableArray.hpp
@@ -622,7 +622,7 @@ class GrowableArrayMetadata {
   uintptr_t _bits;

   // resource area nesting at creation
-  debug_only(GrowableArrayNestingCheck _nesting_check;)
+  DEBUG_ONLY(GrowableArrayNestingCheck _nesting_check;)

   // Resource allocation
   static uintptr_t bits() {
@@ -645,19 +645,19 @@ public:
   // Resource allocation
   GrowableArrayMetadata() :
       _bits(bits())
-      debug_only(COMMA _nesting_check(true)) {
+      DEBUG_ONLY(COMMA _nesting_check(true)) {
   }

   // Arena allocation
   GrowableArrayMetadata(Arena* arena) :
       _bits(bits(arena))
-      debug_only(COMMA _nesting_check(arena)) {
+      DEBUG_ONLY(COMMA _nesting_check(arena)) {
   }

   // CHeap allocation
   GrowableArrayMetadata(MemTag mem_tag) :
       _bits(bits(mem_tag))
-      debug_only(COMMA _nesting_check(false)) {
+      DEBUG_ONLY(COMMA _nesting_check(false)) {
   }

 #ifdef ASSERT
@@ -725,7 +725,7 @@ class GrowableArray : public GrowableArrayWithAllocator<E, GrowableArray<E>> {
   GrowableArrayMetadata _metadata;

-  void init_checks() const { debug_only(_metadata.init_checks(this);) }
+  void init_checks() const { DEBUG_ONLY(_metadata.init_checks(this);) }

   // Where are we going to allocate memory?
   bool on_C_heap() const { return _metadata.on_C_heap(); }
@@ -734,7 +734,7 @@ class GrowableArray : public GrowableArrayWithAllocator<E, GrowableArray<E>> {
   E* allocate() {
     if (on_resource_area()) {
-      debug_only(_metadata.on_resource_area_alloc_check());
+      DEBUG_ONLY(_metadata.on_resource_area_alloc_check());
       return allocate(this->_capacity);
     }
@@ -743,7 +743,7 @@ class GrowableArray : public GrowableArrayWithAllocator<E, GrowableArray<E>> {
     }

     assert(on_arena(), "Sanity");
-    debug_only(_metadata.on_arena_alloc_check());
+    DEBUG_ONLY(_metadata.on_arena_alloc_check());
     return allocate(this->_capacity, _metadata.arena());
   }
diff --git a/src/hotspot/share/utilities/macros.hpp b/src/hotspot/share/utilities/macros.hpp
index 5c3cfaa0dd5..a5caf316aa3 100644
--- a/src/hotspot/share/utilities/macros.hpp
+++ b/src/hotspot/share/utilities/macros.hpp
@@ -377,13 +377,10 @@
 #define DEBUG_ONLY(code) code
 #define NOT_DEBUG(code)
 #define NOT_DEBUG_RETURN /*next token must be ;*/
-// Historical.
-#define debug_only(code) code
 #else // ASSERT
 #define DEBUG_ONLY(code)
 #define NOT_DEBUG(code) code
 #define NOT_DEBUG_RETURN {}
-#define debug_only(code)
 #endif // ASSERT

 #ifdef _LP64
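
Note: the macros.hpp hunk above is the crux of the whole patch. With the historical lowercase debug_only alias deleted, DEBUG_ONLY is the single remaining spelling, which is why every other hunk in this diff is a mechanical rename. What follows is a minimal standalone sketch of the macro's semantics, not part of the patch; call_count and do_work are illustrative names only.

#include <cstdio>

// Same shape as the definitions kept in macros.hpp: the wrapped
// statement is compiled only when ASSERT is defined (a debug build)
// and expands to nothing in a product build.
#ifdef ASSERT
#define DEBUG_ONLY(code) code
#else
#define DEBUG_ONLY(code)
#endif

static int call_count = 0;   // hypothetical debug-only bookkeeping

static void do_work() {
  DEBUG_ONLY(call_count++;)  // present under -DASSERT, absent otherwise
  std::puts("working");
}

int main() {
  do_work();
  DEBUG_ONLY(std::printf("do_work called %d times\n", call_count);)
  return 0;
}

Compiled with -DASSERT, the counter is maintained and printed; without it, both DEBUG_ONLY statements disappear from the compiled code entirely.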