8300255: Introduce interface for GC oop verification in the assembler

Co-authored-by: Martin Doerr <mdoerr@openjdk.org>
Co-authored-by: Axel Boldt-Christmas <aboldtch@openjdk.org>
Co-authored-by: Yadong Wang <yadongwang@openjdk.org>
Reviewed-by: fyang, aboldtch, coleenp
This commit is contained in:
Erik Österlund 2023-02-13 15:50:54 +00:00
parent 99b6c0eb48
commit f4d4fa500c
18 changed files with 124 additions and 68 deletions

View File

@@ -299,3 +299,18 @@ void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) {
__ bind(method_live);
}
// Verify that obj plausibly refers to a real oop; emit a branch to the
// error label when a check fails. Clobbers tmp1 and tmp2, and replaces
// obj with its klass pointer. Must not disturb the condition flags,
// which are live at the verification call sites.
void BarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error) {
// Check if the oop is in the right area of memory:
// (obj & verify_oop_mask) must equal verify_oop_bits.
__ mov(tmp2, (intptr_t) Universe::verify_oop_mask());
__ andr(tmp1, obj, tmp2);
__ mov(tmp2, (intptr_t) Universe::verify_oop_bits());
// Compare tmp1 and tmp2. We don't use a compare
// instruction here because the flags register is live.
__ eor(tmp1, tmp1, tmp2);
__ cbnz(tmp1, error);
// make sure klass is 'reasonable', which is not zero.
__ load_klass(obj, obj); // get klass
__ cbz(obj, error); // if klass is NULL it is broken
}

View File

@@ -72,6 +72,8 @@ public:
virtual void nmethod_entry_barrier(MacroAssembler* masm, Label* slow_path, Label* continuation, Label* guard);
virtual void c2i_entry_barrier(MacroAssembler* masm);
virtual void check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error);
virtual bool supports_instruction_patching() {
NMethodPatchingType patching_type = nmethod_patching_type();
return patching_type == NMethodPatchingType::conc_instruction_and_data_patch ||

View File

@@ -447,3 +447,17 @@ void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, Z
#undef __
#endif // COMPILER2
#define __ masm->
// ZGC-specific oop verification: first check the colored-pointer metadata
// bits against the thread-local bad mask, then delegate to the generic
// address-range and klass checks. Branches to error on failure.
void ZBarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error) {
// Check if mask is good.
// verifies that ZAddressBadMask & obj == 0
__ ldr(tmp2, Address(rthread, ZThreadLocalData::address_bad_mask_offset()));
__ andr(tmp1, obj, tmp2);
__ cbnz(tmp1, error);
// Fall through to the GC-agnostic checks (oop mask/bits, non-null klass).
BarrierSetAssembler::check_oop(masm, obj, tmp1, tmp2, error);
}
#undef __

View File

@@ -97,6 +97,8 @@ public:
void generate_c2_load_barrier_stub(MacroAssembler* masm,
ZLoadBarrierStubC2* stub) const;
#endif // COMPILER2
void check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error);
};
#endif // CPU_AARCH64_GC_Z_ZBARRIERSETASSEMBLER_AARCH64_HPP

View File

@@ -568,29 +568,8 @@ class StubGenerator: public StubCodeGenerator {
// make sure object is 'reasonable'
__ cbz(r0, exit); // if obj is NULL it is OK
#if INCLUDE_ZGC
if (UseZGC) {
// Check if mask is good.
// verifies that ZAddressBadMask & r0 == 0
__ ldr(c_rarg3, Address(rthread, ZThreadLocalData::address_bad_mask_offset()));
__ andr(c_rarg2, r0, c_rarg3);
__ cbnz(c_rarg2, error);
}
#endif
// Check if the oop is in the right area of memory
__ mov(c_rarg3, (intptr_t) Universe::verify_oop_mask());
__ andr(c_rarg2, r0, c_rarg3);
__ mov(c_rarg3, (intptr_t) Universe::verify_oop_bits());
// Compare c_rarg2 and c_rarg3. We don't use a compare
// instruction here because the flags register is live.
__ eor(c_rarg2, c_rarg2, c_rarg3);
__ cbnz(c_rarg2, error);
// make sure klass is 'reasonable', which is not zero.
__ load_klass(r0, r0); // get klass
__ cbz(r0, error); // if klass is NULL it is broken
BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
bs_asm->check_oop(_masm, r0, c_rarg2, c_rarg3, error);
// return if everything seems ok
__ bind(exit);

View File

@@ -731,7 +731,10 @@ int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType
__ verify_coop(from_reg->as_register(), FILE_AND_LINE);
} else {
__ std(from_reg->as_register(), offset, base);
__ verify_oop(from_reg->as_register(), FILE_AND_LINE);
if (VerifyOops) {
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->check_oop(_masm, from_reg->as_register(), FILE_AND_LINE); // kills R0
}
}
break;
}
@@ -772,7 +775,10 @@ int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicTy
__ verify_coop(from_reg->as_register(), FILE_AND_LINE); // kills R0
} else {
__ stdx(from_reg->as_register(), base, disp);
__ verify_oop(from_reg->as_register(), FILE_AND_LINE); // kills R0
if (VerifyOops) {
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->check_oop(_masm, from_reg->as_register(), FILE_AND_LINE); // kills R0
}
}
break;
}
@@ -813,7 +819,10 @@ int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType typ
} else {
__ ld(to_reg->as_register(), offset, base);
}
__ verify_oop(to_reg->as_register(), FILE_AND_LINE);
if (VerifyOops) {
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->check_oop(_masm, to_reg->as_register(), FILE_AND_LINE); // kills R0
}
break;
}
case T_FLOAT: __ lfs(to_reg->as_float_reg(), offset, base); break;
@@ -844,7 +853,10 @@ int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType
} else {
__ ldx(to_reg->as_register(), base, disp);
}
__ verify_oop(to_reg->as_register(), FILE_AND_LINE);
if (VerifyOops) {
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->check_oop(_masm, to_reg->as_register(), FILE_AND_LINE); // kills R0
}
break;
}
case T_FLOAT: __ lfsx(to_reg->as_float_reg() , base, disp); break;

View File

@@ -255,3 +255,7 @@ void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler *masm, Register tmp1,
__ block_comment("} c2i_entry_barrier (c2i_entry_barrier)");
}
// PPC oop verification entry point. Note the platform-specific signature
// (message string instead of temp registers and an error label); it simply
// forwards to the MacroAssembler's existing verify_oop implementation.
void BarrierSetAssembler::check_oop(MacroAssembler *masm, Register oop, const char* msg) {
__ verify_oop(oop, msg);
}

View File

@@ -69,6 +69,8 @@ public:
virtual void nmethod_entry_barrier(MacroAssembler* masm, Register tmp);
virtual void c2i_entry_barrier(MacroAssembler* masm, Register tmp1, Register tmp2, Register tmp3);
virtual void check_oop(MacroAssembler *masm, Register oop, const char* msg);
};
#endif // CPU_PPC_GC_SHARED_BARRIERSETASSEMBLER_PPC_HPP

View File

@@ -308,3 +308,17 @@ void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) {
__ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
__ bind(method_live);
}
// Verify that obj plausibly refers to a real oop; branch to the error
// label when a check fails. Clobbers tmp1 and tmp2, and replaces obj with
// its klass pointer. RISC-V has no flags register, so compare-and-branch
// instructions are used directly.
void BarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error) {
// Check if the oop is in the right area of memory:
// (obj & verify_oop_mask) must equal verify_oop_bits.
__ mv(tmp2, (intptr_t) Universe::verify_oop_mask());
__ andr(tmp1, obj, tmp2);
__ mv(tmp2, (intptr_t) Universe::verify_oop_bits());
// Compare tmp1 and tmp2.
__ bne(tmp1, tmp2, error);
// Make sure klass is 'reasonable', which is not zero.
__ load_klass(obj, obj, tmp1); // get klass
__ beqz(obj, error); // if klass is NULL it is broken
}

View File

@@ -74,6 +74,8 @@ public:
virtual void nmethod_entry_barrier(MacroAssembler* masm, Label* slow_path, Label* continuation, Label* guard);
virtual void c2i_entry_barrier(MacroAssembler* masm);
virtual void check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error);
virtual bool supports_instruction_patching() {
NMethodPatchingType patching_type = nmethod_patching_type();
return patching_type == NMethodPatchingType::conc_instruction_and_data_patch ||

View File

@@ -444,3 +444,17 @@ void ZBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler*
#undef __
#endif // COMPILER1
#define __ masm->
// ZGC-specific oop verification: first check the colored-pointer metadata
// bits against the thread-local bad mask, then delegate to the generic
// address-range and klass checks. Branches to error on failure.
void ZBarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error) {
// Check if mask is good.
// verifies that ZAddressBadMask & obj == 0
__ ld(tmp2, Address(xthread, ZThreadLocalData::address_bad_mask_offset()));
__ andr(tmp1, obj, tmp2);
__ bnez(tmp1, error);
// Fall through to the GC-agnostic checks (oop mask/bits, non-null klass).
BarrierSetAssembler::check_oop(masm, obj, tmp1, tmp2, error);
}
#undef __

View File

@@ -99,6 +99,8 @@ public:
void generate_c2_load_barrier_stub(MacroAssembler* masm,
ZLoadBarrierStubC2* stub) const;
#endif // COMPILER2
void check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error);
};
#endif // CPU_RISCV_GC_Z_ZBARRIERSETASSEMBLER_RISCV_HPP

View File

@@ -614,27 +614,8 @@ class StubGenerator: public StubCodeGenerator {
// make sure object is 'reasonable'
__ beqz(x10, exit); // if obj is NULL it is OK
#if INCLUDE_ZGC
if (UseZGC) {
// Check if mask is good.
// verifies that ZAddressBadMask & x10 == 0
__ ld(c_rarg3, Address(xthread, ZThreadLocalData::address_bad_mask_offset()));
__ andr(c_rarg2, x10, c_rarg3);
__ bnez(c_rarg2, error);
}
#endif
// Check if the oop is in the right area of memory
__ mv(c_rarg3, (intptr_t) Universe::verify_oop_mask());
__ andr(c_rarg2, x10, c_rarg3);
__ mv(c_rarg3, (intptr_t) Universe::verify_oop_bits());
// Compare c_rarg2 and c_rarg3.
__ bne(c_rarg2, c_rarg3, error);
// make sure klass is 'reasonable', which is not zero.
__ load_klass(x10, x10); // get klass
__ beqz(x10, error); // if klass is NULL it is broken
BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
bs_asm->check_oop(_masm, x10, c_rarg2, c_rarg3, error);
// return if everything seems ok
__ bind(exit);

View File

@@ -366,3 +366,18 @@ void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) {
__ pop(tmp1);
#endif
}
// Verify that obj plausibly refers to a real oop; jump to the error label
// when a check fails. Clobbers tmp1 and tmp2 (and the flags, via cmp/test),
// and replaces obj with its klass pointer.
void BarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error) {
// Check if the oop is in the right area of memory:
// (obj & verify_oop_mask) must equal verify_oop_bits.
__ movptr(tmp1, obj);
__ movptr(tmp2, (intptr_t) Universe::verify_oop_mask());
__ andptr(tmp1, tmp2);
__ movptr(tmp2, (intptr_t) Universe::verify_oop_bits());
__ cmpptr(tmp1, tmp2);
__ jcc(Assembler::notZero, error);
// make sure klass is 'reasonable', which is not zero.
__ load_klass(obj, obj, tmp1); // get klass
__ testptr(obj, obj);
__ jcc(Assembler::zero, error); // if klass is NULL it is broken
}

View File

@@ -64,6 +64,8 @@ public:
virtual void nmethod_entry_barrier(MacroAssembler* masm, Label* slow_path, Label* continuation);
virtual void c2i_entry_barrier(MacroAssembler* masm);
virtual void check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error);
};
#endif // CPU_X86_GC_SHARED_BARRIERSETASSEMBLER_X86_HPP

View File

@@ -29,6 +29,7 @@
#include "gc/z/zBarrierSet.hpp"
#include "gc/z/zBarrierSetAssembler.hpp"
#include "gc/z/zBarrierSetRuntime.hpp"
#include "gc/z/zThreadLocalData.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
@@ -700,3 +701,14 @@ void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, Z
#undef __
#endif // COMPILER2
#define __ masm->
// ZGC-specific oop verification: testptr checks the colored-pointer
// metadata bits against the thread-local bad mask in one memory-operand
// instruction, then delegates to the generic checks. Jumps to error on
// failure.
void ZBarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error) {
// Check if metadata bits indicate a bad oop
__ testptr(obj, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset()));
__ jcc(Assembler::notZero, error);
// Fall through to the GC-agnostic checks (oop mask/bits, non-null klass).
BarrierSetAssembler::check_oop(masm, obj, tmp1, tmp2, error);
}
#undef __

View File

@@ -96,6 +96,8 @@ public:
void generate_c2_load_barrier_stub(MacroAssembler* masm,
ZLoadBarrierStubC2* stub) const;
#endif // COMPILER2
void check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error);
};
#endif // CPU_X86_GC_Z_ZBARRIERSETASSEMBLER_X86_HPP

View File

@@ -1093,26 +1093,8 @@ address StubGenerator::generate_verify_oop() {
__ testptr(rax, rax);
__ jcc(Assembler::zero, exit); // if obj is NULL it is OK
#if INCLUDE_ZGC
if (UseZGC) {
// Check if metadata bits indicate a bad oop
__ testptr(rax, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset()));
__ jcc(Assembler::notZero, error);
}
#endif
// Check if the oop is in the right area of memory
__ movptr(c_rarg2, rax);
__ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());
__ andptr(c_rarg2, c_rarg3);
__ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits());
__ cmpptr(c_rarg2, c_rarg3);
__ jcc(Assembler::notZero, error);
// make sure klass is 'reasonable', which is not zero.
__ load_klass(rax, rax, rscratch1); // get klass
__ testptr(rax, rax);
__ jcc(Assembler::zero, error); // if klass is NULL it is broken
BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
bs_asm->check_oop(_masm, rax, c_rarg2, c_rarg3, error);
// return if everything seems ok
__ bind(exit);