8369506: Bytecode rewriting causes Java heap corruption on AArch64

Co-authored-by: Man Cao <manc@openjdk.org>
Co-authored-by: Chuck Rasbold <rasbold@openjdk.org>
Reviewed-by: shade, aph, manc
Author: Justin King   Date: 2025-10-16 19:59:13 +00:00
parent 1392a0b460
commit 18fd047702
3 changed files with 25 additions and 3 deletions

src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp

@@ -1704,3 +1704,14 @@ void InterpreterMacroAssembler::load_method_entry(Register cache, Register index
add(cache, cache, Array<ResolvedMethodEntry>::base_offset_in_bytes());
lea(cache, Address(cache, index));
}
#ifdef ASSERT
void InterpreterMacroAssembler::verify_field_offset(Register reg) {
// Verify the field offset is not in the header, implicitly checks for 0
Label L;
subs(zr, reg, oopDesc::base_offset_in_bytes());
br(Assembler::GE, L);
stop("bad field offset");
bind(L);
}
#endif
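
The new assert-only helper halts the interpreter before it uses a field offset that would land inside the object header. A minimal sketch in plain C++ (not the HotSpot assembler DSL) of the condition the generated code checks, where header_size_in_bytes stands in for oopDesc::base_offset_in_bytes():

    #include <cstdio>
    #include <cstdlib>

    // Reject any field offset below the object header size; this also catches
    // the offset 0 that a not-yet-resolved field entry would produce.
    static void verify_field_offset_sketch(int field_offset, int header_size_in_bytes) {
      if (field_offset < header_size_in_bytes) {     // subs/br(GE) in the generated code
        std::fprintf(stderr, "bad field offset\n");  // stop("bad field offset") above
        std::abort();
      }
    }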

src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp

@@ -319,6 +319,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
void load_resolved_indy_entry(Register cache, Register index);
void load_field_entry(Register cache, Register index, int bcp_offset = 1);
void load_method_entry(Register cache, Register index, int bcp_offset = 1);
void verify_field_offset(Register reg) NOT_DEBUG_RETURN;
};
#endif // CPU_AARCH64_INTERP_MASM_AARCH64_HPP
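
The declaration uses HotSpot's NOT_DEBUG_RETURN macro, so product builds get an empty inline body (and the calls below compile away) while debug builds pick up the #ifdef ASSERT definition shown above. Roughly, from utilities/macros.hpp (quoted from memory):

    #ifdef ASSERT
    #define NOT_DEBUG_RETURN  /* next token must be ; */
    #else
    #define NOT_DEBUG_RETURN  {}
    #endif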

src/hotspot/cpu/aarch64/templateTable_aarch64.cpp

@@ -168,6 +168,7 @@ void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
int byte_no)
{
assert_different_registers(bc_reg, temp_reg);
if (!RewriteBytecodes) return;
Label L_patch_done;
@@ -231,9 +232,12 @@ void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
__ stop("patching the wrong bytecode");
__ bind(L_okay);
#endif
// patch bytecode
__ strb(bc_reg, at_bcp(0));
// Patch bytecode with release store to coordinate with ResolvedFieldEntry loads
// in fast bytecode codelets. load_field_entry has a memory barrier that gains
// the needed ordering, together with control dependency on entering the fast codelet
// itself.
__ lea(temp_reg, at_bcp(0));
__ stlrb(bc_reg, temp_reg);
__ bind(L_patch_done);
}
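
This hunk is the core of the fix: the ResolvedFieldEntry must be fully populated before another thread can observe the rewritten (fast) bytecode, so the plain strb becomes a store-release (stlrb); and since stlrb takes a register-only address, temp_reg is now used to materialize the bcp address (hence the assert_different_registers added above). On the consumer side, the barrier in load_field_entry plus the control dependency on dispatching into the fast codelet supplies the matching acquire. A loose C++ analogy of that publish/consume protocol, with illustrative names and the acquire folded into the bytecode load for brevity:

    #include <atomic>

    struct ResolvedFieldEntrySketch {        // illustrative stand-in
      int field_offset = 0;                  // 0 while the entry is unresolved
    };

    ResolvedFieldEntrySketch entry;
    std::atomic<unsigned char> bytecode{0};  // 0 = slow bytecode, 1 = fast bytecode

    // Patching side: fill in the entry, then publish the fast bytecode (stlrb).
    void patch_side() {
      entry.field_offset = 12;
      bytecode.store(1, std::memory_order_release);
    }

    // Dispatch side: only the fast bytecode routes into the fast codelet, and the
    // entry is read behind an acquire, so field_offset is never seen as a stale 0.
    int fast_codelet_side() {
      if (bytecode.load(std::memory_order_acquire) == 1) {
        return entry.field_offset;
      }
      return -1;   // still on the slow path
    }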
@@ -3094,6 +3098,7 @@ void TemplateTable::fast_storefield(TosState state)
// R1: field offset, R2: field holder, R5: flags
load_resolved_field_entry(r2, r2, noreg, r1, r5);
__ verify_field_offset(r1);
{
Label notVolatile;
@@ -3183,6 +3188,8 @@ void TemplateTable::fast_accessfield(TosState state)
__ load_field_entry(r2, r1);
__ load_sized_value(r1, Address(r2, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);
__ verify_field_offset(r1);
__ load_unsigned_byte(r3, Address(r2, in_bytes(ResolvedFieldEntry::flags_offset())));
// r0: object
@@ -3249,7 +3256,9 @@ void TemplateTable::fast_xaccess(TosState state)
__ ldr(r0, aaddress(0));
// access constant pool cache
__ load_field_entry(r2, r3, 2);
__ load_sized_value(r1, Address(r2, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);
__ verify_field_offset(r1);
// 8179954: We need to make sure that the code generated for
// volatile accesses forms a sequentially-consistent set of