diff --git a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
index 607912e6e494f..6f8795494a2bb 100644
--- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
@@ -1704,3 +1704,14 @@ void InterpreterMacroAssembler::load_method_entry(Register cache, Register index, int bcp_offset) {
   add(cache, cache, Array<ResolvedMethodEntry>::base_offset_in_bytes());
   lea(cache, Address(cache, index));
 }
+
+#ifdef ASSERT
+void InterpreterMacroAssembler::verify_field_offset(Register reg) {
+  // Verify that the field offset does not point into the object header;
+  // this implicitly also catches an unresolved (zero) offset.
+  Label L;
+  subs(zr, reg, oopDesc::base_offset_in_bytes());
+  br(Assembler::GE, L);
+  stop("bad field offset");
+  bind(L);
+}
+#endif
diff --git a/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp b/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp
index e896a2a943041..e07e6e49f535d 100644
--- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp
@@ -319,6 +319,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
   void load_resolved_indy_entry(Register cache, Register index);
   void load_field_entry(Register cache, Register index, int bcp_offset = 1);
   void load_method_entry(Register cache, Register index, int bcp_offset = 1);
+
+  void verify_field_offset(Register reg) NOT_DEBUG_RETURN;
 };
 
 #endif // CPU_AARCH64_INTERP_MASM_AARCH64_HPP
diff --git a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
index 5195432f54ea2..f4774f31bbd42 100644
--- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
@@ -168,6 +168,7 @@ void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                    Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                    int byte_no)
 {
+  assert_different_registers(bc_reg, temp_reg);
   if (!RewriteBytecodes)  return;
   Label L_patch_done;
 
@@ -231,9 +232,12 @@ void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
   __ stop("patching the wrong bytecode");
   __ bind(L_okay);
 #endif
-
-  // patch bytecode
-  __ strb(bc_reg, at_bcp(0));
+  // Patch the bytecode with a release store to coordinate with the
+  // ResolvedFieldEntry loads in the fast bytecode codelets. load_field_entry
+  // has a memory barrier that provides the needed ordering, together with
+  // the control dependency on entering the fast codelet itself.
+  __ lea(temp_reg, at_bcp(0));
+  __ stlrb(bc_reg, temp_reg);
   __ bind(L_patch_done);
 }
 
@@ -3094,6 +3098,7 @@ void TemplateTable::fast_storefield(TosState state)
 
   // R1: field offset, R2: field holder, R5: flags
   load_resolved_field_entry(r2, r2, noreg, r1, r5);
+  __ verify_field_offset(r1);
 
   {
     Label notVolatile;
@@ -3183,6 +3188,8 @@ void TemplateTable::fast_accessfield(TosState state)
   __ load_field_entry(r2, r1);
   __ load_sized_value(r1, Address(r2, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);
+  __ verify_field_offset(r1);
+
   __ load_unsigned_byte(r3, Address(r2, in_bytes(ResolvedFieldEntry::flags_offset())));
 
   // r0: object
@@ -3249,7 +3256,9 @@ void TemplateTable::fast_xaccess(TosState state)
   __ ldr(r0, aaddress(0));
   // access constant pool cache
   __ load_field_entry(r2, r3, 2);
+
   __ load_sized_value(r1, Address(r2, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);
+  __ verify_field_offset(r1);
 
   // 8179954: We need to make sure that the code generated for
   // volatile accesses forms a sequentially-consistent set of
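
A note on the ordering argument in the patch_bytecode hunk. The change replaces a plain strb with stlrb, AArch64's store-release byte instruction, so that the stores which fill in the ResolvedFieldEntry become visible no later than the rewritten (fast) bytecode; the reader side pairs this with the barrier in load_field_entry and the control dependency of having dispatched to the fast codelet at all. The sketch below models that pairing with C++11 atomics. It is illustrative only, not HotSpot code, and the names (Entry, bytecode, field_offset, the offset value 16) are invented for the example.

#include <atomic>
#include <cassert>
#include <thread>

struct Entry {
  int field_offset = 0;                    // 0 means "not resolved yet"
};

Entry entry;                               // resolved before the bytecode is patched
std::atomic<unsigned char> bytecode{0};    // 0 = slow bytecode, 1 = fast bytecode

void patching_thread() {
  entry.field_offset = 16;                       // resolve the entry first
  bytecode.store(1, std::memory_order_release);  // like stlrb: earlier stores may not
                                                 // become visible after this one
}

void interpreting_thread() {
  // Like the fast codelet: the entry is only read after the patched bytecode
  // has been observed (the acquire here plays the role of the barrier in
  // load_field_entry plus the control dependency on entering the codelet).
  if (bytecode.load(std::memory_order_acquire) == 1) {
    assert(entry.field_offset != 0);             // must see the resolved offset
  }
}

int main() {
  std::thread a(patching_thread), b(interpreting_thread);
  a.join();
  b.join();
  return 0;
}

This is also what makes the new verify_field_offset assertion effective: a valid instance field always lives past the object header, so checking the offset against oopDesc::base_offset_in_bytes() rejects an unresolved zero offset as well, which is exactly the value a racing fast codelet would have observed without the release/acquire pairing.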