diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp index f14dda0f812..a277a689280 100644 --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp @@ -5349,7 +5349,7 @@ void MacroAssembler::encode_klass_not_null_for_aot(Register dst, Register src) { // we have to load the klass base from the AOT constants area but // not the shift because it is not allowed to change int shift = CompressedKlassPointers::shift(); - assert(shift >= 0 && shift < 4, "unexpected compressd klass shift!"); + assert(shift >= 0 && shift <= CompressedKlassPointers::max_shift(), "unexpected compressed klass shift!"); if (dst != src) { // we can load the base into dst, subtract it formthe src and shift down lea(dst, ExternalAddress(CompressedKlassPointers::base_addr())); @@ -5415,7 +5415,7 @@ void MacroAssembler::decode_klass_not_null_for_aot(Register dst, Register src) { // we have to load the klass base from the AOT constants area but // not the shift because it is not allowed to change int shift = CompressedKlassPointers::shift(); - assert(shift >= 0 && shift < 4, "unexpected compressd klass shift!"); + assert(shift >= 0 && shift <= CompressedKlassPointers::max_shift(), "unexpected compressed klass shift!"); if (dst != src) { // we can load the base into dst then add the offset with a suitable shift lea(dst, ExternalAddress(CompressedKlassPointers::base_addr())); diff --git a/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp b/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp index 11ea02621d7..714904ab3df 100644 --- a/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp @@ -25,6 +25,7 @@ #include "asm/assembler.inline.hpp" #include "asm/macroAssembler.inline.hpp" +#include "code/aotCodeCache.hpp" #include "code/compiledIC.hpp" #include "code/vtableStubs.hpp" #include "interp_masm_aarch64.hpp" @@ -196,7 +197,7 @@ VtableStub* 
VtableStubs::create_itable_stub(int itable_index) { temp_reg, temp_reg2, itable_index, L_no_such_interface); // Reduce "estimate" such that "padding" does not drop below 8. - const ptrdiff_t estimate = 144; + const ptrdiff_t estimate = AOTCodeCache::is_on_for_dump() ? 148 : 144; const ptrdiff_t codesize = __ pc() - start_pc; slop_delta = (int)(estimate - codesize); slop_bytes += slop_delta; diff --git a/src/hotspot/share/code/relocInfo.cpp b/src/hotspot/share/code/relocInfo.cpp index 81fa83dbe8d..8fc22596d01 100644 --- a/src/hotspot/share/code/relocInfo.cpp +++ b/src/hotspot/share/code/relocInfo.cpp @@ -187,14 +187,17 @@ RelocIterator::RelocIterator(CodeBlob* cb) { _code = nullptr; } _current = cb->relocation_begin() - 1; - _end = cb->relocation_end(); - _addr = cb->content_begin(); + _end = cb->relocation_end(); + _addr = cb->content_begin(); _section_start[CodeBuffer::SECT_CONSTS] = cb->content_begin(); - _section_start[CodeBuffer::SECT_INSTS] = cb->code_begin(); + _section_start[CodeBuffer::SECT_INSTS ] = cb->code_begin(); + _section_start[CodeBuffer::SECT_STUBS ] = cb->code_end(); + + _section_end [CodeBuffer::SECT_CONSTS] = cb->code_begin(); + _section_end [CodeBuffer::SECT_INSTS ] = cb->code_end(); + _section_end [CodeBuffer::SECT_STUBS ] = cb->code_end(); - _section_end[CodeBuffer::SECT_CONSTS] = cb->code_begin(); - _section_start[CodeBuffer::SECT_INSTS] = cb->code_end(); assert(!has_current(), "just checking"); set_limits(nullptr, nullptr); } diff --git a/test/hotspot/jtreg/runtime/cds/appcds/aotCode/AOTCodeCompressedOopsTest.java b/test/hotspot/jtreg/runtime/cds/appcds/aotCode/AOTCodeCompressedOopsTest.java index 524b613b5c3..267b9a267b2 100644 --- a/test/hotspot/jtreg/runtime/cds/appcds/aotCode/AOTCodeCompressedOopsTest.java +++ b/test/hotspot/jtreg/runtime/cds/appcds/aotCode/AOTCodeCompressedOopsTest.java @@ -26,7 +26,10 @@ * @test * @summary Sanity test of AOT Code Cache with compressed oops configurations * @requires vm.cds.supports.aot.code.caching - 
* @requires vm.flagless + * @requires vm.compMode != "Xcomp" + * @comment The test verifies AOT checks during VM startup and not code generation. + * No need to run it with -Xcomp. It takes a lot of time to complete all + * subtests with this flag. * @library /test/lib /test/setup_aot * @build AOTCodeCompressedOopsTest JavacBenchApp * @run driver jdk.test.lib.helpers.ClassFileInstaller -jar app.jar @@ -198,7 +201,7 @@ public class AOTCodeCompressedOopsTest { } if (aotCacheShift != currentShift) { out.shouldContain("AOT Code Cache disabled: it was created with different CompressedOops::shift()"); - } else if (aotCacheBase != currentBase) { + } else if ((aotCacheBase == 0 || currentBase == 0) && (aotCacheBase != currentBase)) { out.shouldContain("AOTStubCaching is disabled: incompatible CompressedOops::base()"); } else { out.shouldMatch("Read \\d+ entries table at offset \\d+ from AOT Code Cache"); diff --git a/test/hotspot/jtreg/runtime/cds/appcds/aotCode/AOTCodeFlags.java b/test/hotspot/jtreg/runtime/cds/appcds/aotCode/AOTCodeFlags.java index d47daaff68c..92e0808b4d1 100644 --- a/test/hotspot/jtreg/runtime/cds/appcds/aotCode/AOTCodeFlags.java +++ b/test/hotspot/jtreg/runtime/cds/appcds/aotCode/AOTCodeFlags.java @@ -26,7 +26,11 @@ * @test * @summary Sanity test of combinations of the AOT Code Caching diagnostic flags * @requires vm.cds.supports.aot.code.caching - * @requires vm.flagless + * @requires vm.compiler1.enabled & vm.compiler2.enabled + * @comment Both C1 and C2 JIT compilers are required because the test verifies + * compilers' runtime blob generation. + * @requires vm.opt.VerifyOops == null | vm.opt.VerifyOops == false + * @comment VerifyOops flag switches off AOT code generation. Skip it. * @library /test/lib /test/setup_aot * @build AOTCodeFlags JavacBenchApp * @run driver jdk.test.lib.helpers.ClassFileInstaller -jar app.jar