From b1fa1ecc988fb07f191892a459625c2c8f2de3b5 Mon Sep 17 00:00:00 2001 From: Andrew Dinn Date: Wed, 9 Jul 2025 08:48:07 +0000 Subject: [PATCH] 8360707: Globally enumerate all blobs, stubs and entries Reviewed-by: kvn, fyang, asmehra --- .../cpu/aarch64/c1_CodeStubs_aarch64.cpp | 44 +- .../cpu/aarch64/c1_LIRAssembler_aarch64.cpp | 34 +- .../cpu/aarch64/c1_LIRGenerator_aarch64.cpp | 8 +- .../cpu/aarch64/c1_MacroAssembler_aarch64.cpp | 4 +- .../cpu/aarch64/c1_Runtime1_aarch64.cpp | 110 +- src/hotspot/cpu/aarch64/frame_aarch64.cpp | 4 +- src/hotspot/cpu/aarch64/runtime_aarch64.cpp | 12 +- .../cpu/aarch64/sharedRuntime_aarch64.cpp | 32 +- .../cpu/aarch64/stubGenerator_aarch64.cpp | 392 +++--- src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp | 50 +- src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp | 22 +- src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp | 6 +- src/hotspot/cpu/arm/c1_Runtime1_arm.cpp | 92 +- src/hotspot/cpu/arm/runtime_arm.cpp | 4 +- src/hotspot/cpu/arm/sharedRuntime_arm.cpp | 14 +- src/hotspot/cpu/arm/stubDeclarations_arm.hpp | 2 +- src/hotspot/cpu/arm/stubGenerator_arm.cpp | 136 +-- .../cpu/arm/stubRoutinesCrypto_arm.cpp | 10 +- src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp | 42 +- src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp | 20 +- src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp | 8 +- src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp | 6 +- src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp | 112 +- src/hotspot/cpu/ppc/runtime_ppc.cpp | 2 +- src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp | 18 +- src/hotspot/cpu/ppc/stubGenerator_ppc.cpp | 254 ++-- src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp | 44 +- .../riscv/c1_LIRAssembler_arraycopy_riscv.cpp | 2 +- .../cpu/riscv/c1_LIRAssembler_riscv.cpp | 24 +- .../cpu/riscv/c1_LIRGenerator_riscv.cpp | 8 +- .../cpu/riscv/c1_MacroAssembler_riscv.cpp | 4 +- src/hotspot/cpu/riscv/c1_Runtime1_riscv.cpp | 110 +- src/hotspot/cpu/riscv/frame_riscv.cpp | 4 +- src/hotspot/cpu/riscv/runtime_riscv.cpp | 4 +- src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp | 16 +- src/hotspot/cpu/riscv/stubGenerator_riscv.cpp | 294 ++--- src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp | 52 +- src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp | 16 +- src/hotspot/cpu/s390/c1_LIRGenerator_s390.cpp | 8 +- .../cpu/s390/c1_MacroAssembler_s390.cpp | 4 +- src/hotspot/cpu/s390/c1_Runtime1_s390.cpp | 112 +- src/hotspot/cpu/s390/runtime_s390.cpp | 2 +- src/hotspot/cpu/s390/sharedRuntime_s390.cpp | 14 +- src/hotspot/cpu/s390/stubGenerator_s390.cpp | 188 +-- src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp | 52 +- src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp | 18 +- src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp | 8 +- src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp | 4 +- src/hotspot/cpu/x86/c1_Runtime1_x86.cpp | 112 +- .../x86/c2_stubGenerator_x86_64_string.cpp | 4 +- src/hotspot/cpu/x86/frame_x86.cpp | 4 +- src/hotspot/cpu/x86/runtime_x86_64.cpp | 12 +- src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp | 32 +- src/hotspot/cpu/x86/stubDeclarations_x86.hpp | 4 +- src/hotspot/cpu/x86/stubGenerator_x86_64.cpp | 256 ++-- src/hotspot/cpu/x86/stubGenerator_x86_64.hpp | 40 +- .../cpu/x86/stubGenerator_x86_64_adler.cpp | 4 +- .../cpu/x86/stubGenerator_x86_64_aes.cpp | 24 +- .../x86/stubGenerator_x86_64_arraycopy.cpp | 134 +- .../cpu/x86/stubGenerator_x86_64_cbrt.cpp | 2 +- .../cpu/x86/stubGenerator_x86_64_chacha.cpp | 4 +- .../cpu/x86/stubGenerator_x86_64_cos.cpp | 4 +- .../x86/stubGenerator_x86_64_dilithium.cpp | 10 +- .../cpu/x86/stubGenerator_x86_64_exp.cpp | 4 +- .../cpu/x86/stubGenerator_x86_64_fmod.cpp | 4 +- .../cpu/x86/stubGenerator_x86_64_ghash.cpp | 6 
+- .../cpu/x86/stubGenerator_x86_64_kyber.cpp | 14 +- .../cpu/x86/stubGenerator_x86_64_log.cpp | 6 +- .../cpu/x86/stubGenerator_x86_64_poly1305.cpp | 4 +- .../x86/stubGenerator_x86_64_poly_mont.cpp | 4 +- .../cpu/x86/stubGenerator_x86_64_pow.cpp | 4 +- .../cpu/x86/stubGenerator_x86_64_sha3.cpp | 12 +- .../cpu/x86/stubGenerator_x86_64_sin.cpp | 4 +- .../cpu/x86/stubGenerator_x86_64_tan.cpp | 4 +- .../cpu/x86/stubGenerator_x86_64_tanh.cpp | 2 +- src/hotspot/cpu/zero/sharedRuntime_zero.cpp | 6 +- src/hotspot/cpu/zero/stubGenerator_zero.cpp | 16 +- src/hotspot/share/c1/c1_CodeStubs.hpp | 12 +- src/hotspot/share/c1/c1_LIRGenerator.cpp | 6 +- src/hotspot/share/c1/c1_Runtime1.cpp | 144 ++- src/hotspot/share/c1/c1_Runtime1.hpp | 32 +- src/hotspot/share/code/aotCodeCache.cpp | 27 +- src/hotspot/share/code/aotCodeCache.hpp | 15 + src/hotspot/share/code/codeBlob.hpp | 3 + src/hotspot/share/gc/g1/c1/g1BarrierSetC1.cpp | 4 +- .../shenandoah/c1/shenandoahBarrierSetC1.cpp | 10 +- src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp | 4 +- src/hotspot/share/opto/compile.cpp | 7 +- src/hotspot/share/opto/compile.hpp | 6 +- src/hotspot/share/opto/output.cpp | 3 +- src/hotspot/share/opto/runtime.cpp | 22 +- src/hotspot/share/opto/runtime.hpp | 28 +- src/hotspot/share/runtime/init.cpp | 7 +- src/hotspot/share/runtime/sharedRuntime.cpp | 33 +- src/hotspot/share/runtime/sharedRuntime.hpp | 63 +- .../share/runtime/stubCodeGenerator.cpp | 10 +- .../share/runtime/stubCodeGenerator.hpp | 16 +- .../share/runtime/stubDeclarations.hpp | 409 ++++--- src/hotspot/share/runtime/stubInfo.cpp | 1080 +++++++++++++++++ src/hotspot/share/runtime/stubInfo.hpp | 684 +++++++++++ src/hotspot/share/runtime/stubRoutines.cpp | 142 +-- src/hotspot/share/runtime/stubRoutines.hpp | 63 +- 102 files changed, 3863 insertions(+), 2129 deletions(-) create mode 100644 src/hotspot/share/runtime/stubInfo.cpp create mode 100644 src/hotspot/share/runtime/stubInfo.hpp diff --git a/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp index 2e53ecb8058..954e4abee14 100644 --- a/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp @@ -56,7 +56,7 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) { __ mov_metadata(rscratch1, m); ce->store_parameter(rscratch1, 1); ce->store_parameter(_bci, 0); - __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::counter_overflow_id))); + __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_counter_overflow_id))); ce->add_call_info_here(_info); ce->verify_oop_map(_info); __ b(_continuation); @@ -65,7 +65,7 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) { void RangeCheckStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); if (_info->deoptimize_on_exception()) { - address a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id); + address a = Runtime1::entry_for(StubId::c1_predicate_failed_trap_id); __ far_call(RuntimeAddress(a)); ce->add_call_info_here(_info); ce->verify_oop_map(_info); @@ -78,13 +78,13 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) { } else { __ mov(rscratch1, _index->as_jint()); } - C1StubId stub_id; + StubId stub_id; if (_throw_index_out_of_bounds_exception) { - stub_id = C1StubId::throw_index_exception_id; + stub_id = StubId::c1_throw_index_exception_id; } else { assert(_array != LIR_Opr::nullOpr(), "sanity"); __ mov(rscratch2, _array->as_pointer_register()); - stub_id = C1StubId::throw_range_check_failed_id; + stub_id = StubId::c1_throw_range_check_failed_id; } __ lea(lr, 
RuntimeAddress(Runtime1::entry_for(stub_id))); __ blr(lr); @@ -99,7 +99,7 @@ PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) { void PredicateFailedStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); - address a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id); + address a = Runtime1::entry_for(StubId::c1_predicate_failed_trap_id); __ far_call(RuntimeAddress(a)); ce->add_call_info_here(_info); ce->verify_oop_map(_info); @@ -111,7 +111,7 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) { ce->compilation()->implicit_exception_table()->append(_offset, __ offset()); } __ bind(_entry); - __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::throw_div0_exception_id))); + __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_throw_div0_exception_id))); ce->add_call_info_here(_info); ce->verify_oop_map(_info); #ifdef ASSERT @@ -123,14 +123,14 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) { // Implementation of NewInstanceStub -NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, C1StubId stub_id) { +NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, StubId stub_id) { _result = result; _klass = klass; _klass_reg = klass_reg; _info = new CodeEmitInfo(info); - assert(stub_id == C1StubId::new_instance_id || - stub_id == C1StubId::fast_new_instance_id || - stub_id == C1StubId::fast_new_instance_init_check_id, + assert(stub_id == StubId::c1_new_instance_id || + stub_id == StubId::c1_fast_new_instance_id || + stub_id == StubId::c1_fast_new_instance_init_check_id, "need new_instance id"); _stub_id = stub_id; } @@ -166,7 +166,7 @@ void NewTypeArrayStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); assert(_length->as_register() == r19, "length must in r19,"); assert(_klass_reg->as_register() == r3, "klass_reg must in r3"); - __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::new_type_array_id))); + __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_new_type_array_id))); ce->add_call_info_here(_info); ce->verify_oop_map(_info); assert(_result->as_register() == r0, "result must in r0"); @@ -189,7 +189,7 @@ void NewObjectArrayStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); assert(_length->as_register() == r19, "length must in r19,"); assert(_klass_reg->as_register() == r3, "klass_reg must in r3"); - __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::new_object_array_id))); + __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_new_object_array_id))); ce->add_call_info_here(_info); ce->verify_oop_map(_info); assert(_result->as_register() == r0, "result must in r0"); @@ -201,11 +201,11 @@ void MonitorEnterStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); ce->store_parameter(_obj_reg->as_register(), 1); ce->store_parameter(_lock_reg->as_register(), 0); - C1StubId enter_id; + StubId enter_id; if (ce->compilation()->has_fpu_code()) { - enter_id = C1StubId::monitorenter_id; + enter_id = StubId::c1_monitorenter_id; } else { - enter_id = C1StubId::monitorenter_nofpu_id; + enter_id = StubId::c1_monitorenter_nofpu_id; } __ far_call(RuntimeAddress(Runtime1::entry_for(enter_id))); ce->add_call_info_here(_info); @@ -222,11 +222,11 @@ void MonitorExitStub::emit_code(LIR_Assembler* ce) { } ce->store_parameter(_lock_reg->as_register(), 0); // note: non-blocking leaf routine => no call info needed - C1StubId exit_id; + StubId exit_id; if (ce->compilation()->has_fpu_code()) { - exit_id = C1StubId::monitorexit_id; + exit_id = 
StubId::c1_monitorexit_id; } else { - exit_id = C1StubId::monitorexit_nofpu_id; + exit_id = StubId::c1_monitorexit_nofpu_id; } __ adr(lr, _continuation); __ far_jump(RuntimeAddress(Runtime1::entry_for(exit_id))); @@ -254,7 +254,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) { void DeoptimizeStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); ce->store_parameter(_trap_request, 0); - __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::deoptimize_id))); + __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_deoptimize_id))); ce->add_call_info_here(_info); DEBUG_ONLY(__ should_not_reach_here()); } @@ -264,9 +264,9 @@ void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) { address a; if (_info->deoptimize_on_exception()) { // Deoptimize, do not throw the exception, because it is probably wrong to do it here. - a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id); + a = Runtime1::entry_for(StubId::c1_predicate_failed_trap_id); } else { - a = Runtime1::entry_for(C1StubId::throw_null_pointer_exception_id); + a = Runtime1::entry_for(StubId::c1_throw_null_pointer_exception_id); } ce->compilation()->implicit_exception_table()->append(_offset, __ offset()); diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp index 9e96002187f..31f1e97002a 100644 --- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp @@ -320,19 +320,19 @@ void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) { switch (patching_id(info)) { case PatchingStub::access_field_id: - target = Runtime1::entry_for(C1StubId::access_field_patching_id); + target = Runtime1::entry_for(StubId::c1_access_field_patching_id); reloc_type = relocInfo::section_word_type; break; case PatchingStub::load_klass_id: - target = Runtime1::entry_for(C1StubId::load_klass_patching_id); + target = Runtime1::entry_for(StubId::c1_load_klass_patching_id); reloc_type = relocInfo::metadata_type; break; case PatchingStub::load_mirror_id: - target = Runtime1::entry_for(C1StubId::load_mirror_patching_id); + target = Runtime1::entry_for(StubId::c1_load_mirror_patching_id); reloc_type = relocInfo::oop_type; break; case PatchingStub::load_appendix_id: - target = Runtime1::entry_for(C1StubId::load_appendix_patching_id); + target = Runtime1::entry_for(StubId::c1_load_appendix_patching_id); reloc_type = relocInfo::oop_type; break; default: ShouldNotReachHere(); @@ -374,7 +374,7 @@ int LIR_Assembler::emit_exception_handler() { __ verify_not_null_oop(r0); // search an exception handler (r0: exception oop, r3: throwing pc) - __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::handle_exception_from_callee_id))); + __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_handle_exception_from_callee_id))); __ should_not_reach_here(); guarantee(code_offset() - offset <= exception_handler_size(), "overflow"); __ end_a_stub(); @@ -431,7 +431,7 @@ int LIR_Assembler::emit_unwind_handler() { // remove the activation and dispatch to the unwind handler __ block_comment("remove_frame and dispatch to the unwind handler"); __ remove_frame(initial_frame_size_in_bytes()); - __ far_jump(RuntimeAddress(Runtime1::entry_for(C1StubId::unwind_exception_id))); + __ far_jump(RuntimeAddress(Runtime1::entry_for(StubId::c1_unwind_exception_id))); // Emit the slow path assembly if (stub != nullptr) { @@ -874,19 +874,19 @@ void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) { switch (patching_id(info)) { case 
PatchingStub::access_field_id: - target = Runtime1::entry_for(C1StubId::access_field_patching_id); + target = Runtime1::entry_for(StubId::c1_access_field_patching_id); reloc_type = relocInfo::section_word_type; break; case PatchingStub::load_klass_id: - target = Runtime1::entry_for(C1StubId::load_klass_patching_id); + target = Runtime1::entry_for(StubId::c1_load_klass_patching_id); reloc_type = relocInfo::metadata_type; break; case PatchingStub::load_mirror_id: - target = Runtime1::entry_for(C1StubId::load_mirror_patching_id); + target = Runtime1::entry_for(StubId::c1_load_mirror_patching_id); reloc_type = relocInfo::oop_type; break; case PatchingStub::load_appendix_id: - target = Runtime1::entry_for(C1StubId::load_appendix_patching_id); + target = Runtime1::entry_for(StubId::c1_load_appendix_patching_id); reloc_type = relocInfo::oop_type; break; default: ShouldNotReachHere(); @@ -1358,7 +1358,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L __ br(Assembler::EQ, *success_target); __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize))); - __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id))); + __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id))); __ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize))); // result is a boolean __ cbzw(klass_RInfo, *failure_target); @@ -1369,7 +1369,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr); // call out-of-line instance of __ check_klass_subtype_slow_path(...): __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize))); - __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id))); + __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id))); __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize))); // result is a boolean __ cbz(k_RInfo, *failure_target); @@ -1447,7 +1447,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr); // call out-of-line instance of __ check_klass_subtype_slow_path(...): __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize))); - __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id))); + __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id))); __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize))); // result is a boolean __ cbzw(k_RInfo, *failure_target); @@ -2033,7 +2033,7 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit // exception object is not added to oop map by LinearScan // (LinearScan assumes that no oops are in fixed registers) info->add_register_oop(exceptionOop); - C1StubId unwind_id; + StubId unwind_id; // get current pc information // pc is only needed if the method has an exception handler, the unwind code does not need it. 
@@ -2052,9 +2052,9 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit __ verify_not_null_oop(r0); // search an exception handler (r0: exception oop, r3: throwing pc) if (compilation()->has_fpu_code()) { - unwind_id = C1StubId::handle_exception_id; + unwind_id = StubId::c1_handle_exception_id; } else { - unwind_id = C1StubId::handle_exception_nofpu_id; + unwind_id = StubId::c1_handle_exception_nofpu_id; } __ far_call(RuntimeAddress(Runtime1::entry_for(unwind_id))); @@ -2325,7 +2325,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, nullptr); __ PUSH(src, dst); - __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id))); + __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id))); __ POP(src, dst); __ cbnz(src, cont); diff --git a/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp index 5fee15c8400..f0d09edd0a9 100644 --- a/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp @@ -1245,7 +1245,7 @@ void LIRGenerator::do_NewMultiArray(NewMultiArray* x) { args->append(rank); args->append(varargs); LIR_Opr reg = result_register_for(x->type()); - __ call_runtime(Runtime1::entry_for(C1StubId::new_multi_array_id), + __ call_runtime(Runtime1::entry_for(StubId::c1_new_multi_array_id), LIR_OprFact::illegalOpr, reg, args, info); @@ -1276,14 +1276,14 @@ void LIRGenerator::do_CheckCast(CheckCast* x) { CodeStub* stub; if (x->is_incompatible_class_change_check()) { assert(patching_info == nullptr, "can't patch this"); - stub = new SimpleExceptionStub(C1StubId::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception); + stub = new SimpleExceptionStub(StubId::c1_throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception); } else if (x->is_invokespecial_receiver_check()) { assert(patching_info == nullptr, "can't patch this"); stub = new DeoptimizeStub(info_for_exception, Deoptimization::Reason_class_check, Deoptimization::Action_none); } else { - stub = new SimpleExceptionStub(C1StubId::throw_class_cast_exception_id, obj.result(), info_for_exception); + stub = new SimpleExceptionStub(StubId::c1_throw_class_cast_exception_id, obj.result(), info_for_exception); } LIR_Opr reg = rlock_result(x); LIR_Opr tmp3 = LIR_OprFact::illegalOpr; @@ -1318,7 +1318,7 @@ void LIRGenerator::do_InstanceOf(InstanceOf* x) { // Intrinsic for Class::isInstance address LIRGenerator::isInstance_entry() { - return Runtime1::entry_for(C1StubId::is_instance_of_id); + return Runtime1::entry_for(StubId::c1_is_instance_of_id); } void LIRGenerator::do_If(If* x) { diff --git a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp index afa2ddb47b4..b0f9d6d6c41 100644 --- a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp @@ -271,7 +271,7 @@ void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register if (CURRENT_ENV->dtrace_alloc_probes()) { assert(obj == r0, "must be"); - far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::dtrace_object_alloc_id))); + far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_dtrace_object_alloc_id))); } verify_oop(obj); @@ -312,7 +312,7 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, if 
(CURRENT_ENV->dtrace_alloc_probes()) { assert(obj == r0, "must be"); - far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::dtrace_object_alloc_id))); + far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_dtrace_object_alloc_id))); } verify_oop(obj); diff --git a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp index a6aab24349a..350b6f68196 100644 --- a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp @@ -99,10 +99,10 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre if (frame_size() == no_frame_size) { leave(); far_jump(RuntimeAddress(StubRoutines::forward_exception_entry())); - } else if (_stub_id == (int)C1StubId::forward_exception_id) { + } else if (_stub_id == (int)StubId::c1_forward_exception_id) { should_not_reach_here(); } else { - far_jump(RuntimeAddress(Runtime1::entry_for(C1StubId::forward_exception_id))); + far_jump(RuntimeAddress(Runtime1::entry_for(StubId::c1_forward_exception_id))); } bind(L); } @@ -350,8 +350,8 @@ void Runtime1::initialize_pd() { // return: offset in 64-bit words. uint Runtime1::runtime_blob_current_thread_offset(frame f) { CodeBlob* cb = f.cb(); - assert(cb == Runtime1::blob_for(C1StubId::monitorenter_id) || - cb == Runtime1::blob_for(C1StubId::monitorenter_nofpu_id), "must be"); + assert(cb == Runtime1::blob_for(StubId::c1_monitorenter_id) || + cb == Runtime1::blob_for(StubId::c1_monitorenter_nofpu_id), "must be"); assert(cb != nullptr && cb->is_runtime_stub(), "invalid frame"); int offset = cpu_reg_save_offsets[rthread->encoding()]; return offset / 2; // SP offsets are in halfwords @@ -377,7 +377,7 @@ OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address targe } -OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler *sasm) { +OopMapSet* Runtime1::generate_handle_exception(StubId id, StubAssembler *sasm) { __ block_comment("generate_handle_exception"); // incoming parameters @@ -389,7 +389,7 @@ OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler *sasm) OopMapSet* oop_maps = new OopMapSet(); OopMap* oop_map = nullptr; switch (id) { - case C1StubId::forward_exception_id: + case StubId::c1_forward_exception_id: // We're handling an exception in the context of a compiled frame. // The registers have been saved in the standard places. Perform // an exception lookup in the caller and dispatch to the handler @@ -409,12 +409,12 @@ OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler *sasm) __ str(zr, Address(rthread, JavaThread::vm_result_oop_offset())); __ str(zr, Address(rthread, JavaThread::vm_result_metadata_offset())); break; - case C1StubId::handle_exception_nofpu_id: - case C1StubId::handle_exception_id: + case StubId::c1_handle_exception_nofpu_id: + case StubId::c1_handle_exception_id: // At this point all registers MAY be live. - oop_map = save_live_registers(sasm, id != C1StubId::handle_exception_nofpu_id); + oop_map = save_live_registers(sasm, id != StubId::c1_handle_exception_nofpu_id); break; - case C1StubId::handle_exception_from_callee_id: { + case StubId::c1_handle_exception_from_callee_id: { // At this point all registers except exception oop (r0) and // exception pc (lr) are dead. 
const int frame_size = 2 /*fp, return address*/; @@ -472,13 +472,13 @@ OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler *sasm) __ str(r0, Address(rfp, 1*BytesPerWord)); switch (id) { - case C1StubId::forward_exception_id: - case C1StubId::handle_exception_nofpu_id: - case C1StubId::handle_exception_id: + case StubId::c1_forward_exception_id: + case StubId::c1_handle_exception_nofpu_id: + case StubId::c1_handle_exception_id: // Restore the registers that were saved at the beginning. - restore_live_registers(sasm, id != C1StubId::handle_exception_nofpu_id); + restore_live_registers(sasm, id != StubId::c1_handle_exception_nofpu_id); break; - case C1StubId::handle_exception_from_callee_id: + case StubId::c1_handle_exception_from_callee_id: break; default: ShouldNotReachHere(); } @@ -630,7 +630,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) { } -OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { +OopMapSet* Runtime1::generate_code_for(StubId id, StubAssembler* sasm) { const Register exception_oop = r0; const Register exception_pc = r3; @@ -647,7 +647,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { OopMap* oop_map = nullptr; switch (id) { { - case C1StubId::forward_exception_id: + case StubId::c1_forward_exception_id: { oop_maps = generate_handle_exception(id, sasm); __ leave(); @@ -655,31 +655,31 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::throw_div0_exception_id: + case StubId::c1_throw_div0_exception_id: { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments, does_not_return); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false); } break; - case C1StubId::throw_null_pointer_exception_id: + case StubId::c1_throw_null_pointer_exception_id: { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments, does_not_return); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false); } break; - case C1StubId::new_instance_id: - case C1StubId::fast_new_instance_id: - case C1StubId::fast_new_instance_init_check_id: + case StubId::c1_new_instance_id: + case StubId::c1_fast_new_instance_id: + case StubId::c1_fast_new_instance_init_check_id: { Register klass = r3; // Incoming Register obj = r0; // Result - if (id == C1StubId::new_instance_id) { + if (id == StubId::c1_new_instance_id) { __ set_info("new_instance", dont_gc_arguments); - } else if (id == C1StubId::fast_new_instance_id) { + } else if (id == StubId::c1_fast_new_instance_id) { __ set_info("fast new_instance", dont_gc_arguments); } else { - assert(id == C1StubId::fast_new_instance_init_check_id, "bad C1StubId"); + assert(id == StubId::c1_fast_new_instance_init_check_id, "bad StubId"); __ set_info("fast new_instance init check", dont_gc_arguments); } @@ -698,7 +698,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { break; - case C1StubId::counter_overflow_id: + case StubId::c1_counter_overflow_id: { Register bci = r0, method = r1; __ enter(); @@ -716,14 +716,14 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::new_type_array_id: - case C1StubId::new_object_array_id: + case StubId::c1_new_type_array_id: + case StubId::c1_new_object_array_id: { Register length = r19; // Incoming Register klass = r3; // Incoming Register obj = r0; // Result - if (id == C1StubId::new_type_array_id) { + if (id 
== StubId::c1_new_type_array_id) { __ set_info("new_type_array", dont_gc_arguments); } else { __ set_info("new_object_array", dont_gc_arguments); @@ -736,7 +736,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { Register t0 = obj; __ ldrw(t0, Address(klass, Klass::layout_helper_offset())); __ asrw(t0, t0, Klass::_lh_array_tag_shift); - int tag = ((id == C1StubId::new_type_array_id) + int tag = ((id == StubId::c1_new_type_array_id) ? Klass::_lh_array_tag_type_value : Klass::_lh_array_tag_obj_value); __ mov(rscratch1, tag); @@ -751,7 +751,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { __ enter(); OopMap* map = save_live_registers(sasm); int call_offset; - if (id == C1StubId::new_type_array_id) { + if (id == StubId::c1_new_type_array_id) { call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length); } else { call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length); @@ -769,7 +769,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::new_multi_array_id: + case StubId::c1_new_multi_array_id: { StubFrame f(sasm, "new_multi_array", dont_gc_arguments); // r0,: klass // r19,: rank @@ -789,7 +789,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::register_finalizer_id: + case StubId::c1_register_finalizer_id: { __ set_info("register_finalizer", dont_gc_arguments); @@ -821,19 +821,19 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::throw_class_cast_exception_id: + case StubId::c1_throw_class_cast_exception_id: { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments, does_not_return); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true); } break; - case C1StubId::throw_incompatible_class_change_error_id: + case StubId::c1_throw_incompatible_class_change_error_id: { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments, does_not_return); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false); } break; - case C1StubId::slow_subtype_check_id: + case StubId::c1_slow_subtype_check_id: { // Typical calling sequence: // __ push(klass_RInfo); // object klass or other subclass @@ -882,10 +882,10 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::monitorenter_nofpu_id: + case StubId::c1_monitorenter_nofpu_id: save_fpu_registers = false; // fall through - case C1StubId::monitorenter_id: + case StubId::c1_monitorenter_id: { StubFrame f(sasm, "monitorenter", dont_gc_arguments, requires_pop_epilogue_return); OopMap* map = save_live_registers(sasm, save_fpu_registers); @@ -903,7 +903,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::is_instance_of_id: + case StubId::c1_is_instance_of_id: { // Mirror: c_rarg0 // Object: c_rarg1 @@ -952,10 +952,10 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::monitorexit_nofpu_id: + case StubId::c1_monitorexit_nofpu_id: save_fpu_registers = false; // fall through - case C1StubId::monitorexit_id: + case StubId::c1_monitorexit_id: { StubFrame f(sasm, "monitorexit", dont_gc_arguments); OopMap* map = save_live_registers(sasm, save_fpu_registers); @@ -975,7 +975,7 @@ OopMapSet* 
Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::deoptimize_id: + case StubId::c1_deoptimize_id: { StubFrame f(sasm, "deoptimize", dont_gc_arguments, does_not_return); OopMap* oop_map = save_live_registers(sasm); @@ -992,13 +992,13 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::throw_range_check_failed_id: + case StubId::c1_throw_range_check_failed_id: { StubFrame f(sasm, "range_check_failed", dont_gc_arguments, does_not_return); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true); } break; - case C1StubId::unwind_exception_id: + case StubId::c1_unwind_exception_id: { __ set_info("unwind_exception", dont_gc_arguments); // note: no stubframe since we are about to leave the current // activation and we are calling a leaf VM function only. @@ -1006,54 +1006,54 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::access_field_patching_id: + case StubId::c1_access_field_patching_id: { StubFrame f(sasm, "access_field_patching", dont_gc_arguments, does_not_return); // we should set up register map oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching)); } break; - case C1StubId::load_klass_patching_id: + case StubId::c1_load_klass_patching_id: { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments, does_not_return); // we should set up register map oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching)); } break; - case C1StubId::load_mirror_patching_id: + case StubId::c1_load_mirror_patching_id: { StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments, does_not_return); // we should set up register map oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching)); } break; - case C1StubId::load_appendix_patching_id: + case StubId::c1_load_appendix_patching_id: { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments, does_not_return); // we should set up register map oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching)); } break; - case C1StubId::handle_exception_nofpu_id: - case C1StubId::handle_exception_id: + case StubId::c1_handle_exception_nofpu_id: + case StubId::c1_handle_exception_id: { StubFrame f(sasm, "handle_exception", dont_gc_arguments); oop_maps = generate_handle_exception(id, sasm); } break; - case C1StubId::handle_exception_from_callee_id: + case StubId::c1_handle_exception_from_callee_id: { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments); oop_maps = generate_handle_exception(id, sasm); } break; - case C1StubId::throw_index_exception_id: + case StubId::c1_throw_index_exception_id: { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments, does_not_return); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true); } break; - case C1StubId::throw_array_store_exception_id: + case StubId::c1_throw_array_store_exception_id: { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments, does_not_return); // tos + 0: link // + 1: return address @@ -1061,7 +1061,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::predicate_failed_trap_id: + case StubId::c1_predicate_failed_trap_id: { StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments, does_not_return); @@ -1079,7 +1079,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, 
StubAssembler* sasm) { } break; - case C1StubId::dtrace_object_alloc_id: + case StubId::c1_dtrace_object_alloc_id: { // c_rarg0: object StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments); save_live_registers(sasm); diff --git a/src/hotspot/cpu/aarch64/frame_aarch64.cpp b/src/hotspot/cpu/aarch64/frame_aarch64.cpp index 7ffba17bab3..db72218d6d7 100644 --- a/src/hotspot/cpu/aarch64/frame_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/frame_aarch64.cpp @@ -441,8 +441,8 @@ JavaThread** frame::saved_thread_address(const frame& f) { JavaThread** thread_addr; #ifdef COMPILER1 - if (cb == Runtime1::blob_for(C1StubId::monitorenter_id) || - cb == Runtime1::blob_for(C1StubId::monitorenter_nofpu_id)) { + if (cb == Runtime1::blob_for(StubId::c1_monitorenter_id) || + cb == Runtime1::blob_for(StubId::c1_monitorenter_nofpu_id)) { thread_addr = (JavaThread**)(f.sp() + Runtime1::runtime_blob_current_thread_offset(f)); } else #endif diff --git a/src/hotspot/cpu/aarch64/runtime_aarch64.cpp b/src/hotspot/cpu/aarch64/runtime_aarch64.cpp index 3fcb0e70b57..d45f9865bd2 100644 --- a/src/hotspot/cpu/aarch64/runtime_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/runtime_aarch64.cpp @@ -61,8 +61,8 @@ class SimpleRuntimeFrame { //------------------------------generate_uncommon_trap_blob-------------------- UncommonTrapBlob* OptoRuntime::generate_uncommon_trap_blob() { - const char* name = OptoRuntime::stub_name(OptoStubId::uncommon_trap_id); - CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::C2Blob, (uint)OptoStubId::uncommon_trap_id, name); + const char* name = OptoRuntime::stub_name(StubId::c2_uncommon_trap_id); + CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::C2Blob, BlobId::c2_uncommon_trap_id); if (blob != nullptr) { return blob->as_uncommon_trap_blob(); } @@ -254,7 +254,7 @@ UncommonTrapBlob* OptoRuntime::generate_uncommon_trap_blob() { UncommonTrapBlob *ut_blob = UncommonTrapBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1); - AOTCodeCache::store_code_blob(*ut_blob, AOTCodeEntry::C2Blob, (uint)OptoStubId::uncommon_trap_id, name); + AOTCodeCache::store_code_blob(*ut_blob, AOTCodeEntry::C2Blob, BlobId::c2_uncommon_trap_id); return ut_blob; } @@ -291,8 +291,8 @@ ExceptionBlob* OptoRuntime::generate_exception_blob() { assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned"); - const char* name = OptoRuntime::stub_name(OptoStubId::exception_id); - CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::C2Blob, (uint)OptoStubId::exception_id, name); + const char* name = OptoRuntime::stub_name(StubId::c2_exception_id); + CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::C2Blob, (uint)BlobId::c2_exception_id, name); if (blob != nullptr) { return blob->as_exception_blob(); } @@ -398,7 +398,7 @@ ExceptionBlob* OptoRuntime::generate_exception_blob() { // Set exception blob ExceptionBlob* ex_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1); - AOTCodeCache::store_code_blob(*ex_blob, AOTCodeEntry::C2Blob, (uint)OptoStubId::exception_id, name); + AOTCodeCache::store_code_blob(*ex_blob, AOTCodeEntry::C2Blob, BlobId::c2_exception_id); return ex_blob; } #endif // COMPILER2 diff --git a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp index da2f939e7af..00f97ed04e4 100644 --- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp @@ -2201,8 +2201,8 @@ void SharedRuntime::generate_deopt_blob() { pad += 512; // Increase the 
buffer size when compiling for JVMCI } #endif - const char* name = SharedRuntime::stub_name(SharedStubId::deopt_id); - CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::SharedBlob, (uint)SharedStubId::deopt_id, name); + const char* name = SharedRuntime::stub_name(StubId::shared_deopt_id); + CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::SharedBlob, BlobId::shared_deopt_id); if (blob != nullptr) { _deopt_blob = blob->as_deoptimization_blob(); return; @@ -2572,7 +2572,7 @@ void SharedRuntime::generate_deopt_blob() { } #endif - AOTCodeCache::store_code_blob(*_deopt_blob, AOTCodeEntry::SharedBlob, (uint)SharedStubId::deopt_id, name); + AOTCodeCache::store_code_blob(*_deopt_blob, AOTCodeEntry::SharedBlob, BlobId::shared_deopt_id); } // Number of stack slots between incoming argument block and the start of @@ -2598,12 +2598,12 @@ VMReg SharedRuntime::thread_register() { // Generate a special Compile2Runtime blob that saves all registers, // and setup oopmap. // -SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address call_ptr) { +SafepointBlob* SharedRuntime::generate_handler_blob(StubId id, address call_ptr) { assert(is_polling_page_id(id), "expected a polling page stub id"); // Allocate space for the code. Setup code generation tools. const char* name = SharedRuntime::stub_name(id); - CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::SharedBlob, (uint)id, name); + CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::SharedBlob, StubInfo::blob(id)); if (blob != nullptr) { return blob->as_safepoint_blob(); } @@ -2617,8 +2617,8 @@ SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address cal address start = __ pc(); address call_pc = nullptr; int frame_size_in_words; - bool cause_return = (id == SharedStubId::polling_page_return_handler_id); - RegisterSaver reg_save(id == SharedStubId::polling_page_vectors_safepoint_handler_id /* save_vectors */); + bool cause_return = (id == StubId::shared_polling_page_return_handler_id); + RegisterSaver reg_save(id == StubId::shared_polling_page_vectors_safepoint_handler_id /* save_vectors */); // When the signal occurred, the LR was either signed and stored on the stack (in which // case it will be restored from the stack before being used) or unsigned and not stored @@ -2721,7 +2721,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address cal // Fill-out other meta info SafepointBlob* sp_blob = SafepointBlob::create(&buffer, oop_maps, frame_size_in_words); - AOTCodeCache::store_code_blob(*sp_blob, AOTCodeEntry::SharedBlob, (uint)id, name); + AOTCodeCache::store_code_blob(*sp_blob, AOTCodeEntry::SharedBlob, StubInfo::blob(id)); return sp_blob; } @@ -2733,12 +2733,12 @@ SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address cal // but since this is generic code we don't know what they are and the caller // must do any gc of the args. 
// -RuntimeStub* SharedRuntime::generate_resolve_blob(SharedStubId id, address destination) { +RuntimeStub* SharedRuntime::generate_resolve_blob(StubId id, address destination) { assert (StubRoutines::forward_exception_entry() != nullptr, "must be generated before"); assert(is_resolve_id(id), "expected a resolve stub id"); const char* name = SharedRuntime::stub_name(id); - CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::SharedBlob, (uint)id, name); + CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::SharedBlob, StubInfo::blob(id)); if (blob != nullptr) { return blob->as_runtime_stub(); } @@ -2819,7 +2819,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(SharedStubId id, address desti // frame_size_words or bytes?? RuntimeStub* rs_blob = RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true); - AOTCodeCache::store_code_blob(*rs_blob, AOTCodeEntry::SharedBlob, (uint)id, name); + AOTCodeCache::store_code_blob(*rs_blob, AOTCodeEntry::SharedBlob, StubInfo::blob(id)); return rs_blob; } @@ -2839,7 +2839,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(SharedStubId id, address desti // otherwise assume that stack unwinding will be initiated, so // caller saved registers were assumed volatile in the compiler. -RuntimeStub* SharedRuntime::generate_throw_exception(SharedStubId id, address runtime_entry) { +RuntimeStub* SharedRuntime::generate_throw_exception(StubId id, address runtime_entry) { assert(is_throw_id(id), "expected a throw stub id"); const char* name = SharedRuntime::stub_name(id); @@ -2863,7 +2863,7 @@ RuntimeStub* SharedRuntime::generate_throw_exception(SharedStubId id, address ru const char* timer_msg = "SharedRuntime generate_throw_exception"; TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime)); - CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::SharedBlob, (uint)id, name); + CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::SharedBlob, StubInfo::blob(id)); if (blob != nullptr) { return blob->as_runtime_stub(); } @@ -2928,7 +2928,7 @@ RuntimeStub* SharedRuntime::generate_throw_exception(SharedStubId id, address ru frame_complete, (framesize >> (LogBytesPerWord - LogBytesPerInt)), oop_maps, false); - AOTCodeCache::store_code_blob(*stub, AOTCodeEntry::SharedBlob, (uint)id, name); + AOTCodeCache::store_code_blob(*stub, AOTCodeEntry::SharedBlob, StubInfo::blob(id)); return stub; } @@ -2959,7 +2959,7 @@ RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() { int insts_size = 1024; int locs_size = 64; - const char* name = SharedRuntime::stub_name(SharedStubId::jfr_write_checkpoint_id); + const char* name = SharedRuntime::stub_name(StubId::shared_jfr_write_checkpoint_id); CodeBuffer code(name, insts_size, locs_size); OopMapSet* oop_maps = new OopMapSet(); MacroAssembler* masm = new MacroAssembler(&code); @@ -2998,7 +2998,7 @@ RuntimeStub* SharedRuntime::generate_jfr_return_lease() { int insts_size = 1024; int locs_size = 64; - const char* name = SharedRuntime::stub_name(SharedStubId::jfr_return_lease_id); + const char* name = SharedRuntime::stub_name(StubId::shared_jfr_return_lease_id); CodeBuffer code(name, insts_size, locs_size); OopMapSet* oop_maps = new OopMapSet(); MacroAssembler* masm = new MacroAssembler(&code); diff --git a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp index 8241e94c108..0a7d9af9bff 100644 --- a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp @@ 
-202,7 +202,7 @@ class StubGenerator: public StubCodeGenerator { (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off, "adjust this code"); - StubGenStubId stub_id = StubGenStubId::call_stub_id; + StubId stub_id = StubId::stubgen_call_stub_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -422,7 +422,7 @@ class StubGenerator: public StubCodeGenerator { // r0: exception oop address generate_catch_exception() { - StubGenStubId stub_id = StubGenStubId::catch_exception_id; + StubId stub_id = StubId::stubgen_catch_exception_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -478,7 +478,7 @@ class StubGenerator: public StubCodeGenerator { // so it just needs to be generated code with no x86 prolog address generate_forward_exception() { - StubGenStubId stub_id = StubGenStubId::forward_exception_id; + StubId stub_id = StubId::stubgen_forward_exception_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -568,7 +568,7 @@ class StubGenerator: public StubCodeGenerator { // [tos + 4]: saved r0 // [tos + 5]: saved rscratch1 address generate_verify_oop() { - StubGenStubId stub_id = StubGenStubId::verify_oop_id; + StubId stub_id = StubId::stubgen_verify_oop_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -617,7 +617,7 @@ class StubGenerator: public StubCodeGenerator { } // Generate indices for iota vector. - address generate_iota_indices(StubGenStubId stub_id) { + address generate_iota_indices(StubId stub_id) { __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -662,7 +662,7 @@ class StubGenerator: public StubCodeGenerator { Register base = r10, cnt = r11; __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::zero_blocks_id; + StubId stub_id = StubId::stubgen_zero_blocks_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -802,32 +802,32 @@ class StubGenerator: public StubCodeGenerator { // // s and d are adjusted to point to the remaining words to copy // - void generate_copy_longs(StubGenStubId stub_id, DecoratorSet decorators, Label &start, Register s, Register d, Register count) { + void generate_copy_longs(StubId stub_id, DecoratorSet decorators, Label &start, Register s, Register d, Register count) { BasicType type; copy_direction direction; switch (stub_id) { - case copy_byte_f_id: + case StubId::stubgen_copy_byte_f_id: direction = copy_forwards; type = T_BYTE; break; - case copy_byte_b_id: + case StubId::stubgen_copy_byte_b_id: direction = copy_backwards; type = T_BYTE; break; - case copy_oop_f_id: + case StubId::stubgen_copy_oop_f_id: direction = copy_forwards; type = T_OBJECT; break; - case copy_oop_b_id: + case StubId::stubgen_copy_oop_b_id: direction = copy_backwards; type = T_OBJECT; break; - case copy_oop_uninit_f_id: + case StubId::stubgen_copy_oop_uninit_f_id: direction = copy_forwards; type = T_OBJECT; break; - case copy_oop_uninit_b_id: + case StubId::stubgen_copy_oop_uninit_b_id: direction = copy_backwards; type = T_OBJECT; break; @@ -1526,7 +1526,7 @@ class StubGenerator: public StubCodeGenerator { // can be used by the corresponding conjoint copy // method // - address generate_disjoint_copy(StubGenStubId stub_id, address *entry) { + address generate_disjoint_copy(StubId stub_id, address *entry) { Register s = c_rarg0, d = c_rarg1, count = c_rarg2; RegSet saved_reg = RegSet::of(s, d, count); int size; @@ -1534,72 +1534,72 @@ class StubGenerator: public StubCodeGenerator { bool is_oop; bool dest_uninitialized; switch (stub_id) { - case 
jbyte_disjoint_arraycopy_id: + case StubId::stubgen_jbyte_disjoint_arraycopy_id: size = sizeof(jbyte); aligned = false; is_oop = false; dest_uninitialized = false; break; - case arrayof_jbyte_disjoint_arraycopy_id: + case StubId::stubgen_arrayof_jbyte_disjoint_arraycopy_id: size = sizeof(jbyte); aligned = true; is_oop = false; dest_uninitialized = false; break; - case jshort_disjoint_arraycopy_id: + case StubId::stubgen_jshort_disjoint_arraycopy_id: size = sizeof(jshort); aligned = false; is_oop = false; dest_uninitialized = false; break; - case arrayof_jshort_disjoint_arraycopy_id: + case StubId::stubgen_arrayof_jshort_disjoint_arraycopy_id: size = sizeof(jshort); aligned = true; is_oop = false; dest_uninitialized = false; break; - case jint_disjoint_arraycopy_id: + case StubId::stubgen_jint_disjoint_arraycopy_id: size = sizeof(jint); aligned = false; is_oop = false; dest_uninitialized = false; break; - case arrayof_jint_disjoint_arraycopy_id: + case StubId::stubgen_arrayof_jint_disjoint_arraycopy_id: size = sizeof(jint); aligned = true; is_oop = false; dest_uninitialized = false; break; - case jlong_disjoint_arraycopy_id: + case StubId::stubgen_jlong_disjoint_arraycopy_id: // since this is always aligned we can (should!) use the same - // stub as for case arrayof_jlong_disjoint_arraycopy + // stub as for case StubId::stubgen_arrayof_jlong_disjoint_arraycopy ShouldNotReachHere(); break; - case arrayof_jlong_disjoint_arraycopy_id: + case StubId::stubgen_arrayof_jlong_disjoint_arraycopy_id: size = sizeof(jlong); aligned = true; is_oop = false; dest_uninitialized = false; break; - case oop_disjoint_arraycopy_id: + case StubId::stubgen_oop_disjoint_arraycopy_id: size = UseCompressedOops ? sizeof (jint) : sizeof (jlong); aligned = !UseCompressedOops; is_oop = true; dest_uninitialized = false; break; - case arrayof_oop_disjoint_arraycopy_id: + case StubId::stubgen_arrayof_oop_disjoint_arraycopy_id: size = UseCompressedOops ? sizeof (jint) : sizeof (jlong); aligned = !UseCompressedOops; is_oop = true; dest_uninitialized = false; break; - case oop_disjoint_arraycopy_uninit_id: + case StubId::stubgen_oop_disjoint_arraycopy_uninit_id: size = UseCompressedOops ? sizeof (jint) : sizeof (jlong); aligned = !UseCompressedOops; is_oop = true; dest_uninitialized = true; break; - case arrayof_oop_disjoint_arraycopy_uninit_id: + case StubId::stubgen_arrayof_oop_disjoint_arraycopy_uninit_id: size = UseCompressedOops ? 
sizeof (jint) : sizeof (jlong); aligned = !UseCompressedOops; is_oop = true; @@ -1682,7 +1682,7 @@ class StubGenerator: public StubCodeGenerator { // entry is set to the no-overlap entry point so it can be used by // some other conjoint copy method // - address generate_conjoint_copy(StubGenStubId stub_id, address nooverlap_target, address *entry) { + address generate_conjoint_copy(StubId stub_id, address nooverlap_target, address *entry) { Register s = c_rarg0, d = c_rarg1, count = c_rarg2; RegSet saved_regs = RegSet::of(s, d, count); int size; @@ -1690,72 +1690,72 @@ class StubGenerator: public StubCodeGenerator { bool is_oop; bool dest_uninitialized; switch (stub_id) { - case jbyte_arraycopy_id: + case StubId::stubgen_jbyte_arraycopy_id: size = sizeof(jbyte); aligned = false; is_oop = false; dest_uninitialized = false; break; - case arrayof_jbyte_arraycopy_id: + case StubId::stubgen_arrayof_jbyte_arraycopy_id: size = sizeof(jbyte); aligned = true; is_oop = false; dest_uninitialized = false; break; - case jshort_arraycopy_id: + case StubId::stubgen_jshort_arraycopy_id: size = sizeof(jshort); aligned = false; is_oop = false; dest_uninitialized = false; break; - case arrayof_jshort_arraycopy_id: + case StubId::stubgen_arrayof_jshort_arraycopy_id: size = sizeof(jshort); aligned = true; is_oop = false; dest_uninitialized = false; break; - case jint_arraycopy_id: + case StubId::stubgen_jint_arraycopy_id: size = sizeof(jint); aligned = false; is_oop = false; dest_uninitialized = false; break; - case arrayof_jint_arraycopy_id: + case StubId::stubgen_arrayof_jint_arraycopy_id: size = sizeof(jint); aligned = true; is_oop = false; dest_uninitialized = false; break; - case jlong_arraycopy_id: + case StubId::stubgen_jlong_arraycopy_id: // since this is always aligned we can (should!) use the same - // stub as for case arrayof_jlong_disjoint_arraycopy + // stub as for case StubId::stubgen_arrayof_jlong_disjoint_arraycopy ShouldNotReachHere(); break; - case arrayof_jlong_arraycopy_id: + case StubId::stubgen_arrayof_jlong_arraycopy_id: size = sizeof(jlong); aligned = true; is_oop = false; dest_uninitialized = false; break; - case oop_arraycopy_id: + case StubId::stubgen_oop_arraycopy_id: size = UseCompressedOops ? sizeof (jint) : sizeof (jlong); aligned = !UseCompressedOops; is_oop = true; dest_uninitialized = false; break; - case arrayof_oop_arraycopy_id: + case StubId::stubgen_arrayof_oop_arraycopy_id: size = UseCompressedOops ? sizeof (jint) : sizeof (jlong); aligned = !UseCompressedOops; is_oop = true; dest_uninitialized = false; break; - case oop_arraycopy_uninit_id: + case StubId::stubgen_oop_arraycopy_uninit_id: size = UseCompressedOops ? sizeof (jint) : sizeof (jlong); aligned = !UseCompressedOops; is_oop = true; dest_uninitialized = true; break; - case arrayof_oop_arraycopy_uninit_id: + case StubId::stubgen_arrayof_oop_arraycopy_uninit_id: size = UseCompressedOops ? 
sizeof (jint) : sizeof (jlong); aligned = !UseCompressedOops; is_oop = true; @@ -1850,13 +1850,13 @@ class StubGenerator: public StubCodeGenerator { // r0 == 0 - success // r0 == -1^K - failure, where K is partial transfer count // - address generate_checkcast_copy(StubGenStubId stub_id, address *entry) { + address generate_checkcast_copy(StubId stub_id, address *entry) { bool dest_uninitialized; switch (stub_id) { - case checkcast_arraycopy_id: + case StubId::stubgen_checkcast_arraycopy_id: dest_uninitialized = false; break; - case checkcast_arraycopy_uninit_id: + case StubId::stubgen_checkcast_arraycopy_uninit_id: dest_uninitialized = true; break; default: @@ -2071,7 +2071,7 @@ class StubGenerator: public StubCodeGenerator { address short_copy_entry, address int_copy_entry, address long_copy_entry) { - StubGenStubId stub_id = StubGenStubId::unsafe_arraycopy_id; + StubId stub_id = StubId::stubgen_unsafe_arraycopy_id; Label L_long_aligned, L_int_aligned, L_short_aligned; Register s = c_rarg0, d = c_rarg1, count = c_rarg2; @@ -2124,7 +2124,7 @@ class StubGenerator: public StubCodeGenerator { address generate_generic_copy(address byte_copy_entry, address short_copy_entry, address int_copy_entry, address oop_copy_entry, address long_copy_entry, address checkcast_copy_entry) { - StubGenStubId stub_id = StubGenStubId::generic_arraycopy_id; + StubId stub_id = StubId::stubgen_generic_arraycopy_id; Label L_failed, L_objArray; Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs; @@ -2395,32 +2395,32 @@ class StubGenerator: public StubCodeGenerator { // value: c_rarg1 // count: c_rarg2 treated as signed // - address generate_fill(StubGenStubId stub_id) { + address generate_fill(StubId stub_id) { BasicType t; bool aligned; switch (stub_id) { - case jbyte_fill_id: + case StubId::stubgen_jbyte_fill_id: t = T_BYTE; aligned = false; break; - case jshort_fill_id: + case StubId::stubgen_jshort_fill_id: t = T_SHORT; aligned = false; break; - case jint_fill_id: + case StubId::stubgen_jint_fill_id: t = T_INT; aligned = false; break; - case arrayof_jbyte_fill_id: + case StubId::stubgen_arrayof_jbyte_fill_id: t = T_BYTE; aligned = true; break; - case arrayof_jshort_fill_id: + case StubId::stubgen_arrayof_jshort_fill_id: t = T_SHORT; aligned = true; break; - case arrayof_jint_fill_id: + case StubId::stubgen_arrayof_jint_fill_id: t = T_INT; aligned = true; break; @@ -2590,7 +2590,7 @@ class StubGenerator: public StubCodeGenerator { // address generate_unsafe_setmemory() { __ align(CodeEntryAlignment); - StubCodeMark mark(this, StubGenStubId::unsafe_setmemory_id); + StubCodeMark mark(this, StubId::stubgen_unsafe_setmemory_id); address start = __ pc(); Register dest = c_rarg0, count = c_rarg1, value = c_rarg2; @@ -2688,7 +2688,7 @@ class StubGenerator: public StubCodeGenerator { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::data_cache_writeback_id; + StubId stub_id = StubId::stubgen_data_cache_writeback_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -2705,7 +2705,7 @@ class StubGenerator: public StubCodeGenerator { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::data_cache_writeback_sync_id; + StubId stub_id = StubId::stubgen_data_cache_writeback_sync_id; StubCodeMark mark(this, stub_id); // pre wbsync is a no-op @@ -2735,44 +2735,44 @@ class StubGenerator: public StubCodeGenerator { address ucm_common_error_exit = generate_unsafecopy_common_error_exit(); UnsafeMemoryAccess::set_common_exit_stub_pc(ucm_common_error_exit); - 
generate_copy_longs(StubGenStubId::copy_byte_f_id, IN_HEAP | IS_ARRAY, copy_f, r0, r1, r15); - generate_copy_longs(StubGenStubId::copy_byte_b_id, IN_HEAP | IS_ARRAY, copy_b, r0, r1, r15); + generate_copy_longs(StubId::stubgen_copy_byte_f_id, IN_HEAP | IS_ARRAY, copy_f, r0, r1, r15); + generate_copy_longs(StubId::stubgen_copy_byte_b_id, IN_HEAP | IS_ARRAY, copy_b, r0, r1, r15); - generate_copy_longs(StubGenStubId::copy_oop_f_id, IN_HEAP | IS_ARRAY, copy_obj_f, r0, r1, r15); - generate_copy_longs(StubGenStubId::copy_oop_b_id, IN_HEAP | IS_ARRAY, copy_obj_b, r0, r1, r15); + generate_copy_longs(StubId::stubgen_copy_oop_f_id, IN_HEAP | IS_ARRAY, copy_obj_f, r0, r1, r15); + generate_copy_longs(StubId::stubgen_copy_oop_b_id, IN_HEAP | IS_ARRAY, copy_obj_b, r0, r1, r15); - generate_copy_longs(StubGenStubId::copy_oop_uninit_f_id, IN_HEAP | IS_ARRAY | IS_DEST_UNINITIALIZED, copy_obj_uninit_f, r0, r1, r15); - generate_copy_longs(StubGenStubId::copy_oop_uninit_b_id, IN_HEAP | IS_ARRAY | IS_DEST_UNINITIALIZED, copy_obj_uninit_b, r0, r1, r15); + generate_copy_longs(StubId::stubgen_copy_oop_uninit_f_id, IN_HEAP | IS_ARRAY | IS_DEST_UNINITIALIZED, copy_obj_uninit_f, r0, r1, r15); + generate_copy_longs(StubId::stubgen_copy_oop_uninit_b_id, IN_HEAP | IS_ARRAY | IS_DEST_UNINITIALIZED, copy_obj_uninit_b, r0, r1, r15); StubRoutines::aarch64::_zero_blocks = generate_zero_blocks(); //*** jbyte // Always need aligned and unaligned versions - StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_copy(StubGenStubId::jbyte_disjoint_arraycopy_id, &entry); - StubRoutines::_jbyte_arraycopy = generate_conjoint_copy(StubGenStubId::jbyte_arraycopy_id, entry, &entry_jbyte_arraycopy); - StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_copy(StubGenStubId::arrayof_jbyte_disjoint_arraycopy_id, &entry); - StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_copy(StubGenStubId::arrayof_jbyte_arraycopy_id, entry, nullptr); + StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_copy(StubId::stubgen_jbyte_disjoint_arraycopy_id, &entry); + StubRoutines::_jbyte_arraycopy = generate_conjoint_copy(StubId::stubgen_jbyte_arraycopy_id, entry, &entry_jbyte_arraycopy); + StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_copy(StubId::stubgen_arrayof_jbyte_disjoint_arraycopy_id, &entry); + StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_copy(StubId::stubgen_arrayof_jbyte_arraycopy_id, entry, nullptr); //*** jshort // Always need aligned and unaligned versions - StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_copy(StubGenStubId::jshort_disjoint_arraycopy_id, &entry); - StubRoutines::_jshort_arraycopy = generate_conjoint_copy(StubGenStubId::jshort_arraycopy_id, entry, &entry_jshort_arraycopy); - StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_copy(StubGenStubId::arrayof_jshort_disjoint_arraycopy_id, &entry); - StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_copy(StubGenStubId::arrayof_jshort_arraycopy_id, entry, nullptr); + StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_copy(StubId::stubgen_jshort_disjoint_arraycopy_id, &entry); + StubRoutines::_jshort_arraycopy = generate_conjoint_copy(StubId::stubgen_jshort_arraycopy_id, entry, &entry_jshort_arraycopy); + StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_copy(StubId::stubgen_arrayof_jshort_disjoint_arraycopy_id, &entry); + StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_copy(StubId::stubgen_arrayof_jshort_arraycopy_id, entry, nullptr); 
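The shape of this rename is worth spelling out once, since the same transformation repeats across every port: stub ids that used to be bare enumerators of a per-generator enum (StubGenStubId, C1StubId) are now members of a single scoped enum, StubId, whose enumerators carry a prefix naming the owning generator (stubgen_, c1_). The sketch below illustrates the dispatch idiom that generate_disjoint_copy and generate_conjoint_copy rely on; it is a cut-down stand-in with four enumerators, not the real StubId, which is generated from the per-port stub declaration macros.

#include <cstddef>
#include <cstdint>

// Stand-in for the generated global enum; the real StubId covers every
// stub in the VM and qualifies each name with its owning generator.
enum class StubId {
  stubgen_jbyte_arraycopy_id,
  stubgen_arrayof_jbyte_arraycopy_id,
  stubgen_jshort_arraycopy_id,
  stubgen_arrayof_jshort_arraycopy_id,
};

struct CopyParams { size_t size; bool aligned; };

// Mirrors the switch at the top of generate_conjoint_copy: the id alone
// determines element size and alignment, so callers pass one value
// instead of threading separate size/aligned flags through.
static CopyParams params_for(StubId id) {
  switch (id) {
  case StubId::stubgen_jbyte_arraycopy_id:          return { sizeof(int8_t),  false };
  case StubId::stubgen_arrayof_jbyte_arraycopy_id:  return { sizeof(int8_t),  true  };
  case StubId::stubgen_jshort_arraycopy_id:         return { sizeof(int16_t), false };
  case StubId::stubgen_arrayof_jshort_arraycopy_id: return { sizeof(int16_t), true  };
  }
  return { 0, false }; // unreachable for valid ids
}

With scoped enumerators a call site reads params_for(StubId::stubgen_arrayof_jbyte_arraycopy_id), and an accidental mix-up between generator families no longer compiles, which is the point of the global enumeration.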
//*** jint // Aligned versions - StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_copy(StubGenStubId::arrayof_jint_disjoint_arraycopy_id, &entry); - StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_copy(StubGenStubId::arrayof_jint_arraycopy_id, entry, &entry_jint_arraycopy); + StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_copy(StubId::stubgen_arrayof_jint_disjoint_arraycopy_id, &entry); + StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_copy(StubId::stubgen_arrayof_jint_arraycopy_id, entry, &entry_jint_arraycopy); // In 64 bit we need both aligned and unaligned versions of jint arraycopy. // entry_jint_arraycopy always points to the unaligned version - StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_copy(StubGenStubId::jint_disjoint_arraycopy_id, &entry); - StubRoutines::_jint_arraycopy = generate_conjoint_copy(StubGenStubId::jint_arraycopy_id, entry, &entry_jint_arraycopy); + StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_copy(StubId::stubgen_jint_disjoint_arraycopy_id, &entry); + StubRoutines::_jint_arraycopy = generate_conjoint_copy(StubId::stubgen_jint_arraycopy_id, entry, &entry_jint_arraycopy); //*** jlong // It is always aligned - StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_copy(StubGenStubId::arrayof_jlong_disjoint_arraycopy_id, &entry); - StubRoutines::_arrayof_jlong_arraycopy = generate_conjoint_copy(StubGenStubId::arrayof_jlong_arraycopy_id, entry, &entry_jlong_arraycopy); + StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_copy(StubId::stubgen_arrayof_jlong_disjoint_arraycopy_id, &entry); + StubRoutines::_arrayof_jlong_arraycopy = generate_conjoint_copy(StubId::stubgen_arrayof_jlong_arraycopy_id, entry, &entry_jlong_arraycopy); StubRoutines::_jlong_disjoint_arraycopy = StubRoutines::_arrayof_jlong_disjoint_arraycopy; StubRoutines::_jlong_arraycopy = StubRoutines::_arrayof_jlong_arraycopy; @@ -2783,14 +2783,14 @@ class StubGenerator: public StubCodeGenerator { bool aligned = !UseCompressedOops; StubRoutines::_arrayof_oop_disjoint_arraycopy - = generate_disjoint_copy(StubGenStubId::arrayof_oop_disjoint_arraycopy_id, &entry); + = generate_disjoint_copy(StubId::stubgen_arrayof_oop_disjoint_arraycopy_id, &entry); StubRoutines::_arrayof_oop_arraycopy - = generate_conjoint_copy(StubGenStubId::arrayof_oop_arraycopy_id, entry, &entry_oop_arraycopy); + = generate_conjoint_copy(StubId::stubgen_arrayof_oop_arraycopy_id, entry, &entry_oop_arraycopy); // Aligned versions without pre-barriers StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit - = generate_disjoint_copy(StubGenStubId::arrayof_oop_disjoint_arraycopy_uninit_id, &entry); + = generate_disjoint_copy(StubId::stubgen_arrayof_oop_disjoint_arraycopy_uninit_id, &entry); StubRoutines::_arrayof_oop_arraycopy_uninit - = generate_conjoint_copy(StubGenStubId::arrayof_oop_arraycopy_uninit_id, entry, nullptr); + = generate_conjoint_copy(StubId::stubgen_arrayof_oop_arraycopy_uninit_id, entry, nullptr); } StubRoutines::_oop_disjoint_arraycopy = StubRoutines::_arrayof_oop_disjoint_arraycopy; @@ -2798,8 +2798,8 @@ class StubGenerator: public StubCodeGenerator { StubRoutines::_oop_disjoint_arraycopy_uninit = StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit; StubRoutines::_oop_arraycopy_uninit = StubRoutines::_arrayof_oop_arraycopy_uninit; - StubRoutines::_checkcast_arraycopy = generate_checkcast_copy(StubGenStubId::checkcast_arraycopy_id, &entry_checkcast_arraycopy); - StubRoutines::_checkcast_arraycopy_uninit = 
generate_checkcast_copy(StubGenStubId::checkcast_arraycopy_uninit_id, nullptr); + StubRoutines::_checkcast_arraycopy = generate_checkcast_copy(StubId::stubgen_checkcast_arraycopy_id, &entry_checkcast_arraycopy); + StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy(StubId::stubgen_checkcast_arraycopy_uninit_id, nullptr); StubRoutines::_unsafe_arraycopy = generate_unsafe_copy(entry_jbyte_arraycopy, entry_jshort_arraycopy, @@ -2813,12 +2813,12 @@ class StubGenerator: public StubCodeGenerator { entry_jlong_arraycopy, entry_checkcast_arraycopy); - StubRoutines::_jbyte_fill = generate_fill(StubGenStubId::jbyte_fill_id); - StubRoutines::_jshort_fill = generate_fill(StubGenStubId::jshort_fill_id); - StubRoutines::_jint_fill = generate_fill(StubGenStubId::jint_fill_id); - StubRoutines::_arrayof_jbyte_fill = generate_fill(StubGenStubId::arrayof_jbyte_fill_id); - StubRoutines::_arrayof_jshort_fill = generate_fill(StubGenStubId::arrayof_jshort_fill_id); - StubRoutines::_arrayof_jint_fill = generate_fill(StubGenStubId::arrayof_jint_fill_id); + StubRoutines::_jbyte_fill = generate_fill(StubId::stubgen_jbyte_fill_id); + StubRoutines::_jshort_fill = generate_fill(StubId::stubgen_jshort_fill_id); + StubRoutines::_jint_fill = generate_fill(StubId::stubgen_jint_fill_id); + StubRoutines::_arrayof_jbyte_fill = generate_fill(StubId::stubgen_arrayof_jbyte_fill_id); + StubRoutines::_arrayof_jshort_fill = generate_fill(StubId::stubgen_arrayof_jshort_fill_id); + StubRoutines::_arrayof_jint_fill = generate_fill(StubId::stubgen_arrayof_jint_fill_id); } void generate_math_stubs() { Unimplemented(); } @@ -2832,7 +2832,7 @@ class StubGenerator: public StubCodeGenerator { // address generate_aescrypt_encryptBlock() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::aescrypt_encryptBlock_id; + StubId stub_id = StubId::stubgen_aescrypt_encryptBlock_id; StubCodeMark mark(this, stub_id); const Register from = c_rarg0; // source array address @@ -2866,7 +2866,7 @@ class StubGenerator: public StubCodeGenerator { address generate_aescrypt_decryptBlock() { assert(UseAES, "need AES cryptographic extension support"); __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::aescrypt_decryptBlock_id; + StubId stub_id = StubId::stubgen_aescrypt_decryptBlock_id; StubCodeMark mark(this, stub_id); Label L_doLast; @@ -2905,7 +2905,7 @@ class StubGenerator: public StubCodeGenerator { address generate_cipherBlockChaining_encryptAESCrypt() { assert(UseAES, "need AES cryptographic extension support"); __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::cipherBlockChaining_encryptAESCrypt_id; + StubId stub_id = StubId::stubgen_cipherBlockChaining_encryptAESCrypt_id; StubCodeMark mark(this, stub_id); Label L_loadkeys_44, L_loadkeys_52, L_aes_loop, L_rounds_44, L_rounds_52; @@ -3010,7 +3010,7 @@ class StubGenerator: public StubCodeGenerator { address generate_cipherBlockChaining_decryptAESCrypt() { assert(UseAES, "need AES cryptographic extension support"); __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::cipherBlockChaining_decryptAESCrypt_id; + StubId stub_id = StubId::stubgen_cipherBlockChaining_decryptAESCrypt_id; StubCodeMark mark(this, stub_id); Label L_loadkeys_44, L_loadkeys_52, L_aes_loop, L_rounds_44, L_rounds_52; @@ -3197,7 +3197,7 @@ class StubGenerator: public StubCodeGenerator { // Wide bulk encryption of whole blocks. 
__ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::counterMode_AESCrypt_id; + StubId stub_id = StubId::stubgen_counterMode_AESCrypt_id; StubCodeMark mark(this, stub_id); const address start = __ pc(); __ enter(); @@ -3407,7 +3407,7 @@ class StubGenerator: public StubCodeGenerator { __ emit_int64(0x87); __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::galoisCounterMode_AESCrypt_id; + StubId stub_id = StubId::stubgen_galoisCounterMode_AESCrypt_id; StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); @@ -3617,13 +3617,13 @@ class StubGenerator: public StubCodeGenerator { // c_rarg2 - int offset // c_rarg3 - int limit // - address generate_md5_implCompress(StubGenStubId stub_id) { + address generate_md5_implCompress(StubId stub_id) { bool multi_block; switch (stub_id) { - case md5_implCompress_id: + case StubId::stubgen_md5_implCompress_id: multi_block = false; break; - case md5_implCompressMB_id: + case StubId::stubgen_md5_implCompressMB_id: multi_block = true; break; default: @@ -3770,13 +3770,13 @@ class StubGenerator: public StubCodeGenerator { // c_rarg2 - int offset // c_rarg3 - int limit // - address generate_sha1_implCompress(StubGenStubId stub_id) { + address generate_sha1_implCompress(StubId stub_id) { bool multi_block; switch (stub_id) { - case sha1_implCompress_id: + case StubId::stubgen_sha1_implCompress_id: multi_block = false; break; - case sha1_implCompressMB_id: + case StubId::stubgen_sha1_implCompressMB_id: multi_block = true; break; default: @@ -3875,13 +3875,13 @@ class StubGenerator: public StubCodeGenerator { // c_rarg2 - int offset // c_rarg3 - int limit // - address generate_sha256_implCompress(StubGenStubId stub_id) { + address generate_sha256_implCompress(StubId stub_id) { bool multi_block; switch (stub_id) { - case sha256_implCompress_id: + case StubId::stubgen_sha256_implCompress_id: multi_block = false; break; - case sha256_implCompressMB_id: + case StubId::stubgen_sha256_implCompressMB_id: multi_block = true; break; default: @@ -4031,13 +4031,13 @@ class StubGenerator: public StubCodeGenerator { // c_rarg2 - int offset // c_rarg3 - int limit // - address generate_sha512_implCompress(StubGenStubId stub_id) { + address generate_sha512_implCompress(StubId stub_id) { bool multi_block; switch (stub_id) { - case sha512_implCompress_id: + case StubId::stubgen_sha512_implCompress_id: multi_block = false; break; - case sha512_implCompressMB_id: + case StubId::stubgen_sha512_implCompressMB_id: multi_block = true; break; default: @@ -4281,13 +4281,13 @@ class StubGenerator: public StubCodeGenerator { // c_rarg3 - int offset // c_rarg4 - int limit // - address generate_sha3_implCompress(StubGenStubId stub_id) { + address generate_sha3_implCompress(StubId stub_id) { bool multi_block; switch (stub_id) { - case sha3_implCompress_id: + case StubId::stubgen_sha3_implCompress_id: multi_block = false; break; - case sha3_implCompressMB_id: + case StubId::stubgen_sha3_implCompressMB_id: multi_block = true; break; default: @@ -4567,7 +4567,7 @@ class StubGenerator: public StubCodeGenerator { __ emit_int64(0x0E0D0C0F0A09080BUL); __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::chacha20Block_id; + StubId stub_id = StubId::stubgen_chacha20Block_id; StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); @@ -5203,7 +5203,7 @@ class StubGenerator: public StubCodeGenerator { address generate_kyberNtt() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::kyberNtt_id; + StubId 
stub_id = StubId::stubgen_kyberNtt_id; StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); @@ -5441,7 +5441,7 @@ class StubGenerator: public StubCodeGenerator { address generate_kyberInverseNtt() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::kyberInverseNtt_id; + StubId stub_id = StubId::stubgen_kyberInverseNtt_id; StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); @@ -5728,7 +5728,7 @@ class StubGenerator: public StubCodeGenerator { address generate_kyberNttMult() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::kyberNttMult_id; + StubId stub_id = StubId::stubgen_kyberNttMult_id; StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); @@ -5845,7 +5845,7 @@ class StubGenerator: public StubCodeGenerator { address generate_kyberAddPoly_2() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::kyberAddPoly_2_id; + StubId stub_id = StubId::stubgen_kyberAddPoly_2_id; StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); @@ -5930,7 +5930,7 @@ class StubGenerator: public StubCodeGenerator { address generate_kyberAddPoly_3() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::kyberAddPoly_3_id; + StubId stub_id = StubId::stubgen_kyberAddPoly_3_id; StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); @@ -6038,7 +6038,7 @@ class StubGenerator: public StubCodeGenerator { __ emit_int64(0x0f000f000f000f00); __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::kyber12To16_id; + StubId stub_id = StubId::stubgen_kyber12To16_id; StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); @@ -6241,7 +6241,7 @@ class StubGenerator: public StubCodeGenerator { address generate_kyberBarrettReduce() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::kyberBarrettReduce_id; + StubId stub_id = StubId::stubgen_kyberBarrettReduce_id; StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); @@ -6488,7 +6488,7 @@ class StubGenerator: public StubCodeGenerator { address generate_dilithiumAlmostNtt() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::dilithiumAlmostNtt_id; + StubId stub_id = StubId::stubgen_dilithiumAlmostNtt_id; StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); @@ -6695,7 +6695,7 @@ class StubGenerator: public StubCodeGenerator { address generate_dilithiumAlmostInverseNtt() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::dilithiumAlmostInverseNtt_id; + StubId stub_id = StubId::stubgen_dilithiumAlmostInverseNtt_id; StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); @@ -6808,7 +6808,7 @@ class StubGenerator: public StubCodeGenerator { address generate_dilithiumNttMult() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::dilithiumNttMult_id; + StubId stub_id = StubId::stubgen_dilithiumNttMult_id; StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); @@ -6872,7 +6872,7 @@ class StubGenerator: public StubCodeGenerator { address generate_dilithiumMontMulByConstant() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::dilithiumMontMulByConstant_id; + StubId stub_id = StubId::stubgen_dilithiumMontMulByConstant_id; StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); @@ -6936,7 +6936,7 @@ class StubGenerator: public StubCodeGenerator { address generate_dilithiumDecomposePoly() { __ align(CodeEntryAlignment); - 
StubGenStubId stub_id = StubGenStubId::dilithiumDecomposePoly_id; + StubId stub_id = StubId::stubgen_dilithiumDecomposePoly_id; StubCodeMark mark(this, stub_id); address start = __ pc(); Label L_loop; @@ -7205,13 +7205,13 @@ class StubGenerator: public StubCodeGenerator { // c_rarg3 - int offset // c_rarg4 - int limit // - address generate_sha3_implCompress_gpr(StubGenStubId stub_id) { + address generate_sha3_implCompress_gpr(StubId stub_id) { bool multi_block; switch (stub_id) { - case sha3_implCompress_id: + case StubId::stubgen_sha3_implCompress_id: multi_block = false; break; - case sha3_implCompressMB_id: + case StubId::stubgen_sha3_implCompressMB_id: multi_block = true; break; default: @@ -7456,7 +7456,7 @@ class StubGenerator: public StubCodeGenerator { assert(UseCRC32Intrinsics, "what are we doing here?"); __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::updateBytesCRC32_id; + StubId stub_id = StubId::stubgen_updateBytesCRC32_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -7498,7 +7498,7 @@ class StubGenerator: public StubCodeGenerator { assert(UseCRC32CIntrinsics, "what are we doing here?"); __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::updateBytesCRC32C_id; + StubId stub_id = StubId::stubgen_updateBytesCRC32C_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -7537,7 +7537,7 @@ class StubGenerator: public StubCodeGenerator { */ address generate_updateBytesAdler32() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::updateBytesAdler32_id; + StubId stub_id = StubId::stubgen_updateBytesAdler32_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -7759,7 +7759,7 @@ class StubGenerator: public StubCodeGenerator { */ address generate_multiplyToLen() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::multiplyToLen_id; + StubId stub_id = StubId::stubgen_multiplyToLen_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -7792,7 +7792,7 @@ class StubGenerator: public StubCodeGenerator { // faster than multiply_to_len on some CPUs and slower on others, but // multiply_to_len shows a bit better overall results __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::squareToLen_id; + StubId stub_id = StubId::stubgen_squareToLen_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -7826,7 +7826,7 @@ class StubGenerator: public StubCodeGenerator { address generate_mulAdd() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::mulAdd_id; + StubId stub_id = StubId::stubgen_mulAdd_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -7857,7 +7857,7 @@ class StubGenerator: public StubCodeGenerator { // address generate_bigIntegerRightShift() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::bigIntegerRightShiftWorker_id; + StubId stub_id = StubId::stubgen_bigIntegerRightShiftWorker_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -7980,7 +7980,7 @@ class StubGenerator: public StubCodeGenerator { // address generate_bigIntegerLeftShift() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::bigIntegerLeftShiftWorker_id; + StubId stub_id = StubId::stubgen_bigIntegerLeftShiftWorker_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -8089,7 +8089,7 @@ class StubGenerator: public StubCodeGenerator { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::count_positives_id; + StubId stub_id = 
StubId::stubgen_count_positives_id; StubCodeMark mark(this, stub_id); address entry = __ pc(); @@ -8351,7 +8351,7 @@ class StubGenerator: public StubCodeGenerator { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::large_array_equals_id; + StubId stub_id = StubId::stubgen_large_array_equals_id; StubCodeMark mark(this, stub_id); address entry = __ pc(); @@ -8477,25 +8477,25 @@ class StubGenerator: public StubCodeGenerator { __ align(CodeEntryAlignment); - StubGenStubId stub_id; + StubId stub_id; switch (eltype) { case T_BOOLEAN: - stub_id = StubGenStubId::large_arrays_hashcode_boolean_id; + stub_id = StubId::stubgen_large_arrays_hashcode_boolean_id; break; case T_BYTE: - stub_id = StubGenStubId::large_arrays_hashcode_byte_id; + stub_id = StubId::stubgen_large_arrays_hashcode_byte_id; break; case T_CHAR: - stub_id = StubGenStubId::large_arrays_hashcode_char_id; + stub_id = StubId::stubgen_large_arrays_hashcode_char_id; break; case T_SHORT: - stub_id = StubGenStubId::large_arrays_hashcode_short_id; + stub_id = StubId::stubgen_large_arrays_hashcode_short_id; break; case T_INT: - stub_id = StubGenStubId::large_arrays_hashcode_int_id; + stub_id = StubId::stubgen_large_arrays_hashcode_int_id; break; default: - stub_id = StubGenStubId::NO_STUBID; + stub_id = StubId::NO_STUBID; ShouldNotReachHere(); }; @@ -8738,7 +8738,7 @@ class StubGenerator: public StubCodeGenerator { address generate_dsin_dcos(bool isCos) { __ align(CodeEntryAlignment); - StubGenStubId stub_id = (isCos ? StubGenStubId::dcos_id : StubGenStubId::dsin_id); + StubId stub_id = (isCos ? StubId::stubgen_dcos_id : StubId::stubgen_dsin_id); StubCodeMark mark(this, stub_id); address start = __ pc(); __ generate_dsin_dcos(isCos, (address)StubRoutines::aarch64::_npio2_hw, @@ -8790,7 +8790,7 @@ class StubGenerator: public StubCodeGenerator { // r11 = tmp2 address generate_compare_long_string_different_encoding(bool isLU) { __ align(CodeEntryAlignment); - StubGenStubId stub_id = (isLU ? StubGenStubId::compare_long_string_LU_id : StubGenStubId::compare_long_string_UL_id); + StubId stub_id = (isLU ? StubId::stubgen_compare_long_string_LU_id : StubId::stubgen_compare_long_string_UL_id); StubCodeMark mark(this, stub_id); address entry = __ pc(); Label SMALL_LOOP, TAIL, TAIL_LOAD_16, LOAD_LAST, DIFF1, DIFF2, @@ -8900,7 +8900,7 @@ class StubGenerator: public StubCodeGenerator { // v1 = temporary float register address generate_float16ToFloat() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::hf2f_id; + StubId stub_id = StubId::stubgen_hf2f_id; StubCodeMark mark(this, stub_id); address entry = __ pc(); BLOCK_COMMENT("Entry:"); @@ -8914,7 +8914,7 @@ class StubGenerator: public StubCodeGenerator { // v1 = temporary float register address generate_floatToFloat16() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::f2hf_id; + StubId stub_id = StubId::stubgen_f2hf_id; StubCodeMark mark(this, stub_id); address entry = __ pc(); BLOCK_COMMENT("Entry:"); @@ -8925,7 +8925,7 @@ class StubGenerator: public StubCodeGenerator { address generate_method_entry_barrier() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::method_entry_barrier_id; + StubId stub_id = StubId::stubgen_method_entry_barrier_id; StubCodeMark mark(this, stub_id); Label deoptimize_label; @@ -8991,7 +8991,7 @@ class StubGenerator: public StubCodeGenerator { // r11 = tmp2 address generate_compare_long_string_same_encoding(bool isLL) { __ align(CodeEntryAlignment); - StubGenStubId stub_id = (isLL ? 
StubGenStubId::compare_long_string_LL_id : StubGenStubId::compare_long_string_UU_id); + StubId stub_id = (isLL ? StubId::stubgen_compare_long_string_LL_id : StubId::stubgen_compare_long_string_UU_id); StubCodeMark mark(this, stub_id); address entry = __ pc(); Register result = r0, str1 = r1, cnt1 = r2, str2 = r3, cnt2 = r4, @@ -9122,12 +9122,12 @@ class StubGenerator: public StubCodeGenerator { // p0 = pgtmp1 // p1 = pgtmp2 address generate_compare_long_string_sve(string_compare_mode mode) { - StubGenStubId stub_id; + StubId stub_id; switch (mode) { - case LL: stub_id = StubGenStubId::compare_long_string_LL_id; break; - case LU: stub_id = StubGenStubId::compare_long_string_LU_id; break; - case UL: stub_id = StubGenStubId::compare_long_string_UL_id; break; - case UU: stub_id = StubGenStubId::compare_long_string_UU_id; break; + case LL: stub_id = StubId::stubgen_compare_long_string_LL_id; break; + case LU: stub_id = StubId::stubgen_compare_long_string_LU_id; break; + case UL: stub_id = StubId::stubgen_compare_long_string_UL_id; break; + case UU: stub_id = StubId::stubgen_compare_long_string_UU_id; break; default: ShouldNotReachHere(); } @@ -9258,18 +9258,18 @@ class StubGenerator: public StubCodeGenerator { // larger and a bit less readable, however, most of extra operations are // issued during loads or branches, so, penalty is minimal address generate_string_indexof_linear(bool str1_isL, bool str2_isL) { - StubGenStubId stub_id; + StubId stub_id; if (str1_isL) { if (str2_isL) { - stub_id = StubGenStubId::string_indexof_linear_ll_id; + stub_id = StubId::stubgen_string_indexof_linear_ll_id; } else { - stub_id = StubGenStubId::string_indexof_linear_ul_id; + stub_id = StubId::stubgen_string_indexof_linear_ul_id; } } else { if (str2_isL) { ShouldNotReachHere(); } else { - stub_id = StubGenStubId::string_indexof_linear_uu_id; + stub_id = StubId::stubgen_string_indexof_linear_uu_id; } } __ align(CodeEntryAlignment); @@ -9571,7 +9571,7 @@ class StubGenerator: public StubCodeGenerator { // Clobbers: r0, r1, r3, rscratch1, rflags, v0-v6 address generate_large_byte_array_inflate() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::large_byte_array_inflate_id; + StubId stub_id = StubId::stubgen_large_byte_array_inflate_id; StubCodeMark mark(this, stub_id); address entry = __ pc(); Label LOOP, LOOP_START, LOOP_PRFM, LOOP_PRFM_START, DONE; @@ -9637,7 +9637,7 @@ class StubGenerator: public StubCodeGenerator { // that) and keep the data in little-endian bit order through the // calculation, bit-reversing the inputs and outputs. 
- StubGenStubId stub_id = StubGenStubId::ghash_processBlocks_id; + StubId stub_id = StubId::stubgen_ghash_processBlocks_id; StubCodeMark mark(this, stub_id); __ align(wordSize * 2); address p = __ pc(); @@ -9704,7 +9704,7 @@ class StubGenerator: public StubCodeGenerator { address generate_ghash_processBlocks_wide() { address small = generate_ghash_processBlocks(); - StubGenStubId stub_id = StubGenStubId::ghash_processBlocks_wide_id; + StubId stub_id = StubId::stubgen_ghash_processBlocks_wide_id; StubCodeMark mark(this, stub_id); __ align(wordSize * 2); address p = __ pc(); @@ -9816,7 +9816,7 @@ class StubGenerator: public StubCodeGenerator { }; __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::base64_encodeBlock_id; + StubId stub_id = StubId::stubgen_base64_encodeBlock_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -10085,7 +10085,7 @@ class StubGenerator: public StubCodeGenerator { }; __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::base64_decodeBlock_id; + StubId stub_id = StubId::stubgen_base64_decodeBlock_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -10202,7 +10202,7 @@ class StubGenerator: public StubCodeGenerator { // Support for spin waits. address generate_spin_wait() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::spin_wait_id; + StubId stub_id = StubId::stubgen_spin_wait_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -10213,7 +10213,7 @@ class StubGenerator: public StubCodeGenerator { } void generate_lookup_secondary_supers_table_stub() { - StubGenStubId stub_id = StubGenStubId::lookup_secondary_supers_table_id; + StubId stub_id = StubId::stubgen_lookup_secondary_supers_table_id; StubCodeMark mark(this, stub_id); const Register @@ -10242,7 +10242,7 @@ class StubGenerator: public StubCodeGenerator { // Slow path implementation for UseSecondarySupersTable. 
address generate_lookup_secondary_supers_table_slow_path_stub() { - StubGenStubId stub_id = StubGenStubId::lookup_secondary_supers_table_slow_path_id; + StubId stub_id = StubId::stubgen_lookup_secondary_supers_table_slow_path_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -10397,7 +10397,7 @@ class StubGenerator: public StubCodeGenerator { return; } __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::atomic_entry_points_id; + StubId stub_id = StubId::stubgen_atomic_entry_points_id; StubCodeMark mark(this, stub_id); address first_entry = __ pc(); @@ -10557,7 +10557,7 @@ class StubGenerator: public StubCodeGenerator { address generate_cont_thaw() { if (!Continuations::enabled()) return nullptr; - StubGenStubId stub_id = StubGenStubId::cont_thaw_id; + StubId stub_id = StubId::stubgen_cont_thaw_id; StubCodeMark mark(this, stub_id); address start = __ pc(); generate_cont_thaw(Continuation::thaw_top); @@ -10568,7 +10568,7 @@ class StubGenerator: public StubCodeGenerator { if (!Continuations::enabled()) return nullptr; // TODO: will probably need multiple return barriers depending on return type - StubGenStubId stub_id = StubGenStubId::cont_returnBarrier_id; + StubId stub_id = StubId::stubgen_cont_returnBarrier_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -10580,7 +10580,7 @@ class StubGenerator: public StubCodeGenerator { address generate_cont_returnBarrier_exception() { if (!Continuations::enabled()) return nullptr; - StubGenStubId stub_id = StubGenStubId::cont_returnBarrierExc_id; + StubId stub_id = StubId::stubgen_cont_returnBarrierExc_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -10591,7 +10591,7 @@ class StubGenerator: public StubCodeGenerator { address generate_cont_preempt_stub() { if (!Continuations::enabled()) return nullptr; - StubGenStubId stub_id = StubGenStubId::cont_preempt_id; + StubId stub_id = StubId::stubgen_cont_preempt_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -10674,7 +10674,7 @@ class StubGenerator: public StubCodeGenerator { address generate_poly1305_processBlocks() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::poly1305_processBlocks_id; + StubId stub_id = StubId::stubgen_poly1305_processBlocks_id; StubCodeMark mark(this, stub_id); address start = __ pc(); Label here; @@ -10789,7 +10789,7 @@ class StubGenerator: public StubCodeGenerator { // exception handler for upcall stubs address generate_upcall_stub_exception_handler() { - StubGenStubId stub_id = StubGenStubId::upcall_stub_exception_handler_id; + StubId stub_id = StubId::stubgen_upcall_stub_exception_handler_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -10807,7 +10807,7 @@ class StubGenerator: public StubCodeGenerator { // j_rarg0 = jobject receiver // rmethod = result address generate_upcall_stub_load_target() { - StubGenStubId stub_id = StubGenStubId::upcall_stub_load_target_id; + StubId stub_id = StubId::stubgen_upcall_stub_load_target_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -11752,7 +11752,7 @@ class StubGenerator: public StubCodeGenerator { #if COMPILER2_OR_JVMCI if (UseSVE == 0) { - StubRoutines::aarch64::_vector_iota_indices = generate_iota_indices(StubGenStubId::vector_iota_indices_id); + StubRoutines::aarch64::_vector_iota_indices = generate_iota_indices(StubId::stubgen_vector_iota_indices_id); } // array equals stub for large arrays. 
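The digest wiring in the next hunk shows the other recurring pattern: one generator function serves a pair of ids, with the single-block and multi-block entry points distinguished only by the id passed in. A minimal sketch of that switch follows, using stand-in enumerators; the real generate_md5_implCompress and its SHA counterparts emit AArch64 assembly rather than returning a flag.

// Stand-in ids; the real enum pairs an X_id with an X_MB_id for each
// digest that has single-block and multi-block intrinsics.
enum class StubId {
  stubgen_md5_implCompress_id,
  stubgen_md5_implCompressMB_id,
};

// One generator serves both entry points; only the decision to loop
// over multiple input blocks differs, as in generate_md5_implCompress.
static bool is_multi_block(StubId id) {
  switch (id) {
  case StubId::stubgen_md5_implCompress_id:   return false;
  case StubId::stubgen_md5_implCompressMB_id: return true;
  }
  return false; // unreachable for valid ids
}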
@@ -11796,14 +11796,14 @@ class StubGenerator: public StubCodeGenerator { } if (UseMontgomeryMultiplyIntrinsic) { - StubGenStubId stub_id = StubGenStubId::montgomeryMultiply_id; + StubId stub_id = StubId::stubgen_montgomeryMultiply_id; StubCodeMark mark(this, stub_id); MontgomeryMultiplyGenerator g(_masm, /*squaring*/false); StubRoutines::_montgomeryMultiply = g.generate_multiply(); } if (UseMontgomerySquareIntrinsic) { - StubGenStubId stub_id = StubGenStubId::montgomerySquare_id; + StubId stub_id = StubId::stubgen_montgomerySquare_id; StubCodeMark mark(this, stub_id); MontgomeryMultiplyGenerator g(_masm, /*squaring*/true); // We use generate_multiply() rather than generate_square() @@ -11860,30 +11860,30 @@ class StubGenerator: public StubCodeGenerator { } if (UseMD5Intrinsics) { - StubRoutines::_md5_implCompress = generate_md5_implCompress(StubGenStubId::md5_implCompress_id); - StubRoutines::_md5_implCompressMB = generate_md5_implCompress(StubGenStubId::md5_implCompressMB_id); + StubRoutines::_md5_implCompress = generate_md5_implCompress(StubId::stubgen_md5_implCompress_id); + StubRoutines::_md5_implCompressMB = generate_md5_implCompress(StubId::stubgen_md5_implCompressMB_id); } if (UseSHA1Intrinsics) { - StubRoutines::_sha1_implCompress = generate_sha1_implCompress(StubGenStubId::sha1_implCompress_id); - StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(StubGenStubId::sha1_implCompressMB_id); + StubRoutines::_sha1_implCompress = generate_sha1_implCompress(StubId::stubgen_sha1_implCompress_id); + StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(StubId::stubgen_sha1_implCompressMB_id); } if (UseSHA256Intrinsics) { - StubRoutines::_sha256_implCompress = generate_sha256_implCompress(StubGenStubId::sha256_implCompress_id); - StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(StubGenStubId::sha256_implCompressMB_id); + StubRoutines::_sha256_implCompress = generate_sha256_implCompress(StubId::stubgen_sha256_implCompress_id); + StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(StubId::stubgen_sha256_implCompressMB_id); } if (UseSHA512Intrinsics) { - StubRoutines::_sha512_implCompress = generate_sha512_implCompress(StubGenStubId::sha512_implCompress_id); - StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(StubGenStubId::sha512_implCompressMB_id); + StubRoutines::_sha512_implCompress = generate_sha512_implCompress(StubId::stubgen_sha512_implCompress_id); + StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(StubId::stubgen_sha512_implCompressMB_id); } if (UseSHA3Intrinsics) { StubRoutines::_double_keccak = generate_double_keccak(); if (UseSIMDForSHA3Intrinsic) { - StubRoutines::_sha3_implCompress = generate_sha3_implCompress(StubGenStubId::sha3_implCompress_id); - StubRoutines::_sha3_implCompressMB = generate_sha3_implCompress(StubGenStubId::sha3_implCompressMB_id); + StubRoutines::_sha3_implCompress = generate_sha3_implCompress(StubId::stubgen_sha3_implCompress_id); + StubRoutines::_sha3_implCompressMB = generate_sha3_implCompress(StubId::stubgen_sha3_implCompressMB_id); } else { - StubRoutines::_sha3_implCompress = generate_sha3_implCompress_gpr(StubGenStubId::sha3_implCompress_id); - StubRoutines::_sha3_implCompressMB = generate_sha3_implCompress_gpr(StubGenStubId::sha3_implCompressMB_id); + StubRoutines::_sha3_implCompress = generate_sha3_implCompress_gpr(StubId::stubgen_sha3_implCompress_id); + StubRoutines::_sha3_implCompressMB = 
generate_sha3_implCompress_gpr(StubId::stubgen_sha3_implCompressMB_id); } } @@ -11900,31 +11900,31 @@ class StubGenerator: public StubCodeGenerator { } public: - StubGenerator(CodeBuffer* code, StubGenBlobId blob_id) : StubCodeGenerator(code, blob_id) { + StubGenerator(CodeBuffer* code, BlobId blob_id) : StubCodeGenerator(code, blob_id) { switch(blob_id) { - case preuniverse_id: + case BlobId::stubgen_preuniverse_id: generate_preuniverse_stubs(); break; - case initial_id: + case BlobId::stubgen_initial_id: generate_initial_stubs(); break; - case continuation_id: + case BlobId::stubgen_continuation_id: generate_continuation_stubs(); break; - case compiler_id: + case BlobId::stubgen_compiler_id: generate_compiler_stubs(); break; - case final_id: + case BlobId::stubgen_final_id: generate_final_stubs(); break; default: - fatal("unexpected blob id: %d", blob_id); + fatal("unexpected blob id: %s", StubInfo::name(blob_id)); break; }; } }; // end class declaration -void StubGenerator_generate(CodeBuffer* code, StubGenBlobId blob_id) { +void StubGenerator_generate(CodeBuffer* code, BlobId blob_id) { StubGenerator g(code, blob_id); } diff --git a/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp b/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp index 5683bc59d5c..efdc190f09a 100644 --- a/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp +++ b/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp @@ -45,7 +45,7 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); ce->store_parameter(_bci, 0); ce->store_parameter(_method->as_constant_ptr()->as_metadata(), 1); - __ call(Runtime1::entry_for(C1StubId::counter_overflow_id), relocInfo::runtime_call_type); + __ call(Runtime1::entry_for(StubId::c1_counter_overflow_id), relocInfo::runtime_call_type); ce->add_call_info_here(_info); ce->verify_oop_map(_info); @@ -56,7 +56,7 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); if (_info->deoptimize_on_exception()) { - __ call(Runtime1::entry_for(C1StubId::predicate_failed_trap_id), relocInfo::runtime_call_type); + __ call(Runtime1::entry_for(StubId::c1_predicate_failed_trap_id), relocInfo::runtime_call_type); ce->add_call_info_here(_info); ce->verify_oop_map(_info); DEBUG_ONLY(__ should_not_reach_here()); @@ -72,10 +72,10 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) { } if (_throw_index_out_of_bounds_exception) { - __ call(Runtime1::entry_for(C1StubId::throw_index_exception_id), relocInfo::runtime_call_type); + __ call(Runtime1::entry_for(StubId::c1_throw_index_exception_id), relocInfo::runtime_call_type); } else { __ str(_array->as_pointer_register(), Address(SP, BytesPerWord)); // ??? Correct offset? Correct instruction? 
- __ call(Runtime1::entry_for(C1StubId::throw_range_check_failed_id), relocInfo::runtime_call_type); + __ call(Runtime1::entry_for(StubId::c1_throw_range_check_failed_id), relocInfo::runtime_call_type); } ce->add_call_info_here(_info); ce->verify_oop_map(_info); @@ -88,7 +88,7 @@ PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) { void PredicateFailedStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); - __ call(Runtime1::entry_for(C1StubId::predicate_failed_trap_id), relocInfo::runtime_call_type); + __ call(Runtime1::entry_for(StubId::c1_predicate_failed_trap_id), relocInfo::runtime_call_type); ce->add_call_info_here(_info); ce->verify_oop_map(_info); DEBUG_ONLY(__ should_not_reach_here()); @@ -99,7 +99,7 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) { ce->compilation()->implicit_exception_table()->append(_offset, __ offset()); } __ bind(_entry); - __ call(Runtime1::entry_for(C1StubId::throw_div0_exception_id), + __ call(Runtime1::entry_for(StubId::c1_throw_div0_exception_id), relocInfo::runtime_call_type); ce->add_call_info_here(_info); DEBUG_ONLY(STOP("DivByZero");) @@ -108,14 +108,14 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) { // Implementation of NewInstanceStub -NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, C1StubId stub_id) { +NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, StubId stub_id) { _result = result; _klass = klass; _klass_reg = klass_reg; _info = new CodeEmitInfo(info); - assert(stub_id == C1StubId::new_instance_id || - stub_id == C1StubId::fast_new_instance_id || - stub_id == C1StubId::fast_new_instance_init_check_id, + assert(stub_id == StubId::c1_new_instance_id || + stub_id == StubId::c1_fast_new_instance_id || + stub_id == StubId::c1_fast_new_instance_init_check_id, "need new_instance id"); _stub_id = stub_id; } @@ -147,7 +147,7 @@ void NewTypeArrayStub::emit_code(LIR_Assembler* ce) { assert(_klass_reg->as_register() == R1, "runtime call setup"); assert(_length->as_register() == R2, "runtime call setup"); __ bind(_entry); - __ call(Runtime1::entry_for(C1StubId::new_type_array_id), relocInfo::runtime_call_type); + __ call(Runtime1::entry_for(StubId::c1_new_type_array_id), relocInfo::runtime_call_type); ce->add_call_info_here(_info); ce->verify_oop_map(_info); __ b(_continuation); @@ -169,7 +169,7 @@ void NewObjectArrayStub::emit_code(LIR_Assembler* ce) { assert(_klass_reg->as_register() == R1, "runtime call setup"); assert(_length->as_register() == R2, "runtime call setup"); __ bind(_entry); - __ call(Runtime1::entry_for(C1StubId::new_object_array_id), relocInfo::runtime_call_type); + __ call(Runtime1::entry_for(StubId::c1_new_object_array_id), relocInfo::runtime_call_type); ce->add_call_info_here(_info); ce->verify_oop_map(_info); __ b(_continuation); @@ -188,9 +188,9 @@ void MonitorEnterStub::emit_code(LIR_Assembler* ce) { __ str(lock_reg, Address(SP, BytesPerWord)); } - C1StubId enter_id = ce->compilation()->has_fpu_code() ? - C1StubId::monitorenter_id : - C1StubId::monitorenter_nofpu_id; + StubId enter_id = ce->compilation()->has_fpu_code() ? 
+ StubId::c1_monitorenter_id : + StubId::c1_monitorenter_nofpu_id; __ call(Runtime1::entry_for(enter_id), relocInfo::runtime_call_type); ce->add_call_info_here(_info); ce->verify_oop_map(_info); @@ -209,9 +209,9 @@ void MonitorExitStub::emit_code(LIR_Assembler* ce) { __ str(lock_reg, Address(SP)); // Non-blocking leaf routine - no call info needed - C1StubId exit_id = ce->compilation()->has_fpu_code() ? - C1StubId::monitorexit_id : - C1StubId::monitorexit_nofpu_id; + StubId exit_id = ce->compilation()->has_fpu_code() ? + StubId::c1_monitorexit_id : + StubId::c1_monitorexit_nofpu_id; __ call(Runtime1::entry_for(exit_id), relocInfo::runtime_call_type); __ b(_continuation); } @@ -321,10 +321,10 @@ void PatchingStub::emit_code(LIR_Assembler* ce) { address target = nullptr; relocInfo::relocType reloc_type = relocInfo::none; switch (_id) { - case access_field_id: target = Runtime1::entry_for(C1StubId::access_field_patching_id); break; - case load_klass_id: target = Runtime1::entry_for(C1StubId::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break; - case load_mirror_id: target = Runtime1::entry_for(C1StubId::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break; - case load_appendix_id: target = Runtime1::entry_for(C1StubId::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break; + case access_field_id: target = Runtime1::entry_for(StubId::c1_access_field_patching_id); break; + case load_klass_id: target = Runtime1::entry_for(StubId::c1_load_klass_patching_id); reloc_type = relocInfo::metadata_type; break; + case load_mirror_id: target = Runtime1::entry_for(StubId::c1_load_mirror_patching_id); reloc_type = relocInfo::oop_type; break; + case load_appendix_id: target = Runtime1::entry_for(StubId::c1_load_appendix_patching_id); reloc_type = relocInfo::oop_type; break; default: ShouldNotReachHere(); } __ bind(call_patch); @@ -350,7 +350,7 @@ void DeoptimizeStub::emit_code(LIR_Assembler* ce) { __ mov_slow(Rtemp, _trap_request); ce->verify_reserved_argument_area_size(1); __ str(Rtemp, Address(SP)); - __ call(Runtime1::entry_for(C1StubId::deoptimize_id), relocInfo::runtime_call_type); + __ call(Runtime1::entry_for(StubId::c1_deoptimize_id), relocInfo::runtime_call_type); ce->add_call_info_here(_info); DEBUG_ONLY(__ should_not_reach_here()); } @@ -361,9 +361,9 @@ void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) { if (_info->deoptimize_on_exception()) { // Deoptimize, do not throw the exception, because it is // probably wrong to do it here. 
- a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id); + a = Runtime1::entry_for(StubId::c1_predicate_failed_trap_id); } else { - a = Runtime1::entry_for(C1StubId::throw_null_pointer_exception_id); + a = Runtime1::entry_for(StubId::c1_throw_null_pointer_exception_id); } ce->compilation()->implicit_exception_table()->append(_offset, __ offset()); __ bind(_entry); diff --git a/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp b/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp index 66c7b916f1f..0367f349d65 100644 --- a/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp +++ b/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp @@ -212,7 +212,7 @@ int LIR_Assembler::emit_exception_handler() { // check that there is really an exception __ verify_not_null_oop(Rexception_obj); - __ call(Runtime1::entry_for(C1StubId::handle_exception_from_callee_id), relocInfo::runtime_call_type); + __ call(Runtime1::entry_for(StubId::c1_handle_exception_from_callee_id), relocInfo::runtime_call_type); __ should_not_reach_here(); assert(code_offset() - offset <= exception_handler_size(), "overflow"); @@ -252,7 +252,7 @@ int LIR_Assembler::emit_unwind_handler() { // remove the activation and dispatch to the unwind handler __ remove_frame(initial_frame_size_in_bytes()); // restores FP and LR - __ jump(Runtime1::entry_for(C1StubId::unwind_exception_id), relocInfo::runtime_call_type, Rtemp); + __ jump(Runtime1::entry_for(StubId::c1_unwind_exception_id), relocInfo::runtime_call_type, Rtemp); // Emit the slow path assembly if (stub != nullptr) { @@ -1136,7 +1136,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { __ b(*failure_target, ne); // slow case assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup"); - __ call(Runtime1::entry_for(C1StubId::slow_subtype_check_id), relocInfo::runtime_call_type); + __ call(Runtime1::entry_for(StubId::c1_slow_subtype_check_id), relocInfo::runtime_call_type); __ cbz(R0, *failure_target); if (op->should_profile()) { Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtemp; @@ -1210,7 +1210,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { __ cmp(Rtemp, k_RInfo, ne); __ b(*success_target, eq); assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup"); - __ call(Runtime1::entry_for(C1StubId::slow_subtype_check_id), relocInfo::runtime_call_type); + __ call(Runtime1::entry_for(StubId::c1_slow_subtype_check_id), relocInfo::runtime_call_type); __ cbz(R0, *failure_target); } } else { @@ -1227,7 +1227,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { __ b(*failure_target, ne); // slow case assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup"); - __ call(Runtime1::entry_for(C1StubId::slow_subtype_check_id), relocInfo::runtime_call_type); + __ call(Runtime1::entry_for(StubId::c1_slow_subtype_check_id), relocInfo::runtime_call_type); __ cbz(R0, *failure_target); } @@ -1303,7 +1303,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { } __ b(*success_target, eq); assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup"); - __ call(Runtime1::entry_for(C1StubId::slow_subtype_check_id), relocInfo::runtime_call_type); + __ call(Runtime1::entry_for(StubId::c1_slow_subtype_check_id), relocInfo::runtime_call_type); if (!op->should_profile()) { move_regs(R0, res); } else { @@ -1334,7 +1334,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { __ b(*failure_target, ne); // slow case assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup"); - __ call(Runtime1::entry_for(C1StubId::slow_subtype_check_id), 
relocInfo::runtime_call_type); + __ call(Runtime1::entry_for(StubId::c1_slow_subtype_check_id), relocInfo::runtime_call_type); if (!op->should_profile()) { move_regs(R0, res); } @@ -1981,9 +1981,9 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit assert(exceptionPC->as_register() == Rexception_pc, "must match"); info->add_register_oop(exceptionOop); - C1StubId handle_id = compilation()->has_fpu_code() ? - C1StubId::handle_exception_id : - C1StubId::handle_exception_nofpu_id; + StubId handle_id = compilation()->has_fpu_code() ? + StubId::c1_handle_exception_id : + StubId::c1_handle_exception_nofpu_id; Label return_address; __ adr(Rexception_pc, return_address); __ call(Runtime1::entry_for(handle_id), relocInfo::runtime_call_type); @@ -2260,7 +2260,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { __ mov(altFP_7_11, R1); __ mov(R0, tmp); __ mov(R1, tmp2); - __ call(Runtime1::entry_for(C1StubId::slow_subtype_check_id), relocInfo::runtime_call_type); // does not blow any registers except R0, LR and Rtemp + __ call(Runtime1::entry_for(StubId::c1_slow_subtype_check_id), relocInfo::runtime_call_type); // does not blow any registers except R0, LR and Rtemp __ cmp_32(R0, 0); __ mov(R0, R6); __ mov(R1, altFP_7_11); diff --git a/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp b/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp index ef5691f84a3..c8a3f79960b 100644 --- a/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp +++ b/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp @@ -1057,7 +1057,7 @@ void LIRGenerator::do_NewMultiArray(NewMultiArray* x) { args->append(rank); args->append(varargs); LIR_Opr reg = result_register_for(x->type()); - __ call_runtime(Runtime1::entry_for(C1StubId::new_multi_array_id), + __ call_runtime(Runtime1::entry_for(StubId::c1_new_multi_array_id), LIR_OprFact::illegalOpr, reg, args, info); LIR_Opr result = rlock_result(x); @@ -1086,7 +1086,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) { CodeStub* stub; if (x->is_incompatible_class_change_check()) { assert(patching_info == nullptr, "can't patch this"); - stub = new SimpleExceptionStub(C1StubId::throw_incompatible_class_change_error_id, + stub = new SimpleExceptionStub(StubId::c1_throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception); } else if (x->is_invokespecial_receiver_check()) { assert(patching_info == nullptr, "can't patch this"); @@ -1094,7 +1094,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) { Deoptimization::Reason_class_check, Deoptimization::Action_none); } else { - stub = new SimpleExceptionStub(C1StubId::throw_class_cast_exception_id, + stub = new SimpleExceptionStub(StubId::c1_throw_class_cast_exception_id, LIR_OprFact::illegalOpr, info_for_exception); } diff --git a/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp b/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp index 021b47148fa..32da2c24d26 100644 --- a/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp +++ b/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp @@ -64,7 +64,7 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre reset_last_Java_frame(Rtemp); assert(frame_size() != no_frame_size, "frame must be fixed"); - if (_stub_id != (int)C1StubId::forward_exception_id) { + if (_stub_id != (int)StubId::c1_forward_exception_id) { ldr(R3, Address(Rthread, Thread::pending_exception_offset())); } @@ -80,10 +80,10 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre // Check for pending exception // unpack_with_exception_in_tls path is taken through // 
Runtime1::exception_handler_for_pc - if (_stub_id != (int)C1StubId::forward_exception_id) { + if (_stub_id != (int)StubId::c1_forward_exception_id) { assert(frame_size() != no_frame_size, "cannot directly call forward_exception_id"); cmp(R3, 0); - jump(Runtime1::entry_for(C1StubId::forward_exception_id), relocInfo::runtime_call_type, Rtemp, ne); + jump(Runtime1::entry_for(StubId::c1_forward_exception_id), relocInfo::runtime_call_type, Rtemp, ne); } else { #ifdef ASSERT // Should not have pending exception in forward_exception stub @@ -283,7 +283,7 @@ static void restore_sp_for_method_handle(StubAssembler* sasm) { } -OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler* sasm) { +OopMapSet* Runtime1::generate_handle_exception(StubId id, StubAssembler* sasm) { __ block_comment("generate_handle_exception"); bool save_fpu_registers = false; @@ -293,7 +293,7 @@ OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler* sasm) OopMap* oop_map = nullptr; switch (id) { - case C1StubId::forward_exception_id: { + case StubId::c1_forward_exception_id: { save_fpu_registers = HaveVFP; oop_map = generate_oop_map(sasm); __ ldr(Rexception_obj, Address(Rthread, Thread::pending_exception_offset())); @@ -302,14 +302,14 @@ OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler* sasm) __ str(zero, Address(Rthread, Thread::pending_exception_offset())); break; } - case C1StubId::handle_exception_id: + case StubId::c1_handle_exception_id: save_fpu_registers = HaveVFP; // fall-through - case C1StubId::handle_exception_nofpu_id: + case StubId::c1_handle_exception_nofpu_id: // At this point all registers MAY be live. oop_map = save_live_registers(sasm, save_fpu_registers); break; - case C1StubId::handle_exception_from_callee_id: + case StubId::c1_handle_exception_from_callee_id: // At this point all registers except exception oop (R4/R19) and // exception pc (R5/R20) are dead. oop_map = save_live_registers(sasm); // TODO it's not required to save all registers @@ -331,13 +331,13 @@ OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler* sasm) // Restore the registers that were saved at the beginning, remove // frame and jump to the exception handler. 
switch (id) { - case C1StubId::forward_exception_id: - case C1StubId::handle_exception_nofpu_id: - case C1StubId::handle_exception_id: + case StubId::c1_forward_exception_id: + case StubId::c1_handle_exception_nofpu_id: + case StubId::c1_handle_exception_id: restore_live_registers(sasm, save_fpu_registers); // Note: the restore live registers includes the jump to LR (patched to R0) break; - case C1StubId::handle_exception_from_callee_id: + case StubId::c1_handle_exception_from_callee_id: restore_live_registers_without_return(sasm); // must not jump immediately to handler restore_sp_for_method_handle(sasm); __ ret(); @@ -406,7 +406,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) { } -OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { +OopMapSet* Runtime1::generate_code_for(StubId id, StubAssembler* sasm) { const bool must_gc_arguments = true; const bool dont_gc_arguments = false; @@ -414,16 +414,16 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { bool save_fpu_registers = HaveVFP; switch (id) { - case C1StubId::forward_exception_id: + case StubId::c1_forward_exception_id: { oop_maps = generate_handle_exception(id, sasm); // does not return on ARM } break; - case C1StubId::new_instance_id: - case C1StubId::fast_new_instance_id: - case C1StubId::fast_new_instance_init_check_id: + case StubId::c1_new_instance_id: + case StubId::c1_fast_new_instance_id: + case StubId::c1_fast_new_instance_init_check_id: { const Register result = R0; const Register klass = R1; @@ -439,7 +439,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::counter_overflow_id: + case StubId::c1_counter_overflow_id: { OopMap* oop_map = save_live_registers(sasm); __ ldr(R1, Address(SP, arg1_offset)); @@ -451,10 +451,10 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::new_type_array_id: - case C1StubId::new_object_array_id: + case StubId::c1_new_type_array_id: + case StubId::c1_new_object_array_id: { - if (id == C1StubId::new_type_array_id) { + if (id == StubId::c1_new_type_array_id) { __ set_info("new_type_array", dont_gc_arguments); } else { __ set_info("new_object_array", dont_gc_arguments); @@ -466,7 +466,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { OopMap* map = save_live_registers(sasm); int call_offset; - if (id == C1StubId::new_type_array_id) { + if (id == StubId::c1_new_type_array_id) { call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length); } else { call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length); @@ -480,7 +480,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::new_multi_array_id: + case StubId::c1_new_multi_array_id: { __ set_info("new_multi_array", dont_gc_arguments); @@ -503,7 +503,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::register_finalizer_id: + case StubId::c1_register_finalizer_id: { __ set_info("register_finalizer", dont_gc_arguments); @@ -524,78 +524,78 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::throw_range_check_failed_id: + case StubId::c1_throw_range_check_failed_id: { __ set_info("range_check_failed", dont_gc_arguments); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, 
throw_range_check_exception), true); } break; - case C1StubId::throw_index_exception_id: + case StubId::c1_throw_index_exception_id: { __ set_info("index_range_check_failed", dont_gc_arguments); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true); } break; - case C1StubId::throw_div0_exception_id: + case StubId::c1_throw_div0_exception_id: { __ set_info("throw_div0_exception", dont_gc_arguments); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false); } break; - case C1StubId::throw_null_pointer_exception_id: + case StubId::c1_throw_null_pointer_exception_id: { __ set_info("throw_null_pointer_exception", dont_gc_arguments); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false); } break; - case C1StubId::handle_exception_nofpu_id: - case C1StubId::handle_exception_id: + case StubId::c1_handle_exception_nofpu_id: + case StubId::c1_handle_exception_id: { __ set_info("handle_exception", dont_gc_arguments); oop_maps = generate_handle_exception(id, sasm); } break; - case C1StubId::handle_exception_from_callee_id: + case StubId::c1_handle_exception_from_callee_id: { __ set_info("handle_exception_from_callee", dont_gc_arguments); oop_maps = generate_handle_exception(id, sasm); } break; - case C1StubId::unwind_exception_id: + case StubId::c1_unwind_exception_id: { __ set_info("unwind_exception", dont_gc_arguments); generate_unwind_exception(sasm); } break; - case C1StubId::throw_array_store_exception_id: + case StubId::c1_throw_array_store_exception_id: { __ set_info("throw_array_store_exception", dont_gc_arguments); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true); } break; - case C1StubId::throw_class_cast_exception_id: + case StubId::c1_throw_class_cast_exception_id: { __ set_info("throw_class_cast_exception", dont_gc_arguments); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true); } break; - case C1StubId::throw_incompatible_class_change_error_id: + case StubId::c1_throw_incompatible_class_change_error_id: { __ set_info("throw_incompatible_class_cast_exception", dont_gc_arguments); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false); } break; - case C1StubId::slow_subtype_check_id: + case StubId::c1_slow_subtype_check_id: { // (in) R0 - sub, destroyed, // (in) R1 - super, not changed @@ -628,10 +628,10 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::monitorenter_nofpu_id: + case StubId::c1_monitorenter_nofpu_id: save_fpu_registers = false; // fall through - case C1StubId::monitorenter_id: + case StubId::c1_monitorenter_id: { __ set_info("monitorenter", dont_gc_arguments); const Register obj = R1; @@ -646,10 +646,10 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::monitorexit_nofpu_id: + case StubId::c1_monitorexit_nofpu_id: save_fpu_registers = false; // fall through - case C1StubId::monitorexit_id: + case StubId::c1_monitorexit_id: { __ set_info("monitorexit", dont_gc_arguments); const Register lock = R1; @@ -662,7 +662,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::deoptimize_id: + case StubId::c1_deoptimize_id: { __ set_info("deoptimize", dont_gc_arguments); OopMap* oop_map = save_live_registers(sasm); @@ -678,35 +678,35 
@@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::access_field_patching_id: + case StubId::c1_access_field_patching_id: { __ set_info("access_field_patching", dont_gc_arguments); oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching)); } break; - case C1StubId::load_klass_patching_id: + case StubId::c1_load_klass_patching_id: { __ set_info("load_klass_patching", dont_gc_arguments); oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching)); } break; - case C1StubId::load_appendix_patching_id: + case StubId::c1_load_appendix_patching_id: { __ set_info("load_appendix_patching", dont_gc_arguments); oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching)); } break; - case C1StubId::load_mirror_patching_id: + case StubId::c1_load_mirror_patching_id: { __ set_info("load_mirror_patching", dont_gc_arguments); oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching)); } break; - case C1StubId::predicate_failed_trap_id: + case StubId::c1_predicate_failed_trap_id: { __ set_info("predicate_failed_trap", dont_gc_arguments); diff --git a/src/hotspot/cpu/arm/runtime_arm.cpp b/src/hotspot/cpu/arm/runtime_arm.cpp index 615a63eac19..ea4c9a76381 100644 --- a/src/hotspot/cpu/arm/runtime_arm.cpp +++ b/src/hotspot/cpu/arm/runtime_arm.cpp @@ -47,7 +47,7 @@ UncommonTrapBlob* OptoRuntime::generate_uncommon_trap_blob() { ResourceMark rm; // setup code generation tools - const char* name = OptoRuntime::stub_name(OptoStubId::uncommon_trap_id); + const char* name = OptoRuntime::stub_name(StubId::c2_uncommon_trap_id); #ifdef _LP64 CodeBuffer buffer(name, 2700, 512); #else @@ -210,7 +210,7 @@ ExceptionBlob* OptoRuntime::generate_exception_blob() { // setup code generation tools // Measured 8/7/03 at 256 in 32bit debug build - const char* name = OptoRuntime::stub_name(OptoStubId::exception_id); + const char* name = OptoRuntime::stub_name(StubId::c2_exception_id); CodeBuffer buffer(name, 600, 512); if (buffer.blob() == nullptr) { return nullptr; diff --git a/src/hotspot/cpu/arm/sharedRuntime_arm.cpp b/src/hotspot/cpu/arm/sharedRuntime_arm.cpp index 8ba847e7e32..9519c923e22 100644 --- a/src/hotspot/cpu/arm/sharedRuntime_arm.cpp +++ b/src/hotspot/cpu/arm/sharedRuntime_arm.cpp @@ -1366,7 +1366,7 @@ VMReg SharedRuntime::thread_register() { //------------------------------generate_deopt_blob---------------------------- void SharedRuntime::generate_deopt_blob() { ResourceMark rm; - const char* name = SharedRuntime::stub_name(SharedStubId::deopt_id); + const char* name = SharedRuntime::stub_name(StubId::shared_deopt_id); CodeBuffer buffer(name, 1024, 1024); int frame_size_in_words; OopMapSet* oop_maps; @@ -1608,7 +1608,7 @@ void SharedRuntime::generate_deopt_blob() { // setup oopmap, and calls safepoint code to stop the compiled code for // a safepoint. 
// -SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address call_ptr) { +SafepointBlob* SharedRuntime::generate_handler_blob(StubId id, address call_ptr) { assert(StubRoutines::forward_exception_entry() != nullptr, "must be generated before"); assert(is_polling_page_id(id), "expected a polling page stub id"); @@ -1618,7 +1618,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address cal int frame_size_words; OopMapSet* oop_maps; - bool cause_return = (id == SharedStubId::polling_page_return_handler_id); + bool cause_return = (id == StubId::shared_polling_page_return_handler_id); MacroAssembler* masm = new MacroAssembler(&buffer); address start = __ pc(); @@ -1680,7 +1680,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address cal return SafepointBlob::create(&buffer, oop_maps, frame_size_words); } -RuntimeStub* SharedRuntime::generate_resolve_blob(SharedStubId id, address destination) { +RuntimeStub* SharedRuntime::generate_resolve_blob(StubId id, address destination) { assert(StubRoutines::forward_exception_entry() != nullptr, "must be generated before"); assert(is_resolve_id(id), "expected a resolve stub id"); @@ -1744,7 +1744,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(SharedStubId id, address desti // Continuation point for throwing of implicit exceptions that are not handled in // the current activation. Fabricates an exception oop and initiates normal // exception dispatching in this frame. -RuntimeStub* SharedRuntime::generate_throw_exception(SharedStubId id, address runtime_entry) { +RuntimeStub* SharedRuntime::generate_throw_exception(StubId id, address runtime_entry) { assert(is_throw_id(id), "expected a throw stub id"); const char* name = SharedRuntime::stub_name(id); @@ -1808,7 +1808,7 @@ RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() { framesize // inclusive of return address }; - const char* name = SharedRuntime::stub_name(SharedStubId::jfr_write_checkpoint_id); + const char* name = SharedRuntime::stub_name(StubId::shared_jfr_write_checkpoint_id); CodeBuffer code(name, 512, 64); MacroAssembler* masm = new MacroAssembler(&code); @@ -1852,7 +1852,7 @@ RuntimeStub* SharedRuntime::generate_jfr_return_lease() { framesize // inclusive of return address }; - const char* name = SharedRuntime::stub_name(SharedStubId::jfr_return_lease_id); + const char* name = SharedRuntime::stub_name(StubId::shared_jfr_return_lease_id); CodeBuffer code(name, 512, 64); MacroAssembler* masm = new MacroAssembler(&code); diff --git a/src/hotspot/cpu/arm/stubDeclarations_arm.hpp b/src/hotspot/cpu/arm/stubDeclarations_arm.hpp index 93ab16791b5..c32afa75efc 100644 --- a/src/hotspot/cpu/arm/stubDeclarations_arm.hpp +++ b/src/hotspot/cpu/arm/stubDeclarations_arm.hpp @@ -45,7 +45,7 @@ do_arch_entry(Arm, initial, atomic_load_long, \ atomic_load_long_entry, atomic_load_long_entry) \ do_stub(initial, atomic_store_long) \ - do_arch_entry(Arm, initial, atomic_load_long, \ + do_arch_entry(Arm, initial, atomic_store_long, \ atomic_store_long_entry, atomic_store_long_entry) \ #define STUBGEN_CONTINUATION_BLOBS_ARCH_DO(do_stub, \ diff --git a/src/hotspot/cpu/arm/stubGenerator_arm.cpp b/src/hotspot/cpu/arm/stubGenerator_arm.cpp index 5f2fd2e3b6d..c101e26dace 100644 --- a/src/hotspot/cpu/arm/stubGenerator_arm.cpp +++ b/src/hotspot/cpu/arm/stubGenerator_arm.cpp @@ -172,7 +172,7 @@ class StubGenerator: public StubCodeGenerator { private: address generate_call_stub(address& return_address) { - StubGenStubId stub_id = 
StubGenStubId::call_stub_id; + StubId stub_id = StubId::stubgen_call_stub_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -252,7 +252,7 @@ class StubGenerator: public StubCodeGenerator { // (in) Rexception_obj: exception oop address generate_catch_exception() { - StubGenStubId stub_id = StubGenStubId::catch_exception_id; + StubId stub_id = StubId::stubgen_catch_exception_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -265,7 +265,7 @@ class StubGenerator: public StubCodeGenerator { // (in) Rexception_pc: return address address generate_forward_exception() { - StubGenStubId stub_id = StubGenStubId::forward_exception_id; + StubId stub_id = StubId::stubgen_forward_exception_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -315,7 +315,7 @@ class StubGenerator: public StubCodeGenerator { Register tmp = LR; assert(dividend == remainder, "must be"); - StubGenStubId stub_id = StubGenStubId::idiv_irem_id; + StubId stub_id = StubId::stubgen_idiv_irem_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -458,7 +458,7 @@ class StubGenerator: public StubCodeGenerator { address generate_atomic_add() { address start; - StubGenStubId stub_id = StubGenStubId::atomic_add_id; + StubId stub_id = StubId::stubgen_atomic_add_id; StubCodeMark mark(this, stub_id); Label retry; start = __ pc(); @@ -510,7 +510,7 @@ class StubGenerator: public StubCodeGenerator { address generate_atomic_xchg() { address start; - StubGenStubId stub_id = StubGenStubId::atomic_xchg_id; + StubId stub_id = StubId::stubgen_atomic_xchg_id; StubCodeMark mark(this, stub_id); start = __ pc(); Register newval = R0; @@ -561,7 +561,7 @@ class StubGenerator: public StubCodeGenerator { address generate_atomic_cmpxchg() { address start; - StubGenStubId stub_id = StubGenStubId::atomic_cmpxchg_id; + StubId stub_id = StubId::stubgen_atomic_cmpxchg_id; StubCodeMark mark(this, stub_id); start = __ pc(); Register cmp = R0; @@ -600,7 +600,7 @@ class StubGenerator: public StubCodeGenerator { address generate_atomic_cmpxchg_long() { address start; - StubGenStubId stub_id = StubGenStubId::atomic_cmpxchg_long_id; + StubId stub_id = StubId::stubgen_atomic_cmpxchg_long_id; StubCodeMark mark(this, stub_id); start = __ pc(); Register cmp_lo = R0; @@ -638,7 +638,7 @@ class StubGenerator: public StubCodeGenerator { address generate_atomic_load_long() { address start; - StubGenStubId stub_id = StubGenStubId::atomic_load_long_id; + StubId stub_id = StubId::stubgen_atomic_load_long_id; StubCodeMark mark(this, stub_id); start = __ pc(); Register result_lo = R0; @@ -663,7 +663,7 @@ class StubGenerator: public StubCodeGenerator { address generate_atomic_store_long() { address start; - StubGenStubId stub_id = StubGenStubId::atomic_store_long_id; + StubId stub_id = StubId::stubgen_atomic_store_long_id; StubCodeMark mark(this, stub_id); start = __ pc(); Register newval_lo = R0; @@ -706,7 +706,7 @@ class StubGenerator: public StubCodeGenerator { // raddr: LR, blown by call address generate_partial_subtype_check() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::partial_subtype_check_id; + StubId stub_id = StubId::stubgen_partial_subtype_check_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -796,7 +796,7 @@ class StubGenerator: public StubCodeGenerator { // Non-destructive plausibility checks for oops address generate_verify_oop() { - StubGenStubId stub_id = StubGenStubId::verify_oop_id; + StubId stub_id = StubId::stubgen_verify_oop_id; StubCodeMark mark(this, stub_id); 
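    // Every stub in this generator now follows the same idiom: pick the
    // global StubId (StubId::stubgen_*_id replacing the generator-local
    // StubGenStubId values) and hand it to StubCodeMark, e.g.
    //
    //   StubId stub_id = StubId::stubgen_atomic_add_id; // was StubGenStubId::atomic_add_id
    //   StubCodeMark mark(this, stub_id);
    //
    // The blob-level constructor switch at the end of this file dispatches
    // the same way on BlobId::stubgen_*_id.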
address start = __ pc(); @@ -2027,98 +2027,98 @@ class StubGenerator: public StubCodeGenerator { // to: R1 // count: R2 treated as signed 32-bit int // - address generate_primitive_copy(StubGenStubId stub_id, address nooverlap_target = nullptr) { + address generate_primitive_copy(StubId stub_id, address nooverlap_target = nullptr) { bool aligned; bool status; int bytes_per_count; bool disjoint; switch (stub_id) { - case jbyte_disjoint_arraycopy_id: + case StubId::stubgen_jbyte_disjoint_arraycopy_id: aligned = false; status = true; bytes_per_count = 1; disjoint = true; break; - case jshort_disjoint_arraycopy_id: + case StubId::stubgen_jshort_disjoint_arraycopy_id: aligned = false; status = true; bytes_per_count = 2; disjoint = true; break; - case jint_disjoint_arraycopy_id: + case StubId::stubgen_jint_disjoint_arraycopy_id: aligned = false; status = true; bytes_per_count = 4; disjoint = true; break; - case jlong_disjoint_arraycopy_id: + case StubId::stubgen_jlong_disjoint_arraycopy_id: aligned = false; status = true; bytes_per_count = 8; disjoint = true; break; - case arrayof_jbyte_disjoint_arraycopy_id: + case StubId::stubgen_arrayof_jbyte_disjoint_arraycopy_id: aligned = true; status = set_status; bytes_per_count = 1; disjoint = true; break; - case arrayof_jshort_disjoint_arraycopy_id: + case StubId::stubgen_arrayof_jshort_disjoint_arraycopy_id: aligned = true; status = set_status; bytes_per_count = 2; disjoint = true; break; - case arrayof_jint_disjoint_arraycopy_id: + case StubId::stubgen_arrayof_jint_disjoint_arraycopy_id: aligned = true; status = set_status; bytes_per_count = 4; disjoint = true; break; - case arrayof_jlong_disjoint_arraycopy_id: + case StubId::stubgen_arrayof_jlong_disjoint_arraycopy_id: aligned = false; status = set_status; bytes_per_count = 8; disjoint = true; break; - case jbyte_arraycopy_id: + case StubId::stubgen_jbyte_arraycopy_id: aligned = false; status = true; bytes_per_count = 1; disjoint = false; break; - case jshort_arraycopy_id: + case StubId::stubgen_jshort_arraycopy_id: aligned = false; status = true; bytes_per_count = 2; disjoint = false; break; - case jint_arraycopy_id: + case StubId::stubgen_jint_arraycopy_id: aligned = false; status = true; bytes_per_count = 4; disjoint = false; break; - case jlong_arraycopy_id: + case StubId::stubgen_jlong_arraycopy_id: aligned = false; status = true; bytes_per_count = 8; disjoint = false; break; - case arrayof_jbyte_arraycopy_id: + case StubId::stubgen_arrayof_jbyte_arraycopy_id: aligned = true; status = set_status; bytes_per_count = 1; disjoint = false; break; - case arrayof_jshort_arraycopy_id: + case StubId::stubgen_arrayof_jshort_arraycopy_id: aligned = true; status = set_status; bytes_per_count = 2; disjoint = false; break; - case arrayof_jint_arraycopy_id: + case StubId::stubgen_arrayof_jint_arraycopy_id: aligned = true; status = set_status; bytes_per_count = 4; @@ -2301,28 +2301,28 @@ class StubGenerator: public StubCodeGenerator { // to: R1 // count: R2 treated as signed 32-bit int // - address generate_oop_copy(StubGenStubId stub_id, address nooverlap_target = nullptr) { + address generate_oop_copy(StubId stub_id, address nooverlap_target = nullptr) { bool aligned; bool status; bool disjoint; switch (stub_id) { - case oop_disjoint_arraycopy_id: + case StubId::stubgen_oop_disjoint_arraycopy_id: aligned = false; status = true; disjoint = true; break; - case arrayof_oop_disjoint_arraycopy_id: + case StubId::stubgen_arrayof_oop_disjoint_arraycopy_id: aligned = true; status = set_status; disjoint = true; 
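      // As in generate_primitive_copy above, alignment, status reporting and
      // overlap handling are derived entirely from the global stub id.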
break; - case oop_arraycopy_id: + case StubId::stubgen_oop_arraycopy_id: aligned = false; status = true; disjoint = false; break; - case arrayof_oop_arraycopy_id: + case StubId::stubgen_arrayof_oop_arraycopy_id: aligned = true; status = set_status; disjoint = false; @@ -2476,7 +2476,7 @@ class StubGenerator: public StubCodeGenerator { const Register R3_bits = R3; // test copy of low bits __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::unsafe_arraycopy_id; + StubId stub_id = StubId::stubgen_unsafe_arraycopy_id; StubCodeMark mark(this, stub_id); address start = __ pc(); const Register tmp = Rtemp; @@ -2604,7 +2604,7 @@ class StubGenerator: public StubCodeGenerator { // address generate_checkcast_copy() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::checkcast_arraycopy_id; + StubId stub_id = StubId::stubgen_checkcast_arraycopy_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -2772,7 +2772,7 @@ class StubGenerator: public StubCodeGenerator { const Register R8_temp = R8; __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::generic_arraycopy_id; + StubId stub_id = StubId::stubgen_generic_arraycopy_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -3008,35 +3008,35 @@ class StubGenerator: public StubCodeGenerator { UnsafeMemoryAccess::set_common_exit_stub_pc(ucm_common_error_exit); // these need always status in case they are called from generic_arraycopy - StubRoutines::_jbyte_disjoint_arraycopy = generate_primitive_copy(StubGenStubId::jbyte_disjoint_arraycopy_id); - StubRoutines::_jshort_disjoint_arraycopy = generate_primitive_copy(StubGenStubId::jshort_disjoint_arraycopy_id); - StubRoutines::_jint_disjoint_arraycopy = generate_primitive_copy(StubGenStubId::jint_disjoint_arraycopy_id); - StubRoutines::_jlong_disjoint_arraycopy = generate_primitive_copy(StubGenStubId::jlong_disjoint_arraycopy_id); - StubRoutines::_oop_disjoint_arraycopy = generate_oop_copy (StubGenStubId::oop_disjoint_arraycopy_id); + StubRoutines::_jbyte_disjoint_arraycopy = generate_primitive_copy(StubId::stubgen_jbyte_disjoint_arraycopy_id); + StubRoutines::_jshort_disjoint_arraycopy = generate_primitive_copy(StubId::stubgen_jshort_disjoint_arraycopy_id); + StubRoutines::_jint_disjoint_arraycopy = generate_primitive_copy(StubId::stubgen_jint_disjoint_arraycopy_id); + StubRoutines::_jlong_disjoint_arraycopy = generate_primitive_copy(StubId::stubgen_jlong_disjoint_arraycopy_id); + StubRoutines::_oop_disjoint_arraycopy = generate_oop_copy (StubId::stubgen_oop_disjoint_arraycopy_id); - StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_primitive_copy(StubGenStubId::arrayof_jbyte_disjoint_arraycopy_id); - StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_primitive_copy(StubGenStubId::arrayof_jshort_disjoint_arraycopy_id); - StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_primitive_copy(StubGenStubId::arrayof_jint_disjoint_arraycopy_id); - StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_primitive_copy(StubGenStubId::arrayof_jlong_disjoint_arraycopy_id); - StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_oop_copy (StubGenStubId::arrayof_oop_disjoint_arraycopy_id); + StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_primitive_copy(StubId::stubgen_arrayof_jbyte_disjoint_arraycopy_id); + StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_primitive_copy(StubId::stubgen_arrayof_jshort_disjoint_arraycopy_id); + StubRoutines::_arrayof_jint_disjoint_arraycopy = 
generate_primitive_copy(StubId::stubgen_arrayof_jint_disjoint_arraycopy_id); + StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_primitive_copy(StubId::stubgen_arrayof_jlong_disjoint_arraycopy_id); + StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_oop_copy (StubId::stubgen_arrayof_oop_disjoint_arraycopy_id); // these need always status in case they are called from generic_arraycopy - StubRoutines::_jbyte_arraycopy = generate_primitive_copy(StubGenStubId::jbyte_arraycopy_id, StubRoutines::_jbyte_disjoint_arraycopy); - StubRoutines::_jshort_arraycopy = generate_primitive_copy(StubGenStubId::jshort_arraycopy_id, StubRoutines::_jshort_disjoint_arraycopy); - StubRoutines::_jint_arraycopy = generate_primitive_copy(StubGenStubId::jint_arraycopy_id, StubRoutines::_jint_disjoint_arraycopy); - StubRoutines::_jlong_arraycopy = generate_primitive_copy(StubGenStubId::jlong_arraycopy_id, StubRoutines::_jlong_disjoint_arraycopy); - StubRoutines::_oop_arraycopy = generate_oop_copy (StubGenStubId::oop_arraycopy_id, StubRoutines::_oop_disjoint_arraycopy); + StubRoutines::_jbyte_arraycopy = generate_primitive_copy(StubId::stubgen_jbyte_arraycopy_id, StubRoutines::_jbyte_disjoint_arraycopy); + StubRoutines::_jshort_arraycopy = generate_primitive_copy(StubId::stubgen_jshort_arraycopy_id, StubRoutines::_jshort_disjoint_arraycopy); + StubRoutines::_jint_arraycopy = generate_primitive_copy(StubId::stubgen_jint_arraycopy_id, StubRoutines::_jint_disjoint_arraycopy); + StubRoutines::_jlong_arraycopy = generate_primitive_copy(StubId::stubgen_jlong_arraycopy_id, StubRoutines::_jlong_disjoint_arraycopy); + StubRoutines::_oop_arraycopy = generate_oop_copy (StubId::stubgen_oop_arraycopy_id, StubRoutines::_oop_disjoint_arraycopy); - StubRoutines::_arrayof_jbyte_arraycopy = generate_primitive_copy(StubGenStubId::arrayof_jbyte_arraycopy_id, StubRoutines::_arrayof_jbyte_disjoint_arraycopy); - StubRoutines::_arrayof_jshort_arraycopy = generate_primitive_copy(StubGenStubId::arrayof_jshort_arraycopy_id, StubRoutines::_arrayof_jshort_disjoint_arraycopy); + StubRoutines::_arrayof_jbyte_arraycopy = generate_primitive_copy(StubId::stubgen_arrayof_jbyte_arraycopy_id, StubRoutines::_arrayof_jbyte_disjoint_arraycopy); + StubRoutines::_arrayof_jshort_arraycopy = generate_primitive_copy(StubId::stubgen_arrayof_jshort_arraycopy_id, StubRoutines::_arrayof_jshort_disjoint_arraycopy); #ifdef _LP64 // since sizeof(jint) < sizeof(HeapWord), there's a different flavor: - StubRoutines::_arrayof_jint_arraycopy = generate_primitive_copy(StubGenStubId::arrayof_jint_arraycopy_id, StubRoutines::_arrayof_jint_disjoint_arraycopy); + StubRoutines::_arrayof_jint_arraycopy = generate_primitive_copy(StubId::stubgen_arrayof_jint_arraycopy_id, StubRoutines::_arrayof_jint_disjoint_arraycopy); #else StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy; #endif if (BytesPerHeapOop < HeapWordSize) { - StubRoutines::_arrayof_oop_arraycopy = generate_oop_copy (StubGenStubId::arrayof_oop_arraycopy_id, StubRoutines::_arrayof_oop_disjoint_arraycopy); + StubRoutines::_arrayof_oop_arraycopy = generate_oop_copy (StubId::stubgen_arrayof_oop_arraycopy_id, StubRoutines::_arrayof_oop_disjoint_arraycopy); } else { StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy; } @@ -3051,7 +3051,7 @@ class StubGenerator: public StubCodeGenerator { address generate_method_entry_barrier() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::method_entry_barrier_id; + StubId stub_id = 
StubId::stubgen_method_entry_barrier_id; StubCodeMark mark(this, stub_id); Label deoptimize_label; @@ -3105,22 +3105,22 @@ class StubGenerator: public StubCodeGenerator { #undef __ #define __ masm-> - address generate_cont_thaw(StubGenStubId stub_id) { + address generate_cont_thaw(StubId stub_id) { if (!Continuations::enabled()) return nullptr; Unimplemented(); return nullptr; } address generate_cont_thaw() { - return generate_cont_thaw(StubGenStubId::cont_thaw_id); + return generate_cont_thaw(StubId::stubgen_cont_thaw_id); } address generate_cont_returnBarrier() { - return generate_cont_thaw(StubGenStubId::cont_returnBarrier_id); + return generate_cont_thaw(StubId::stubgen_cont_returnBarrier_id); } address generate_cont_returnBarrier_exception() { - return generate_cont_thaw(StubGenStubId::cont_returnBarrierExc_id); + return generate_cont_thaw(StubId::stubgen_cont_returnBarrierExc_id); } //--------------------------------------------------------------------------- @@ -3203,31 +3203,31 @@ class StubGenerator: public StubCodeGenerator { } public: - StubGenerator(CodeBuffer* code, StubGenBlobId blob_id) : StubCodeGenerator(code, blob_id) { + StubGenerator(CodeBuffer* code, BlobId blob_id) : StubCodeGenerator(code, blob_id) { switch(blob_id) { - case preuniverse_id: + case BlobId::stubgen_preuniverse_id: generate_preuniverse_stubs(); break; - case initial_id: + case BlobId::stubgen_initial_id: generate_initial_stubs(); break; - case continuation_id: + case BlobId::stubgen_continuation_id: generate_continuation_stubs(); break; - case compiler_id: + case BlobId::stubgen_compiler_id: generate_compiler_stubs(); break; - case final_id: + case BlobId::stubgen_final_id: generate_final_stubs(); break; default: - fatal("unexpected blob id: %d", blob_id); + fatal("unexpected blob id: %s", StubInfo::name(blob_id)); break; }; } }; // end class declaration -void StubGenerator_generate(CodeBuffer* code, StubGenBlobId blob_id) { +void StubGenerator_generate(CodeBuffer* code, BlobId blob_id) { StubGenerator g(code, blob_id); } diff --git a/src/hotspot/cpu/arm/stubRoutinesCrypto_arm.cpp b/src/hotspot/cpu/arm/stubRoutinesCrypto_arm.cpp index b663cfd9298..eb85b0bddf0 100644 --- a/src/hotspot/cpu/arm/stubRoutinesCrypto_arm.cpp +++ b/src/hotspot/cpu/arm/stubRoutinesCrypto_arm.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -119,7 +119,7 @@ void aes_init() { address generate_aescrypt_encryptBlock() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::aescrypt_encryptBlock_id; + StubId stub_id = StubId::stubgen_aescrypt_encryptBlock_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -317,7 +317,7 @@ address generate_aescrypt_encryptBlock() { address generate_aescrypt_decryptBlock() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::aescrypt_decryptBlock_id; + StubId stub_id = StubId::stubgen_aescrypt_decryptBlock_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -538,7 +538,7 @@ address generate_cipherBlockChaining_encryptAESCrypt() { // [sp+4] Transposition Box reference __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::cipherBlockChaining_encryptAESCrypt_id; + StubId stub_id = StubId::stubgen_cipherBlockChaining_encryptAESCrypt_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -604,7 +604,7 @@ address generate_cipherBlockChaining_encryptAESCrypt() { address generate_cipherBlockChaining_decryptAESCrypt() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::cipherBlockChaining_decryptAESCrypt_id; + StubId stub_id = StubId::stubgen_cipherBlockChaining_decryptAESCrypt_id; StubCodeMark mark(this, stub_id); address start = __ pc(); diff --git a/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp b/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp index b1cdf38daf3..438521b0a9b 100644 --- a/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp +++ b/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp @@ -50,7 +50,7 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); if (_info->deoptimize_on_exception()) { - address a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id); + address a = Runtime1::entry_for(StubId::c1_predicate_failed_trap_id); //__ load_const_optimized(R0, a); __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a)); __ mtctr(R0); @@ -61,8 +61,8 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) { return; } - address stub = _throw_index_out_of_bounds_exception ? Runtime1::entry_for(C1StubId::throw_index_exception_id) - : Runtime1::entry_for(C1StubId::throw_range_check_failed_id); + address stub = _throw_index_out_of_bounds_exception ? 
Runtime1::entry_for(StubId::c1_throw_index_exception_id) + : Runtime1::entry_for(StubId::c1_throw_range_check_failed_id); //__ load_const_optimized(R0, stub); __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub)); __ mtctr(R0); @@ -91,7 +91,7 @@ PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) { void PredicateFailedStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); - address a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id); + address a = Runtime1::entry_for(StubId::c1_predicate_failed_trap_id); //__ load_const_optimized(R0, a); __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a)); __ mtctr(R0); @@ -115,7 +115,7 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) { __ load_const_optimized(R0, md.value()); __ std(R0, -8, R1_SP); - address a = Runtime1::entry_for(C1StubId::counter_overflow_id); + address a = Runtime1::entry_for(StubId::c1_counter_overflow_id); //__ load_const_optimized(R0, a); __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a)); __ mtctr(R0); @@ -132,7 +132,7 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) { ce->compilation()->implicit_exception_table()->append(_offset, __ offset()); } __ bind(_entry); - address stub = Runtime1::entry_for(C1StubId::throw_div0_exception_id); + address stub = Runtime1::entry_for(StubId::c1_throw_div0_exception_id); //__ load_const_optimized(R0, stub); __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub)); __ mtctr(R0); @@ -147,9 +147,9 @@ void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) { address a; if (_info->deoptimize_on_exception()) { // Deoptimize, do not throw the exception, because it is probably wrong to do it here. - a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id); + a = Runtime1::entry_for(StubId::c1_predicate_failed_trap_id); } else { - a = Runtime1::entry_for(C1StubId::throw_null_pointer_exception_id); + a = Runtime1::entry_for(StubId::c1_throw_null_pointer_exception_id); } if (ImplicitNullChecks || TrapBasedNullChecks) { @@ -181,14 +181,14 @@ void SimpleExceptionStub::emit_code(LIR_Assembler* ce) { // Implementation of NewInstanceStub -NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, C1StubId stub_id) { +NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, StubId stub_id) { _result = result; _klass = klass; _klass_reg = klass_reg; _info = new CodeEmitInfo(info); - assert(stub_id == C1StubId::new_instance_id || - stub_id == C1StubId::fast_new_instance_id || - stub_id == C1StubId::fast_new_instance_init_check_id, + assert(stub_id == StubId::c1_new_instance_id || + stub_id == StubId::c1_fast_new_instance_id || + stub_id == StubId::c1_fast_new_instance_init_check_id, "need new_instance id"); _stub_id = stub_id; } @@ -218,7 +218,7 @@ NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr re void NewTypeArrayStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); - address entry = Runtime1::entry_for(C1StubId::new_type_array_id); + address entry = Runtime1::entry_for(StubId::c1_new_type_array_id); //__ load_const_optimized(R0, entry); __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry)); __ mr_if_needed(/*op->tmp1()->as_register()*/ R5_ARG3, _length->as_register()); // already sign-extended @@ -241,7 +241,7 @@ NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Op void 
NewObjectArrayStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); - address entry = Runtime1::entry_for(C1StubId::new_object_array_id); + address entry = Runtime1::entry_for(StubId::c1_new_object_array_id); //__ load_const_optimized(R0, entry); __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry)); __ mr_if_needed(/*op->tmp1()->as_register()*/ R5_ARG3, _length->as_register()); // already sign-extended @@ -254,7 +254,7 @@ void NewObjectArrayStub::emit_code(LIR_Assembler* ce) { void MonitorEnterStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); - address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? C1StubId::monitorenter_id : C1StubId::monitorenter_nofpu_id); + address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? StubId::c1_monitorenter_id : StubId::c1_monitorenter_nofpu_id); //__ load_const_optimized(R0, stub); __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub)); __ mr_if_needed(/*scratch_opr()->as_register()*/ R4_ARG2, _obj_reg->as_register()); @@ -271,7 +271,7 @@ void MonitorExitStub::emit_code(LIR_Assembler* ce) { if (_compute_lock) { ce->monitor_address(_monitor_ix, _lock_reg); } - address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? C1StubId::monitorexit_id : C1StubId::monitorexit_nofpu_id); + address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? StubId::c1_monitorexit_id : StubId::c1_monitorexit_nofpu_id); //__ load_const_optimized(R0, stub); __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub)); assert(_lock_reg->as_register() == R4_ARG2, ""); @@ -385,12 +385,12 @@ void PatchingStub::emit_code(LIR_Assembler* ce) { address target = nullptr; relocInfo::relocType reloc_type = relocInfo::none; switch (_id) { - case access_field_id: target = Runtime1::entry_for(C1StubId::access_field_patching_id); break; - case load_klass_id: target = Runtime1::entry_for(C1StubId::load_klass_patching_id); + case access_field_id: target = Runtime1::entry_for(StubId::c1_access_field_patching_id); break; + case load_klass_id: target = Runtime1::entry_for(StubId::c1_load_klass_patching_id); reloc_type = relocInfo::metadata_type; break; - case load_mirror_id: target = Runtime1::entry_for(C1StubId::load_mirror_patching_id); + case load_mirror_id: target = Runtime1::entry_for(StubId::c1_load_mirror_patching_id); reloc_type = relocInfo::oop_type; break; - case load_appendix_id: target = Runtime1::entry_for(C1StubId::load_appendix_patching_id); + case load_appendix_id: target = Runtime1::entry_for(StubId::c1_load_appendix_patching_id); reloc_type = relocInfo::oop_type; break; default: ShouldNotReachHere(); } @@ -416,7 +416,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) { void DeoptimizeStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); - address stub = Runtime1::entry_for(C1StubId::deoptimize_id); + address stub = Runtime1::entry_for(StubId::c1_deoptimize_id); //__ load_const_optimized(R0, stub); __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub)); __ mtctr(R0); diff --git a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp index 7dfde40364e..b548444bb48 100644 --- a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp +++ b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp @@ -191,7 +191,7 @@ int LIR_Assembler::emit_exception_handler() { } int offset = code_offset(); - address entry_point = CAST_FROM_FN_PTR(address, Runtime1::entry_for(C1StubId::handle_exception_from_callee_id)); + address 
entry_point = CAST_FROM_FN_PTR(address, Runtime1::entry_for(StubId::c1_handle_exception_from_callee_id)); //__ load_const_optimized(R0, entry_point); __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry_point)); __ mtctr(R0); @@ -241,7 +241,7 @@ int LIR_Assembler::emit_unwind_handler() { } // Dispatch to the unwind logic. - address unwind_stub = Runtime1::entry_for(C1StubId::unwind_exception_id); + address unwind_stub = Runtime1::entry_for(StubId::c1_unwind_exception_id); //__ load_const_optimized(R0, unwind_stub); __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(unwind_stub)); if (preserve_exception) { __ mr(Rexception, Rexception_save); } @@ -1788,8 +1788,8 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit __ calculate_address_from_global_toc(exceptionPC->as_register(), pc_for_athrow, true, true, /*add_relocation*/ true); add_call_info(pc_for_athrow_offset, info); // for exception handler - address stub = Runtime1::entry_for(compilation()->has_fpu_code() ? C1StubId::handle_exception_id - : C1StubId::handle_exception_nofpu_id); + address stub = Runtime1::entry_for(compilation()->has_fpu_code() ? StubId::c1_handle_exception_id + : StubId::c1_handle_exception_nofpu_id); //__ load_const_optimized(R0, stub); __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub)); __ mtctr(R0); @@ -1980,7 +1980,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { __ check_klass_subtype_fast_path(sub_klass, super_klass, tmp, tmp2, &cont, copyfunc_addr != nullptr ? ©func : &slow, nullptr); - address slow_stc = Runtime1::entry_for(C1StubId::slow_subtype_check_id); + address slow_stc = Runtime1::entry_for(StubId::c1_slow_subtype_check_id); //__ load_const_optimized(tmp, slow_stc, tmp2); __ calculate_address_from_global_toc(tmp, slow_stc, true, true, false); __ mtctr(tmp); @@ -2408,7 +2408,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L __ b(*success); } else { // Call out-of-line instance of __ check_klass_subtype_slow_path(...): - address entry = Runtime1::entry_for(C1StubId::slow_subtype_check_id); + address entry = Runtime1::entry_for(StubId::c1_slow_subtype_check_id); // Stub needs fixed registers (tmp1-3). Register original_k_RInfo = op->tmp1()->as_register(); Register original_klass_RInfo = op->tmp2()->as_register(); @@ -2499,7 +2499,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, &done, &failure, nullptr); // Call out-of-line instance of __ check_klass_subtype_slow_path(...): - const address slow_path = Runtime1::entry_for(C1StubId::slow_subtype_check_id); + const address slow_path = Runtime1::entry_for(StubId::c1_slow_subtype_check_id); //__ load_const_optimized(R0, slow_path); __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(slow_path)); __ mtctr(R0); @@ -2801,9 +2801,9 @@ void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) { void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) { // Stubs: Called via rt_call, but dest is a stub address (no FunctionDescriptor). 
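  // Runtime1::entry_for is keyed by the same global StubId values, so a raw
  // destination address can be matched back to the C1 stub that owns it.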
- if (dest == Runtime1::entry_for(C1StubId::register_finalizer_id) || - dest == Runtime1::entry_for(C1StubId::new_multi_array_id ) || - dest == Runtime1::entry_for(C1StubId::is_instance_of_id )) { + if (dest == Runtime1::entry_for(StubId::c1_register_finalizer_id) || + dest == Runtime1::entry_for(StubId::c1_new_multi_array_id ) || + dest == Runtime1::entry_for(StubId::c1_is_instance_of_id )) { assert(CodeCache::contains(dest), "simplified call is only for special C1 stubs"); //__ load_const_optimized(R0, dest); __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(dest)); diff --git a/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp b/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp index 815e5c83a1b..5f030676bcb 100644 --- a/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp +++ b/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp @@ -976,7 +976,7 @@ void LIRGenerator::do_NewMultiArray(NewMultiArray* x) { args->append(rank); args->append(varargs); const LIR_Opr reg = result_register_for(x->type()); - __ call_runtime(Runtime1::entry_for(C1StubId::new_multi_array_id), + __ call_runtime(Runtime1::entry_for(StubId::c1_new_multi_array_id), LIR_OprFact::illegalOpr, reg, args, info); @@ -1011,7 +1011,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) { if (x->is_incompatible_class_change_check()) { assert(patching_info == nullptr, "can't patch this"); - stub = new SimpleExceptionStub(C1StubId::throw_incompatible_class_change_error_id, + stub = new SimpleExceptionStub(StubId::c1_throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception); } else if (x->is_invokespecial_receiver_check()) { assert(patching_info == nullptr, "can't patch this"); @@ -1019,7 +1019,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) { Deoptimization::Reason_class_check, Deoptimization::Action_none); } else { - stub = new SimpleExceptionStub(C1StubId::throw_class_cast_exception_id, obj.result(), info_for_exception); + stub = new SimpleExceptionStub(StubId::c1_throw_class_cast_exception_id, obj.result(), info_for_exception); } // Following registers are used by slow_subtype_check: LIR_Opr tmp1 = FrameMap::R4_oop_opr; // super_klass @@ -1053,7 +1053,7 @@ void LIRGenerator::do_InstanceOf(InstanceOf* x) { // Intrinsic for Class::isInstance address LIRGenerator::isInstance_entry() { - return Runtime1::entry_for(C1StubId::is_instance_of_id); + return Runtime1::entry_for(StubId::c1_is_instance_of_id); } diff --git a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp index 02e069b6be1..23050bff453 100644 --- a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp +++ b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp @@ -307,7 +307,7 @@ void C1_MacroAssembler::initialize_object( if (CURRENT_ENV->dtrace_alloc_probes()) { Unimplemented(); // assert(obj == O0, "must be"); -// call(CAST_FROM_FN_PTR(address, Runtime1::entry_for(C1StubId::dtrace_object_alloc_id)), +// call(CAST_FROM_FN_PTR(address, Runtime1::entry_for(StubId::c1_dtrace_object_alloc_id)), // relocInfo::runtime_call_type); } @@ -383,7 +383,7 @@ void C1_MacroAssembler::allocate_array( if (CURRENT_ENV->dtrace_alloc_probes()) { Unimplemented(); //assert(obj == O0, "must be"); - //call(CAST_FROM_FN_PTR(address, Runtime1::entry_for(C1StubId::dtrace_object_alloc_id)), + //call(CAST_FROM_FN_PTR(address, Runtime1::entry_for(StubId::c1_dtrace_object_alloc_id)), // relocInfo::runtime_call_type); } @@ -412,7 +412,7 @@ void C1_MacroAssembler::null_check(Register r, Label* Lnull) { if (TrapBasedNullChecks) { // SIGTRAP based 
trap_null_check(r); } else { // explicit - //const address exception_entry = Runtime1::entry_for(C1StubId::throw_null_pointer_exception_id); + //const address exception_entry = Runtime1::entry_for(StubId::c1_throw_null_pointer_exception_id); assert(Lnull != nullptr, "must have Label for explicit check"); cmpdi(CR0, r, 0); bc_far_optimized(Assembler::bcondCRbiIs1, bi0(CR0, Assembler::equal), *Lnull); diff --git a/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp b/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp index f1afbdd3a1d..09efa2c841b 100644 --- a/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp +++ b/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp @@ -97,12 +97,12 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, //load_const_optimized(R0, StubRoutines::forward_exception_entry()); //mtctr(R0); //bctr(); - } else if (_stub_id == (int)C1StubId::forward_exception_id) { + } else if (_stub_id == (int)StubId::c1_forward_exception_id) { should_not_reach_here(); } else { // keep stub frame for next call_RT - //load_const_optimized(R0, Runtime1::entry_for(C1StubId::forward_exception_id)); - add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(Runtime1::entry_for(C1StubId::forward_exception_id))); + //load_const_optimized(R0, Runtime1::entry_for(StubId::c1_forward_exception_id)); + add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(Runtime1::entry_for(StubId::c1_forward_exception_id))); mtctr(R0); bctr(); } @@ -391,7 +391,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) { return oop_maps; } -OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { +OopMapSet* Runtime1::generate_code_for(StubId id, StubAssembler* sasm) { OopMapSet* oop_maps = nullptr; // For better readability. @@ -400,22 +400,22 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { // Stub code & info for the different stubs. switch (id) { - case C1StubId::forward_exception_id: + case StubId::c1_forward_exception_id: { oop_maps = generate_handle_exception(id, sasm); } break; - case C1StubId::new_instance_id: - case C1StubId::fast_new_instance_id: - case C1StubId::fast_new_instance_init_check_id: + case StubId::c1_new_instance_id: + case StubId::c1_fast_new_instance_id: + case StubId::c1_fast_new_instance_init_check_id: { - if (id == C1StubId::new_instance_id) { + if (id == StubId::c1_new_instance_id) { __ set_info("new_instance", dont_gc_arguments); - } else if (id == C1StubId::fast_new_instance_id) { + } else if (id == StubId::c1_fast_new_instance_id) { __ set_info("fast new_instance", dont_gc_arguments); } else { - assert(id == C1StubId::fast_new_instance_init_check_id, "bad C1StubId"); + assert(id == StubId::c1_fast_new_instance_init_check_id, "bad StubId"); __ set_info("fast new_instance init check", dont_gc_arguments); } @@ -425,15 +425,15 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::counter_overflow_id: + case StubId::c1_counter_overflow_id: // Bci and method are on stack. 
oop_maps = stub_call_with_stack_parms(sasm, noreg, CAST_FROM_FN_PTR(address, counter_overflow), 2); break; - case C1StubId::new_type_array_id: - case C1StubId::new_object_array_id: + case StubId::c1_new_type_array_id: + case StubId::c1_new_object_array_id: { - if (id == C1StubId::new_type_array_id) { + if (id == StubId::c1_new_type_array_id) { __ set_info("new_type_array", dont_gc_arguments); } else { __ set_info("new_object_array", dont_gc_arguments); @@ -442,7 +442,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { #ifdef ASSERT // Assert object type is really an array of the proper kind. { - int tag = (id == C1StubId::new_type_array_id) ? Klass::_lh_array_tag_type_value : Klass::_lh_array_tag_obj_value; + int tag = (id == StubId::c1_new_type_array_id) ? Klass::_lh_array_tag_type_value : Klass::_lh_array_tag_obj_value; Label ok; __ lwz(R0, in_bytes(Klass::layout_helper_offset()), R4_ARG2); __ srawi(R0, R0, Klass::_lh_array_tag_shift); @@ -456,7 +456,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { // We don't support eden allocation. - if (id == C1StubId::new_type_array_id) { + if (id == StubId::c1_new_type_array_id) { oop_maps = generate_stub_call(sasm, R3_RET, CAST_FROM_FN_PTR(address, new_type_array), R4_ARG2, R5_ARG3); } else { oop_maps = generate_stub_call(sasm, R3_RET, CAST_FROM_FN_PTR(address, new_object_array), R4_ARG2, R5_ARG3); @@ -464,7 +464,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::new_multi_array_id: + case StubId::c1_new_multi_array_id: { // R4: klass // R5: rank @@ -474,7 +474,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::register_finalizer_id: + case StubId::c1_register_finalizer_id: { __ set_info("register_finalizer", dont_gc_arguments); // This code is called via rt_call. Hence, caller-save registers have been saved. @@ -504,50 +504,50 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::throw_range_check_failed_id: + case StubId::c1_throw_range_check_failed_id: { __ set_info("range_check_failed", dont_gc_arguments); // Arguments will be discarded. oop_maps = generate_exception_throw_with_stack_parms(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), 2); } break; - case C1StubId::throw_index_exception_id: + case StubId::c1_throw_index_exception_id: { __ set_info("index_range_check_failed", dont_gc_arguments); // Arguments will be discarded. 
oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true); } break; - case C1StubId::throw_div0_exception_id: + case StubId::c1_throw_div0_exception_id: { __ set_info("throw_div0_exception", dont_gc_arguments); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false); } break; - case C1StubId::throw_null_pointer_exception_id: + case StubId::c1_throw_null_pointer_exception_id: { __ set_info("throw_null_pointer_exception", dont_gc_arguments); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false); } break; - case C1StubId::handle_exception_nofpu_id: - case C1StubId::handle_exception_id: + case StubId::c1_handle_exception_nofpu_id: + case StubId::c1_handle_exception_id: { __ set_info("handle_exception", dont_gc_arguments); oop_maps = generate_handle_exception(id, sasm); } break; - case C1StubId::handle_exception_from_callee_id: + case StubId::c1_handle_exception_from_callee_id: { __ set_info("handle_exception_from_callee", dont_gc_arguments); oop_maps = generate_handle_exception(id, sasm); } break; - case C1StubId::unwind_exception_id: + case StubId::c1_unwind_exception_id: { const Register Rexception = R3 /*LIRGenerator::exceptionOopOpr()*/, Rexception_pc = R4 /*LIRGenerator::exceptionPcOpr()*/, @@ -575,28 +575,28 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::throw_array_store_exception_id: + case StubId::c1_throw_array_store_exception_id: { __ set_info("throw_array_store_exception", dont_gc_arguments); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true); } break; - case C1StubId::throw_class_cast_exception_id: + case StubId::c1_throw_class_cast_exception_id: { __ set_info("throw_class_cast_exception", dont_gc_arguments); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true); } break; - case C1StubId::throw_incompatible_class_change_error_id: + case StubId::c1_throw_incompatible_class_change_error_id: { __ set_info("throw_incompatible_class_cast_exception", dont_gc_arguments); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false); } break; - case C1StubId::slow_subtype_check_id: + case StubId::c1_slow_subtype_check_id: { // Support for uint StubRoutine::partial_subtype_check( Klass sub, Klass super ); const Register sub_klass = R5, super_klass = R4, @@ -607,7 +607,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::is_instance_of_id: + case StubId::c1_is_instance_of_id: { // Called like a C function, but without FunctionDescriptor (see LIR_Assembler::rt_call). @@ -674,12 +674,12 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::monitorenter_nofpu_id: - case C1StubId::monitorenter_id: + case StubId::c1_monitorenter_nofpu_id: + case StubId::c1_monitorenter_id: { __ set_info("monitorenter", dont_gc_arguments); - int save_fpu_registers = (id == C1StubId::monitorenter_id); + int save_fpu_registers = (id == StubId::c1_monitorenter_id); // Make a frame and preserve the caller's caller-save registers. 
OopMap* oop_map = save_live_registers(sasm, save_fpu_registers); @@ -693,15 +693,15 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::monitorexit_nofpu_id: - case C1StubId::monitorexit_id: + case StubId::c1_monitorexit_nofpu_id: + case StubId::c1_monitorexit_id: { // note: Really a leaf routine but must setup last java sp // => use call_RT for now (speed can be improved by // doing last java sp setup manually). __ set_info("monitorexit", dont_gc_arguments); - int save_fpu_registers = (id == C1StubId::monitorexit_id); + int save_fpu_registers = (id == StubId::c1_monitorexit_id); // Make a frame and preserve the caller's caller-save registers. OopMap* oop_map = save_live_registers(sasm, save_fpu_registers); @@ -715,7 +715,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::deoptimize_id: + case StubId::c1_deoptimize_id: { __ set_info("deoptimize", dont_gc_arguments); __ std(R0, -8, R1_SP); // Pass trap_request on stack. @@ -731,35 +731,35 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::access_field_patching_id: + case StubId::c1_access_field_patching_id: { __ set_info("access_field_patching", dont_gc_arguments); oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching)); } break; - case C1StubId::load_klass_patching_id: + case StubId::c1_load_klass_patching_id: { __ set_info("load_klass_patching", dont_gc_arguments); oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching)); } break; - case C1StubId::load_mirror_patching_id: + case StubId::c1_load_mirror_patching_id: { __ set_info("load_mirror_patching", dont_gc_arguments); oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching)); } break; - case C1StubId::load_appendix_patching_id: + case StubId::c1_load_appendix_patching_id: { __ set_info("load_appendix_patching", dont_gc_arguments); oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching)); } break; - case C1StubId::dtrace_object_alloc_id: + case StubId::c1_dtrace_object_alloc_id: { // O0: object __ unimplemented("stub dtrace_object_alloc_id"); __ set_info("dtrace_object_alloc", dont_gc_arguments); @@ -779,7 +779,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::predicate_failed_trap_id: + case StubId::c1_predicate_failed_trap_id: { __ set_info("predicate_failed_trap", dont_gc_arguments); OopMap* oop_map = save_live_registers(sasm); @@ -823,7 +823,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } -OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler* sasm) { +OopMapSet* Runtime1::generate_handle_exception(StubId id, StubAssembler* sasm) { __ block_comment("generate_handle_exception"); // Save registers, if required. @@ -833,7 +833,7 @@ OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler* sasm) Rexception_pc = R4 /*LIRGenerator::exceptionPcOpr()*/; switch (id) { - case C1StubId::forward_exception_id: + case StubId::c1_forward_exception_id: // We're handling an exception in the context of a compiled frame. // The registers have been saved in the standard places. 
Perform // an exception lookup in the caller and dispatch to the handler @@ -849,12 +849,12 @@ OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler* sasm) __ ld(Rexception_pc, _abi0(lr), Rexception_pc); __ std(R0, in_bytes(JavaThread::pending_exception_offset()), R16_thread); break; - case C1StubId::handle_exception_nofpu_id: - case C1StubId::handle_exception_id: + case StubId::c1_handle_exception_nofpu_id: + case StubId::c1_handle_exception_id: // At this point all registers MAY be live. - oop_map = save_live_registers(sasm, id != C1StubId::handle_exception_nofpu_id, Rexception_pc); + oop_map = save_live_registers(sasm, id != StubId::c1_handle_exception_nofpu_id, Rexception_pc); break; - case C1StubId::handle_exception_from_callee_id: + case StubId::c1_handle_exception_from_callee_id: // At this point all registers except exception oop and exception pc are dead. oop_map = new OopMap(frame_size_in_bytes / sizeof(jint), 0); sasm->set_frame_size(frame_size_in_bytes / BytesPerWord); @@ -893,13 +893,13 @@ OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler* sasm) // Restore the registers that were saved at the beginning, remove // the frame and jump to the exception handler. switch (id) { - case C1StubId::forward_exception_id: - case C1StubId::handle_exception_nofpu_id: - case C1StubId::handle_exception_id: - restore_live_registers(sasm, noreg, noreg, id != C1StubId::handle_exception_nofpu_id); + case StubId::c1_forward_exception_id: + case StubId::c1_handle_exception_nofpu_id: + case StubId::c1_handle_exception_id: + restore_live_registers(sasm, noreg, noreg, id != StubId::c1_handle_exception_nofpu_id); __ bctr(); break; - case C1StubId::handle_exception_from_callee_id: { + case StubId::c1_handle_exception_from_callee_id: { __ pop_frame(); __ ld(Rexception_pc, _abi0(lr), R1_SP); __ mtlr(Rexception_pc); diff --git a/src/hotspot/cpu/ppc/runtime_ppc.cpp b/src/hotspot/cpu/ppc/runtime_ppc.cpp index 6d9a1dfcb1e..2654075f702 100644 --- a/src/hotspot/cpu/ppc/runtime_ppc.cpp +++ b/src/hotspot/cpu/ppc/runtime_ppc.cpp @@ -71,7 +71,7 @@ ExceptionBlob* OptoRuntime::generate_exception_blob() { // Allocate space for the code. ResourceMark rm; // Setup code generation tools. - const char* name = OptoRuntime::stub_name(OptoStubId::exception_id); + const char* name = OptoRuntime::stub_name(StubId::c2_exception_id); CodeBuffer buffer(name, 2048, 1024); if (buffer.blob() == nullptr) { return nullptr; diff --git a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp index 37d6c9e6d51..64bd8dc7812 100644 --- a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp +++ b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp @@ -2947,7 +2947,7 @@ void SharedRuntime::generate_deopt_blob() { // Allocate space for the code ResourceMark rm; // Setup code generation tools - const char* name = SharedRuntime::stub_name(SharedStubId::deopt_id); + const char* name = SharedRuntime::stub_name(StubId::shared_deopt_id); CodeBuffer buffer(name, 2048, 1024); InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer); Label exec_mode_initialized; @@ -3170,7 +3170,7 @@ UncommonTrapBlob* OptoRuntime::generate_uncommon_trap_blob() { // Allocate space for the code. ResourceMark rm; // Setup code generation tools. 
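  // Blob names are likewise drawn from the shared enumeration: stub_name()
  // now takes a global StubId, with StubId::c2_* replacing the old
  // compiler-local OptoStubId values and StubId::shared_* replacing
  // SharedStubId.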
- const char* name = OptoRuntime::stub_name(OptoStubId::uncommon_trap_id); + const char* name = OptoRuntime::stub_name(StubId::c2_uncommon_trap_id); CodeBuffer buffer(name, 2048, 1024); if (buffer.blob() == nullptr) { return nullptr; @@ -3302,7 +3302,7 @@ UncommonTrapBlob* OptoRuntime::generate_uncommon_trap_blob() { #endif // COMPILER2 // Generate a special Compile2Runtime blob that saves all registers, and setup oopmap. -SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address call_ptr) { +SafepointBlob* SharedRuntime::generate_handler_blob(StubId id, address call_ptr) { assert(StubRoutines::forward_exception_entry() != nullptr, "must be generated before"); assert(is_polling_page_id(id), "expected a polling page stub id"); @@ -3320,7 +3320,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address cal int frame_size_in_bytes = 0; RegisterSaver::ReturnPCLocation return_pc_location; - bool cause_return = (id == SharedStubId::polling_page_return_handler_id); + bool cause_return = (id == StubId::shared_polling_page_return_handler_id); if (cause_return) { // Nothing to do here. The frame has already been popped in MachEpilogNode. // Register LR already contains the return pc. @@ -3330,7 +3330,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address cal return_pc_location = RegisterSaver::return_pc_is_thread_saved_exception_pc; } - bool save_vectors = (id == SharedStubId::polling_page_vectors_safepoint_handler_id); + bool save_vectors = (id == StubId::shared_polling_page_vectors_safepoint_handler_id); // Save registers, fpu state, and flags. Set R31 = return pc. map = RegisterSaver::push_frame_reg_args_and_save_live_registers(masm, @@ -3417,7 +3417,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address cal // but since this is generic code we don't know what they are and the caller // must do any gc of the args. // -RuntimeStub* SharedRuntime::generate_resolve_blob(SharedStubId id, address destination) { +RuntimeStub* SharedRuntime::generate_resolve_blob(StubId id, address destination) { assert(is_resolve_id(id), "expected a resolve stub id"); // allocate space for the code @@ -3521,7 +3521,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(SharedStubId id, address desti // Note: the routine set_pc_not_at_call_for_caller in // SharedRuntime.cpp requires that this code be generated into a // RuntimeStub. -RuntimeStub* SharedRuntime::generate_throw_exception(SharedStubId id, address runtime_entry) { +RuntimeStub* SharedRuntime::generate_throw_exception(StubId id, address runtime_entry) { assert(is_throw_id(id), "expected a throw stub id"); const char* name = SharedRuntime::stub_name(id); @@ -3844,7 +3844,7 @@ void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints, // It returns a jobject handle to the event writer. // The handle is dereferenced and the return value is the event writer oop. RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() { - const char* name = SharedRuntime::stub_name(SharedStubId::jfr_write_checkpoint_id); + const char* name = SharedRuntime::stub_name(StubId::shared_jfr_write_checkpoint_id); CodeBuffer code(name, 512, 64); MacroAssembler* masm = new MacroAssembler(&code); @@ -3881,7 +3881,7 @@ RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() { // For c2: call to return a leased buffer. 
RuntimeStub* SharedRuntime::generate_jfr_return_lease() { - const char* name = SharedRuntime::stub_name(SharedStubId::jfr_return_lease_id); + const char* name = SharedRuntime::stub_name(StubId::shared_jfr_return_lease_id); CodeBuffer code(name, 512, 64); MacroAssembler* masm = new MacroAssembler(&code); diff --git a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp index a2b25376837..c2f290212bd 100644 --- a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp +++ b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp @@ -89,7 +89,7 @@ class StubGenerator: public StubCodeGenerator { // Setup a new c frame, copy java arguments, call template interpreter or // native_entry, and process result. - StubGenStubId stub_id = StubGenStubId::call_stub_id; + StubId stub_id = StubId::stubgen_call_stub_id; StubCodeMark mark(this, stub_id); address start = __ function_entry(); @@ -365,7 +365,7 @@ class StubGenerator: public StubCodeGenerator { // within the VM. // address generate_catch_exception() { - StubGenStubId stub_id = StubGenStubId::catch_exception_id; + StubId stub_id = StubId::stubgen_catch_exception_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -421,7 +421,7 @@ class StubGenerator: public StubCodeGenerator { // (LR is unchanged and is live out). // address generate_forward_exception() { - StubGenStubId stub_id = StubGenStubId::forward_exception_id; + StubId stub_id = StubId::stubgen_forward_exception_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -706,32 +706,32 @@ class StubGenerator: public StubCodeGenerator { // value: R4_ARG2 // count: R5_ARG3 treated as signed // - address generate_fill(StubGenStubId stub_id) { + address generate_fill(StubId stub_id) { BasicType t; bool aligned; switch (stub_id) { - case jbyte_fill_id: + case StubId::stubgen_jbyte_fill_id: t = T_BYTE; aligned = false; break; - case jshort_fill_id: + case StubId::stubgen_jshort_fill_id: t = T_SHORT; aligned = false; break; - case jint_fill_id: + case StubId::stubgen_jint_fill_id: t = T_INT; aligned = false; break; - case arrayof_jbyte_fill_id: + case StubId::stubgen_arrayof_jbyte_fill_id: t = T_BYTE; aligned = true; break; - case arrayof_jshort_fill_id: + case StubId::stubgen_arrayof_jshort_fill_id: t = T_SHORT; aligned = true; break; - case arrayof_jint_fill_id: + case StubId::stubgen_arrayof_jint_fill_id: t = T_INT; aligned = true; break; @@ -984,13 +984,13 @@ class StubGenerator: public StubCodeGenerator { // to: R4_ARG2 // count: R5_ARG3 treated as signed // - address generate_disjoint_byte_copy(StubGenStubId stub_id) { + address generate_disjoint_byte_copy(StubId stub_id) { bool aligned; switch (stub_id) { - case jbyte_disjoint_arraycopy_id: + case StubId::stubgen_jbyte_disjoint_arraycopy_id: aligned = false; break; - case arrayof_jbyte_disjoint_arraycopy_id: + case StubId::stubgen_arrayof_jbyte_disjoint_arraycopy_id: aligned = true; break; default: @@ -1154,13 +1154,13 @@ class StubGenerator: public StubCodeGenerator { // to: R4_ARG2 // count: R5_ARG3 treated as signed // - address generate_conjoint_byte_copy(StubGenStubId stub_id) { + address generate_conjoint_byte_copy(StubId stub_id) { bool aligned; switch (stub_id) { - case jbyte_arraycopy_id: + case StubId::stubgen_jbyte_arraycopy_id: aligned = false; break; - case arrayof_jbyte_arraycopy_id: + case StubId::stubgen_arrayof_jbyte_arraycopy_id: aligned = true; break; default: @@ -1255,13 +1255,13 @@ class StubGenerator: public StubCodeGenerator { // // 1. 
check if aligning the backbranch target of loops is beneficial // - address generate_disjoint_short_copy(StubGenStubId stub_id) { + address generate_disjoint_short_copy(StubId stub_id) { bool aligned; switch (stub_id) { - case jshort_disjoint_arraycopy_id: + case StubId::stubgen_jshort_disjoint_arraycopy_id: aligned = false; break; - case arrayof_jshort_disjoint_arraycopy_id: + case StubId::stubgen_arrayof_jshort_disjoint_arraycopy_id: aligned = true; break; default: @@ -1432,13 +1432,13 @@ class StubGenerator: public StubCodeGenerator { // to: R4_ARG2 // count: R5_ARG3 treated as signed // - address generate_conjoint_short_copy(StubGenStubId stub_id) { + address generate_conjoint_short_copy(StubId stub_id) { bool aligned; switch (stub_id) { - case jshort_arraycopy_id: + case StubId::stubgen_jshort_arraycopy_id: aligned = false; break; - case arrayof_jshort_arraycopy_id: + case StubId::stubgen_arrayof_jshort_arraycopy_id: aligned = true; break; default: @@ -1595,13 +1595,13 @@ class StubGenerator: public StubCodeGenerator { // to: R4_ARG2 // count: R5_ARG3 treated as signed // - address generate_disjoint_int_copy(StubGenStubId stub_id) { + address generate_disjoint_int_copy(StubId stub_id) { bool aligned; switch (stub_id) { - case jint_disjoint_arraycopy_id: + case StubId::stubgen_jint_disjoint_arraycopy_id: aligned = false; break; - case arrayof_jint_disjoint_arraycopy_id: + case StubId::stubgen_arrayof_jint_disjoint_arraycopy_id: aligned = true; break; default: @@ -1736,13 +1736,13 @@ class StubGenerator: public StubCodeGenerator { // to: R4_ARG2 // count: R5_ARG3 treated as signed // - address generate_conjoint_int_copy(StubGenStubId stub_id) { + address generate_conjoint_int_copy(StubId stub_id) { bool aligned; switch (stub_id) { - case jint_arraycopy_id: + case StubId::stubgen_jint_arraycopy_id: aligned = false; break; - case arrayof_jint_arraycopy_id: + case StubId::stubgen_arrayof_jint_arraycopy_id: aligned = true; break; default: @@ -1860,13 +1860,13 @@ class StubGenerator: public StubCodeGenerator { // to: R4_ARG2 // count: R5_ARG3 treated as signed // - address generate_disjoint_long_copy(StubGenStubId stub_id) { + address generate_disjoint_long_copy(StubId stub_id) { bool aligned; switch (stub_id) { - case jlong_disjoint_arraycopy_id: + case StubId::stubgen_jlong_disjoint_arraycopy_id: aligned = false; break; - case arrayof_jlong_disjoint_arraycopy_id: + case StubId::stubgen_arrayof_jlong_disjoint_arraycopy_id: aligned = true; break; default: @@ -1980,13 +1980,13 @@ class StubGenerator: public StubCodeGenerator { // to: R4_ARG2 // count: R5_ARG3 treated as signed // - address generate_conjoint_long_copy(StubGenStubId stub_id) { + address generate_conjoint_long_copy(StubId stub_id) { bool aligned; switch (stub_id) { - case jlong_arraycopy_id: + case StubId::stubgen_jlong_arraycopy_id: aligned = false; break; - case arrayof_jlong_arraycopy_id: + case StubId::stubgen_arrayof_jlong_arraycopy_id: aligned = true; break; default: @@ -2021,23 +2021,23 @@ class StubGenerator: public StubCodeGenerator { // count: R5_ARG3 treated as signed // dest_uninitialized: G1 support // - address generate_conjoint_oop_copy(StubGenStubId stub_id) { + address generate_conjoint_oop_copy(StubId stub_id) { bool aligned; bool dest_uninitialized; switch (stub_id) { - case oop_arraycopy_id: + case StubId::stubgen_oop_arraycopy_id: aligned = false; dest_uninitialized = false; break; - case arrayof_oop_arraycopy_id: + case StubId::stubgen_arrayof_oop_arraycopy_id: aligned = true; dest_uninitialized = false; 
break; - case oop_arraycopy_uninit_id: + case StubId::stubgen_oop_arraycopy_uninit_id: aligned = false; dest_uninitialized = true; break; - case arrayof_oop_arraycopy_uninit_id: + case StubId::stubgen_arrayof_oop_arraycopy_uninit_id: aligned = true; dest_uninitialized = true; break; @@ -2092,23 +2092,23 @@ class StubGenerator: public StubCodeGenerator { // count: R5_ARG3 treated as signed // dest_uninitialized: G1 support // - address generate_disjoint_oop_copy(StubGenStubId stub_id) { + address generate_disjoint_oop_copy(StubId stub_id) { bool aligned; bool dest_uninitialized; switch (stub_id) { - case oop_disjoint_arraycopy_id: + case StubId::stubgen_oop_disjoint_arraycopy_id: aligned = false; dest_uninitialized = false; break; - case arrayof_oop_disjoint_arraycopy_id: + case StubId::stubgen_arrayof_oop_disjoint_arraycopy_id: aligned = true; dest_uninitialized = false; break; - case oop_disjoint_arraycopy_uninit_id: + case StubId::stubgen_oop_disjoint_arraycopy_uninit_id: aligned = false; dest_uninitialized = true; break; - case arrayof_oop_disjoint_arraycopy_uninit_id: + case StubId::stubgen_arrayof_oop_disjoint_arraycopy_uninit_id: aligned = true; dest_uninitialized = true; break; @@ -2184,7 +2184,7 @@ class StubGenerator: public StubCodeGenerator { // ckval: R7 (super_klass) // ret: R3 zero for success; (-1^K) where K is partial transfer count // - address generate_checkcast_copy(StubGenStubId stub_id) { + address generate_checkcast_copy(StubId stub_id) { const Register R3_from = R3_ARG1; // source array address const Register R4_to = R4_ARG2; // destination array address const Register R5_count = R5_ARG3; // elements count @@ -2200,10 +2200,10 @@ class StubGenerator: public StubCodeGenerator { bool dest_uninitialized; switch (stub_id) { - case checkcast_arraycopy_id: + case StubId::stubgen_checkcast_arraycopy_id: dest_uninitialized = false; break; - case checkcast_arraycopy_uninit_id: + case StubId::stubgen_checkcast_arraycopy_uninit_id: dest_uninitialized = true; break; default: @@ -2342,7 +2342,7 @@ class StubGenerator: public StubCodeGenerator { const Register R7_tmp = R7_ARG5; //__ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::unsafe_arraycopy_id; + StubId stub_id = StubId::stubgen_unsafe_arraycopy_id; StubCodeMark mark(this, stub_id); address start = __ function_entry(); @@ -2455,7 +2455,7 @@ class StubGenerator: public StubCodeGenerator { // address generate_unsafe_setmemory(address unsafe_byte_fill) { __ align(CodeEntryAlignment); - StubCodeMark mark(this, StubGenStubId::unsafe_setmemory_id); + StubCodeMark mark(this, StubId::stubgen_unsafe_setmemory_id); address start = __ function_entry(); // bump this on entry, not on exit: @@ -2548,7 +2548,7 @@ class StubGenerator: public StubCodeGenerator { const Register temp = R2; //__ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::generic_arraycopy_id; + StubId stub_id = StubId::stubgen_generic_arraycopy_id; StubCodeMark mark(this, stub_id); address start = __ function_entry(); @@ -2751,7 +2751,7 @@ class StubGenerator: public StubCodeGenerator { // R5_ARG3 - round key array address generate_aescrypt_encryptBlock() { assert(UseAES, "need AES instructions and misaligned SSE support"); - StubGenStubId stub_id = StubGenStubId::aescrypt_encryptBlock_id; + StubId stub_id = StubId::stubgen_aescrypt_encryptBlock_id; StubCodeMark mark(this, stub_id); address start = __ function_entry(); @@ -2959,7 +2959,7 @@ class StubGenerator: public StubCodeGenerator { // R5_ARG3 - K (key) in little endian int 
array address generate_aescrypt_decryptBlock() { assert(UseAES, "need AES instructions and misaligned SSE support"); - StubGenStubId stub_id = StubGenStubId::aescrypt_decryptBlock_id; + StubId stub_id = StubId::stubgen_aescrypt_decryptBlock_id; StubCodeMark mark(this, stub_id); address start = __ function_entry(); @@ -3197,14 +3197,14 @@ class StubGenerator: public StubCodeGenerator { return start; } - address generate_sha256_implCompress(StubGenStubId stub_id) { + address generate_sha256_implCompress(StubId stub_id) { assert(UseSHA, "need SHA instructions"); bool multi_block; switch (stub_id) { - case sha256_implCompress_id: + case StubId::stubgen_sha256_implCompress_id: multi_block = false; break; - case sha256_implCompressMB_id: + case StubId::stubgen_sha256_implCompressMB_id: multi_block = true; break; default: @@ -3219,14 +3219,14 @@ class StubGenerator: public StubCodeGenerator { return start; } - address generate_sha512_implCompress(StubGenStubId stub_id) { + address generate_sha512_implCompress(StubId stub_id) { assert(UseSHA, "need SHA instructions"); bool multi_block; switch (stub_id) { - case sha512_implCompress_id: + case StubId::stubgen_sha512_implCompress_id: multi_block = false; break; - case sha512_implCompressMB_id: + case StubId::stubgen_sha512_implCompressMB_id: multi_block = true; break; default: @@ -3243,7 +3243,7 @@ class StubGenerator: public StubCodeGenerator { address generate_data_cache_writeback() { const Register cacheline = R3_ARG1; - StubGenStubId stub_id = StubGenStubId::data_cache_writeback_id; + StubId stub_id = StubId::stubgen_data_cache_writeback_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -3257,7 +3257,7 @@ class StubGenerator: public StubCodeGenerator { const Register is_presync = R3_ARG1; Register temp = R4; Label SKIP; - StubGenStubId stub_id = StubGenStubId::data_cache_writeback_sync_id; + StubId stub_id = StubId::stubgen_data_cache_writeback_sync_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -3278,40 +3278,40 @@ class StubGenerator: public StubCodeGenerator { UnsafeMemoryAccess::set_common_exit_stub_pc(ucm_common_error_exit); // non-aligned disjoint versions - StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(StubGenStubId::jbyte_disjoint_arraycopy_id); - StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(StubGenStubId::jshort_disjoint_arraycopy_id); - StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy(StubGenStubId::jint_disjoint_arraycopy_id); - StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_copy(StubGenStubId::jlong_disjoint_arraycopy_id); - StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy(StubGenStubId::oop_disjoint_arraycopy_id); - StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(StubGenStubId::oop_disjoint_arraycopy_uninit_id); + StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(StubId::stubgen_jbyte_disjoint_arraycopy_id); + StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(StubId::stubgen_jshort_disjoint_arraycopy_id); + StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy(StubId::stubgen_jint_disjoint_arraycopy_id); + StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_copy(StubId::stubgen_jlong_disjoint_arraycopy_id); + StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy(StubId::stubgen_oop_disjoint_arraycopy_id); + StubRoutines::_oop_disjoint_arraycopy_uninit = 
generate_disjoint_oop_copy(StubId::stubgen_oop_disjoint_arraycopy_uninit_id); // aligned disjoint versions - StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(StubGenStubId::arrayof_jbyte_disjoint_arraycopy_id); - StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(StubGenStubId::arrayof_jshort_disjoint_arraycopy_id); - StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy(StubGenStubId::arrayof_jint_disjoint_arraycopy_id); - StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy(StubGenStubId::arrayof_jlong_disjoint_arraycopy_id); - StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_disjoint_oop_copy(StubGenStubId::arrayof_oop_disjoint_arraycopy_id); - StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(StubGenStubId::oop_disjoint_arraycopy_uninit_id); + StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(StubId::stubgen_arrayof_jbyte_disjoint_arraycopy_id); + StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(StubId::stubgen_arrayof_jshort_disjoint_arraycopy_id); + StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy(StubId::stubgen_arrayof_jint_disjoint_arraycopy_id); + StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy(StubId::stubgen_arrayof_jlong_disjoint_arraycopy_id); + StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_disjoint_oop_copy(StubId::stubgen_arrayof_oop_disjoint_arraycopy_id); + StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(StubId::stubgen_oop_disjoint_arraycopy_uninit_id); // non-aligned conjoint versions - StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(StubGenStubId::jbyte_arraycopy_id); - StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(StubGenStubId::jshort_arraycopy_id); - StubRoutines::_jint_arraycopy = generate_conjoint_int_copy(StubGenStubId::jint_arraycopy_id); - StubRoutines::_jlong_arraycopy = generate_conjoint_long_copy(StubGenStubId::jlong_arraycopy_id); - StubRoutines::_oop_arraycopy = generate_conjoint_oop_copy(StubGenStubId::oop_arraycopy_id); - StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy(StubGenStubId::oop_arraycopy_uninit_id); + StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(StubId::stubgen_jbyte_arraycopy_id); + StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(StubId::stubgen_jshort_arraycopy_id); + StubRoutines::_jint_arraycopy = generate_conjoint_int_copy(StubId::stubgen_jint_arraycopy_id); + StubRoutines::_jlong_arraycopy = generate_conjoint_long_copy(StubId::stubgen_jlong_arraycopy_id); + StubRoutines::_oop_arraycopy = generate_conjoint_oop_copy(StubId::stubgen_oop_arraycopy_id); + StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy(StubId::stubgen_oop_arraycopy_uninit_id); // aligned conjoint versions - StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy(StubGenStubId::arrayof_jbyte_arraycopy_id); - StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(StubGenStubId::arrayof_jshort_arraycopy_id); - StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_int_copy(StubGenStubId::arrayof_jint_arraycopy_id); - StubRoutines::_arrayof_jlong_arraycopy = generate_conjoint_long_copy(StubGenStubId::arrayof_jlong_arraycopy_id); - StubRoutines::_arrayof_oop_arraycopy = generate_conjoint_oop_copy(StubGenStubId::arrayof_oop_arraycopy_id); - 
StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy(StubGenStubId::arrayof_oop_arraycopy_id); + StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy(StubId::stubgen_arrayof_jbyte_arraycopy_id); + StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(StubId::stubgen_arrayof_jshort_arraycopy_id); + StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_int_copy(StubId::stubgen_arrayof_jint_arraycopy_id); + StubRoutines::_arrayof_jlong_arraycopy = generate_conjoint_long_copy(StubId::stubgen_arrayof_jlong_arraycopy_id); + StubRoutines::_arrayof_oop_arraycopy = generate_conjoint_oop_copy(StubId::stubgen_arrayof_oop_arraycopy_id); + StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy(StubId::stubgen_arrayof_oop_arraycopy_id); // special/generic versions - StubRoutines::_checkcast_arraycopy = generate_checkcast_copy(StubGenStubId::checkcast_arraycopy_id); - StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy(StubGenStubId::checkcast_arraycopy_uninit_id); + StubRoutines::_checkcast_arraycopy = generate_checkcast_copy(StubId::stubgen_checkcast_arraycopy_id); + StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy(StubId::stubgen_checkcast_arraycopy_uninit_id); StubRoutines::_unsafe_arraycopy = generate_unsafe_copy(STUB_ENTRY(jbyte_arraycopy()), STUB_ENTRY(jshort_arraycopy()), @@ -3328,12 +3328,12 @@ class StubGenerator: public StubCodeGenerator { // fill routines #ifdef COMPILER2 if (OptimizeFill) { - StubRoutines::_jbyte_fill = generate_fill(StubGenStubId::jbyte_fill_id); - StubRoutines::_jshort_fill = generate_fill(StubGenStubId::jshort_fill_id); - StubRoutines::_jint_fill = generate_fill(StubGenStubId::jint_fill_id); - StubRoutines::_arrayof_jbyte_fill = generate_fill(StubGenStubId::arrayof_jbyte_fill_id); - StubRoutines::_arrayof_jshort_fill = generate_fill(StubGenStubId::arrayof_jshort_fill_id); - StubRoutines::_arrayof_jint_fill = generate_fill(StubGenStubId::arrayof_jint_fill_id); + StubRoutines::_jbyte_fill = generate_fill(StubId::stubgen_jbyte_fill_id); + StubRoutines::_jshort_fill = generate_fill(StubId::stubgen_jshort_fill_id); + StubRoutines::_jint_fill = generate_fill(StubId::stubgen_jint_fill_id); + StubRoutines::_arrayof_jbyte_fill = generate_fill(StubId::stubgen_arrayof_jbyte_fill_id); + StubRoutines::_arrayof_jshort_fill = generate_fill(StubId::stubgen_arrayof_jshort_fill_id); + StubRoutines::_arrayof_jint_fill = generate_fill(StubId::stubgen_arrayof_jint_fill_id); } StubRoutines::_unsafe_setmemory = generate_unsafe_setmemory(StubRoutines::_jbyte_fill); #endif @@ -3352,7 +3352,7 @@ class StubGenerator: public StubCodeGenerator { // address generate_multiplyToLen() { - StubGenStubId stub_id = StubGenStubId::multiplyToLen_id; + StubId stub_id = StubId::stubgen_multiplyToLen_id; StubCodeMark mark(this, stub_id); address start = __ function_entry(); @@ -3429,7 +3429,7 @@ class StubGenerator: public StubCodeGenerator { */ address generate_mulAdd() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::mulAdd_id; + StubId stub_id = StubId::stubgen_mulAdd_id; StubCodeMark mark(this, stub_id); address start = __ function_entry(); @@ -3460,7 +3460,7 @@ class StubGenerator: public StubCodeGenerator { */ address generate_squareToLen() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::squareToLen_id; + StubId stub_id = StubId::stubgen_squareToLen_id; StubCodeMark mark(this, stub_id); address start = __ function_entry(); @@ -3694,13 +3694,13 @@ 
class StubGenerator: public StubCodeGenerator { * R3_RET - int crc result */ // Compute CRC32 function. - address generate_CRC32_updateBytes(StubGenStubId stub_id) { + address generate_CRC32_updateBytes(StubId stub_id) { bool is_crc32c; switch (stub_id) { - case updateBytesCRC32_id: + case StubId::stubgen_updateBytesCRC32_id: is_crc32c = false; break; - case updateBytesCRC32C_id: + case StubId::stubgen_updateBytesCRC32C_id: is_crc32c = true; break; default: @@ -3734,7 +3734,7 @@ class StubGenerator: public StubCodeGenerator { address generate_method_entry_barrier() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::method_entry_barrier_id; + StubId stub_id = StubId::stubgen_method_entry_barrier_id; StubCodeMark mark(this, stub_id); address stub_address = __ pc(); @@ -3832,7 +3832,7 @@ class StubGenerator: public StubCodeGenerator { // Base64 decodeBlock intrinsic address generate_base64_decodeBlock() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::base64_decodeBlock_id; + StubId stub_id = StubId::stubgen_base64_decodeBlock_id; StubCodeMark mark(this, stub_id); address start = __ function_entry(); @@ -4421,7 +4421,7 @@ class StubGenerator: public StubCodeGenerator { address generate_base64_encodeBlock() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::base64_encodeBlock_id; + StubId stub_id = StubId::stubgen_base64_encodeBlock_id; StubCodeMark mark(this, stub_id); address start = __ function_entry(); @@ -4712,7 +4712,7 @@ class StubGenerator: public StubCodeGenerator { #endif // VM_LITTLE_ENDIAN void generate_lookup_secondary_supers_table_stub() { - StubGenStubId stub_id = StubGenStubId::lookup_secondary_supers_table_id; + StubId stub_id = StubId::stubgen_lookup_secondary_supers_table_id; StubCodeMark mark(this, stub_id); const Register @@ -4735,7 +4735,7 @@ void generate_lookup_secondary_supers_table_stub() { // Slow path implementation for UseSecondarySupersTable. 
address generate_lookup_secondary_supers_table_slow_path_stub() { - StubGenStubId stub_id = StubGenStubId::lookup_secondary_supers_table_slow_path_id; + StubId stub_id = StubId::stubgen_lookup_secondary_supers_table_slow_path_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -4753,7 +4753,7 @@ void generate_lookup_secondary_supers_table_stub() { return start; } - address generate_cont_thaw(StubGenStubId stub_id) { + address generate_cont_thaw(StubId stub_id) { if (!Continuations::enabled()) return nullptr; Continuation::thaw_kind kind; @@ -4761,17 +4761,17 @@ void generate_lookup_secondary_supers_table_stub() { bool return_barrier_exception; switch (stub_id) { - case cont_thaw_id: + case StubId::stubgen_cont_thaw_id: kind = Continuation::thaw_top; return_barrier = false; return_barrier_exception = false; break; - case cont_returnBarrier_id: + case StubId::stubgen_cont_returnBarrier_id: kind = Continuation::thaw_return_barrier; return_barrier = true; return_barrier_exception = false; break; - case cont_returnBarrierExc_id: + case StubId::stubgen_cont_returnBarrierExc_id: kind = Continuation::thaw_return_barrier_exception; return_barrier = true; return_barrier_exception = true; @@ -4868,22 +4868,22 @@ void generate_lookup_secondary_supers_table_stub() { } address generate_cont_thaw() { - return generate_cont_thaw(StubGenStubId::cont_thaw_id); + return generate_cont_thaw(StubId::stubgen_cont_thaw_id); } // TODO: will probably need multiple return barriers depending on return type address generate_cont_returnBarrier() { - return generate_cont_thaw(StubGenStubId::cont_returnBarrier_id); + return generate_cont_thaw(StubId::stubgen_cont_returnBarrier_id); } address generate_cont_returnBarrier_exception() { - return generate_cont_thaw(StubGenStubId::cont_returnBarrierExc_id); + return generate_cont_thaw(StubId::stubgen_cont_returnBarrierExc_id); } address generate_cont_preempt_stub() { if (!Continuations::enabled()) return nullptr; - StubGenStubId stub_id = StubGenStubId::cont_preempt_id; + StubId stub_id = StubId::stubgen_cont_preempt_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -4919,7 +4919,7 @@ void generate_lookup_secondary_supers_table_stub() { // exception handler for upcall stubs address generate_upcall_stub_exception_handler() { - StubGenStubId stub_id = StubGenStubId::upcall_stub_exception_handler_id; + StubId stub_id = StubId::stubgen_upcall_stub_exception_handler_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -4938,7 +4938,7 @@ void generate_lookup_secondary_supers_table_stub() { // R19_method = result Method* address generate_upcall_stub_load_target() { - StubGenStubId stub_id = StubGenStubId::upcall_stub_load_target_id; + StubId stub_id = StubId::stubgen_upcall_stub_load_target_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -4983,13 +4983,13 @@ void generate_lookup_secondary_supers_table_stub() { // CRC32 Intrinsics. if (UseCRC32Intrinsics) { StubRoutines::_crc_table_adr = StubRoutines::ppc::generate_crc_constants(REVERSE_CRC32_POLY); - StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes(StubGenStubId::updateBytesCRC32_id); + StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes(StubId::stubgen_updateBytesCRC32_id); } // CRC32C Intrinsics. 
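// A sketch, assuming only what the generate_CRC32_updateBytes hunk above
// shows: generators that emit several variants take the global id and decode
// the variant locally, failing fast on ids outside their family. The fully
// qualified case labels are forced by the rename - assuming a scoped enum,
// the old bare spelling `case updateBytesCRC32_id:` no longer resolves. The
// helper name below is illustrative, not HotSpot API:
static bool is_crc32c_variant(StubId stub_id) {
  switch (stub_id) {
  case StubId::stubgen_updateBytesCRC32_id:  return false; // plain CRC32
  case StubId::stubgen_updateBytesCRC32C_id: return true;  // Castagnoli CRC32C
  default: ShouldNotReachHere();             return false;
  }
}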
if (UseCRC32CIntrinsics) { StubRoutines::_crc32c_table_addr = StubRoutines::ppc::generate_crc_constants(REVERSE_CRC32C_POLY); - StubRoutines::_updateBytesCRC32C = generate_CRC32_updateBytes(StubGenStubId::updateBytesCRC32C_id); + StubRoutines::_updateBytesCRC32C = generate_CRC32_updateBytes(StubId::stubgen_updateBytesCRC32C_id); } if (VM_Version::supports_float16()) { @@ -5071,12 +5071,12 @@ void generate_lookup_secondary_supers_table_stub() { } if (UseSHA256Intrinsics) { - StubRoutines::_sha256_implCompress = generate_sha256_implCompress(StubGenStubId::sha256_implCompress_id); - StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(StubGenStubId::sha256_implCompressMB_id); + StubRoutines::_sha256_implCompress = generate_sha256_implCompress(StubId::stubgen_sha256_implCompress_id); + StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(StubId::stubgen_sha256_implCompressMB_id); } if (UseSHA512Intrinsics) { - StubRoutines::_sha512_implCompress = generate_sha512_implCompress(StubGenStubId::sha512_implCompress_id); - StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(StubGenStubId::sha512_implCompressMB_id); + StubRoutines::_sha512_implCompress = generate_sha512_implCompress(StubId::stubgen_sha512_implCompress_id); + StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(StubId::stubgen_sha512_implCompressMB_id); } #ifdef VM_LITTLE_ENDIAN @@ -5090,31 +5090,31 @@ void generate_lookup_secondary_supers_table_stub() { } public: - StubGenerator(CodeBuffer* code, StubGenBlobId blob_id) : StubCodeGenerator(code, blob_id) { + StubGenerator(CodeBuffer* code, BlobId blob_id) : StubCodeGenerator(code, blob_id) { switch(blob_id) { - case preuniverse_id: + case BlobId::stubgen_preuniverse_id: generate_preuniverse_stubs(); break; - case initial_id: + case BlobId::stubgen_initial_id: generate_initial_stubs(); break; - case continuation_id: + case BlobId::stubgen_continuation_id: generate_continuation_stubs(); break; - case compiler_id: + case BlobId::stubgen_compiler_id: generate_compiler_stubs(); break; - case final_id: + case BlobId::stubgen_final_id: generate_final_stubs(); break; default: - fatal("unexpected blob id: %d", blob_id); + fatal("unexpected blob id: %s", StubInfo::name(blob_id)); break; }; } }; -void StubGenerator_generate(CodeBuffer* code, StubGenBlobId blob_id) { +void StubGenerator_generate(CodeBuffer* code, BlobId blob_id) { StubGenerator g(code, blob_id); } diff --git a/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp b/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp index ea299181ca7..0c16e632e5a 100644 --- a/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp +++ b/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp @@ -57,7 +57,7 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) { __ mov_metadata(t0, m); ce->store_parameter(t0, 1); ce->store_parameter(_bci, 0); - __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::counter_overflow_id))); + __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_counter_overflow_id))); ce->add_call_info_here(_info); ce->verify_oop_map(_info); __ j(_continuation); @@ -66,7 +66,7 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) { void RangeCheckStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); if (_info->deoptimize_on_exception()) { - address a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id); + address a = Runtime1::entry_for(StubId::c1_predicate_failed_trap_id); __ far_call(RuntimeAddress(a)); ce->add_call_info_here(_info); ce->verify_oop_map(_info); @@ -79,13 +79,13 @@ void 
RangeCheckStub::emit_code(LIR_Assembler* ce) { } else { __ mv(t0, _index->as_jint()); } - C1StubId stub_id; + StubId stub_id; if (_throw_index_out_of_bounds_exception) { - stub_id = C1StubId::throw_index_exception_id; + stub_id = StubId::c1_throw_index_exception_id; } else { assert(_array != LIR_Opr::nullOpr(), "sanity"); __ mv(t1, _array->as_pointer_register()); - stub_id = C1StubId::throw_range_check_failed_id; + stub_id = StubId::c1_throw_range_check_failed_id; } // t0 and t1 are used as args in generate_exception_throw, // so use x1/ra as the tmp register for rt_call. @@ -101,7 +101,7 @@ PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) { void PredicateFailedStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); - address a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id); + address a = Runtime1::entry_for(StubId::c1_predicate_failed_trap_id); __ far_call(RuntimeAddress(a)); ce->add_call_info_here(_info); ce->verify_oop_map(_info); @@ -113,7 +113,7 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) { ce->compilation()->implicit_exception_table()->append(_offset, __ offset()); } __ bind(_entry); - __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::throw_div0_exception_id))); + __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_throw_div0_exception_id))); ce->add_call_info_here(_info); ce->verify_oop_map(_info); #ifdef ASSERT @@ -122,14 +122,14 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) { } // Implementation of NewInstanceStub -NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, C1StubId stub_id) { +NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, StubId stub_id) { _result = result; _klass = klass; _klass_reg = klass_reg; _info = new CodeEmitInfo(info); - assert(stub_id == C1StubId::new_instance_id || - stub_id == C1StubId::fast_new_instance_id || - stub_id == C1StubId::fast_new_instance_init_check_id, + assert(stub_id == StubId::c1_new_instance_id || + stub_id == StubId::c1_fast_new_instance_id || + stub_id == StubId::c1_fast_new_instance_init_check_id, "need new_instance id"); _stub_id = stub_id; } @@ -158,7 +158,7 @@ void NewTypeArrayStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); assert(_length->as_register() == x9, "length must in x9"); assert(_klass_reg->as_register() == x13, "klass_reg must in x13"); - __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::new_type_array_id))); + __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_new_type_array_id))); ce->add_call_info_here(_info); ce->verify_oop_map(_info); assert(_result->as_register() == x10, "result must in x10"); @@ -178,7 +178,7 @@ void NewObjectArrayStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); assert(_length->as_register() == x9, "length must in x9"); assert(_klass_reg->as_register() == x13, "klass_reg must in x13"); - __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::new_object_array_id))); + __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_new_object_array_id))); ce->add_call_info_here(_info); ce->verify_oop_map(_info); assert(_result->as_register() == x10, "result must in x10"); @@ -190,11 +190,11 @@ void MonitorEnterStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); ce->store_parameter(_obj_reg->as_register(), 1); ce->store_parameter(_lock_reg->as_register(), 0); - C1StubId enter_id; + StubId enter_id; if (ce->compilation()->has_fpu_code()) { - enter_id = C1StubId::monitorenter_id; + enter_id = 
StubId::c1_monitorenter_id; } else { - enter_id = C1StubId::monitorenter_nofpu_id; + enter_id = StubId::c1_monitorenter_nofpu_id; } __ far_call(RuntimeAddress(Runtime1::entry_for(enter_id))); ce->add_call_info_here(_info); @@ -210,11 +210,11 @@ void MonitorExitStub::emit_code(LIR_Assembler* ce) { } ce->store_parameter(_lock_reg->as_register(), 0); // note: non-blocking leaf routine => no call info needed - C1StubId exit_id; + StubId exit_id; if (ce->compilation()->has_fpu_code()) { - exit_id = C1StubId::monitorexit_id; + exit_id = StubId::c1_monitorexit_id; } else { - exit_id = C1StubId::monitorexit_nofpu_id; + exit_id = StubId::c1_monitorexit_nofpu_id; } __ la(ra, _continuation); __ far_jump(RuntimeAddress(Runtime1::entry_for(exit_id))); @@ -239,7 +239,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) { void DeoptimizeStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); ce->store_parameter(_trap_request, 0); - __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::deoptimize_id))); + __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_deoptimize_id))); ce->add_call_info_here(_info); DEBUG_ONLY(__ should_not_reach_here()); } @@ -248,9 +248,9 @@ void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) { address a = nullptr; if (_info->deoptimize_on_exception()) { // Deoptimize, do not throw the exception, because it is probably wrong to do it here. - a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id); + a = Runtime1::entry_for(StubId::c1_predicate_failed_trap_id); } else { - a = Runtime1::entry_for(C1StubId::throw_null_pointer_exception_id); + a = Runtime1::entry_for(StubId::c1_throw_null_pointer_exception_id); } ce->compilation()->implicit_exception_table()->append(_offset, __ offset()); diff --git a/src/hotspot/cpu/riscv/c1_LIRAssembler_arraycopy_riscv.cpp b/src/hotspot/cpu/riscv/c1_LIRAssembler_arraycopy_riscv.cpp index 5aa213fba7a..819d6c05654 100644 --- a/src/hotspot/cpu/riscv/c1_LIRAssembler_arraycopy_riscv.cpp +++ b/src/hotspot/cpu/riscv/c1_LIRAssembler_arraycopy_riscv.cpp @@ -225,7 +225,7 @@ void LIR_Assembler::arraycopy_type_check(Register src, Register src_pos, Registe __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, nullptr); PUSH(src, dst); - __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id))); + __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id))); POP(src, dst); __ bnez(dst, cont); diff --git a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp index c7587765da0..81b829bde9a 100644 --- a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp @@ -304,7 +304,7 @@ int LIR_Assembler::emit_exception_handler() { __ verify_not_null_oop(x10); // search an exception handler (x10: exception oop, x13: throwing pc) - __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::handle_exception_from_callee_id))); + __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_handle_exception_from_callee_id))); __ should_not_reach_here(); guarantee(code_offset() - offset <= exception_handler_size(), "overflow"); __ end_a_stub(); @@ -360,7 +360,7 @@ int LIR_Assembler::emit_unwind_handler() { // remove the activation and dispatch to the unwind handler __ block_comment("remove_frame and dispatch to the unwind handler"); __ remove_frame(initial_frame_size_in_bytes()); - __ far_jump(RuntimeAddress(Runtime1::entry_for(C1StubId::unwind_exception_id))); + __ 
far_jump(RuntimeAddress(Runtime1::entry_for(StubId::c1_unwind_exception_id))); // Emit the slow path assembly if (stub != nullptr) { @@ -1096,7 +1096,7 @@ void LIR_Assembler::typecheck_helper_slowcheck(ciKlass *k, Register obj, Registe __ subi(sp, sp, 2 * wordSize); // 2: store k_RInfo and klass_RInfo __ sd(k_RInfo, Address(sp, 0)); // sub klass __ sd(klass_RInfo, Address(sp, wordSize)); // super klass - __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id))); + __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id))); // load result to k_RInfo __ ld(k_RInfo, Address(sp, 0)); __ addi(sp, sp, 2 * wordSize); // 2: pop out k_RInfo and klass_RInfo @@ -1111,7 +1111,7 @@ void LIR_Assembler::typecheck_helper_slowcheck(ciKlass *k, Register obj, Registe __ subi(sp, sp, 2 * wordSize); // 2: store k_RInfo and klass_RInfo __ sd(klass_RInfo, Address(sp, wordSize)); // sub klass __ sd(k_RInfo, Address(sp, 0)); // super klass - __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id))); + __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id))); // load result to k_RInfo __ ld(k_RInfo, Address(sp, 0)); __ addi(sp, sp, 2 * wordSize); // 2: pop out k_RInfo and klass_RInfo @@ -1399,7 +1399,7 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit // exception object is not added to oop map by LinearScan // (LinearScan assumes that no oops are in fixed registers) info->add_register_oop(exceptionOop); - C1StubId unwind_id; + StubId unwind_id; // get current pc information // pc is only needed if the method has an exception handler, the unwind code does not need it. @@ -1418,9 +1418,9 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit __ verify_not_null_oop(x10); // search an exception handler (x10: exception oop, x13: throwing pc) if (compilation()->has_fpu_code()) { - unwind_id = C1StubId::handle_exception_id; + unwind_id = StubId::c1_handle_exception_id; } else { - unwind_id = C1StubId::handle_exception_nofpu_id; + unwind_id = StubId::c1_handle_exception_nofpu_id; } __ far_call(RuntimeAddress(Runtime1::entry_for(unwind_id))); __ nop(); @@ -2053,16 +2053,16 @@ void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) { switch (patching_id(info)) { case PatchingStub::access_field_id: - target = Runtime1::entry_for(C1StubId::access_field_patching_id); + target = Runtime1::entry_for(StubId::c1_access_field_patching_id); break; case PatchingStub::load_klass_id: - target = Runtime1::entry_for(C1StubId::load_klass_patching_id); + target = Runtime1::entry_for(StubId::c1_load_klass_patching_id); break; case PatchingStub::load_mirror_id: - target = Runtime1::entry_for(C1StubId::load_mirror_patching_id); + target = Runtime1::entry_for(StubId::c1_load_mirror_patching_id); break; case PatchingStub::load_appendix_id: - target = Runtime1::entry_for(C1StubId::load_appendix_patching_id); + target = Runtime1::entry_for(StubId::c1_load_appendix_patching_id); break; default: ShouldNotReachHere(); } @@ -2151,7 +2151,7 @@ void LIR_Assembler::lir_store_slowcheck(Register k_RInfo, Register klass_RInfo, __ subi(sp, sp, 2 * wordSize); // 2: store k_RInfo and klass_RInfo __ sd(klass_RInfo, Address(sp, wordSize)); // sub klass __ sd(k_RInfo, Address(sp, 0)); // super klass - __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id))); + __ far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id))); // load result to k_RInfo __ 
ld(k_RInfo, Address(sp, 0)); __ addi(sp, sp, 2 * wordSize); // 2: pop out k_RInfo and klass_RInfo diff --git a/src/hotspot/cpu/riscv/c1_LIRGenerator_riscv.cpp b/src/hotspot/cpu/riscv/c1_LIRGenerator_riscv.cpp index b9ebd49779e..4c3a6653731 100644 --- a/src/hotspot/cpu/riscv/c1_LIRGenerator_riscv.cpp +++ b/src/hotspot/cpu/riscv/c1_LIRGenerator_riscv.cpp @@ -1029,7 +1029,7 @@ void LIRGenerator::do_NewMultiArray(NewMultiArray* x) { args->append(rank); args->append(varargs); LIR_Opr reg = result_register_for(x->type()); - __ call_runtime(Runtime1::entry_for(C1StubId::new_multi_array_id), + __ call_runtime(Runtime1::entry_for(StubId::c1_new_multi_array_id), LIR_OprFact::illegalOpr, reg, args, info); @@ -1061,7 +1061,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) { CodeStub* stub = nullptr; if (x->is_incompatible_class_change_check()) { assert(patching_info == nullptr, "can't patch this"); - stub = new SimpleExceptionStub(C1StubId::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, + stub = new SimpleExceptionStub(StubId::c1_throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception); } else if (x->is_invokespecial_receiver_check()) { assert(patching_info == nullptr, "can't patch this"); @@ -1069,7 +1069,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) { Deoptimization::Reason_class_check, Deoptimization::Action_none); } else { - stub = new SimpleExceptionStub(C1StubId::throw_class_cast_exception_id, obj.result(), info_for_exception); + stub = new SimpleExceptionStub(StubId::c1_throw_class_cast_exception_id, obj.result(), info_for_exception); } LIR_Opr reg = rlock_result(x); LIR_Opr tmp3 = LIR_OprFact::illegalOpr; @@ -1104,7 +1104,7 @@ void LIRGenerator::do_InstanceOf(InstanceOf* x) { // Intrinsic for Class::isInstance address LIRGenerator::isInstance_entry() { - return Runtime1::entry_for(C1StubId::is_instance_of_id); + return Runtime1::entry_for(StubId::c1_is_instance_of_id); } void LIRGenerator::do_If(If* x) { diff --git a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp index 32b99f56909..3ce764b1861 100644 --- a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp @@ -281,7 +281,7 @@ void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register if (CURRENT_ENV->dtrace_alloc_probes()) { assert(obj == x10, "must be"); - far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::dtrace_object_alloc_id))); + far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_dtrace_object_alloc_id))); } verify_oop(obj); @@ -321,7 +321,7 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register tmp1 if (CURRENT_ENV->dtrace_alloc_probes()) { assert(obj == x10, "must be"); - far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::dtrace_object_alloc_id))); + far_call(RuntimeAddress(Runtime1::entry_for(StubId::c1_dtrace_object_alloc_id))); } verify_oop(obj); diff --git a/src/hotspot/cpu/riscv/c1_Runtime1_riscv.cpp b/src/hotspot/cpu/riscv/c1_Runtime1_riscv.cpp index 849417725a7..a06584e9411 100644 --- a/src/hotspot/cpu/riscv/c1_Runtime1_riscv.cpp +++ b/src/hotspot/cpu/riscv/c1_Runtime1_riscv.cpp @@ -97,10 +97,10 @@ int StubAssembler::call_RT(Register oop_result, Register metadata_result, addres if (frame_size() == no_frame_size) { leave(); far_jump(RuntimeAddress(StubRoutines::forward_exception_entry())); - } else if (_stub_id == (int)C1StubId::forward_exception_id) { + } else if (_stub_id == 
(int)StubId::c1_forward_exception_id) { should_not_reach_here(); } else { - far_jump(RuntimeAddress(Runtime1::entry_for(C1StubId::forward_exception_id))); + far_jump(RuntimeAddress(Runtime1::entry_for(StubId::c1_forward_exception_id))); } bind(L); } @@ -370,8 +370,8 @@ void Runtime1::initialize_pd() { // return: offset in 64-bit words. uint Runtime1::runtime_blob_current_thread_offset(frame f) { CodeBlob* cb = f.cb(); - assert(cb == Runtime1::blob_for(C1StubId::monitorenter_id) || - cb == Runtime1::blob_for(C1StubId::monitorenter_nofpu_id), "must be"); + assert(cb == Runtime1::blob_for(StubId::c1_monitorenter_id) || + cb == Runtime1::blob_for(StubId::c1_monitorenter_nofpu_id), "must be"); assert(cb != nullptr && cb->is_runtime_stub(), "invalid frame"); int offset = cpu_reg_save_offsets[xthread->encoding()]; return offset / 2; // SP offsets are in halfwords @@ -399,7 +399,7 @@ OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address targe return oop_maps; } -OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler *sasm) { +OopMapSet* Runtime1::generate_handle_exception(StubId id, StubAssembler *sasm) { __ block_comment("generate_handle_exception"); // incoming parameters @@ -411,7 +411,7 @@ OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler *sasm) OopMap* oop_map = nullptr; switch (id) { - case C1StubId::forward_exception_id: + case StubId::c1_forward_exception_id: // We're handling an exception in the context of a compiled frame. // The registers have been saved in the standard places. Perform // an exception lookup in the caller and dispatch to the handler @@ -430,12 +430,12 @@ OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler *sasm) __ sd(zr, Address(xthread, JavaThread::vm_result_oop_offset())); __ sd(zr, Address(xthread, JavaThread::vm_result_metadata_offset())); break; - case C1StubId::handle_exception_nofpu_id: - case C1StubId::handle_exception_id: + case StubId::c1_handle_exception_nofpu_id: + case StubId::c1_handle_exception_id: // At this point all registers MAY be live. - oop_map = save_live_registers(sasm, id != C1StubId::handle_exception_nofpu_id); + oop_map = save_live_registers(sasm, id != StubId::c1_handle_exception_nofpu_id); break; - case C1StubId::handle_exception_from_callee_id: { + case StubId::c1_handle_exception_from_callee_id: { // At this point all registers except exception oop (x10) and // exception pc (ra) are dead. const int frame_size = 2 /* fp, return address */; @@ -492,13 +492,13 @@ OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler *sasm) __ sd(x10, Address(fp, frame::return_addr_offset * BytesPerWord)); switch (id) { - case C1StubId::forward_exception_id: - case C1StubId::handle_exception_nofpu_id: - case C1StubId::handle_exception_id: + case StubId::c1_forward_exception_id: + case StubId::c1_handle_exception_nofpu_id: + case StubId::c1_handle_exception_id: // Restore the registers that were saved at the beginning. 
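// A sketch of the nofpu convention these C1 hunks rely on, assuming only what
// is shown here: each *_nofpu_id pairs with a plain id, and the pair differs
// only in whether FPU state is saved and restored around the runtime call;
// compilations without FPU code (ce->compilation()->has_fpu_code() == false)
// pick the cheaper variant. The helper is illustrative, not HotSpot API - the
// real code inlines the comparison, as in
// restore_live_registers(sasm, id != StubId::c1_handle_exception_nofpu_id):
static bool wants_fpu_state(StubId id) {
  return id != StubId::c1_handle_exception_nofpu_id &&
         id != StubId::c1_monitorenter_nofpu_id &&
         id != StubId::c1_monitorexit_nofpu_id;
}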
- restore_live_registers(sasm, id != C1StubId::handle_exception_nofpu_id); + restore_live_registers(sasm, id != StubId::c1_handle_exception_nofpu_id); break; - case C1StubId::handle_exception_from_callee_id: + case StubId::c1_handle_exception_from_callee_id: break; default: ShouldNotReachHere(); } @@ -644,7 +644,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) { return oop_maps; } -OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { +OopMapSet* Runtime1::generate_code_for(StubId id, StubAssembler* sasm) { // for better readability const bool dont_gc_arguments = false; @@ -655,7 +655,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { OopMapSet* oop_maps = nullptr; switch (id) { { - case C1StubId::forward_exception_id: + case StubId::c1_forward_exception_id: { oop_maps = generate_handle_exception(id, sasm); __ leave(); @@ -663,32 +663,32 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::throw_div0_exception_id: + case StubId::c1_throw_div0_exception_id: { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments, does_not_return); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false); } break; - case C1StubId::throw_null_pointer_exception_id: + case StubId::c1_throw_null_pointer_exception_id: { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments, does_not_return); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false); } break; - case C1StubId::new_instance_id: - case C1StubId::fast_new_instance_id: - case C1StubId::fast_new_instance_init_check_id: + case StubId::c1_new_instance_id: + case StubId::c1_fast_new_instance_id: + case StubId::c1_fast_new_instance_init_check_id: { Register klass = x13; // Incoming Register obj = x10; // Result - if (id == C1StubId::new_instance_id) { + if (id == StubId::c1_new_instance_id) { __ set_info("new_instance", dont_gc_arguments); - } else if (id == C1StubId::fast_new_instance_id) { + } else if (id == StubId::c1_fast_new_instance_id) { __ set_info("fast new_instance", dont_gc_arguments); } else { - assert(id == C1StubId::fast_new_instance_init_check_id, "bad C1StubId"); + assert(id == StubId::c1_fast_new_instance_init_check_id, "bad StubId"); __ set_info("fast new_instance init check", dont_gc_arguments); } @@ -709,7 +709,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { break; - case C1StubId::counter_overflow_id: + case StubId::c1_counter_overflow_id: { Register bci = x10; Register method = x11; @@ -733,14 +733,14 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::new_type_array_id: - case C1StubId::new_object_array_id: + case StubId::c1_new_type_array_id: + case StubId::c1_new_object_array_id: { Register length = x9; // Incoming Register klass = x13; // Incoming Register obj = x10; // Result - if (id == C1StubId::new_type_array_id) { + if (id == StubId::c1_new_type_array_id) { __ set_info("new_type_array", dont_gc_arguments); } else { __ set_info("new_object_array", dont_gc_arguments); @@ -753,7 +753,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { Register tmp = obj; __ lwu(tmp, Address(klass, Klass::layout_helper_offset())); __ sraiw(tmp, tmp, Klass::_lh_array_tag_shift); - int tag = ((id == C1StubId::new_type_array_id) ? 
Klass::_lh_array_tag_type_value : Klass::_lh_array_tag_obj_value); + int tag = ((id == StubId::c1_new_type_array_id) ? Klass::_lh_array_tag_type_value : Klass::_lh_array_tag_obj_value); __ mv(t0, tag); __ beq(t0, tmp, ok); __ stop("assert(is an array klass)"); @@ -766,7 +766,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { OopMap* map = save_live_registers(sasm); assert_cond(map != nullptr); int call_offset = 0; - if (id == C1StubId::new_type_array_id) { + if (id == StubId::c1_new_type_array_id) { call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length); } else { call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length); @@ -785,7 +785,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::new_multi_array_id: + case StubId::c1_new_multi_array_id: { StubFrame f(sasm, "new_multi_array", dont_gc_arguments); // x10: klass @@ -808,7 +808,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::register_finalizer_id: + case StubId::c1_register_finalizer_id: { __ set_info("register_finalizer", dont_gc_arguments); @@ -842,14 +842,14 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::throw_class_cast_exception_id: + case StubId::c1_throw_class_cast_exception_id: { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments, does_not_return); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true); } break; - case C1StubId::throw_incompatible_class_change_error_id: + case StubId::c1_throw_incompatible_class_change_error_id: { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments, does_not_return); oop_maps = generate_exception_throw(sasm, @@ -857,7 +857,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::slow_subtype_check_id: + case StubId::c1_slow_subtype_check_id: { // Typical calling sequence: // push klass_RInfo (object klass or other subclass) @@ -903,10 +903,10 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::monitorenter_nofpu_id: + case StubId::c1_monitorenter_nofpu_id: save_fpu_registers = false; // fall through - case C1StubId::monitorenter_id: + case StubId::c1_monitorenter_id: { StubFrame f(sasm, "monitorenter", dont_gc_arguments, requires_pop_epilogue_return); OopMap* map = save_live_registers(sasm, save_fpu_registers); @@ -925,7 +925,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::is_instance_of_id: + case StubId::c1_is_instance_of_id: { // Mirror: x10 // Object: x11 @@ -971,10 +971,10 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::monitorexit_nofpu_id: + case StubId::c1_monitorexit_nofpu_id: save_fpu_registers = false; // fall through - case C1StubId::monitorexit_id: + case StubId::c1_monitorexit_id: { StubFrame f(sasm, "monitorexit", dont_gc_arguments); OopMap* map = save_live_registers(sasm, save_fpu_registers); @@ -995,7 +995,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::deoptimize_id: + case StubId::c1_deoptimize_id: { StubFrame f(sasm, "deoptimize", dont_gc_arguments, does_not_return); OopMap* oop_map = save_live_registers(sasm); @@ -1014,14 +1014,14 @@ OopMapSet* 
Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::throw_range_check_failed_id: + case StubId::c1_throw_range_check_failed_id: { StubFrame f(sasm, "range_check_failed", dont_gc_arguments, does_not_return); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true); } break; - case C1StubId::unwind_exception_id: + case StubId::c1_unwind_exception_id: { __ set_info("unwind_exception", dont_gc_arguments); // note: no stubframe since we are about to leave the current @@ -1030,7 +1030,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::access_field_patching_id: + case StubId::c1_access_field_patching_id: { StubFrame f(sasm, "access_field_patching", dont_gc_arguments, does_not_return); // we should set up register map @@ -1038,7 +1038,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::load_klass_patching_id: + case StubId::c1_load_klass_patching_id: { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments, does_not_return); // we should set up register map @@ -1046,7 +1046,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::load_mirror_patching_id: + case StubId::c1_load_mirror_patching_id: { StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments, does_not_return); // we should set up register map @@ -1054,7 +1054,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::load_appendix_patching_id: + case StubId::c1_load_appendix_patching_id: { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments, does_not_return); // we should set up register map @@ -1062,29 +1062,29 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::handle_exception_nofpu_id: - case C1StubId::handle_exception_id: + case StubId::c1_handle_exception_nofpu_id: + case StubId::c1_handle_exception_id: { StubFrame f(sasm, "handle_exception", dont_gc_arguments); oop_maps = generate_handle_exception(id, sasm); } break; - case C1StubId::handle_exception_from_callee_id: + case StubId::c1_handle_exception_from_callee_id: { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments); oop_maps = generate_handle_exception(id, sasm); } break; - case C1StubId::throw_index_exception_id: + case StubId::c1_throw_index_exception_id: { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments, does_not_return); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true); } break; - case C1StubId::throw_array_store_exception_id: + case StubId::c1_throw_array_store_exception_id: { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments, does_not_return); // tos + 0: link @@ -1093,7 +1093,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::predicate_failed_trap_id: + case StubId::c1_predicate_failed_trap_id: { StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments, does_not_return); @@ -1113,7 +1113,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::dtrace_object_alloc_id: + case StubId::c1_dtrace_object_alloc_id: { // c_rarg0: object StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments); save_live_registers(sasm); diff --git a/src/hotspot/cpu/riscv/frame_riscv.cpp b/src/hotspot/cpu/riscv/frame_riscv.cpp index 
e77375434c2..b677c980c78 100644 --- a/src/hotspot/cpu/riscv/frame_riscv.cpp +++ b/src/hotspot/cpu/riscv/frame_riscv.cpp @@ -414,8 +414,8 @@ JavaThread** frame::saved_thread_address(const frame& f) { JavaThread** thread_addr; #ifdef COMPILER1 - if (cb == Runtime1::blob_for(C1StubId::monitorenter_id) || - cb == Runtime1::blob_for(C1StubId::monitorenter_nofpu_id)) { + if (cb == Runtime1::blob_for(StubId::c1_monitorenter_id) || + cb == Runtime1::blob_for(StubId::c1_monitorenter_nofpu_id)) { thread_addr = (JavaThread**)(f.sp() + Runtime1::runtime_blob_current_thread_offset(f)); } else #endif diff --git a/src/hotspot/cpu/riscv/runtime_riscv.cpp b/src/hotspot/cpu/riscv/runtime_riscv.cpp index 7c8ca853bc4..e1add8dbb82 100644 --- a/src/hotspot/cpu/riscv/runtime_riscv.cpp +++ b/src/hotspot/cpu/riscv/runtime_riscv.cpp @@ -61,7 +61,7 @@ UncommonTrapBlob* OptoRuntime::generate_uncommon_trap_blob() { // Allocate space for the code ResourceMark rm; // Setup code generation tools - const char* name = OptoRuntime::stub_name(OptoStubId::uncommon_trap_id); + const char* name = OptoRuntime::stub_name(StubId::c2_uncommon_trap_id); CodeBuffer buffer(name, 2048, 1024); if (buffer.blob() == nullptr) { return nullptr; @@ -283,7 +283,7 @@ ExceptionBlob* OptoRuntime::generate_exception_blob() { // Allocate space for the code ResourceMark rm; // Setup code generation tools - const char* name = OptoRuntime::stub_name(OptoStubId::exception_id); + const char* name = OptoRuntime::stub_name(StubId::c2_exception_id); CodeBuffer buffer(name, 2048, 1024); if (buffer.blob() == nullptr) { return nullptr; diff --git a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp index 216323c55fc..c4e5a871a88 100644 --- a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp +++ b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp @@ -2100,7 +2100,7 @@ void SharedRuntime::generate_deopt_blob() { pad += 512; // Increase the buffer size when compiling for JVMCI } #endif - const char* name = SharedRuntime::stub_name(SharedStubId::deopt_id); + const char* name = SharedRuntime::stub_name(StubId::shared_deopt_id); CodeBuffer buffer(name, 2048 + pad, 1024); MacroAssembler* masm = new MacroAssembler(&buffer); int frame_size_in_words = -1; @@ -2483,7 +2483,7 @@ VMReg SharedRuntime::thread_register() { // Generate a special Compile2Runtime blob that saves all registers, // and setup oopmap. // -SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address call_ptr) { +SafepointBlob* SharedRuntime::generate_handler_blob(StubId id, address call_ptr) { assert(is_polling_page_id(id), "expected a polling page stub id"); ResourceMark rm; @@ -2500,8 +2500,8 @@ SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address cal address start = __ pc(); address call_pc = nullptr; int frame_size_in_words = -1; - bool cause_return = (id == SharedStubId::polling_page_return_handler_id); - RegisterSaver reg_saver(id == SharedStubId::polling_page_vectors_safepoint_handler_id /* save_vectors */); + bool cause_return = (id == StubId::shared_polling_page_return_handler_id); + RegisterSaver reg_saver(id == StubId::shared_polling_page_vectors_safepoint_handler_id /* save_vectors */); // Save Integer and Float registers. map = reg_saver.save_live_registers(masm, 0, &frame_size_in_words); @@ -2608,7 +2608,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address cal // but since this is generic code we don't know what they are and the caller // must do any gc of the args. 
// -RuntimeStub* SharedRuntime::generate_resolve_blob(SharedStubId id, address destination) { +RuntimeStub* SharedRuntime::generate_resolve_blob(StubId id, address destination) { assert(StubRoutines::forward_exception_entry() != nullptr, "must be generated before"); assert(is_resolve_id(id), "expected a resolve stub id"); @@ -2705,7 +2705,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(SharedStubId id, address desti // otherwise assume that stack unwinding will be initiated, so // caller saved registers were assumed volatile in the compiler. -RuntimeStub* SharedRuntime::generate_throw_exception(SharedStubId id, address runtime_entry) { +RuntimeStub* SharedRuntime::generate_throw_exception(StubId id, address runtime_entry) { assert(is_throw_id(id), "expected a throw stub id"); const char* name = SharedRuntime::stub_name(id); @@ -2816,7 +2816,7 @@ RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() { int insts_size = 1024; int locs_size = 64; - const char* name = SharedRuntime::stub_name(SharedStubId::jfr_write_checkpoint_id); + const char* name = SharedRuntime::stub_name(StubId::shared_jfr_write_checkpoint_id); CodeBuffer code(name, insts_size, locs_size); OopMapSet* oop_maps = new OopMapSet(); MacroAssembler* masm = new MacroAssembler(&code); @@ -2855,7 +2855,7 @@ RuntimeStub* SharedRuntime::generate_jfr_return_lease() { int insts_size = 1024; int locs_size = 64; - const char* name = SharedRuntime::stub_name(SharedStubId::jfr_return_lease_id); + const char* name = SharedRuntime::stub_name(StubId::shared_jfr_return_lease_id); CodeBuffer code(name, insts_size, locs_size); OopMapSet* oop_maps = new OopMapSet(); MacroAssembler* masm = new MacroAssembler(&code); diff --git a/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp b/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp index f7d13578caf..a44fe39917c 100644 --- a/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp +++ b/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp @@ -207,7 +207,7 @@ class StubGenerator: public StubCodeGenerator { (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off, "adjust this code"); - StubGenStubId stub_id = StubGenStubId::call_stub_id; + StubId stub_id = StubId::stubgen_call_stub_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -476,7 +476,7 @@ class StubGenerator: public StubCodeGenerator { // x10: exception oop address generate_catch_exception() { - StubGenStubId stub_id = StubGenStubId::catch_exception_id; + StubId stub_id = StubId::stubgen_catch_exception_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -529,7 +529,7 @@ class StubGenerator: public StubCodeGenerator { // so it just needs to be generated code with no x86 prolog address generate_forward_exception() { - StubGenStubId stub_id = StubGenStubId::forward_exception_id; + StubId stub_id = StubId::stubgen_forward_exception_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -616,7 +616,7 @@ class StubGenerator: public StubCodeGenerator { // [tos + 5]: saved t0 address generate_verify_oop() { - StubGenStubId stub_id = StubGenStubId::verify_oop_id; + StubId stub_id = StubId::stubgen_verify_oop_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -678,7 +678,7 @@ class StubGenerator: public StubCodeGenerator { const Register base = x28, cnt = x29, tmp1 = x30, tmp2 = x31; __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::zero_blocks_id; + StubId stub_id = StubId::stubgen_zero_blocks_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -731,16 +731,16 
@@ class StubGenerator: public StubCodeGenerator { // // s and d are adjusted to point to the remaining words to copy // - void generate_copy_longs(StubGenStubId stub_id, Label &start, + void generate_copy_longs(StubId stub_id, Label &start, Register s, Register d, Register count) { BasicType type; copy_direction direction; switch (stub_id) { - case copy_byte_f_id: + case StubId::stubgen_copy_byte_f_id: direction = copy_forwards; type = T_BYTE; break; - case copy_byte_b_id: + case StubId::stubgen_copy_byte_b_id: direction = copy_backwards; type = T_BYTE; break; @@ -1114,78 +1114,78 @@ class StubGenerator: public StubCodeGenerator { // can be used by the corresponding conjoint copy // method // - address generate_disjoint_copy(StubGenStubId stub_id, address* entry) { + address generate_disjoint_copy(StubId stub_id, address* entry) { size_t size; bool aligned; bool is_oop; bool dest_uninitialized; switch (stub_id) { - case jbyte_disjoint_arraycopy_id: + case StubId::stubgen_jbyte_disjoint_arraycopy_id: size = sizeof(jbyte); aligned = false; is_oop = false; dest_uninitialized = false; break; - case arrayof_jbyte_disjoint_arraycopy_id: + case StubId::stubgen_arrayof_jbyte_disjoint_arraycopy_id: size = sizeof(jbyte); aligned = true; is_oop = false; dest_uninitialized = false; break; - case jshort_disjoint_arraycopy_id: + case StubId::stubgen_jshort_disjoint_arraycopy_id: size = sizeof(jshort); aligned = false; is_oop = false; dest_uninitialized = false; break; - case arrayof_jshort_disjoint_arraycopy_id: + case StubId::stubgen_arrayof_jshort_disjoint_arraycopy_id: size = sizeof(jshort); aligned = true; is_oop = false; dest_uninitialized = false; break; - case jint_disjoint_arraycopy_id: + case StubId::stubgen_jint_disjoint_arraycopy_id: size = sizeof(jint); aligned = false; is_oop = false; dest_uninitialized = false; break; - case arrayof_jint_disjoint_arraycopy_id: + case StubId::stubgen_arrayof_jint_disjoint_arraycopy_id: size = sizeof(jint); aligned = true; is_oop = false; dest_uninitialized = false; break; - case jlong_disjoint_arraycopy_id: + case StubId::stubgen_jlong_disjoint_arraycopy_id: // since this is always aligned we can (should!) use the same // stub as for case arrayof_jlong_disjoint_arraycopy ShouldNotReachHere(); break; - case arrayof_jlong_disjoint_arraycopy_id: + case StubId::stubgen_arrayof_jlong_disjoint_arraycopy_id: size = sizeof(jlong); aligned = true; is_oop = false; dest_uninitialized = false; break; - case oop_disjoint_arraycopy_id: + case StubId::stubgen_oop_disjoint_arraycopy_id: size = UseCompressedOops ? sizeof (jint) : sizeof (jlong); aligned = !UseCompressedOops; is_oop = true; dest_uninitialized = false; break; - case arrayof_oop_disjoint_arraycopy_id: + case StubId::stubgen_arrayof_oop_disjoint_arraycopy_id: size = UseCompressedOops ? sizeof (jint) : sizeof (jlong); aligned = !UseCompressedOops; is_oop = true; dest_uninitialized = false; break; - case oop_disjoint_arraycopy_uninit_id: + case StubId::stubgen_oop_disjoint_arraycopy_uninit_id: size = UseCompressedOops ? sizeof (jint) : sizeof (jlong); aligned = !UseCompressedOops; is_oop = true; dest_uninitialized = true; break; - case arrayof_oop_disjoint_arraycopy_uninit_id: + case StubId::stubgen_arrayof_oop_disjoint_arraycopy_uninit_id: size = UseCompressedOops ? 
sizeof (jint) : sizeof (jlong); aligned = !UseCompressedOops; is_oop = true; @@ -1271,7 +1271,7 @@ class StubGenerator: public StubCodeGenerator { // entry is set to the no-overlap entry point so it can be used by // some other conjoint copy method // - address generate_conjoint_copy(StubGenStubId stub_id, address nooverlap_target, address *entry) { + address generate_conjoint_copy(StubId stub_id, address nooverlap_target, address *entry) { const Register s = c_rarg0, d = c_rarg1, count = c_rarg2; RegSet saved_regs = RegSet::of(s, d, count); int size; @@ -1279,72 +1279,72 @@ class StubGenerator: public StubCodeGenerator { bool is_oop; bool dest_uninitialized; switch (stub_id) { - case jbyte_arraycopy_id: + case StubId::stubgen_jbyte_arraycopy_id: size = sizeof(jbyte); aligned = false; is_oop = false; dest_uninitialized = false; break; - case arrayof_jbyte_arraycopy_id: + case StubId::stubgen_arrayof_jbyte_arraycopy_id: size = sizeof(jbyte); aligned = true; is_oop = false; dest_uninitialized = false; break; - case jshort_arraycopy_id: + case StubId::stubgen_jshort_arraycopy_id: size = sizeof(jshort); aligned = false; is_oop = false; dest_uninitialized = false; break; - case arrayof_jshort_arraycopy_id: + case StubId::stubgen_arrayof_jshort_arraycopy_id: size = sizeof(jshort); aligned = true; is_oop = false; dest_uninitialized = false; break; - case jint_arraycopy_id: + case StubId::stubgen_jint_arraycopy_id: size = sizeof(jint); aligned = false; is_oop = false; dest_uninitialized = false; break; - case arrayof_jint_arraycopy_id: + case StubId::stubgen_arrayof_jint_arraycopy_id: size = sizeof(jint); aligned = true; is_oop = false; dest_uninitialized = false; break; - case jlong_arraycopy_id: + case StubId::stubgen_jlong_arraycopy_id: // since this is always aligned we can (should!) use the same // stub as for case arrayof_jlong_disjoint_arraycopy ShouldNotReachHere(); break; - case arrayof_jlong_arraycopy_id: + case StubId::stubgen_arrayof_jlong_arraycopy_id: size = sizeof(jlong); aligned = true; is_oop = false; dest_uninitialized = false; break; - case oop_arraycopy_id: + case StubId::stubgen_oop_arraycopy_id: size = UseCompressedOops ? sizeof (jint) : sizeof (jlong); aligned = !UseCompressedOops; is_oop = true; dest_uninitialized = false; break; - case arrayof_oop_arraycopy_id: + case StubId::stubgen_arrayof_oop_arraycopy_id: size = UseCompressedOops ? sizeof (jint) : sizeof (jlong); aligned = !UseCompressedOops; is_oop = true; dest_uninitialized = false; break; - case oop_arraycopy_uninit_id: + case StubId::stubgen_oop_arraycopy_uninit_id: size = UseCompressedOops ? sizeof (jint) : sizeof (jlong); aligned = !UseCompressedOops; is_oop = true; dest_uninitialized = true; break; - case arrayof_oop_arraycopy_uninit_id: + case StubId::stubgen_arrayof_oop_arraycopy_uninit_id: size = UseCompressedOops ? 
sizeof (jint) : sizeof (jlong); aligned = !UseCompressedOops; is_oop = true; @@ -1444,13 +1444,13 @@ class StubGenerator: public StubCodeGenerator { // x10 == 0 - success // x10 == -1^K - failure, where K is partial transfer count // - address generate_checkcast_copy(StubGenStubId stub_id, address* entry) { + address generate_checkcast_copy(StubId stub_id, address* entry) { bool dest_uninitialized; switch (stub_id) { - case checkcast_arraycopy_id: + case StubId::stubgen_checkcast_arraycopy_id: dest_uninitialized = false; break; - case checkcast_arraycopy_uninit_id: + case StubId::stubgen_checkcast_arraycopy_uninit_id: dest_uninitialized = true; break; default: @@ -1653,7 +1653,7 @@ class StubGenerator: public StubCodeGenerator { // address generate_unsafe_setmemory() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::unsafe_setmemory_id; + StubId stub_id = StubId::stubgen_unsafe_setmemory_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -1776,7 +1776,7 @@ class StubGenerator: public StubCodeGenerator { const Register s = c_rarg0, d = c_rarg1, count = c_rarg2; __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::unsafe_arraycopy_id; + StubId stub_id = StubId::stubgen_unsafe_arraycopy_id; StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); // required for proper stackwalking of RuntimeStub frame @@ -1843,7 +1843,7 @@ class StubGenerator: public StubCodeGenerator { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::generic_arraycopy_id; + StubId stub_id = StubId::stubgen_generic_arraycopy_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -2100,32 +2100,32 @@ class StubGenerator: public StubCodeGenerator { // value: c_rarg1 // count: c_rarg2 treated as signed // - address generate_fill(StubGenStubId stub_id) { + address generate_fill(StubId stub_id) { BasicType t; bool aligned; switch (stub_id) { - case jbyte_fill_id: + case StubId::stubgen_jbyte_fill_id: t = T_BYTE; aligned = false; break; - case jshort_fill_id: + case StubId::stubgen_jshort_fill_id: t = T_SHORT; aligned = false; break; - case jint_fill_id: + case StubId::stubgen_jint_fill_id: t = T_INT; aligned = false; break; - case arrayof_jbyte_fill_id: + case StubId::stubgen_arrayof_jbyte_fill_id: t = T_BYTE; aligned = true; break; - case arrayof_jshort_fill_id: + case StubId::stubgen_arrayof_jshort_fill_id: t = T_SHORT; aligned = true; break; - case arrayof_jint_fill_id: + case StubId::stubgen_arrayof_jint_fill_id: t = T_INT; aligned = true; break; @@ -2301,8 +2301,8 @@ class StubGenerator: public StubCodeGenerator { address entry_jlong_arraycopy = nullptr; address entry_checkcast_arraycopy = nullptr; - generate_copy_longs(StubGenStubId::copy_byte_f_id, copy_f, c_rarg0, c_rarg1, t1); - generate_copy_longs(StubGenStubId::copy_byte_b_id, copy_b, c_rarg0, c_rarg1, t1); + generate_copy_longs(StubId::stubgen_copy_byte_f_id, copy_f, c_rarg0, c_rarg1, t1); + generate_copy_longs(StubId::stubgen_copy_byte_b_id, copy_b, c_rarg0, c_rarg1, t1); address ucm_common_error_exit = generate_unsafecopy_common_error_exit(); UnsafeMemoryAccess::set_common_exit_stub_pc(ucm_common_error_exit); @@ -2311,52 +2311,52 @@ class StubGenerator: public StubCodeGenerator { //*** jbyte // Always need aligned and unaligned versions - StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_copy(StubGenStubId::jbyte_disjoint_arraycopy_id, &entry); - StubRoutines::_jbyte_arraycopy = generate_conjoint_copy(StubGenStubId::jbyte_arraycopy_id, entry, 
&entry_jbyte_arraycopy); - StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_copy(StubGenStubId::arrayof_jbyte_disjoint_arraycopy_id, &entry); - StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_copy(StubGenStubId::arrayof_jbyte_arraycopy_id, entry, nullptr); + StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_copy(StubId::stubgen_jbyte_disjoint_arraycopy_id, &entry); + StubRoutines::_jbyte_arraycopy = generate_conjoint_copy(StubId::stubgen_jbyte_arraycopy_id, entry, &entry_jbyte_arraycopy); + StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_copy(StubId::stubgen_arrayof_jbyte_disjoint_arraycopy_id, &entry); + StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_copy(StubId::stubgen_arrayof_jbyte_arraycopy_id, entry, nullptr); //*** jshort // Always need aligned and unaligned versions - StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_copy(StubGenStubId::jshort_disjoint_arraycopy_id, &entry); - StubRoutines::_jshort_arraycopy = generate_conjoint_copy(StubGenStubId::jshort_arraycopy_id, entry, &entry_jshort_arraycopy); - StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_copy(StubGenStubId::arrayof_jshort_disjoint_arraycopy_id, &entry); - StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_copy(StubGenStubId::arrayof_jshort_arraycopy_id, entry, nullptr); + StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_copy(StubId::stubgen_jshort_disjoint_arraycopy_id, &entry); + StubRoutines::_jshort_arraycopy = generate_conjoint_copy(StubId::stubgen_jshort_arraycopy_id, entry, &entry_jshort_arraycopy); + StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_copy(StubId::stubgen_arrayof_jshort_disjoint_arraycopy_id, &entry); + StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_copy(StubId::stubgen_arrayof_jshort_arraycopy_id, entry, nullptr); //*** jint // Aligned versions - StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_copy(StubGenStubId::arrayof_jint_disjoint_arraycopy_id, &entry); - StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_copy(StubGenStubId::arrayof_jint_arraycopy_id, entry, &entry_jint_arraycopy); + StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_copy(StubId::stubgen_arrayof_jint_disjoint_arraycopy_id, &entry); + StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_copy(StubId::stubgen_arrayof_jint_arraycopy_id, entry, &entry_jint_arraycopy); // In 64 bit we need both aligned and unaligned versions of jint arraycopy. 
// entry_jint_arraycopy always points to the unaligned version - StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_copy(StubGenStubId::jint_disjoint_arraycopy_id, &entry); - StubRoutines::_jint_arraycopy = generate_conjoint_copy(StubGenStubId::jint_arraycopy_id, entry, &entry_jint_arraycopy); + StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_copy(StubId::stubgen_jint_disjoint_arraycopy_id, &entry); + StubRoutines::_jint_arraycopy = generate_conjoint_copy(StubId::stubgen_jint_arraycopy_id, entry, &entry_jint_arraycopy); //*** jlong // It is always aligned - StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_copy(StubGenStubId::arrayof_jlong_disjoint_arraycopy_id, &entry); - StubRoutines::_arrayof_jlong_arraycopy = generate_conjoint_copy(StubGenStubId::arrayof_jlong_arraycopy_id, entry, &entry_jlong_arraycopy); + StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_copy(StubId::stubgen_arrayof_jlong_disjoint_arraycopy_id, &entry); + StubRoutines::_arrayof_jlong_arraycopy = generate_conjoint_copy(StubId::stubgen_arrayof_jlong_arraycopy_id, entry, &entry_jlong_arraycopy); StubRoutines::_jlong_disjoint_arraycopy = StubRoutines::_arrayof_jlong_disjoint_arraycopy; StubRoutines::_jlong_arraycopy = StubRoutines::_arrayof_jlong_arraycopy; //*** oops StubRoutines::_arrayof_oop_disjoint_arraycopy - = generate_disjoint_copy(StubGenStubId::arrayof_oop_disjoint_arraycopy_id, &entry); + = generate_disjoint_copy(StubId::stubgen_arrayof_oop_disjoint_arraycopy_id, &entry); StubRoutines::_arrayof_oop_arraycopy - = generate_conjoint_copy(StubGenStubId::arrayof_oop_arraycopy_id, entry, &entry_oop_arraycopy); + = generate_conjoint_copy(StubId::stubgen_arrayof_oop_arraycopy_id, entry, &entry_oop_arraycopy); // Aligned versions without pre-barriers StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit - = generate_disjoint_copy(StubGenStubId::arrayof_oop_disjoint_arraycopy_uninit_id, &entry); + = generate_disjoint_copy(StubId::stubgen_arrayof_oop_disjoint_arraycopy_uninit_id, &entry); StubRoutines::_arrayof_oop_arraycopy_uninit - = generate_conjoint_copy(StubGenStubId::arrayof_oop_arraycopy_uninit_id, entry, nullptr); + = generate_conjoint_copy(StubId::stubgen_arrayof_oop_arraycopy_uninit_id, entry, nullptr); StubRoutines::_oop_disjoint_arraycopy = StubRoutines::_arrayof_oop_disjoint_arraycopy; StubRoutines::_oop_arraycopy = StubRoutines::_arrayof_oop_arraycopy; StubRoutines::_oop_disjoint_arraycopy_uninit = StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit; StubRoutines::_oop_arraycopy_uninit = StubRoutines::_arrayof_oop_arraycopy_uninit; - StubRoutines::_checkcast_arraycopy = generate_checkcast_copy(StubGenStubId::checkcast_arraycopy_id, &entry_checkcast_arraycopy); - StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy(StubGenStubId::checkcast_arraycopy_uninit_id, nullptr); + StubRoutines::_checkcast_arraycopy = generate_checkcast_copy(StubId::stubgen_checkcast_arraycopy_id, &entry_checkcast_arraycopy); + StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy(StubId::stubgen_checkcast_arraycopy_uninit_id, nullptr); StubRoutines::_unsafe_arraycopy = generate_unsafe_copy(entry_jbyte_arraycopy, @@ -2371,12 +2371,12 @@ class StubGenerator: public StubCodeGenerator { entry_jlong_arraycopy, entry_checkcast_arraycopy); - StubRoutines::_jbyte_fill = generate_fill(StubGenStubId::jbyte_fill_id); - StubRoutines::_jshort_fill = generate_fill(StubGenStubId::jshort_fill_id); - StubRoutines::_jint_fill = 
generate_fill(StubGenStubId::jint_fill_id); - StubRoutines::_arrayof_jbyte_fill = generate_fill(StubGenStubId::arrayof_jbyte_fill_id); - StubRoutines::_arrayof_jshort_fill = generate_fill(StubGenStubId::arrayof_jshort_fill_id); - StubRoutines::_arrayof_jint_fill = generate_fill(StubGenStubId::arrayof_jint_fill_id); + StubRoutines::_jbyte_fill = generate_fill(StubId::stubgen_jbyte_fill_id); + StubRoutines::_jshort_fill = generate_fill(StubId::stubgen_jshort_fill_id); + StubRoutines::_jint_fill = generate_fill(StubId::stubgen_jint_fill_id); + StubRoutines::_arrayof_jbyte_fill = generate_fill(StubId::stubgen_arrayof_jbyte_fill_id); + StubRoutines::_arrayof_jshort_fill = generate_fill(StubId::stubgen_arrayof_jshort_fill_id); + StubRoutines::_arrayof_jint_fill = generate_fill(StubId::stubgen_arrayof_jint_fill_id); StubRoutines::_unsafe_setmemory = generate_unsafe_setmemory(); } @@ -2414,7 +2414,7 @@ class StubGenerator: public StubCodeGenerator { assert(UseAESIntrinsics, "need AES instructions (Zvkned extension) support"); __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::aescrypt_encryptBlock_id; + StubId stub_id = StubId::stubgen_aescrypt_encryptBlock_id; StubCodeMark mark(this, stub_id); Label L_aes128, L_aes192; @@ -2493,7 +2493,7 @@ class StubGenerator: public StubCodeGenerator { assert(UseAESIntrinsics, "need AES instructions (Zvkned extension) support"); __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::aescrypt_decryptBlock_id; + StubId stub_id = StubId::stubgen_aescrypt_decryptBlock_id; StubCodeMark mark(this, stub_id); Label L_aes128, L_aes192; @@ -2595,13 +2595,13 @@ class StubGenerator: public StubCodeGenerator { // x28 = tmp1 // x29 = tmp2 // x30 = tmp3 - address generate_compare_long_string_different_encoding(StubGenStubId stub_id) { + address generate_compare_long_string_different_encoding(StubId stub_id) { bool isLU; switch (stub_id) { - case compare_long_string_LU_id: + case StubId::stubgen_compare_long_string_LU_id: isLU = true; break; - case compare_long_string_UL_id: + case StubId::stubgen_compare_long_string_UL_id: isLU = false; break; default: @@ -2697,7 +2697,7 @@ class StubGenerator: public StubCodeGenerator { address generate_method_entry_barrier() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::method_entry_barrier_id; + StubId stub_id = StubId::stubgen_method_entry_barrier_id; StubCodeMark mark(this, stub_id); Label deoptimize_label; @@ -2768,13 +2768,13 @@ class StubGenerator: public StubCodeGenerator { // x29 = tmp2 // x30 = tmp3 // x31 = tmp4 - address generate_compare_long_string_same_encoding(StubGenStubId stub_id) { + address generate_compare_long_string_same_encoding(StubId stub_id) { bool isLL; switch (stub_id) { - case compare_long_string_LL_id: + case StubId::stubgen_compare_long_string_LL_id: isLL = true; break; - case compare_long_string_UU_id: + case StubId::stubgen_compare_long_string_UU_id: isLL = false; break; default: @@ -2880,10 +2880,10 @@ class StubGenerator: public StubCodeGenerator { } void generate_compare_long_strings() { - StubRoutines::riscv::_compare_long_string_LL = generate_compare_long_string_same_encoding(StubGenStubId::compare_long_string_LL_id); - StubRoutines::riscv::_compare_long_string_UU = generate_compare_long_string_same_encoding(StubGenStubId::compare_long_string_UU_id); - StubRoutines::riscv::_compare_long_string_LU = generate_compare_long_string_different_encoding(StubGenStubId::compare_long_string_LU_id); - StubRoutines::riscv::_compare_long_string_UL = 
generate_compare_long_string_different_encoding(StubGenStubId::compare_long_string_UL_id); + StubRoutines::riscv::_compare_long_string_LL = generate_compare_long_string_same_encoding(StubId::stubgen_compare_long_string_LL_id); + StubRoutines::riscv::_compare_long_string_UU = generate_compare_long_string_same_encoding(StubId::stubgen_compare_long_string_UU_id); + StubRoutines::riscv::_compare_long_string_LU = generate_compare_long_string_different_encoding(StubId::stubgen_compare_long_string_LU_id); + StubRoutines::riscv::_compare_long_string_UL = generate_compare_long_string_different_encoding(StubId::stubgen_compare_long_string_UL_id); } // x10 result @@ -2891,20 +2891,20 @@ class StubGenerator: public StubCodeGenerator { // x12 src count // x13 pattern // x14 pattern count - address generate_string_indexof_linear(StubGenStubId stub_id) + address generate_string_indexof_linear(StubId stub_id) { bool needle_isL; bool haystack_isL; switch (stub_id) { - case string_indexof_linear_ll_id: + case StubId::stubgen_string_indexof_linear_ll_id: needle_isL = true; haystack_isL = true; break; - case string_indexof_linear_ul_id: + case StubId::stubgen_string_indexof_linear_ul_id: needle_isL = true; haystack_isL = false; break; - case string_indexof_linear_uu_id: + case StubId::stubgen_string_indexof_linear_uu_id: needle_isL = false; haystack_isL = false; break; @@ -3140,14 +3140,14 @@ class StubGenerator: public StubCodeGenerator { void generate_string_indexof_stubs() { - StubRoutines::riscv::_string_indexof_linear_ll = generate_string_indexof_linear(StubGenStubId::string_indexof_linear_ll_id); - StubRoutines::riscv::_string_indexof_linear_uu = generate_string_indexof_linear(StubGenStubId::string_indexof_linear_uu_id); - StubRoutines::riscv::_string_indexof_linear_ul = generate_string_indexof_linear(StubGenStubId::string_indexof_linear_ul_id); + StubRoutines::riscv::_string_indexof_linear_ll = generate_string_indexof_linear(StubId::stubgen_string_indexof_linear_ll_id); + StubRoutines::riscv::_string_indexof_linear_uu = generate_string_indexof_linear(StubId::stubgen_string_indexof_linear_uu_id); + StubRoutines::riscv::_string_indexof_linear_ul = generate_string_indexof_linear(StubId::stubgen_string_indexof_linear_ul_id); } #ifdef COMPILER2 void generate_lookup_secondary_supers_table_stub() { - StubGenStubId stub_id = StubGenStubId::lookup_secondary_supers_table_id; + StubId stub_id = StubId::stubgen_lookup_secondary_supers_table_id; StubCodeMark mark(this, stub_id); const Register @@ -3173,7 +3173,7 @@ class StubGenerator: public StubCodeGenerator { // Slow path implementation for UseSecondarySupersTable. 
address generate_lookup_secondary_supers_table_slow_path_stub() { - StubGenStubId stub_id = StubGenStubId::lookup_secondary_supers_table_slow_path_id; + StubId stub_id = StubId::stubgen_lookup_secondary_supers_table_slow_path_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -3195,7 +3195,7 @@ class StubGenerator: public StubCodeGenerator { address generate_mulAdd() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::mulAdd_id; + StubId stub_id = StubId::stubgen_mulAdd_id; StubCodeMark mark(this, stub_id); address entry = __ pc(); @@ -3229,7 +3229,7 @@ class StubGenerator: public StubCodeGenerator { address generate_multiplyToLen() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::multiplyToLen_id; + StubId stub_id = StubId::stubgen_multiplyToLen_id; StubCodeMark mark(this, stub_id); address entry = __ pc(); @@ -3260,7 +3260,7 @@ class StubGenerator: public StubCodeGenerator { address generate_squareToLen() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::squareToLen_id; + StubId stub_id = StubId::stubgen_squareToLen_id; StubCodeMark mark(this, stub_id); address entry = __ pc(); @@ -3301,7 +3301,7 @@ class StubGenerator: public StubCodeGenerator { // address generate_bigIntegerLeftShift() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::bigIntegerLeftShiftWorker_id; + StubId stub_id = StubId::stubgen_bigIntegerLeftShiftWorker_id; StubCodeMark mark(this, stub_id); address entry = __ pc(); @@ -3353,7 +3353,7 @@ class StubGenerator: public StubCodeGenerator { // address generate_bigIntegerRightShift() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::bigIntegerRightShiftWorker_id; + StubId stub_id = StubId::stubgen_bigIntegerRightShiftWorker_id; StubCodeMark mark(this, stub_id); address entry = __ pc(); @@ -4180,7 +4180,7 @@ class StubGenerator: public StubCodeGenerator { address generate_cont_thaw() { if (!Continuations::enabled()) return nullptr; - StubGenStubId stub_id = StubGenStubId::cont_thaw_id; + StubId stub_id = StubId::stubgen_cont_thaw_id; StubCodeMark mark(this, stub_id); address start = __ pc(); generate_cont_thaw(Continuation::thaw_top); @@ -4191,7 +4191,7 @@ class StubGenerator: public StubCodeGenerator { if (!Continuations::enabled()) return nullptr; // TODO: will probably need multiple return barriers depending on return type - StubGenStubId stub_id = StubGenStubId::cont_returnBarrier_id; + StubId stub_id = StubId::stubgen_cont_returnBarrier_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -4203,7 +4203,7 @@ class StubGenerator: public StubCodeGenerator { address generate_cont_returnBarrier_exception() { if (!Continuations::enabled()) return nullptr; - StubGenStubId stub_id = StubGenStubId::cont_returnBarrierExc_id; + StubId stub_id = StubId::stubgen_cont_returnBarrierExc_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -4214,7 +4214,7 @@ class StubGenerator: public StubCodeGenerator { address generate_cont_preempt_stub() { if (!Continuations::enabled()) return nullptr; - StubGenStubId stub_id = StubGenStubId::cont_preempt_id; + StubId stub_id = StubId::stubgen_cont_preempt_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -4252,10 +4252,10 @@ class StubGenerator: public StubCodeGenerator { StubCodeGenerator* _cgen; public: Sha2Generator(MacroAssembler* masm, StubCodeGenerator* cgen) : MacroAssembler(masm->code()), _cgen(cgen) {} - address generate_sha256_implCompress(StubGenStubId stub_id) { + 
address generate_sha256_implCompress(StubId stub_id) { return generate_sha2_implCompress(Assembler::e32, stub_id); } - address generate_sha512_implCompress(StubGenStubId stub_id) { + address generate_sha512_implCompress(StubId stub_id) { return generate_sha2_implCompress(Assembler::e64, stub_id); } private: @@ -4389,7 +4389,7 @@ class StubGenerator: public StubCodeGenerator { // c_rarg2 - int offset // c_rarg3 - int limit // - address generate_sha2_implCompress(Assembler::SEW vset_sew, StubGenStubId stub_id) { + address generate_sha2_implCompress(Assembler::SEW vset_sew, StubId stub_id) { alignas(64) static const uint32_t round_consts_256[64] = { 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, @@ -4441,19 +4441,19 @@ class StubGenerator: public StubCodeGenerator { bool multi_block; switch (stub_id) { - case sha256_implCompress_id: + case StubId::stubgen_sha256_implCompress_id: assert (vset_sew == Assembler::e32, "wrong macroassembler for stub"); multi_block = false; break; - case sha256_implCompressMB_id: + case StubId::stubgen_sha256_implCompressMB_id: assert (vset_sew == Assembler::e32, "wrong macroassembler for stub"); multi_block = true; break; - case sha512_implCompress_id: + case StubId::stubgen_sha512_implCompress_id: assert (vset_sew == Assembler::e64, "wrong macroassembler for stub"); multi_block = false; break; - case sha512_implCompressMB_id: + case StubId::stubgen_sha512_implCompressMB_id: assert (vset_sew == Assembler::e64, "wrong macroassembler for stub"); multi_block = true; break; @@ -4808,14 +4808,14 @@ class StubGenerator: public StubCodeGenerator { // x29 t4 buf5 // x30 t5 buf6 // x31 t6 buf7 - address generate_md5_implCompress(StubGenStubId stub_id) { + address generate_md5_implCompress(StubId stub_id) { __ align(CodeEntryAlignment); bool multi_block; switch (stub_id) { - case md5_implCompress_id: + case StubId::stubgen_md5_implCompress_id: multi_block = false; break; - case md5_implCompressMB_id: + case StubId::stubgen_md5_implCompressMB_id: multi_block = true; break; default: @@ -5075,7 +5075,7 @@ class StubGenerator: public StubCodeGenerator { Label L_Rounds; __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::chacha20Block_id; + StubId stub_id = StubId::stubgen_chacha20Block_id; StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); @@ -5375,13 +5375,13 @@ class StubGenerator: public StubCodeGenerator { // - - - - - - below are only for implCompressMultiBlock0 - - - - - - // c_rarg0: int offset, when (multi_block == true) // - address generate_sha1_implCompress(StubGenStubId stub_id) { + address generate_sha1_implCompress(StubId stub_id) { bool multi_block; switch (stub_id) { - case sha1_implCompress_id: + case StubId::stubgen_sha1_implCompress_id: multi_block = false; break; - case sha1_implCompressMB_id: + case StubId::stubgen_sha1_implCompressMB_id: multi_block = true; break; default: @@ -5598,7 +5598,7 @@ class StubGenerator: public StubCodeGenerator { }; __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::base64_encodeBlock_id; + StubId stub_id = StubId::stubgen_base64_encodeBlock_id; StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); @@ -5855,7 +5855,7 @@ class StubGenerator: public StubCodeGenerator { }; __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::base64_decodeBlock_id; + StubId stub_id = StubId::stubgen_base64_decodeBlock_id; StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); @@ -6100,7 
+6100,7 @@ class StubGenerator: public StubCodeGenerator { */ address generate_updateBytesAdler32() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::updateBytesAdler32_id; + StubId stub_id = StubId::stubgen_updateBytesAdler32_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -6278,7 +6278,7 @@ class StubGenerator: public StubCodeGenerator { // t1 = temporary register address generate_float16ToFloat() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::hf2f_id; + StubId stub_id = StubId::stubgen_hf2f_id; StubCodeMark mark(this, stub_id); address entry = __ pc(); BLOCK_COMMENT("float16ToFloat:"); @@ -6329,7 +6329,7 @@ class StubGenerator: public StubCodeGenerator { // t1 = temporary register address generate_floatToFloat16() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::f2hf_id; + StubId stub_id = StubId::stubgen_f2hf_id; StubCodeMark mark(this, stub_id); address entry = __ pc(); BLOCK_COMMENT("floatToFloat16:"); @@ -6457,7 +6457,7 @@ static const int64_t right_3_bits = right_n_bits(3); address generate_poly1305_processBlocks() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::poly1305_processBlocks_id; + StubId stub_id = StubId::stubgen_poly1305_processBlocks_id; StubCodeMark mark(this, stub_id); address start = __ pc(); __ enter(); @@ -6595,7 +6595,7 @@ static const int64_t right_3_bits = right_n_bits(3); assert(UseCRC32Intrinsics, "what are we doing here?"); __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::updateBytesCRC32_id; + StubId stub_id = StubId::stubgen_updateBytesCRC32_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -6620,7 +6620,7 @@ static const int64_t right_3_bits = right_n_bits(3); // exception handler for upcall stubs address generate_upcall_stub_exception_handler() { - StubGenStubId stub_id = StubGenStubId::upcall_stub_exception_handler_id; + StubId stub_id = StubId::stubgen_upcall_stub_exception_handler_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -6638,7 +6638,7 @@ static const int64_t right_3_bits = right_n_bits(3); // xmethod = Method* result address generate_upcall_stub_load_target() { - StubGenStubId stub_id = StubGenStubId::upcall_stub_load_target_id; + StubId stub_id = StubId::stubgen_upcall_stub_load_target_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -6747,14 +6747,14 @@ static const int64_t right_3_bits = right_n_bits(3); } if (UseMontgomeryMultiplyIntrinsic) { - StubGenStubId stub_id = StubGenStubId::montgomeryMultiply_id; + StubId stub_id = StubId::stubgen_montgomeryMultiply_id; StubCodeMark mark(this, stub_id); MontgomeryMultiplyGenerator g(_masm, /*squaring*/false); StubRoutines::_montgomeryMultiply = g.generate_multiply(); } if (UseMontgomerySquareIntrinsic) { - StubGenStubId stub_id = StubGenStubId::montgomerySquare_id; + StubId stub_id = StubId::stubgen_montgomerySquare_id; StubCodeMark mark(this, stub_id); MontgomeryMultiplyGenerator g(_masm, /*squaring*/true); StubRoutines::_montgomerySquare = g.generate_square(); @@ -6776,19 +6776,19 @@ static const int64_t right_3_bits = right_n_bits(3); if (UseSHA256Intrinsics) { Sha2Generator sha2(_masm, this); - StubRoutines::_sha256_implCompress = sha2.generate_sha256_implCompress(StubGenStubId::sha256_implCompress_id); - StubRoutines::_sha256_implCompressMB = sha2.generate_sha256_implCompress(StubGenStubId::sha256_implCompressMB_id); + StubRoutines::_sha256_implCompress = 
sha2.generate_sha256_implCompress(StubId::stubgen_sha256_implCompress_id); + StubRoutines::_sha256_implCompressMB = sha2.generate_sha256_implCompress(StubId::stubgen_sha256_implCompressMB_id); } if (UseSHA512Intrinsics) { Sha2Generator sha2(_masm, this); - StubRoutines::_sha512_implCompress = sha2.generate_sha512_implCompress(StubGenStubId::sha512_implCompress_id); - StubRoutines::_sha512_implCompressMB = sha2.generate_sha512_implCompress(StubGenStubId::sha512_implCompressMB_id); + StubRoutines::_sha512_implCompress = sha2.generate_sha512_implCompress(StubId::stubgen_sha512_implCompress_id); + StubRoutines::_sha512_implCompressMB = sha2.generate_sha512_implCompress(StubId::stubgen_sha512_implCompressMB_id); } if (UseMD5Intrinsics) { - StubRoutines::_md5_implCompress = generate_md5_implCompress(StubGenStubId::md5_implCompress_id); - StubRoutines::_md5_implCompressMB = generate_md5_implCompress(StubGenStubId::md5_implCompressMB_id); + StubRoutines::_md5_implCompress = generate_md5_implCompress(StubId::stubgen_md5_implCompress_id); + StubRoutines::_md5_implCompressMB = generate_md5_implCompress(StubId::stubgen_md5_implCompressMB_id); } if (UseChaCha20Intrinsics) { @@ -6796,8 +6796,8 @@ static const int64_t right_3_bits = right_n_bits(3); } if (UseSHA1Intrinsics) { - StubRoutines::_sha1_implCompress = generate_sha1_implCompress(StubGenStubId::sha1_implCompress_id); - StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(StubGenStubId::sha1_implCompressMB_id); + StubRoutines::_sha1_implCompress = generate_sha1_implCompress(StubId::stubgen_sha1_implCompress_id); + StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(StubId::stubgen_sha1_implCompressMB_id); } if (UseBASE64Intrinsics) { @@ -6817,30 +6817,30 @@ static const int64_t right_3_bits = right_n_bits(3); } public: - StubGenerator(CodeBuffer* code, StubGenBlobId blob_id) : StubCodeGenerator(code, blob_id) { + StubGenerator(CodeBuffer* code, BlobId blob_id) : StubCodeGenerator(code, blob_id) { switch(blob_id) { - case preuniverse_id: + case BlobId::stubgen_preuniverse_id: generate_preuniverse_stubs(); break; - case initial_id: + case BlobId::stubgen_initial_id: generate_initial_stubs(); break; - case continuation_id: + case BlobId::stubgen_continuation_id: generate_continuation_stubs(); break; - case compiler_id: + case BlobId::stubgen_compiler_id: generate_compiler_stubs(); break; - case final_id: + case BlobId::stubgen_final_id: generate_final_stubs(); break; default: - fatal("unexpected blob id: %d", blob_id); + fatal("unexpected blob id: %s", StubInfo::name(blob_id)); break; }; } }; // end class declaration -void StubGenerator_generate(CodeBuffer* code, StubGenBlobId blob_id) { +void StubGenerator_generate(CodeBuffer* code, BlobId blob_id) { StubGenerator g(code, blob_id); } diff --git a/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp b/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp index 430928a66ed..68e7114b3b6 100644 --- a/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp +++ b/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp @@ -47,7 +47,7 @@ void C1SafepointPollStub::emit_code(LIR_Assembler* ce) { void RangeCheckStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); if (_info->deoptimize_on_exception()) { - address a = Runtime1::entry_for (C1StubId::predicate_failed_trap_id); + address a = Runtime1::entry_for (StubId::c1_predicate_failed_trap_id); ce->emit_call_c(a); CHECK_BAILOUT(); ce->add_call_info_here(_info); @@ -63,11 +63,11 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) { __ load_const_optimized(Z_R1_scratch, 
_index->as_jint()); } - C1StubId stub_id; + StubId stub_id; if (_throw_index_out_of_bounds_exception) { - stub_id = C1StubId::throw_index_exception_id; + stub_id = StubId::c1_throw_index_exception_id; } else { - stub_id = C1StubId::throw_range_check_failed_id; + stub_id = StubId::c1_throw_range_check_failed_id; __ lgr_if_needed(Z_R0_scratch, _array->as_pointer_register()); } ce->emit_call_c(Runtime1::entry_for (stub_id)); @@ -83,7 +83,7 @@ PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) { void PredicateFailedStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); - address a = Runtime1::entry_for (C1StubId::predicate_failed_trap_id); + address a = Runtime1::entry_for (StubId::c1_predicate_failed_trap_id); ce->emit_call_c(a); CHECK_BAILOUT(); ce->add_call_info_here(_info); @@ -101,7 +101,7 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) { } ce->store_parameter(/*_method->as_register()*/ Z_R1_scratch, 1); ce->store_parameter(_bci, 0); - ce->emit_call_c(Runtime1::entry_for (C1StubId::counter_overflow_id)); + ce->emit_call_c(Runtime1::entry_for (StubId::c1_counter_overflow_id)); CHECK_BAILOUT(); ce->add_call_info_here(_info); ce->verify_oop_map(_info); @@ -113,7 +113,7 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) { ce->compilation()->implicit_exception_table()->append(_offset, __ offset()); } __ bind(_entry); - ce->emit_call_c(Runtime1::entry_for (C1StubId::throw_div0_exception_id)); + ce->emit_call_c(Runtime1::entry_for (StubId::c1_throw_div0_exception_id)); CHECK_BAILOUT(); ce->add_call_info_here(_info); DEBUG_ONLY(__ should_not_reach_here()); @@ -123,9 +123,9 @@ void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) { address a; if (_info->deoptimize_on_exception()) { // Deoptimize, do not throw the exception, because it is probably wrong to do it here. 
- a = Runtime1::entry_for (C1StubId::predicate_failed_trap_id); + a = Runtime1::entry_for (StubId::c1_predicate_failed_trap_id); } else { - a = Runtime1::entry_for (C1StubId::throw_null_pointer_exception_id); + a = Runtime1::entry_for (StubId::c1_throw_null_pointer_exception_id); } ce->compilation()->implicit_exception_table()->append(_offset, __ offset()); @@ -150,14 +150,14 @@ void SimpleExceptionStub::emit_code(LIR_Assembler* ce) { DEBUG_ONLY(__ should_not_reach_here()); } -NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, C1StubId stub_id) { +NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, StubId stub_id) { _result = result; _klass = klass; _klass_reg = klass_reg; _info = new CodeEmitInfo(info); - assert(stub_id == C1StubId::new_instance_id || - stub_id == C1StubId::fast_new_instance_id || - stub_id == C1StubId::fast_new_instance_init_check_id, + assert(stub_id == StubId::c1_new_instance_id || + stub_id == StubId::c1_fast_new_instance_id || + stub_id == StubId::c1_fast_new_instance_init_check_id, "need new_instance id"); _stub_id = stub_id; } @@ -185,7 +185,7 @@ void NewTypeArrayStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); assert(_klass_reg->as_register() == Z_R11, "call target expects klass in Z_R11"); __ lgr_if_needed(Z_R13, _length->as_register()); - address a = Runtime1::entry_for (C1StubId::new_type_array_id); + address a = Runtime1::entry_for (StubId::c1_new_type_array_id); ce->emit_call_c(a); CHECK_BAILOUT(); ce->add_call_info_here(_info); @@ -205,7 +205,7 @@ void NewObjectArrayStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); assert(_klass_reg->as_register() == Z_R11, "call target expects klass in Z_R11"); __ lgr_if_needed(Z_R13, _length->as_register()); - address a = Runtime1::entry_for (C1StubId::new_object_array_id); + address a = Runtime1::entry_for (StubId::c1_new_object_array_id); ce->emit_call_c(a); CHECK_BAILOUT(); ce->add_call_info_here(_info); @@ -216,11 +216,11 @@ void NewObjectArrayStub::emit_code(LIR_Assembler* ce) { void MonitorEnterStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); - C1StubId enter_id; + StubId enter_id; if (ce->compilation()->has_fpu_code()) { - enter_id = C1StubId::monitorenter_id; + enter_id = StubId::c1_monitorenter_id; } else { - enter_id = C1StubId::monitorenter_nofpu_id; + enter_id = StubId::c1_monitorenter_nofpu_id; } __ lgr_if_needed(Z_R1_scratch, _obj_reg->as_register()); __ lgr_if_needed(Z_R13, _lock_reg->as_register()); // See LIRGenerator::syncTempOpr(). @@ -241,11 +241,11 @@ void MonitorExitStub::emit_code(LIR_Assembler* ce) { __ lgr_if_needed(Z_R1_scratch, _lock_reg->as_register()); } // Note: non-blocking leaf routine => no call info needed. 
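// Editorial aside (illustrative, not part of the patch): as with monitorenter
// above, C1 keeps an fpu and a nofpu flavour of this runtime stub. When the
// compiled method contains no FPU code the *_nofpu_id variant is selected so
// save_live_registers() can skip the floating-point register file (see the
// save_fpu_registers = false fall-through in the c1_Runtime1 hunks above).
// A minimal sketch of the selection, assuming only the id type has changed:
//
//   StubId exit_id = ce->compilation()->has_fpu_code()
//                      ? StubId::c1_monitorexit_id
//                      : StubId::c1_monitorexit_nofpu_id;
//   ce->emit_call_c(Runtime1::entry_for(exit_id));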
- C1StubId exit_id; + StubId exit_id; if (ce->compilation()->has_fpu_code()) { - exit_id = C1StubId::monitorexit_id; + exit_id = StubId::c1_monitorexit_id; } else { - exit_id = C1StubId::monitorexit_nofpu_id; + exit_id = StubId::c1_monitorexit_nofpu_id; } ce->emit_call_c(Runtime1::entry_for (exit_id)); CHECK_BAILOUT(); @@ -377,10 +377,10 @@ void PatchingStub::emit_code(LIR_Assembler* ce) { address target = nullptr; relocInfo::relocType reloc_type = relocInfo::none; switch (_id) { - case access_field_id: target = Runtime1::entry_for (C1StubId::access_field_patching_id); break; - case load_klass_id: target = Runtime1::entry_for (C1StubId::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break; - case load_mirror_id: target = Runtime1::entry_for (C1StubId::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break; - case load_appendix_id: target = Runtime1::entry_for (C1StubId::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break; + case access_field_id: target = Runtime1::entry_for (StubId::c1_access_field_patching_id); break; + case load_klass_id: target = Runtime1::entry_for (StubId::c1_load_klass_patching_id); reloc_type = relocInfo::metadata_type; break; + case load_mirror_id: target = Runtime1::entry_for (StubId::c1_load_mirror_patching_id); reloc_type = relocInfo::oop_type; break; + case load_appendix_id: target = Runtime1::entry_for (StubId::c1_load_appendix_patching_id); reloc_type = relocInfo::oop_type; break; default: ShouldNotReachHere(); } __ bind(call_patch); @@ -405,7 +405,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) { void DeoptimizeStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); __ load_const_optimized(Z_R1_scratch, _trap_request); // Pass trap request in Z_R1_scratch. - ce->emit_call_c(Runtime1::entry_for (C1StubId::deoptimize_id)); + ce->emit_call_c(Runtime1::entry_for (StubId::c1_deoptimize_id)); CHECK_BAILOUT(); ce->add_call_info_here(_info); DEBUG_ONLY(__ should_not_reach_here()); diff --git a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp index 8df38d1658b..4d90f313267 100644 --- a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp +++ b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp @@ -185,7 +185,7 @@ int LIR_Assembler::emit_exception_handler() { int offset = code_offset(); - address a = Runtime1::entry_for (C1StubId::handle_exception_from_callee_id); + address a = Runtime1::entry_for (StubId::c1_handle_exception_from_callee_id); address call_addr = emit_call_c(a); CHECK_BAILOUT_(-1); __ should_not_reach_here(); @@ -225,7 +225,7 @@ int LIR_Assembler::emit_unwind_handler() { // Perform needed unlocking. MonitorExitStub* stub = nullptr; if (method()->is_synchronized()) { - // C1StubId::monitorexit_id expects lock address in Z_R1_scratch. + // StubId::c1_monitorexit_id expects lock address in Z_R1_scratch. LIR_Opr lock = FrameMap::as_opr(Z_R1_scratch); monitor_address(0, lock); stub = new MonitorExitStub(lock, true, 0); @@ -258,7 +258,7 @@ int LIR_Assembler::emit_unwind_handler() { // Z_EXC_PC: exception pc // Dispatch to the unwind logic. - __ load_const_optimized(Z_R5, Runtime1::entry_for (C1StubId::unwind_exception_id)); + __ load_const_optimized(Z_R5, Runtime1::entry_for (StubId::c1_unwind_exception_id)); __ z_br(Z_R5); // Emit the slow path assembly. @@ -1931,8 +1931,8 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit // Reuse the debug info from the safepoint poll for the throw op itself. 
__ get_PC(Z_EXC_PC); add_call_info(__ offset(), info); // for exception handler - address stub = Runtime1::entry_for (compilation()->has_fpu_code() ? C1StubId::handle_exception_id - : C1StubId::handle_exception_nofpu_id); + address stub = Runtime1::entry_for (compilation()->has_fpu_code() ? StubId::c1_handle_exception_id + : StubId::c1_handle_exception_nofpu_id); emit_call_c(stub); } @@ -2129,7 +2129,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { store_parameter(src_klass, 0); // sub store_parameter(dst_klass, 1); // super - emit_call_c(Runtime1::entry_for (C1StubId::slow_subtype_check_id)); + emit_call_c(Runtime1::entry_for (StubId::c1_slow_subtype_check_id)); CHECK_BAILOUT2(cont, slow); // Sets condition code 0 for match (2 otherwise). __ branch_optimized(Assembler::bcondEqual, cont); @@ -2549,7 +2549,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L failure_target, nullptr); if (need_slow_path) { // Call out-of-line instance of __ check_klass_subtype_slow_path(...): - address a = Runtime1::entry_for (C1StubId::slow_subtype_check_id); + address a = Runtime1::entry_for (StubId::c1_slow_subtype_check_id); store_parameter(klass_RInfo, 0); // sub store_parameter(k_RInfo, 1); // super emit_call_c(a); // Sets condition code 0 for match (2 otherwise). @@ -2624,7 +2624,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { // Perform the fast part of the checking logic. __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr); // Call out-of-line instance of __ check_klass_subtype_slow_path(...): - address a = Runtime1::entry_for (C1StubId::slow_subtype_check_id); + address a = Runtime1::entry_for (StubId::c1_slow_subtype_check_id); store_parameter(klass_RInfo, 0); // sub store_parameter(k_RInfo, 1); // super emit_call_c(a); // Sets condition code 0 for match (2 otherwise). 
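Editorial note (illustrative, not part of the patch): every hunk in this section applies the same mechanical rewrite — the per-family enums C1StubId, OptoStubId, SharedStubId, StubGenStubId and StubGenBlobId are folded into the global StubId and BlobId enums, with the old family kept as a prefix on each enumerator (c1_, c2_, shared_, stubgen_). The actual declarations live in the shared stub headers, which this part of the patch does not show; assuming a scoped enum, the shape is roughly:

    // Hypothetical sketch of the unified id enum; enumerator names follow
    // the <family>_<old id> pattern seen throughout the hunks above.
    enum class StubId : int {
      // shared runtime stubs (formerly SharedStubId)
      shared_deopt_id,
      shared_polling_page_return_handler_id,
      // c1 runtime stubs (formerly C1StubId)
      c1_forward_exception_id,
      c1_monitorexit_id,
      c1_monitorexit_nofpu_id,
      // c2 runtime stubs (formerly OptoStubId)
      c2_uncommon_trap_id,
      c2_exception_id,
      // stub generator stubs (formerly StubGenStubId)
      stubgen_call_stub_id,
      stubgen_jbyte_fill_id
      // ... one enumerator per stub, elided here
    };

One consequence of moving to a qualified enum is visible in the switch statements above: case labels that previously used bare enumerators (case copy_byte_f_id:) must now be written fully qualified (case StubId::stubgen_copy_byte_f_id:).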
diff --git a/src/hotspot/cpu/s390/c1_LIRGenerator_s390.cpp b/src/hotspot/cpu/s390/c1_LIRGenerator_s390.cpp index 94a528a7467..5a0fd5f9561 100644 --- a/src/hotspot/cpu/s390/c1_LIRGenerator_s390.cpp +++ b/src/hotspot/cpu/s390/c1_LIRGenerator_s390.cpp @@ -890,7 +890,7 @@ void LIRGenerator::do_NewMultiArray(NewMultiArray* x) { args->append(rank); args->append(varargs); LIR_Opr reg = result_register_for (x->type()); - __ call_runtime(Runtime1::entry_for (C1StubId::new_multi_array_id), + __ call_runtime(Runtime1::entry_for (StubId::c1_new_multi_array_id), LIR_OprFact::illegalOpr, reg, args, info); @@ -921,14 +921,14 @@ void LIRGenerator::do_CheckCast(CheckCast* x) { CodeStub* stub; if (x->is_incompatible_class_change_check()) { assert(patching_info == nullptr, "can't patch this"); - stub = new SimpleExceptionStub(C1StubId::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception); + stub = new SimpleExceptionStub(StubId::c1_throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception); } else if (x->is_invokespecial_receiver_check()) { assert(patching_info == nullptr, "can't patch this"); stub = new DeoptimizeStub(info_for_exception, Deoptimization::Reason_class_check, Deoptimization::Action_none); } else { - stub = new SimpleExceptionStub(C1StubId::throw_class_cast_exception_id, obj.result(), info_for_exception); + stub = new SimpleExceptionStub(StubId::c1_throw_class_cast_exception_id, obj.result(), info_for_exception); } LIR_Opr reg = rlock_result(x); LIR_Opr tmp1 = new_register(objectType); @@ -961,7 +961,7 @@ void LIRGenerator::do_InstanceOf(InstanceOf* x) { // Intrinsic for Class::isInstance address LIRGenerator::isInstance_entry() { - return Runtime1::entry_for(C1StubId::is_instance_of_id); + return Runtime1::entry_for(StubId::c1_is_instance_of_id); } diff --git a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp index 0e873250dca..519884c8212 100644 --- a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp +++ b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp @@ -262,7 +262,7 @@ void C1_MacroAssembler::initialize_object( // Dtrace support is unimplemented. // if (CURRENT_ENV->dtrace_alloc_probes()) { // assert(obj == rax, "must be"); - // call(RuntimeAddress(Runtime1::entry_for (C1StubId::dtrace_object_alloc_id))); + // call(RuntimeAddress(Runtime1::entry_for (StubId::c1_dtrace_object_alloc_id))); // } verify_oop(obj, FILE_AND_LINE); @@ -323,7 +323,7 @@ void C1_MacroAssembler::allocate_array( // Dtrace support is unimplemented. 
   //  if (CURRENT_ENV->dtrace_alloc_probes()) {
   //    assert(obj == rax, "must be");
-  //    call(RuntimeAddress(Runtime1::entry_for (C1StubId::dtrace_object_alloc_id)));
+  //    call(RuntimeAddress(Runtime1::entry_for (StubId::c1_dtrace_object_alloc_id)));
   //  }
   verify_oop(obj, FILE_AND_LINE);
diff --git a/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp b/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp
index db04703ceb0..e78b04fe911 100644
--- a/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp
+++ b/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp
@@ -97,10 +97,10 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre
     restore_return_pc();
     load_const_optimized(Z_R1, StubRoutines::forward_exception_entry());
     z_br(Z_R1);
-  } else if (_stub_id == (int)C1StubId::forward_exception_id) {
+  } else if (_stub_id == (int)StubId::c1_forward_exception_id) {
     should_not_reach_here();
   } else {
-    load_const_optimized(Z_R1, Runtime1::entry_for (C1StubId::forward_exception_id));
+    load_const_optimized(Z_R1, Runtime1::entry_for (StubId::c1_forward_exception_id));
     z_br(Z_R1);
   }
@@ -309,7 +309,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
   return oop_maps;
 }

-OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
+OopMapSet* Runtime1::generate_code_for(StubId id, StubAssembler* sasm) {
   // for better readability
   const bool must_gc_arguments = true;
@@ -322,26 +322,26 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
   // Stub code and info for the different stubs.
   OopMapSet* oop_maps = nullptr;
   switch (id) {
-    case C1StubId::forward_exception_id:
+    case StubId::c1_forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        // will not return
      }
      break;

-    case C1StubId::new_instance_id:
-    case C1StubId::fast_new_instance_id:
-    case C1StubId::fast_new_instance_init_check_id:
+    case StubId::c1_new_instance_id:
+    case StubId::c1_fast_new_instance_id:
+    case StubId::c1_fast_new_instance_init_check_id:
      {
        Register klass = Z_R11; // Incoming
        Register obj   = Z_R2;  // Result
-       if (id == C1StubId::new_instance_id) {
+       if (id == StubId::c1_new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
-       } else if (id == C1StubId::fast_new_instance_id) {
+       } else if (id == StubId::c1_fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
-         assert(id == C1StubId::fast_new_instance_init_check_id, "bad C1StubId");
+         assert(id == StubId::c1_fast_new_instance_init_check_id, "bad StubId");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }
@@ -356,7 +356,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
      }
      break;

-    case C1StubId::counter_overflow_id:
+    case StubId::c1_counter_overflow_id:
      {
        // Arguments :
        //   bci : stack param 0
@@ -375,14 +375,14 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
        __ z_br(Z_R14);
      }
      break;
-    case C1StubId::new_type_array_id:
-    case C1StubId::new_object_array_id:
+    case StubId::c1_new_type_array_id:
+    case StubId::c1_new_object_array_id:
      {
        Register length = Z_R13; // Incoming
        Register klass  = Z_R11; // Incoming
        Register obj    = Z_R2;  // Result
-       if (id == C1StubId::new_type_array_id) {
+       if (id == StubId::c1_new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
@@ -395,7 +395,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
        Register t0 = obj;
        __ mem2reg_opt(t0, Address(klass, Klass::layout_helper_offset()),
                       false);
        __ z_sra(t0, Klass::_lh_array_tag_shift);
-       int tag = ((id == C1StubId::new_type_array_id)
+       int tag = ((id == StubId::c1_new_type_array_id)
                   ? Klass::_lh_array_tag_type_value
                   : Klass::_lh_array_tag_obj_value);
        __ compare32_and_branch(t0, tag, Assembler::bcondEqual, ok);
@@ -407,7 +407,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
        OopMap* map = save_live_registers_except_r2(sasm);
        int call_offset;
-       if (id == C1StubId::new_type_array_id) {
+       if (id == StubId::c1_new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
@@ -422,7 +422,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
      }
      break;

-    case C1StubId::new_multi_array_id:
+    case StubId::c1_new_multi_array_id:
      { __ set_info("new_multi_array", dont_gc_arguments);
        // Z_R3,: klass
        // Z_R4,: rank
@@ -440,7 +440,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
      }
      break;

-    case C1StubId::register_finalizer_id:
+    case StubId::c1_register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);
@@ -463,62 +463,62 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
      }
      break;

-    case C1StubId::throw_range_check_failed_id:
+    case StubId::c1_throw_range_check_failed_id:
      {
        __ set_info("range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

-    case C1StubId::throw_index_exception_id:
+    case StubId::c1_throw_index_exception_id:
      {
        __ set_info("index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

-    case C1StubId::throw_div0_exception_id:
+    case StubId::c1_throw_div0_exception_id:
      {
        __ set_info("throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

-    case C1StubId::throw_null_pointer_exception_id:
+    case StubId::c1_throw_null_pointer_exception_id:
      {
        __ set_info("throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

-    case C1StubId::handle_exception_nofpu_id:
-    case C1StubId::handle_exception_id:
+    case StubId::c1_handle_exception_nofpu_id:
+    case StubId::c1_handle_exception_id:
      { __ set_info("handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

-    case C1StubId::handle_exception_from_callee_id:
+    case StubId::c1_handle_exception_from_callee_id:
      { __ set_info("handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

-    case C1StubId::unwind_exception_id:
+    case StubId::c1_unwind_exception_id:
      { __ set_info("unwind_exception", dont_gc_arguments);
        // Note: no stubframe since we are about to leave the current
        // activation and we are calling a leaf VM function only.
        generate_unwind_exception(sasm);
      }
      break;

-    case C1StubId::throw_array_store_exception_id:
+    case StubId::c1_throw_array_store_exception_id:
      { __ set_info("throw_array_store_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

-    case C1StubId::throw_class_cast_exception_id:
+    case StubId::c1_throw_class_cast_exception_id:
      { // Z_R1_scratch: object
        __ set_info("throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

-    case C1StubId::throw_incompatible_class_change_error_id:
+    case StubId::c1_throw_incompatible_class_change_error_id:
      { __ set_info("throw_incompatible_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

-    case C1StubId::slow_subtype_check_id:
+    case StubId::c1_slow_subtype_check_id:
      {
        // Arguments :
        //   sub : stack param 0
@@ -589,7 +589,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
        __ z_br(Z_R14);
      }
      break;

-    case C1StubId::is_instance_of_id:
+    case StubId::c1_is_instance_of_id:
      {
        // Mirror: Z_ARG1(R2)
        // Object: Z_ARG2
@@ -650,13 +650,13 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
        __ z_br(Z_R14);
      }

-    case C1StubId::monitorenter_nofpu_id:
-    case C1StubId::monitorenter_id:
+    case StubId::c1_monitorenter_nofpu_id:
+    case StubId::c1_monitorenter_id:
      { // Z_R1_scratch : object
        // Z_R13 : lock address (see LIRGenerator::syncTempOpr())
        __ set_info("monitorenter", dont_gc_arguments);

-       int save_fpu_registers = (id == C1StubId::monitorenter_id);
+       int save_fpu_registers = (id == StubId::c1_monitorenter_id);
        // Make a frame and preserve the caller's caller-save registers.
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);
@@ -670,15 +670,15 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
      }
      break;

-    case C1StubId::monitorexit_nofpu_id:
-    case C1StubId::monitorexit_id:
+    case StubId::c1_monitorexit_nofpu_id:
+    case StubId::c1_monitorexit_id:
      { // Z_R1_scratch : lock address
        // Note: really a leaf routine but must setup last java sp
        //   => Use call_RT for now (speed can be improved by
        //      doing last java sp setup manually).
        __ set_info("monitorexit", dont_gc_arguments);

-       int save_fpu_registers = (id == C1StubId::monitorexit_id);
+       int save_fpu_registers = (id == StubId::c1_monitorexit_id);
        // Make a frame and preserve the caller's caller-save registers.
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);
@@ -692,7 +692,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
      }
      break;

-    case C1StubId::deoptimize_id:
+    case StubId::c1_deoptimize_id:
      { // Args: Z_R1_scratch: trap request
        __ set_info("deoptimize", dont_gc_arguments);
        Register trap_request = Z_R1_scratch;
@@ -709,32 +709,32 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
      }
      break;

-    case C1StubId::access_field_patching_id:
+    case StubId::c1_access_field_patching_id:
      { __ set_info("access_field_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

-    case C1StubId::load_klass_patching_id:
+    case StubId::c1_load_klass_patching_id:
      { __ set_info("load_klass_patching", dont_gc_arguments);
        // We should set up register map.
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

-    case C1StubId::load_mirror_patching_id:
+    case StubId::c1_load_mirror_patching_id:
      { __ set_info("load_mirror_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

-    case C1StubId::load_appendix_patching_id:
+    case StubId::c1_load_appendix_patching_id:
      { __ set_info("load_appendix_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;
#if 0
-    case C1StubId::dtrace_object_alloc_id:
+    case StubId::c1_dtrace_object_alloc_id:
      { // rax,: object
        StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
        // We can't gc here so skip the oopmap but make sure that all
@@ -749,7 +749,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
      }
      break;

-    case C1StubId::fpu2long_stub_id:
+    case StubId::c1_fpu2long_stub_id:
      {
        // rax, and rdx are destroyed, but should be free since the result is returned there
        // preserve rsi,ecx
@@ -824,7 +824,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
      break;
#endif // TODO

-    case C1StubId::predicate_failed_trap_id:
+    case StubId::c1_predicate_failed_trap_id:
      {
        __ set_info("predicate_failed_trap", dont_gc_arguments);
@@ -852,7 +852,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
   return oop_maps;
 }

-OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler *sasm) {
+OopMapSet* Runtime1::generate_handle_exception(StubId id, StubAssembler *sasm) {
   __ block_comment("generate_handle_exception");

   // incoming parameters: Z_EXC_OOP, Z_EXC_PC
@@ -863,7 +863,7 @@ OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler *sasm)
   Register reg_fp = Z_R1_scratch;

   switch (id) {
-   case C1StubId::forward_exception_id: {
+   case StubId::c1_forward_exception_id: {
     // We're handling an exception in the context of a compiled frame.
     // The registers have been saved in the standard places. Perform
     // an exception lookup in the caller and dispatch to the handler
@@ -890,13 +890,13 @@ OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler *sasm)
     __ clear_mem(Address(Z_thread, JavaThread::vm_result_metadata_offset()), sizeof(Metadata*));
     break;
   }
-   case C1StubId::handle_exception_nofpu_id:
-   case C1StubId::handle_exception_id:
+   case StubId::c1_handle_exception_nofpu_id:
+   case StubId::c1_handle_exception_id:
     // At this point all registers MAY be live.
     DEBUG_ONLY(__ z_lgr(reg_fp, Z_SP);)
-    oop_map = save_live_registers(sasm, id != C1StubId::handle_exception_nofpu_id, Z_EXC_PC);
+    oop_map = save_live_registers(sasm, id != StubId::c1_handle_exception_nofpu_id, Z_EXC_PC);
     break;
-   case C1StubId::handle_exception_from_callee_id: {
+   case StubId::c1_handle_exception_from_callee_id: {
     // At this point all registers except Z_EXC_OOP and Z_EXC_PC are dead.
     DEBUG_ONLY(__ z_lgr(reg_fp, Z_SP);)
     __ save_return_pc(Z_EXC_PC);
@@ -945,15 +945,15 @@ OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler *sasm)
   __ invalidate_registers(Z_R2);

   switch(id) {
-   case C1StubId::forward_exception_id:
-   case C1StubId::handle_exception_nofpu_id:
-   case C1StubId::handle_exception_id:
+   case StubId::c1_forward_exception_id:
+   case StubId::c1_handle_exception_nofpu_id:
+   case StubId::c1_handle_exception_id:
     // Restore the registers that were saved at the beginning.
     __ z_lgr(Z_R1_scratch, Z_R2);   // Restoring live registers kills Z_R2.
-    restore_live_registers(sasm, id != C1StubId::handle_exception_nofpu_id); // Pops as well the frame.
+    restore_live_registers(sasm, id != StubId::c1_handle_exception_nofpu_id); // Pops as well the frame.
     __ z_br(Z_R1_scratch);
     break;
-   case C1StubId::handle_exception_from_callee_id: {
+   case StubId::c1_handle_exception_from_callee_id: {
     __ pop_frame();
     __ z_br(Z_R2); // Jump to exception handler.
   }
diff --git a/src/hotspot/cpu/s390/runtime_s390.cpp b/src/hotspot/cpu/s390/runtime_s390.cpp
index 99a33716b8b..314c407af91 100644
--- a/src/hotspot/cpu/s390/runtime_s390.cpp
+++ b/src/hotspot/cpu/s390/runtime_s390.cpp
@@ -70,7 +70,7 @@ ExceptionBlob* OptoRuntime::generate_exception_blob() {
   // Allocate space for the code
   ResourceMark rm;
   // Setup code generation tools
-  const char* name = OptoRuntime::stub_name(OptoStubId::exception_id);
+  const char* name = OptoRuntime::stub_name(StubId::c2_exception_id);
   CodeBuffer buffer(name, 2048, 1024);
   if (buffer.blob() == nullptr) {
     return nullptr;
diff --git a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
index cb1f12504fd..90f54b60611 100644
--- a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
+++ b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
@@ -2544,7 +2544,7 @@ void SharedRuntime::generate_deopt_blob() {
   // Allocate space for the code.
   ResourceMark rm;
   // Setup code generation tools.
-  const char* name = SharedRuntime::stub_name(SharedStubId::deopt_id);
+  const char* name = SharedRuntime::stub_name(StubId::shared_deopt_id);
   CodeBuffer buffer(name, 2048, 1024);
   InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer);
   Label exec_mode_initialized;
@@ -2767,7 +2767,7 @@ UncommonTrapBlob* OptoRuntime::generate_uncommon_trap_blob() {
   // Allocate space for the code
   ResourceMark rm;
   // Setup code generation tools
-  const char* name = OptoRuntime::stub_name(OptoStubId::uncommon_trap_id);
+  const char* name = OptoRuntime::stub_name(StubId::c2_uncommon_trap_id);
   CodeBuffer buffer(name, 2048, 1024);
   if (buffer.blob() == nullptr) {
     return nullptr;
@@ -2895,7 +2895,7 @@ UncommonTrapBlob* OptoRuntime::generate_uncommon_trap_blob() {
 //
 // Generate a special Compile2Runtime blob that saves all registers,
 // and setup oopmap.
-SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address call_ptr) {
+SafepointBlob* SharedRuntime::generate_handler_blob(StubId id, address call_ptr) {
   assert(StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
   assert(is_polling_page_id(id), "expected a polling page stub id");
@@ -2913,13 +2913,13 @@ SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address cal
   address call_pc = nullptr;
   int frame_size_in_bytes;

-  bool cause_return = (id == SharedStubId::polling_page_return_handler_id);
+  bool cause_return = (id == StubId::shared_polling_page_return_handler_id);
   // Make room for return address (or push it again)
   if (!cause_return) {
     __ z_lg(Z_R14, Address(Z_thread, JavaThread::saved_exception_pc_offset()));
   }

-  bool save_vectors = (id == SharedStubId::polling_page_vectors_safepoint_handler_id);
+  bool save_vectors = (id == StubId::shared_polling_page_vectors_safepoint_handler_id);
   // Save registers, fpu state, and flags
   map = RegisterSaver::save_live_registers(masm, RegisterSaver::all_registers, Z_R14, save_vectors);
@@ -2999,7 +2999,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address cal
 // but since this is generic code we don't know what they are and the caller
 // must do any gc of the args.
 //
-RuntimeStub* SharedRuntime::generate_resolve_blob(SharedStubId id, address destination) {
+RuntimeStub* SharedRuntime::generate_resolve_blob(StubId id, address destination) {
   assert (StubRoutines::forward_exception_entry() != nullptr, "must be generated before");
   assert(is_resolve_id(id), "expected a resolve stub id");
@@ -3098,7 +3098,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(SharedStubId id, address desti
 // SharedRuntime.cpp requires that this code be generated into a
 // RuntimeStub.
-RuntimeStub* SharedRuntime::generate_throw_exception(SharedStubId id, address runtime_entry) {
+RuntimeStub* SharedRuntime::generate_throw_exception(StubId id, address runtime_entry) {
   assert(is_throw_id(id), "expected a throw stub id");

   const char* name = SharedRuntime::stub_name(id);
diff --git a/src/hotspot/cpu/s390/stubGenerator_s390.cpp b/src/hotspot/cpu/s390/stubGenerator_s390.cpp
index 45a9e02ac27..aaed67fd269 100644
--- a/src/hotspot/cpu/s390/stubGenerator_s390.cpp
+++ b/src/hotspot/cpu/s390/stubGenerator_s390.cpp
@@ -118,7 +118,7 @@ class StubGenerator: public StubCodeGenerator {
   // Set up a new C frame, copy Java arguments, call template interpreter
   // or native_entry, and process result.

-    StubGenStubId stub_id = StubGenStubId::call_stub_id;
+    StubId stub_id = StubId::stubgen_call_stub_id;
     StubCodeMark mark(this, stub_id);
     address start = __ pc();
@@ -459,7 +459,7 @@ class StubGenerator: public StubCodeGenerator {
   // pending exception stored in JavaThread that can be tested from
   // within the VM.
   address generate_catch_exception() {
-    StubGenStubId stub_id = StubGenStubId::catch_exception_id;
+    StubId stub_id = StubId::stubgen_catch_exception_id;
     StubCodeMark mark(this, stub_id);
     address start = __ pc();
@@ -511,7 +511,7 @@ class StubGenerator: public StubCodeGenerator {
   // (Z_R14 is unchanged and is live out).
   //
   address generate_forward_exception() {
-    StubGenStubId stub_id = StubGenStubId::forward_exception_id;
+    StubId stub_id = StubId::stubgen_forward_exception_id;
     StubCodeMark mark(this, stub_id);
     address start = __ pc();
@@ -592,7 +592,7 @@ class StubGenerator: public StubCodeGenerator {
   //   raddr: Z_R14, blown by call
   //
   address generate_partial_subtype_check() {
-    StubGenStubId stub_id = StubGenStubId::partial_subtype_check_id;
+    StubId stub_id = StubId::stubgen_partial_subtype_check_id;
     StubCodeMark mark(this, stub_id);
     Label miss;
@@ -626,7 +626,7 @@ class StubGenerator: public StubCodeGenerator {
   }

   void generate_lookup_secondary_supers_table_stub() {
-    StubGenStubId stub_id = StubGenStubId::lookup_secondary_supers_table_id;
+    StubId stub_id = StubId::stubgen_lookup_secondary_supers_table_id;
     StubCodeMark mark(this, stub_id);

     const Register
@@ -649,7 +649,7 @@ class StubGenerator: public StubCodeGenerator {

   // Slow path implementation for UseSecondarySupersTable.
   address generate_lookup_secondary_supers_table_slow_path_stub() {
-    StubGenStubId stub_id = StubGenStubId::lookup_secondary_supers_table_slow_path_id;
+    StubId stub_id = StubId::stubgen_lookup_secondary_supers_table_slow_path_id;
     StubCodeMark mark(this, stub_id);
     address start = __ pc();
@@ -1265,39 +1265,39 @@ class StubGenerator: public StubCodeGenerator {
     }
   }

-  address generate_disjoint_nonoop_copy(StubGenStubId stub_id) {
+  address generate_disjoint_nonoop_copy(StubId stub_id) {
     bool aligned;
     int element_size;
     switch (stub_id) {
-    case jbyte_disjoint_arraycopy_id:
+    case StubId::stubgen_jbyte_disjoint_arraycopy_id:
      aligned = false;
      element_size = 1;
      break;
-    case arrayof_jbyte_disjoint_arraycopy_id:
+    case StubId::stubgen_arrayof_jbyte_disjoint_arraycopy_id:
      aligned = true;
      element_size = 1;
      break;
-    case jshort_disjoint_arraycopy_id:
+    case StubId::stubgen_jshort_disjoint_arraycopy_id:
      aligned = false;
      element_size = 2;
      break;
-    case arrayof_jshort_disjoint_arraycopy_id:
+    case StubId::stubgen_arrayof_jshort_disjoint_arraycopy_id:
      aligned = true;
      element_size = 2;
      break;
-    case jint_disjoint_arraycopy_id:
+    case StubId::stubgen_jint_disjoint_arraycopy_id:
      aligned = false;
      element_size = 4;
      break;
-    case arrayof_jint_disjoint_arraycopy_id:
+    case StubId::stubgen_arrayof_jint_disjoint_arraycopy_id:
      aligned = true;
      element_size = 4;
      break;
-    case jlong_disjoint_arraycopy_id:
+    case StubId::stubgen_jlong_disjoint_arraycopy_id:
      aligned = false;
      element_size = 8;
      break;
-    case arrayof_jlong_disjoint_arraycopy_id:
+    case StubId::stubgen_arrayof_jlong_disjoint_arraycopy_id:
      aligned = true;
      element_size = 8;
      break;
@@ -1310,23 +1310,23 @@ class StubGenerator: public StubCodeGenerator {
     return __ addr_at(start_off);
   }

-  address generate_disjoint_oop_copy(StubGenStubId stub_id) {
+  address generate_disjoint_oop_copy(StubId stub_id) {
     bool aligned;
     bool dest_uninitialized;
     switch (stub_id) {
-    case oop_disjoint_arraycopy_id:
+    case StubId::stubgen_oop_disjoint_arraycopy_id:
      aligned = false;
      dest_uninitialized = false;
      break;
-    case arrayof_oop_disjoint_arraycopy_id:
+    case StubId::stubgen_arrayof_oop_disjoint_arraycopy_id:
      aligned = true;
      dest_uninitialized = false;
      break;
-    case oop_disjoint_arraycopy_uninit_id:
+    case StubId::stubgen_oop_disjoint_arraycopy_uninit_id:
      aligned = false;
      dest_uninitialized = true;
      break;
-    case arrayof_oop_disjoint_arraycopy_uninit_id:
+    case StubId::stubgen_arrayof_oop_disjoint_arraycopy_uninit_id:
      aligned = true;
      dest_uninitialized = true;
      break;
@@ -1357,47 +1357,47 @@ class StubGenerator: public StubCodeGenerator {
     return __ addr_at(start_off);
   }

-  address generate_conjoint_nonoop_copy(StubGenStubId stub_id) {
+  address generate_conjoint_nonoop_copy(StubId stub_id) {
     bool aligned;
     int shift; // i.e. log2(element size)
     address nooverlap_target;
     switch (stub_id) {
-    case jbyte_arraycopy_id:
+    case StubId::stubgen_jbyte_arraycopy_id:
      aligned = false;
      shift = 0;
      nooverlap_target = StubRoutines::jbyte_disjoint_arraycopy();
      break;
-    case arrayof_jbyte_arraycopy_id:
+    case StubId::stubgen_arrayof_jbyte_arraycopy_id:
      aligned = true;
      shift = 0;
      nooverlap_target = StubRoutines::arrayof_jbyte_disjoint_arraycopy();
      break;
-    case jshort_arraycopy_id:
+    case StubId::stubgen_jshort_arraycopy_id:
      aligned = false;
      shift = 1;
      nooverlap_target = StubRoutines::jshort_disjoint_arraycopy();
      break;
-    case arrayof_jshort_arraycopy_id:
+    case StubId::stubgen_arrayof_jshort_arraycopy_id:
      aligned = true;
      shift = 1;
      nooverlap_target = StubRoutines::arrayof_jshort_disjoint_arraycopy();
      break;
-    case jint_arraycopy_id:
+    case StubId::stubgen_jint_arraycopy_id:
      aligned = false;
      shift = 2;
      nooverlap_target = StubRoutines::jint_disjoint_arraycopy();
      break;
-    case arrayof_jint_arraycopy_id:
+    case StubId::stubgen_arrayof_jint_arraycopy_id:
      aligned = true;
      shift = 2;
      nooverlap_target = StubRoutines::arrayof_jint_disjoint_arraycopy();
      break;
-    case jlong_arraycopy_id:
+    case StubId::stubgen_jlong_arraycopy_id:
      aligned = false;
      shift = 3;
      nooverlap_target = StubRoutines::jlong_disjoint_arraycopy();
      break;
-    case arrayof_jlong_arraycopy_id:
+    case StubId::stubgen_arrayof_jlong_arraycopy_id:
      aligned = true;
      shift = 3;
      nooverlap_target = StubRoutines::arrayof_jlong_disjoint_arraycopy();
@@ -1412,27 +1412,27 @@ class StubGenerator: public StubCodeGenerator {
     return __ addr_at(start_off);
   }

-  address generate_conjoint_oop_copy(StubGenStubId stub_id) {
+  address generate_conjoint_oop_copy(StubId stub_id) {
     bool aligned;
     bool dest_uninitialized;
     address nooverlap_target;
     switch (stub_id) {
-    case oop_arraycopy_id:
+    case StubId::stubgen_oop_arraycopy_id:
      aligned = false;
      dest_uninitialized = false;
      nooverlap_target = StubRoutines::oop_disjoint_arraycopy(dest_uninitialized);
      break;
-    case arrayof_oop_arraycopy_id:
+    case StubId::stubgen_arrayof_oop_arraycopy_id:
      aligned = true;
      dest_uninitialized = false;
      nooverlap_target = StubRoutines::arrayof_oop_disjoint_arraycopy(dest_uninitialized);
      break;
-    case oop_arraycopy_uninit_id:
+    case StubId::stubgen_oop_arraycopy_uninit_id:
      aligned = false;
      dest_uninitialized = true;
      nooverlap_target = StubRoutines::oop_disjoint_arraycopy(dest_uninitialized);
      break;
-    case arrayof_oop_arraycopy_uninit_id:
+    case StubId::stubgen_arrayof_oop_arraycopy_uninit_id:
      aligned = true;
      dest_uninitialized = true;
      nooverlap_target = StubRoutines::arrayof_oop_disjoint_arraycopy(dest_uninitialized);
@@ -1480,7 +1480,7 @@ class StubGenerator: public StubCodeGenerator {
   //
   address generate_unsafe_setmemory(address unsafe_byte_fill) {
     __ align(CodeEntryAlignment);
-    StubCodeMark mark(this, StubGenStubId::unsafe_setmemory_id);
+    StubCodeMark mark(this, StubId::stubgen_unsafe_setmemory_id);
     unsigned int start_off = __ offset();

     // bump this on entry, not on exit:
@@ -1582,33 +1582,33 @@ class StubGenerator: public StubCodeGenerator {
     address ucm_common_error_exit = generate_unsafecopy_common_error_exit();
     UnsafeMemoryAccess::set_common_exit_stub_pc(ucm_common_error_exit);

-    StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_nonoop_copy (StubGenStubId::jbyte_disjoint_arraycopy_id);
-    StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_nonoop_copy(StubGenStubId::jshort_disjoint_arraycopy_id);
-    StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_nonoop_copy (StubGenStubId::jint_disjoint_arraycopy_id);
-    StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_nonoop_copy (StubGenStubId::jlong_disjoint_arraycopy_id);
-    StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy (StubGenStubId::oop_disjoint_arraycopy_id);
-    StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy (StubGenStubId::oop_disjoint_arraycopy_uninit_id);
+    StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_nonoop_copy (StubId::stubgen_jbyte_disjoint_arraycopy_id);
+    StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_nonoop_copy(StubId::stubgen_jshort_disjoint_arraycopy_id);
+    StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_nonoop_copy (StubId::stubgen_jint_disjoint_arraycopy_id);
+    StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_nonoop_copy (StubId::stubgen_jlong_disjoint_arraycopy_id);
+    StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy (StubId::stubgen_oop_disjoint_arraycopy_id);
+    StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy (StubId::stubgen_oop_disjoint_arraycopy_uninit_id);

-    StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_nonoop_copy (StubGenStubId::arrayof_jbyte_disjoint_arraycopy_id);
-    StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_nonoop_copy(StubGenStubId::arrayof_jshort_disjoint_arraycopy_id);
-    StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_nonoop_copy (StubGenStubId::arrayof_jint_disjoint_arraycopy_id);
-    StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_nonoop_copy (StubGenStubId::arrayof_jlong_disjoint_arraycopy_id);
-    StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_disjoint_oop_copy (StubGenStubId::arrayof_oop_disjoint_arraycopy_id);
-    StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy (StubGenStubId::arrayof_oop_disjoint_arraycopy_uninit_id);
+    StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_nonoop_copy (StubId::stubgen_arrayof_jbyte_disjoint_arraycopy_id);
+    StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_nonoop_copy(StubId::stubgen_arrayof_jshort_disjoint_arraycopy_id);
+    StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_nonoop_copy (StubId::stubgen_arrayof_jint_disjoint_arraycopy_id);
+    StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_nonoop_copy (StubId::stubgen_arrayof_jlong_disjoint_arraycopy_id);
+    StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_disjoint_oop_copy (StubId::stubgen_arrayof_oop_disjoint_arraycopy_id);
+    StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy (StubId::stubgen_arrayof_oop_disjoint_arraycopy_uninit_id);

-    StubRoutines::_jbyte_arraycopy = generate_conjoint_nonoop_copy(StubGenStubId::jbyte_arraycopy_id);
-    StubRoutines::_jshort_arraycopy = generate_conjoint_nonoop_copy(StubGenStubId::jshort_arraycopy_id);
-    StubRoutines::_jint_arraycopy = generate_conjoint_nonoop_copy(StubGenStubId::jint_arraycopy_id);
-    StubRoutines::_jlong_arraycopy = generate_conjoint_nonoop_copy(StubGenStubId::jlong_arraycopy_id);
-    StubRoutines::_oop_arraycopy = generate_conjoint_oop_copy(StubGenStubId::oop_arraycopy_id);
-    StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy(StubGenStubId::oop_arraycopy_uninit_id);
+    StubRoutines::_jbyte_arraycopy = generate_conjoint_nonoop_copy(StubId::stubgen_jbyte_arraycopy_id);
+    StubRoutines::_jshort_arraycopy = generate_conjoint_nonoop_copy(StubId::stubgen_jshort_arraycopy_id);
+    StubRoutines::_jint_arraycopy = generate_conjoint_nonoop_copy(StubId::stubgen_jint_arraycopy_id);
+    StubRoutines::_jlong_arraycopy = generate_conjoint_nonoop_copy(StubId::stubgen_jlong_arraycopy_id);
+    StubRoutines::_oop_arraycopy = generate_conjoint_oop_copy(StubId::stubgen_oop_arraycopy_id);
+    StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy(StubId::stubgen_oop_arraycopy_uninit_id);

-    StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_nonoop_copy(StubGenStubId::arrayof_jbyte_arraycopy_id);
-    StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_nonoop_copy(StubGenStubId::arrayof_jshort_arraycopy_id);
-    StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_nonoop_copy (StubGenStubId::arrayof_jint_arraycopy_id);
-    StubRoutines::_arrayof_jlong_arraycopy = generate_conjoint_nonoop_copy(StubGenStubId::arrayof_jlong_arraycopy_id);
-    StubRoutines::_arrayof_oop_arraycopy = generate_conjoint_oop_copy(StubGenStubId::arrayof_oop_arraycopy_id);
-    StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy(StubGenStubId::arrayof_oop_arraycopy_uninit_id);
+    StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_nonoop_copy(StubId::stubgen_arrayof_jbyte_arraycopy_id);
+    StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_nonoop_copy(StubId::stubgen_arrayof_jshort_arraycopy_id);
+    StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_nonoop_copy (StubId::stubgen_arrayof_jint_arraycopy_id);
+    StubRoutines::_arrayof_jlong_arraycopy = generate_conjoint_nonoop_copy(StubId::stubgen_arrayof_jlong_arraycopy_id);
+    StubRoutines::_arrayof_oop_arraycopy = generate_conjoint_oop_copy(StubId::stubgen_arrayof_oop_arraycopy_id);
+    StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy(StubId::stubgen_arrayof_oop_arraycopy_uninit_id);

 #ifdef COMPILER2
     StubRoutines::_unsafe_setmemory =
@@ -1898,7 +1898,7 @@ class StubGenerator: public StubCodeGenerator {
   // Compute AES encrypt function.
   address generate_AES_encryptBlock() {
     __ align(CodeEntryAlignment);
-    StubGenStubId stub_id = StubGenStubId::aescrypt_encryptBlock_id;
+    StubId stub_id = StubId::stubgen_aescrypt_encryptBlock_id;
     StubCodeMark mark(this, stub_id);
     unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
@@ -1910,7 +1910,7 @@ class StubGenerator: public StubCodeGenerator {
   // Compute AES decrypt function.
   address generate_AES_decryptBlock() {
     __ align(CodeEntryAlignment);
-    StubGenStubId stub_id = StubGenStubId::aescrypt_decryptBlock_id;
+    StubId stub_id = StubId::stubgen_aescrypt_decryptBlock_id;
     StubCodeMark mark(this, stub_id);
     unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
@@ -1971,7 +1971,7 @@ class StubGenerator: public StubCodeGenerator {
   // Compute chained AES encrypt function.
   address generate_cipherBlockChaining_AES_encrypt() {
     __ align(CodeEntryAlignment);
-    StubGenStubId stub_id = StubGenStubId::cipherBlockChaining_encryptAESCrypt_id;
+    StubId stub_id = StubId::stubgen_cipherBlockChaining_encryptAESCrypt_id;
     StubCodeMark mark(this, stub_id);
     unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
@@ -1983,7 +1983,7 @@ class StubGenerator: public StubCodeGenerator {
   // Compute chained AES decrypt function.
   address generate_cipherBlockChaining_AES_decrypt() {
     __ align(CodeEntryAlignment);
-    StubGenStubId stub_id = StubGenStubId::cipherBlockChaining_decryptAESCrypt_id;
+    StubId stub_id = StubId::stubgen_cipherBlockChaining_decryptAESCrypt_id;
     StubCodeMark mark(this, stub_id);
     unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
@@ -2690,7 +2690,7 @@ class StubGenerator: public StubCodeGenerator {
   // Encrypt or decrypt is selected via parameters. Only one stub is necessary.
   address generate_counterMode_AESCrypt() {
     __ align(CodeEntryAlignment);
-    StubGenStubId stub_id = StubGenStubId::counterMode_AESCrypt_id;
+    StubId stub_id = StubId::stubgen_counterMode_AESCrypt_id;
     StubCodeMark mark(this, stub_id);
     unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
@@ -2704,7 +2704,7 @@ class StubGenerator: public StubCodeGenerator {
   // Compute GHASH function.
   address generate_ghash_processBlocks() {
     __ align(CodeEntryAlignment);
-    StubGenStubId stub_id = StubGenStubId::ghash_processBlocks_id;
+    StubId stub_id = StubId::stubgen_ghash_processBlocks_id;
     StubCodeMark mark(this, stub_id);
     unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
@@ -2782,13 +2782,13 @@ class StubGenerator: public StubCodeGenerator {
   // provides for a large enough source data buffer.
   //
   // Compute SHA-1 function.
-  address generate_SHA1_stub(StubGenStubId stub_id) {
+  address generate_SHA1_stub(StubId stub_id) {
     bool multiBlock;
     switch (stub_id) {
-    case sha1_implCompress_id:
+    case StubId::stubgen_sha1_implCompress_id:
       multiBlock = false;
       break;
-    case sha1_implCompressMB_id:
+    case StubId::stubgen_sha1_implCompressMB_id:
       multiBlock = true;
       break;
     default:
@@ -2875,13 +2875,13 @@ class StubGenerator: public StubCodeGenerator {
   }

   // Compute SHA-256 function.
-  address generate_SHA256_stub(StubGenStubId stub_id) {
+  address generate_SHA256_stub(StubId stub_id) {
     bool multiBlock;
     switch (stub_id) {
-    case sha256_implCompress_id:
+    case StubId::stubgen_sha256_implCompress_id:
       multiBlock = false;
       break;
-    case sha256_implCompressMB_id:
+    case StubId::stubgen_sha256_implCompressMB_id:
       multiBlock = true;
       break;
     default:
@@ -2966,13 +2966,13 @@ class StubGenerator: public StubCodeGenerator {
   }

   // Compute SHA-512 function.
-  address generate_SHA512_stub(StubGenStubId stub_id) {
+  address generate_SHA512_stub(StubId stub_id) {
     bool multiBlock;
     switch (stub_id) {
-    case sha512_implCompress_id:
+    case StubId::stubgen_sha512_implCompress_id:
       multiBlock = false;
       break;
-    case sha512_implCompressMB_id:
+    case StubId::stubgen_sha512_implCompressMB_id:
       multiBlock = true;
       break;
     default:
@@ -3102,7 +3102,7 @@ class StubGenerator: public StubCodeGenerator {
   // Compute CRC32 function.
   address generate_CRC32_updateBytes() {
     __ align(CodeEntryAlignment);
-    StubGenStubId stub_id = StubGenStubId::updateBytesCRC32_id;
+    StubId stub_id = StubId::stubgen_updateBytesCRC32_id;
     StubCodeMark mark(this, stub_id);
     unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
@@ -3122,7 +3122,7 @@ class StubGenerator: public StubCodeGenerator {
   // Compute CRC32C function.
   address generate_CRC32C_updateBytes() {
     __ align(CodeEntryAlignment);
-    StubGenStubId stub_id = StubGenStubId::updateBytesCRC32C_id;
+    StubId stub_id = StubId::stubgen_updateBytesCRC32C_id;
     StubCodeMark mark(this, stub_id);
     unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
@@ -3147,7 +3147,7 @@ class StubGenerator: public StubCodeGenerator {
   //   Z_ARG5 - z address
   address generate_multiplyToLen() {
     __ align(CodeEntryAlignment);
-    StubGenStubId stub_id = StubGenStubId::multiplyToLen_id;
+    StubId stub_id = StubId::stubgen_multiplyToLen_id;
     StubCodeMark mark(this, stub_id);
     address start = __ pc();
@@ -3179,7 +3179,7 @@ class StubGenerator: public StubCodeGenerator {

   address generate_method_entry_barrier() {
     __ align(CodeEntryAlignment);
-    StubGenStubId stub_id = StubGenStubId::method_entry_barrier_id;
+    StubId stub_id = StubId::stubgen_method_entry_barrier_id;
     StubCodeMark mark(this, stub_id);
     address start = __ pc();
@@ -3245,7 +3245,7 @@ class StubGenerator: public StubCodeGenerator {

   // exception handler for upcall stubs
   address generate_upcall_stub_exception_handler() {
-    StubGenStubId stub_id = StubGenStubId::upcall_stub_exception_handler_id;
+    StubId stub_id = StubId::stubgen_upcall_stub_exception_handler_id;
     StubCodeMark mark(this, stub_id);
     address start = __ pc();
@@ -3263,7 +3263,7 @@ class StubGenerator: public StubCodeGenerator {
   // Z_ARG1 = jobject receiver
   // Z_method = Method* result
   address generate_upcall_stub_load_target() {
-    StubGenStubId stub_id = StubGenStubId::upcall_stub_load_target_id;
+    StubId stub_id = StubId::stubgen_upcall_stub_load_target_id;
     StubCodeMark mark(this, stub_id);
     address start = __ pc();
@@ -3391,16 +3391,16 @@ class StubGenerator: public StubCodeGenerator {

     // Generate SHA1/SHA256/SHA512 intrinsics code.
     if (UseSHA1Intrinsics) {
-      StubRoutines::_sha1_implCompress = generate_SHA1_stub(StubGenStubId::sha1_implCompress_id);
-      StubRoutines::_sha1_implCompressMB = generate_SHA1_stub(StubGenStubId::sha1_implCompressMB_id);
+      StubRoutines::_sha1_implCompress = generate_SHA1_stub(StubId::stubgen_sha1_implCompress_id);
+      StubRoutines::_sha1_implCompressMB = generate_SHA1_stub(StubId::stubgen_sha1_implCompressMB_id);
     }
     if (UseSHA256Intrinsics) {
-      StubRoutines::_sha256_implCompress = generate_SHA256_stub(StubGenStubId::sha256_implCompress_id);
-      StubRoutines::_sha256_implCompressMB = generate_SHA256_stub(StubGenStubId::sha256_implCompressMB_id);
+      StubRoutines::_sha256_implCompress = generate_SHA256_stub(StubId::stubgen_sha256_implCompress_id);
+      StubRoutines::_sha256_implCompressMB = generate_SHA256_stub(StubId::stubgen_sha256_implCompressMB_id);
     }
     if (UseSHA512Intrinsics) {
-      StubRoutines::_sha512_implCompress = generate_SHA512_stub(StubGenStubId::sha512_implCompress_id);
-      StubRoutines::_sha512_implCompressMB = generate_SHA512_stub(StubGenStubId::sha512_implCompressMB_id);
+      StubRoutines::_sha512_implCompress = generate_SHA512_stub(StubId::stubgen_sha512_implCompress_id);
+      StubRoutines::_sha512_implCompressMB = generate_SHA512_stub(StubId::stubgen_sha512_implCompressMB_id);
     }

 #ifdef COMPILER2
@@ -3420,25 +3420,25 @@ class StubGenerator: public StubCodeGenerator {
   }

  public:
-  StubGenerator(CodeBuffer* code, StubGenBlobId blob_id) : StubCodeGenerator(code, blob_id) {
+  StubGenerator(CodeBuffer* code, BlobId blob_id) : StubCodeGenerator(code, blob_id) {
     switch(blob_id) {
-    case preuniverse_id:
+    case BlobId::stubgen_preuniverse_id:
       generate_preuniverse_stubs();
       break;
-    case initial_id:
+    case BlobId::stubgen_initial_id:
       generate_initial_stubs();
       break;
-    case continuation_id:
+    case BlobId::stubgen_continuation_id:
       generate_continuation_stubs();
       break;
-    case compiler_id:
+    case BlobId::stubgen_compiler_id:
       generate_compiler_stubs();
       break;
-    case final_id:
+    case BlobId::stubgen_final_id:
       generate_final_stubs();
       break;
     default:
-      fatal("unexpected blob id: %d", blob_id);
+      fatal("unexpected blob id: %s", StubInfo::name(blob_id));
       break;
     };
   }
@@ -3477,6 +3477,6 @@ class StubGenerator: public StubCodeGenerator {
 };

-void StubGenerator_generate(CodeBuffer* code, StubGenBlobId blob_id) {
+void StubGenerator_generate(CodeBuffer* code, BlobId blob_id) {
   StubGenerator g(code, blob_id);
 }
diff --git a/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp b/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp
index 7c0d3ff624d..6d3cf649233 100644
--- a/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp
@@ -55,7 +55,7 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
   Metadata *m = _method->as_constant_ptr()->as_metadata();
   ce->store_parameter(m, 1);
   ce->store_parameter(_bci, 0);
-  __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::counter_overflow_id)));
+  __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_counter_overflow_id)));
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
   __ jmp(_continuation);
@@ -64,7 +64,7 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
 void RangeCheckStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   if (_info->deoptimize_on_exception()) {
-    address a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
+    address a = Runtime1::entry_for(StubId::c1_predicate_failed_trap_id);
     __ call(RuntimeAddress(a));
     ce->add_call_info_here(_info);
     ce->verify_oop_map(_info);
@@ -78,11 +78,11 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
   } else {
     ce->store_parameter(_index->as_jint(), 0);
   }
-  C1StubId stub_id;
+  StubId stub_id;
   if (_throw_index_out_of_bounds_exception) {
-    stub_id = C1StubId::throw_index_exception_id;
+    stub_id = StubId::c1_throw_index_exception_id;
   } else {
-    stub_id = C1StubId::throw_range_check_failed_id;
+    stub_id = StubId::c1_throw_range_check_failed_id;
     ce->store_parameter(_array->as_pointer_register(), 1);
   }
   __ call(RuntimeAddress(Runtime1::entry_for(stub_id)));
@@ -97,7 +97,7 @@ PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {

 void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
-  address a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
+  address a = Runtime1::entry_for(StubId::c1_predicate_failed_trap_id);
   __ call(RuntimeAddress(a));
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
@@ -109,7 +109,7 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) {
     ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
   }
   __ bind(_entry);
-  __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::throw_div0_exception_id)));
+  __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_throw_div0_exception_id)));
   ce->add_call_info_here(_info);
   DEBUG_ONLY(__ should_not_reach_here());
 }
@@ -117,14 +117,14 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) {

 // Implementation of NewInstanceStub

-NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, C1StubId stub_id) {
+NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, StubId stub_id) {
   _result = result;
   _klass = klass;
   _klass_reg = klass_reg;
   _info = new CodeEmitInfo(info);
-  assert(stub_id == C1StubId::new_instance_id ||
-         stub_id == C1StubId::fast_new_instance_id ||
-         stub_id == C1StubId::fast_new_instance_init_check_id,
+  assert(stub_id == StubId::c1_new_instance_id ||
+         stub_id == StubId::c1_fast_new_instance_id ||
+         stub_id == StubId::c1_fast_new_instance_init_check_id,
          "need new_instance id");
   _stub_id = stub_id;
 }
@@ -157,7 +157,7 @@ void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   assert(_length->as_register() == rbx, "length must in rbx,");
   assert(_klass_reg->as_register() == rdx, "klass_reg must in rdx");
-  __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::new_type_array_id)));
+  __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_new_type_array_id)));
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
   assert(_result->as_register() == rax, "result must in rax,");
@@ -180,7 +180,7 @@ void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   assert(_length->as_register() == rbx, "length must in rbx,");
   assert(_klass_reg->as_register() == rdx, "klass_reg must in rdx");
-  __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::new_object_array_id)));
+  __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_new_object_array_id)));
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
   assert(_result->as_register() == rax, "result must in rax,");
@@ -192,11 +192,11 @@ void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   ce->store_parameter(_obj_reg->as_register(), 1);
   ce->store_parameter(_lock_reg->as_register(), 0);
-  C1StubId enter_id;
+  StubId enter_id;
   if (ce->compilation()->has_fpu_code()) {
-    enter_id = C1StubId::monitorenter_id;
+    enter_id = StubId::c1_monitorenter_id;
   } else {
-    enter_id = C1StubId::monitorenter_nofpu_id;
+    enter_id = StubId::c1_monitorenter_nofpu_id;
   }
   __ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
   ce->add_call_info_here(_info);
@@ -213,11 +213,11 @@ void MonitorExitStub::emit_code(LIR_Assembler* ce) {
   }
   ce->store_parameter(_lock_reg->as_register(), 0);
   // note: non-blocking leaf routine => no call info needed
-  C1StubId exit_id;
+  StubId exit_id;
   if (ce->compilation()->has_fpu_code()) {
-    exit_id = C1StubId::monitorexit_id;
+    exit_id = StubId::c1_monitorexit_id;
   } else {
-    exit_id = C1StubId::monitorexit_nofpu_id;
+    exit_id = StubId::c1_monitorexit_nofpu_id;
   }
   __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
   __ jmp(_continuation);
@@ -346,10 +346,10 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
   address target = nullptr;
   relocInfo::relocType reloc_type = relocInfo::none;
   switch (_id) {
-    case access_field_id: target = Runtime1::entry_for(C1StubId::access_field_patching_id); break;
-    case load_klass_id: target = Runtime1::entry_for(C1StubId::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
-    case load_mirror_id: target = Runtime1::entry_for(C1StubId::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
-    case load_appendix_id: target = Runtime1::entry_for(C1StubId::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
+    case access_field_id: target = Runtime1::entry_for(StubId::c1_access_field_patching_id); break;
+    case load_klass_id: target = Runtime1::entry_for(StubId::c1_load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
+    case load_mirror_id: target = Runtime1::entry_for(StubId::c1_load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
+    case load_appendix_id: target = Runtime1::entry_for(StubId::c1_load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
     default: ShouldNotReachHere();
   }
   __ bind(call_patch);
@@ -379,7 +379,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
 void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   ce->store_parameter(_trap_request, 0);
-  __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::deoptimize_id)));
+  __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_deoptimize_id)));
   ce->add_call_info_here(_info);
   DEBUG_ONLY(__ should_not_reach_here());
 }
@@ -389,9 +389,9 @@ void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
   address a;
   if (_info->deoptimize_on_exception()) {
     // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
-    a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
+    a = Runtime1::entry_for(StubId::c1_predicate_failed_trap_id);
   } else {
-    a = Runtime1::entry_for(C1StubId::throw_null_pointer_exception_id);
+    a = Runtime1::entry_for(StubId::c1_throw_null_pointer_exception_id);
   }

   ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
diff --git a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
index 04e32e2b8be..1a639b243c1 100644
--- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
@@ -377,7 +377,7 @@ int LIR_Assembler::emit_exception_handler() {
   __ verify_not_null_oop(rax);
   // search an exception handler (rax: exception oop, rdx: throwing pc)
-  __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::handle_exception_from_callee_id)));
+  __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_handle_exception_from_callee_id)));
   __ should_not_reach_here();
   guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
   __ end_a_stub();
@@ -433,7 +433,7 @@ int LIR_Assembler::emit_unwind_handler() {
   // remove the activation and dispatch to the unwind handler
   __ remove_frame(initial_frame_size_in_bytes());
-  __ jump(RuntimeAddress(Runtime1::entry_for(C1StubId::unwind_exception_id)));
+  __ jump(RuntimeAddress(Runtime1::entry_for(StubId::c1_unwind_exception_id)));

   // Emit the slow path assembly
   if (stub != nullptr) {
@@ -1387,7 +1387,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
     __ push(klass_RInfo);
     __ push(k_RInfo);
-    __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id)));
+    __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
     __ pop(klass_RInfo);
     __ pop(klass_RInfo);
     // result is a boolean
@@ -1401,7 +1401,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
     // call out-of-line instance of __ check_klass_subtype_slow_path(...):
     __ push(klass_RInfo);
     __ push(k_RInfo);
-    __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id)));
+    __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
     __ pop(klass_RInfo);
     __ pop(k_RInfo);
     // result is a boolean
@@ -1480,7 +1480,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
     // call out-of-line instance of __ check_klass_subtype_slow_path(...):
     __ push(klass_RInfo);
     __ push(k_RInfo);
-    __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id)));
+    __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
     __ pop(klass_RInfo);
     __ pop(k_RInfo);
     // result is a boolean
@@ -2217,7 +2217,7 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit
   // exception object is not added to oop map by LinearScan
   // (LinearScan assumes that no oops are in fixed registers)
   info->add_register_oop(exceptionOop);
-  C1StubId unwind_id;
+  StubId unwind_id;

   // get current pc information
   // pc is only needed if the method has an exception handler, the unwind code does not need it.
@@ -2229,9 +2229,9 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit
   __ verify_not_null_oop(rax);
   // search an exception handler (rax: exception oop, rdx: throwing pc)
   if (compilation()->has_fpu_code()) {
-    unwind_id = C1StubId::handle_exception_id;
+    unwind_id = StubId::c1_handle_exception_id;
   } else {
-    unwind_id = C1StubId::handle_exception_nofpu_id;
+    unwind_id = StubId::c1_handle_exception_nofpu_id;
   }
   __ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
@@ -2546,7 +2546,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
     __ push(src);
     __ push(dst);
-    __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id)));
+    __ call(RuntimeAddress(Runtime1::entry_for(StubId::c1_slow_subtype_check_id)));
     __ pop(dst);
     __ pop(src);
diff --git a/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp b/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp
index 3ea2e99fe57..04c90c43b6e 100644
--- a/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp
@@ -1243,7 +1243,7 @@ void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
   args->append(rank);
   args->append(varargs);
   LIR_Opr reg = result_register_for(x->type());
-  __ call_runtime(Runtime1::entry_for(C1StubId::new_multi_array_id),
+  __ call_runtime(Runtime1::entry_for(StubId::c1_new_multi_array_id),
                   LIR_OprFact::illegalOpr,
                   reg, args, info);
@@ -1276,12 +1276,12 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
   CodeStub* stub;
   if (x->is_incompatible_class_change_check()) {
     assert(patching_info == nullptr, "can't patch this");
-    stub = new SimpleExceptionStub(C1StubId::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
+    stub = new SimpleExceptionStub(StubId::c1_throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
   } else if (x->is_invokespecial_receiver_check()) {
     assert(patching_info == nullptr, "can't patch this");
     stub = new DeoptimizeStub(info_for_exception, Deoptimization::Reason_class_check, Deoptimization::Action_none);
   } else {
-    stub = new SimpleExceptionStub(C1StubId::throw_class_cast_exception_id, obj.result(), info_for_exception);
+    stub = new SimpleExceptionStub(StubId::c1_throw_class_cast_exception_id, obj.result(), info_for_exception);
   }
   LIR_Opr reg = rlock_result(x);
   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
@@ -1317,7 +1317,7 @@ void LIRGenerator::do_InstanceOf(InstanceOf* x) {

 // Intrinsic for Class::isInstance
 address LIRGenerator::isInstance_entry() {
-  return Runtime1::entry_for(C1StubId::is_instance_of_id);
+  return Runtime1::entry_for(StubId::c1_is_instance_of_id);
 }
diff --git a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
index cde429b0383..c9d103768b8 100644
--- a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
@@ -253,7 +253,7 @@ void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register

   if (CURRENT_ENV->dtrace_alloc_probes()) {
     assert(obj == rax, "must be");
-    call(RuntimeAddress(Runtime1::entry_for(C1StubId::dtrace_object_alloc_id)));
+    call(RuntimeAddress(Runtime1::entry_for(StubId::c1_dtrace_object_alloc_id)));
   }

   verify_oop(obj);
@@ -291,7 +291,7 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1,

   if (CURRENT_ENV->dtrace_alloc_probes()) {
     assert(obj == rax, "must be");
-    call(RuntimeAddress(Runtime1::entry_for(C1StubId::dtrace_object_alloc_id)));
+    call(RuntimeAddress(Runtime1::entry_for(StubId::c1_dtrace_object_alloc_id)));
   }

   verify_oop(obj);
diff --git a/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp b/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp
index 726574e69e8..3189854f564 100644
--- a/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp
@@ -59,7 +59,7 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre

   // At a method handle call, the stack may not be properly aligned
   // when returning with an exception.
-  align_stack = (stub_id() == (int)C1StubId::handle_exception_from_callee_id);
+  align_stack = (stub_id() == (int)StubId::c1_handle_exception_from_callee_id);

   mov(c_rarg0, thread);
   set_num_rt_args(0); // Nothing on stack
@@ -111,10 +111,10 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre
     if (frame_size() == no_frame_size) {
       leave();
       jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
-    } else if (_stub_id == (int)C1StubId::forward_exception_id) {
+    } else if (_stub_id == (int)StubId::c1_forward_exception_id) {
       should_not_reach_here();
     } else {
-      jump(RuntimeAddress(Runtime1::entry_for(C1StubId::forward_exception_id)));
+      jump(RuntimeAddress(Runtime1::entry_for(StubId::c1_forward_exception_id)));
     }
     bind(L);
   }
@@ -503,7 +503,7 @@ OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address targe
 }


-OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler *sasm) {
+OopMapSet* Runtime1::generate_handle_exception(StubId id, StubAssembler *sasm) {
   __ block_comment("generate_handle_exception");

   // incoming parameters
@@ -516,7 +516,7 @@ OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler *sasm)
   OopMapSet* oop_maps = new OopMapSet();
   OopMap* oop_map = nullptr;
   switch (id) {
-  case C1StubId::forward_exception_id:
+  case StubId::c1_forward_exception_id:
     // We're handling an exception in the context of a compiled frame.
     // The registers have been saved in the standard places. Perform
     // an exception lookup in the caller and dispatch to the handler
@@ -535,12 +535,12 @@ OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler *sasm)
     __ movptr(Address(thread, JavaThread::vm_result_oop_offset()), NULL_WORD);
     __ movptr(Address(thread, JavaThread::vm_result_metadata_offset()), NULL_WORD);
     break;
-  case C1StubId::handle_exception_nofpu_id:
-  case C1StubId::handle_exception_id:
+  case StubId::c1_handle_exception_nofpu_id:
+  case StubId::c1_handle_exception_id:
     // At this point all registers MAY be live.
-    oop_map = save_live_registers(sasm, 1 /*thread*/, id != C1StubId::handle_exception_nofpu_id);
+    oop_map = save_live_registers(sasm, 1 /*thread*/, id != StubId::c1_handle_exception_nofpu_id);
     break;
-  case C1StubId::handle_exception_from_callee_id: {
+  case StubId::c1_handle_exception_from_callee_id: {
     // At this point all registers except exception oop (RAX) and
    // exception pc (RDX) are dead.
    const int frame_size = 2 /*BP, return address*/ WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord);
@@ -597,13 +597,13 @@ OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler *sasm)
   __ movptr(Address(rbp, 1*BytesPerWord), rax);

   switch (id) {
-  case C1StubId::forward_exception_id:
-  case C1StubId::handle_exception_nofpu_id:
-  case C1StubId::handle_exception_id:
+  case StubId::c1_forward_exception_id:
+  case StubId::c1_handle_exception_nofpu_id:
+  case StubId::c1_handle_exception_id:
     // Restore the registers that were saved at the beginning.
- restore_live_registers(sasm, id != C1StubId::handle_exception_nofpu_id); + restore_live_registers(sasm, id != StubId::c1_handle_exception_nofpu_id); break; - case C1StubId::handle_exception_from_callee_id: + case StubId::c1_handle_exception_from_callee_id: // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP // since we do a leave anyway. @@ -739,7 +739,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) { __ testptr(rax, rax); // have we deoptimized? __ jump_cc(Assembler::equal, - RuntimeAddress(Runtime1::entry_for(C1StubId::forward_exception_id))); + RuntimeAddress(Runtime1::entry_for(StubId::c1_forward_exception_id))); // the deopt blob expects exceptions in the special fields of // JavaThread, so copy and clear pending exception. @@ -811,7 +811,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) { } -OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { +OopMapSet* Runtime1::generate_code_for(StubId id, StubAssembler* sasm) { // for better readability const bool must_gc_arguments = true; @@ -823,7 +823,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { // stub code & info for the different stubs OopMapSet* oop_maps = nullptr; switch (id) { - case C1StubId::forward_exception_id: + case StubId::c1_forward_exception_id: { oop_maps = generate_handle_exception(id, sasm); __ leave(); @@ -831,19 +831,19 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::new_instance_id: - case C1StubId::fast_new_instance_id: - case C1StubId::fast_new_instance_init_check_id: + case StubId::c1_new_instance_id: + case StubId::c1_fast_new_instance_id: + case StubId::c1_fast_new_instance_init_check_id: { Register klass = rdx; // Incoming Register obj = rax; // Result - if (id == C1StubId::new_instance_id) { + if (id == StubId::c1_new_instance_id) { __ set_info("new_instance", dont_gc_arguments); - } else if (id == C1StubId::fast_new_instance_id) { + } else if (id == StubId::c1_fast_new_instance_id) { __ set_info("fast new_instance", dont_gc_arguments); } else { - assert(id == C1StubId::fast_new_instance_init_check_id, "bad C1StubId"); + assert(id == StubId::c1_fast_new_instance_init_check_id, "bad StubId"); __ set_info("fast new_instance init check", dont_gc_arguments); } @@ -862,7 +862,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { break; - case C1StubId::counter_overflow_id: + case StubId::c1_counter_overflow_id: { Register bci = rax, method = rbx; __ enter(); @@ -880,14 +880,14 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::new_type_array_id: - case C1StubId::new_object_array_id: + case StubId::c1_new_type_array_id: + case StubId::c1_new_object_array_id: { Register length = rbx; // Incoming Register klass = rdx; // Incoming Register obj = rax; // Result - if (id == C1StubId::new_type_array_id) { + if (id == StubId::c1_new_type_array_id) { __ set_info("new_type_array", dont_gc_arguments); } else { __ set_info("new_object_array", dont_gc_arguments); @@ -900,7 +900,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { Register t0 = obj; __ movl(t0, Address(klass, Klass::layout_helper_offset())); __ sarl(t0, Klass::_lh_array_tag_shift); - int tag = ((id == C1StubId::new_type_array_id) + int tag = ((id == StubId::c1_new_type_array_id) ? 
Klass::_lh_array_tag_type_value : Klass::_lh_array_tag_obj_value); __ cmpl(t0, tag); @@ -914,7 +914,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { __ enter(); OopMap* map = save_live_registers(sasm, 3); int call_offset; - if (id == C1StubId::new_type_array_id) { + if (id == StubId::c1_new_type_array_id) { call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length); } else { call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length); @@ -932,7 +932,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::new_multi_array_id: + case StubId::c1_new_multi_array_id: { StubFrame f(sasm, "new_multi_array", dont_gc_arguments); // rax,: klass // rbx,: rank @@ -949,7 +949,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::register_finalizer_id: + case StubId::c1_register_finalizer_id: { __ set_info("register_finalizer", dont_gc_arguments); @@ -982,44 +982,44 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::throw_range_check_failed_id: + case StubId::c1_throw_range_check_failed_id: { StubFrame f(sasm, "range_check_failed", dont_gc_arguments); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true); } break; - case C1StubId::throw_index_exception_id: + case StubId::c1_throw_index_exception_id: { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true); } break; - case C1StubId::throw_div0_exception_id: + case StubId::c1_throw_div0_exception_id: { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false); } break; - case C1StubId::throw_null_pointer_exception_id: + case StubId::c1_throw_null_pointer_exception_id: { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false); } break; - case C1StubId::handle_exception_nofpu_id: - case C1StubId::handle_exception_id: + case StubId::c1_handle_exception_nofpu_id: + case StubId::c1_handle_exception_id: { StubFrame f(sasm, "handle_exception", dont_gc_arguments); oop_maps = generate_handle_exception(id, sasm); } break; - case C1StubId::handle_exception_from_callee_id: + case StubId::c1_handle_exception_from_callee_id: { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments); oop_maps = generate_handle_exception(id, sasm); } break; - case C1StubId::unwind_exception_id: + case StubId::c1_unwind_exception_id: { __ set_info("unwind_exception", dont_gc_arguments); // note: no stubframe since we are about to leave the current // activation and we are calling a leaf VM function only. 
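// Every case in this switch binds one StubId to one generated blob body, and
// callers reach that code through the same id. A minimal usage sketch, assuming
// the Runtime1::entry_for() lookup shown in the LIR_Assembler hunks earlier in
// this patch (illustrative comment only, not a hunk of the patch itself):
//
//   address unwind = Runtime1::entry_for(StubId::c1_unwind_exception_id);
//   __ jump(RuntimeAddress(unwind));  // tail-call into the shared unwind stub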
@@ -1027,7 +1027,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::throw_array_store_exception_id: + case StubId::c1_throw_array_store_exception_id: { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments); // tos + 0: link // + 1: return address @@ -1035,19 +1035,19 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::throw_class_cast_exception_id: + case StubId::c1_throw_class_cast_exception_id: { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true); } break; - case C1StubId::throw_incompatible_class_change_error_id: + case StubId::c1_throw_incompatible_class_change_error_id: { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments); oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false); } break; - case C1StubId::slow_subtype_check_id: + case StubId::c1_slow_subtype_check_id: { // Typical calling sequence: // __ push(klass_RInfo); // object klass or other subclass @@ -1100,7 +1100,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::is_instance_of_id: + case StubId::c1_is_instance_of_id: { // Mirror: c_rarg0 (Windows: rcx, SysV: rdi) // Object: c_rarg1 (Windows: rdx, SysV: rsi) @@ -1154,10 +1154,10 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::monitorenter_nofpu_id: + case StubId::c1_monitorenter_nofpu_id: save_fpu_registers = false; // fall through - case C1StubId::monitorenter_id: + case StubId::c1_monitorenter_id: { StubFrame f(sasm, "monitorenter", dont_gc_arguments, true /* use_pop_on_epilog */); OopMap* map = save_live_registers(sasm, 3, save_fpu_registers); @@ -1175,10 +1175,10 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::monitorexit_nofpu_id: + case StubId::c1_monitorexit_nofpu_id: save_fpu_registers = false; // fall through - case C1StubId::monitorexit_id: + case StubId::c1_monitorexit_id: { StubFrame f(sasm, "monitorexit", dont_gc_arguments); OopMap* map = save_live_registers(sasm, 2, save_fpu_registers); @@ -1198,7 +1198,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::deoptimize_id: + case StubId::c1_deoptimize_id: { StubFrame f(sasm, "deoptimize", dont_gc_arguments); const int num_rt_args = 2; // thread, trap_request @@ -1215,35 +1215,35 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::access_field_patching_id: + case StubId::c1_access_field_patching_id: { StubFrame f(sasm, "access_field_patching", dont_gc_arguments); // we should set up register map oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching)); } break; - case C1StubId::load_klass_patching_id: + case StubId::c1_load_klass_patching_id: { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments); // we should set up register map oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching)); } break; - case C1StubId::load_mirror_patching_id: + case StubId::c1_load_mirror_patching_id: { StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments); // we should set up register map oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching)); } break; 
- case C1StubId::load_appendix_patching_id: + case StubId::c1_load_appendix_patching_id: { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments); // we should set up register map oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching)); } break; - case C1StubId::dtrace_object_alloc_id: + case StubId::c1_dtrace_object_alloc_id: { // rax,: object StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments); // we can't gc here so skip the oopmap but make sure that all @@ -1257,7 +1257,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::fpu2long_stub_id: + case StubId::c1_fpu2long_stub_id: { Label done; __ cvttsd2siq(rax, Address(rsp, wordSize)); @@ -1273,7 +1273,7 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) { } break; - case C1StubId::predicate_failed_trap_id: + case StubId::c1_predicate_failed_trap_id: { StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments); diff --git a/src/hotspot/cpu/x86/c2_stubGenerator_x86_64_string.cpp b/src/hotspot/cpu/x86/c2_stubGenerator_x86_64_string.cpp index ee2d6d8a0be..23dd9f908b5 100644 --- a/src/hotspot/cpu/x86/c2_stubGenerator_x86_64_string.cpp +++ b/src/hotspot/cpu/x86/c2_stubGenerator_x86_64_string.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2024, Intel Corporation. All rights reserved. + * Copyright (c) 2024, 2025, Intel Corporation. All rights reserved. * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -205,7 +205,7 @@ static void generate_string_indexof_stubs(StubGenerator *stubgen, address *fnptr bool isU = isUL || isUU; // At least one is UTF-16 assert(isLL || isUL || isUU, "Encoding not recognized"); - StubGenStubId stub_id = (isLL ? StubGenStubId::string_indexof_linear_ll_id : (isUL ? StubGenStubId::string_indexof_linear_ul_id : StubGenStubId::string_indexof_linear_uu_id)); + StubId stub_id = (isLL ? StubId::stubgen_string_indexof_linear_ll_id : (isUL ? StubId::stubgen_string_indexof_linear_ul_id : StubId::stubgen_string_indexof_linear_uu_id)); StubCodeMark mark(stubgen, stub_id); // Keep track of isUL since we need to generate UU code in the main body // for the case where we expand the needle from bytes to words on the stack. 
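The hunks above and below all apply one mechanical mapping: the per-scope stub enums (C1StubId, OptoStubId, SharedStubId, StubGenStubId) collapse into the single global StubId enumeration, with the old scope retained as a name prefix, and blob lookups move to a parallel global BlobId. A minimal sketch of the scheme, assuming declarations along these lines (the real enums are generated from the stub declaration macros, so the members shown here are illustrative, not the patch's actual definition):

    // Illustrative only: members inferred from the renames in this patch;
    // the real definitions are generated from the stub declaration macros.
    enum class StubId : int {
      c1_new_instance_id,     // was C1StubId::new_instance_id
      c2_uncommon_trap_id,    // was OptoStubId::uncommon_trap_id
      shared_deopt_id,        // was SharedStubId::deopt_id
      stubgen_call_stub_id,   // was StubGenStubId::call_stub_id
    };

    // Call sites change mechanically, e.g.
    //   before: Runtime1::entry_for(C1StubId::slow_subtype_check_id)
    //   after:  Runtime1::entry_for(StubId::c1_slow_subtype_check_id)

One consequence is visible in the runtime and sharedRuntime hunks below: AOTCodeCache::load_code_blob and store_code_blob are now keyed by a typed BlobId (obtained from a stub id via StubInfo::blob(id)) instead of a per-scope (uint) cast plus a name string.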
diff --git a/src/hotspot/cpu/x86/frame_x86.cpp b/src/hotspot/cpu/x86/frame_x86.cpp index b9a2ef35f10..46ffda93699 100644 --- a/src/hotspot/cpu/x86/frame_x86.cpp +++ b/src/hotspot/cpu/x86/frame_x86.cpp @@ -430,8 +430,8 @@ JavaThread** frame::saved_thread_address(const frame& f) { JavaThread** thread_addr; #ifdef COMPILER1 - if (cb == Runtime1::blob_for(C1StubId::monitorenter_id) || - cb == Runtime1::blob_for(C1StubId::monitorenter_nofpu_id)) { + if (cb == Runtime1::blob_for(StubId::c1_monitorenter_id) || + cb == Runtime1::blob_for(StubId::c1_monitorenter_nofpu_id)) { thread_addr = (JavaThread**)(f.sp() + Runtime1::runtime_blob_current_thread_offset(f)); } else #endif diff --git a/src/hotspot/cpu/x86/runtime_x86_64.cpp b/src/hotspot/cpu/x86/runtime_x86_64.cpp index 5865bec2e39..68a8c224659 100644 --- a/src/hotspot/cpu/x86/runtime_x86_64.cpp +++ b/src/hotspot/cpu/x86/runtime_x86_64.cpp @@ -57,8 +57,8 @@ class SimpleRuntimeFrame { //------------------------------generate_uncommon_trap_blob-------------------- UncommonTrapBlob* OptoRuntime::generate_uncommon_trap_blob() { - const char* name = OptoRuntime::stub_name(OptoStubId::uncommon_trap_id); - CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::C2Blob, (uint)OptoStubId::uncommon_trap_id, name); + const char* name = OptoRuntime::stub_name(StubId::c2_uncommon_trap_id); + CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::C2Blob, BlobId::c2_uncommon_trap_id); if (blob != nullptr) { return blob->as_uncommon_trap_blob(); } @@ -236,7 +236,7 @@ UncommonTrapBlob* OptoRuntime::generate_uncommon_trap_blob() { UncommonTrapBlob *ut_blob = UncommonTrapBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1); - AOTCodeCache::store_code_blob(*ut_blob, AOTCodeEntry::C2Blob, (uint)OptoStubId::uncommon_trap_id, name); + AOTCodeCache::store_code_blob(*ut_blob, AOTCodeEntry::C2Blob, BlobId::c2_uncommon_trap_id); return ut_blob; } @@ -273,8 +273,8 @@ ExceptionBlob* OptoRuntime::generate_exception_blob() { assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned"); - const char* name = OptoRuntime::stub_name(OptoStubId::exception_id); - CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::C2Blob, (uint)OptoStubId::exception_id, name); + const char* name = OptoRuntime::stub_name(StubId::c2_exception_id); + CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::C2Blob, BlobId::c2_exception_id); if (blob != nullptr) { return blob->as_exception_blob(); } @@ -377,7 +377,7 @@ ExceptionBlob* OptoRuntime::generate_exception_blob() { // Set exception blob ExceptionBlob* ex_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1); - AOTCodeCache::store_code_blob(*ex_blob, AOTCodeEntry::C2Blob, (uint)OptoStubId::exception_id, name); + AOTCodeCache::store_code_blob(*ex_blob, AOTCodeEntry::C2Blob, BlobId::c2_exception_id); return ex_blob; } #endif // COMPILER2 diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp index 78259157dfa..7c7ca61f1ae 100644 --- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp +++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp @@ -2617,8 +2617,8 @@ void SharedRuntime::generate_deopt_blob() { pad += 512; // Increase the buffer size when compiling for JVMCI } #endif - const char* name = SharedRuntime::stub_name(SharedStubId::deopt_id); - CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::SharedBlob, (uint)SharedStubId::deopt_id, name); + const char* name = SharedRuntime::stub_name(StubId::shared_deopt_id); + 
CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::SharedBlob, BlobId::shared_deopt_id); if (blob != nullptr) { _deopt_blob = blob->as_deoptimization_blob(); return; @@ -2976,7 +2976,7 @@ void SharedRuntime::generate_deopt_blob() { } #endif - AOTCodeCache::store_code_blob(*_deopt_blob, AOTCodeEntry::SharedBlob, (uint)SharedStubId::deopt_id, name); + AOTCodeCache::store_code_blob(*_deopt_blob, AOTCodeEntry::SharedBlob, BlobId::shared_deopt_id); } //------------------------------generate_handler_blob------ @@ -2984,14 +2984,14 @@ void SharedRuntime::generate_deopt_blob() { // Generate a special Compile2Runtime blob that saves all registers, // and setup oopmap. // -SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address call_ptr) { +SafepointBlob* SharedRuntime::generate_handler_blob(StubId id, address call_ptr) { assert(StubRoutines::forward_exception_entry() != nullptr, "must be generated before"); assert(is_polling_page_id(id), "expected a polling page stub id"); // Allocate space for the code. Setup code generation tools. const char* name = SharedRuntime::stub_name(id); - CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::SharedBlob, (uint)id, name); + CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::SharedBlob, StubInfo::blob(id)); if (blob != nullptr) { return blob->as_safepoint_blob(); } @@ -3005,8 +3005,8 @@ SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address cal address start = __ pc(); address call_pc = nullptr; int frame_size_in_words; - bool cause_return = (id == SharedStubId::polling_page_return_handler_id); - bool save_wide_vectors = (id == SharedStubId::polling_page_vectors_safepoint_handler_id); + bool cause_return = (id == StubId::shared_polling_page_return_handler_id); + bool save_wide_vectors = (id == StubId::shared_polling_page_vectors_safepoint_handler_id); // Make room for return address (or push it again) if (!cause_return) { @@ -3160,7 +3160,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address cal // Fill-out other meta info SafepointBlob* sp_blob = SafepointBlob::create(&buffer, oop_maps, frame_size_in_words); - AOTCodeCache::store_code_blob(*sp_blob, AOTCodeEntry::SharedBlob, (uint)id, name); + AOTCodeCache::store_code_blob(*sp_blob, AOTCodeEntry::SharedBlob, StubInfo::blob(id)); return sp_blob; } @@ -3172,12 +3172,12 @@ SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address cal // but since this is generic code we don't know what they are and the caller // must do any gc of the args. // -RuntimeStub* SharedRuntime::generate_resolve_blob(SharedStubId id, address destination) { +RuntimeStub* SharedRuntime::generate_resolve_blob(StubId id, address destination) { assert (StubRoutines::forward_exception_entry() != nullptr, "must be generated before"); assert(is_resolve_id(id), "expected a resolve stub id"); const char* name = SharedRuntime::stub_name(id); - CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::SharedBlob, (uint)id, name); + CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::SharedBlob, StubInfo::blob(id)); if (blob != nullptr) { return blob->as_runtime_stub(); } @@ -3254,7 +3254,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(SharedStubId id, address desti // frame_size_words or bytes?? 
RuntimeStub* rs_blob = RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true); - AOTCodeCache::store_code_blob(*rs_blob, AOTCodeEntry::SharedBlob, (uint)id, name); + AOTCodeCache::store_code_blob(*rs_blob, AOTCodeEntry::SharedBlob, StubInfo::blob(id)); return rs_blob; } @@ -3273,7 +3273,7 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(SharedStubId id, address desti // AbstractMethodError on entry) are either at call sites or // otherwise assume that stack unwinding will be initiated, so // caller saved registers were assumed volatile in the compiler. -RuntimeStub* SharedRuntime::generate_throw_exception(SharedStubId id, address runtime_entry) { +RuntimeStub* SharedRuntime::generate_throw_exception(StubId id, address runtime_entry) { assert(is_throw_id(id), "expected a throw stub id"); const char* name = SharedRuntime::stub_name(id); @@ -3296,7 +3296,7 @@ RuntimeStub* SharedRuntime::generate_throw_exception(SharedStubId id, address ru const char* timer_msg = "SharedRuntime generate_throw_exception"; TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime)); - CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::SharedBlob, (uint)id, name); + CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::SharedBlob, StubInfo::blob(id)); if (blob != nullptr) { return blob->as_runtime_stub(); } @@ -3359,7 +3359,7 @@ RuntimeStub* SharedRuntime::generate_throw_exception(SharedStubId id, address ru frame_complete, (framesize >> (LogBytesPerWord - LogBytesPerInt)), oop_maps, false); - AOTCodeCache::store_code_blob(*stub, AOTCodeEntry::SharedBlob, (uint)id, name); + AOTCodeCache::store_code_blob(*stub, AOTCodeEntry::SharedBlob, StubInfo::blob(id)); return stub; } @@ -3643,7 +3643,7 @@ RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() { framesize // inclusive of return address }; - const char* name = SharedRuntime::stub_name(SharedStubId::jfr_write_checkpoint_id); + const char* name = SharedRuntime::stub_name(StubId::shared_jfr_write_checkpoint_id); CodeBuffer code(name, 1024, 64); MacroAssembler* masm = new MacroAssembler(&code); address start = __ pc(); @@ -3688,7 +3688,7 @@ RuntimeStub* SharedRuntime::generate_jfr_return_lease() { framesize // inclusive of return address }; - const char* name = SharedRuntime::stub_name(SharedStubId::jfr_return_lease_id); + const char* name = SharedRuntime::stub_name(StubId::shared_jfr_return_lease_id); CodeBuffer code(name, 1024, 64); MacroAssembler* masm = new MacroAssembler(&code); address start = __ pc(); diff --git a/src/hotspot/cpu/x86/stubDeclarations_x86.hpp b/src/hotspot/cpu/x86/stubDeclarations_x86.hpp index 7cbe3829783..ae947135f78 100644 --- a/src/hotspot/cpu/x86/stubDeclarations_x86.hpp +++ b/src/hotspot/cpu/x86/stubDeclarations_x86.hpp @@ -115,10 +115,10 @@ do_arch_entry(x86, compiler, vector_64_bit_mask, \ vector_64_bit_mask, vector_64_bit_mask) \ do_stub(compiler, vector_byte_shuffle_mask) \ - do_arch_entry(x86, compiler, vector_int_shuffle_mask, \ + do_arch_entry(x86, compiler, vector_byte_shuffle_mask, \ vector_byte_shuffle_mask, vector_byte_shuffle_mask) \ do_stub(compiler, vector_short_shuffle_mask) \ - do_arch_entry(x86, compiler, vector_int_shuffle_mask, \ + do_arch_entry(x86, compiler, vector_short_shuffle_mask, \ vector_short_shuffle_mask, vector_short_shuffle_mask) \ do_stub(compiler, vector_int_shuffle_mask) \ do_arch_entry(x86, compiler, vector_int_shuffle_mask, \ diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp index 
e6dd880527c..30ce9718378 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp @@ -187,7 +187,7 @@ address StubGenerator::generate_call_stub(address& return_address) { assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 && (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off, "adjust this code"); - StubGenStubId stub_id = StubGenStubId::call_stub_id; + StubId stub_id = StubId::stubgen_call_stub_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -410,7 +410,7 @@ address StubGenerator::generate_call_stub(address& return_address) { // rax: exception oop address StubGenerator::generate_catch_exception() { - StubGenStubId stub_id = StubGenStubId::catch_exception_id; + StubId stub_id = StubId::stubgen_catch_exception_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -466,7 +466,7 @@ address StubGenerator::generate_catch_exception() { // NOTE: At entry of this stub, exception-pc must be on stack !! address StubGenerator::generate_forward_exception() { - StubGenStubId stub_id = StubGenStubId::forward_exception_id; + StubId stub_id = StubId::stubgen_forward_exception_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -530,7 +530,7 @@ address StubGenerator::generate_forward_exception() { // // Result: address StubGenerator::generate_orderaccess_fence() { - StubGenStubId stub_id = StubGenStubId::fence_id; + StubId stub_id = StubId::stubgen_fence_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -546,7 +546,7 @@ address StubGenerator::generate_orderaccess_fence() { // This routine is used to find the previous stack pointer for the // caller. address StubGenerator::generate_get_previous_sp() { - StubGenStubId stub_id = StubGenStubId::get_previous_sp_id; + StubId stub_id = StubId::stubgen_get_previous_sp_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -565,7 +565,7 @@ address StubGenerator::generate_get_previous_sp() { // MXCSR register to our expected state. 
address StubGenerator::generate_verify_mxcsr() { - StubGenStubId stub_id = StubGenStubId::verify_mxcsr_id; + StubId stub_id = StubId::stubgen_verify_mxcsr_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -594,7 +594,7 @@ address StubGenerator::generate_verify_mxcsr() { } address StubGenerator::generate_f2i_fixup() { - StubGenStubId stub_id = StubGenStubId::f2i_fixup_id; + StubId stub_id = StubId::stubgen_f2i_fixup_id; StubCodeMark mark(this, stub_id); Address inout(rsp, 5 * wordSize); // return address + 4 saves @@ -633,7 +633,7 @@ address StubGenerator::generate_f2i_fixup() { } address StubGenerator::generate_f2l_fixup() { - StubGenStubId stub_id = StubGenStubId::f2l_fixup_id; + StubId stub_id = StubId::stubgen_f2l_fixup_id; StubCodeMark mark(this, stub_id); Address inout(rsp, 5 * wordSize); // return address + 4 saves address start = __ pc(); @@ -671,7 +671,7 @@ address StubGenerator::generate_f2l_fixup() { } address StubGenerator::generate_d2i_fixup() { - StubGenStubId stub_id = StubGenStubId::d2i_fixup_id; + StubId stub_id = StubId::stubgen_d2i_fixup_id; StubCodeMark mark(this, stub_id); Address inout(rsp, 6 * wordSize); // return address + 5 saves @@ -719,7 +719,7 @@ address StubGenerator::generate_d2i_fixup() { } address StubGenerator::generate_d2l_fixup() { - StubGenStubId stub_id = StubGenStubId::d2l_fixup_id; + StubId stub_id = StubId::stubgen_d2l_fixup_id; StubCodeMark mark(this, stub_id); Address inout(rsp, 6 * wordSize); // return address + 5 saves @@ -768,7 +768,7 @@ address StubGenerator::generate_d2l_fixup() { address StubGenerator::generate_count_leading_zeros_lut() { __ align64(); - StubGenStubId stub_id = StubGenStubId::vector_count_leading_zeros_lut_id; + StubId stub_id = StubId::stubgen_vector_count_leading_zeros_lut_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -786,7 +786,7 @@ address StubGenerator::generate_count_leading_zeros_lut() { address StubGenerator::generate_popcount_avx_lut() { __ align64(); - StubGenStubId stub_id = StubGenStubId::vector_popcount_lut_id; + StubId stub_id = StubId::stubgen_vector_popcount_lut_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -804,7 +804,7 @@ address StubGenerator::generate_popcount_avx_lut() { address StubGenerator::generate_iota_indices() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::vector_iota_indices_id; + StubId stub_id = StubId::stubgen_vector_iota_indices_id; StubCodeMark mark(this, stub_id); address start = __ pc(); // B @@ -866,7 +866,7 @@ address StubGenerator::generate_iota_indices() { address StubGenerator::generate_vector_reverse_bit_lut() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::vector_reverse_bit_lut_id; + StubId stub_id = StubId::stubgen_vector_reverse_bit_lut_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -884,7 +884,7 @@ address StubGenerator::generate_vector_reverse_bit_lut() { address StubGenerator::generate_vector_reverse_byte_perm_mask_long() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::vector_reverse_byte_perm_mask_long_id; + StubId stub_id = StubId::stubgen_vector_reverse_byte_perm_mask_long_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -902,7 +902,7 @@ address StubGenerator::generate_vector_reverse_byte_perm_mask_long() { address StubGenerator::generate_vector_reverse_byte_perm_mask_int() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::vector_reverse_byte_perm_mask_int_id; + StubId stub_id = 
StubId::stubgen_vector_reverse_byte_perm_mask_int_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -920,7 +920,7 @@ address StubGenerator::generate_vector_reverse_byte_perm_mask_int() { address StubGenerator::generate_vector_reverse_byte_perm_mask_short() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::vector_reverse_byte_perm_mask_short_id; + StubId stub_id = StubId::stubgen_vector_reverse_byte_perm_mask_short_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -938,7 +938,7 @@ address StubGenerator::generate_vector_reverse_byte_perm_mask_short() { address StubGenerator::generate_vector_byte_shuffle_mask() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::vector_byte_shuffle_mask_id; + StubId stub_id = StubId::stubgen_vector_byte_shuffle_mask_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -950,7 +950,7 @@ address StubGenerator::generate_vector_byte_shuffle_mask() { return start; } -address StubGenerator::generate_fp_mask(StubGenStubId stub_id, int64_t mask) { +address StubGenerator::generate_fp_mask(StubId stub_id, int64_t mask) { __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -961,13 +961,13 @@ address StubGenerator::generate_fp_mask(StubGenStubId stub_id, int64_t mask) { return start; } -address StubGenerator::generate_compress_perm_table(StubGenStubId stub_id) { +address StubGenerator::generate_compress_perm_table(StubId stub_id) { int esize; switch (stub_id) { - case compress_perm_table32_id: + case StubId::stubgen_compress_perm_table32_id: esize = 32; break; - case compress_perm_table64_id: + case StubId::stubgen_compress_perm_table64_id: esize = 64; break; default: @@ -1016,13 +1016,13 @@ address StubGenerator::generate_compress_perm_table(StubGenStubId stub_id) { return start; } -address StubGenerator::generate_expand_perm_table(StubGenStubId stub_id) { +address StubGenerator::generate_expand_perm_table(StubId stub_id) { int esize; switch (stub_id) { - case expand_perm_table32_id: + case StubId::stubgen_expand_perm_table32_id: esize = 32; break; - case expand_perm_table64_id: + case StubId::stubgen_expand_perm_table64_id: esize = 64; break; default: @@ -1069,7 +1069,7 @@ address StubGenerator::generate_expand_perm_table(StubGenStubId stub_id) { return start; } -address StubGenerator::generate_vector_mask(StubGenStubId stub_id, int64_t mask) { +address StubGenerator::generate_vector_mask(StubId stub_id, int64_t mask) { __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -1088,7 +1088,7 @@ address StubGenerator::generate_vector_mask(StubGenStubId stub_id, int64_t mask) address StubGenerator::generate_vector_byte_perm_mask() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::vector_byte_perm_mask_id; + StubId stub_id = StubId::stubgen_vector_byte_perm_mask_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -1104,7 +1104,7 @@ address StubGenerator::generate_vector_byte_perm_mask() { return start; } -address StubGenerator::generate_vector_fp_mask(StubGenStubId stub_id, int64_t mask) { +address StubGenerator::generate_vector_fp_mask(StubId stub_id, int64_t mask) { __ align(CodeEntryAlignment); StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -1121,7 +1121,7 @@ address StubGenerator::generate_vector_fp_mask(StubGenStubId stub_id, int64_t ma return start; } -address StubGenerator::generate_vector_custom_i32(StubGenStubId stub_id, Assembler::AvxVectorLen len, 
+address StubGenerator::generate_vector_custom_i32(StubId stub_id, Assembler::AvxVectorLen len, int32_t val0, int32_t val1, int32_t val2, int32_t val3, int32_t val4, int32_t val5, int32_t val6, int32_t val7, int32_t val8, int32_t val9, int32_t val10, int32_t val11, @@ -1171,7 +1171,7 @@ address StubGenerator::generate_vector_custom_i32(StubGenStubId stub_id, Assembl // * [tos + 8]: saved r10 (rscratch1) - saved by caller // * = popped on exit address StubGenerator::generate_verify_oop() { - StubGenStubId stub_id = StubGenStubId::verify_oop_id; + StubId stub_id = StubId::stubgen_verify_oop_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -1369,7 +1369,7 @@ address StubGenerator::generate_data_cache_writeback() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::data_cache_writeback_id; + StubId stub_id = StubId::stubgen_data_cache_writeback_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -1387,7 +1387,7 @@ address StubGenerator::generate_data_cache_writeback_sync() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::data_cache_writeback_sync_id; + StubId stub_id = StubId::stubgen_data_cache_writeback_sync_id; StubCodeMark mark(this, stub_id); // pre wbsync is a no-op @@ -1409,13 +1409,13 @@ address StubGenerator::generate_data_cache_writeback_sync() { // ofs and limit are use for multi-block byte array. // int com.sun.security.provider.MD5.implCompress(byte[] b, int ofs) -address StubGenerator::generate_md5_implCompress(StubGenStubId stub_id) { +address StubGenerator::generate_md5_implCompress(StubId stub_id) { bool multi_block; switch (stub_id) { - case md5_implCompress_id: + case StubId::stubgen_md5_implCompress_id: multi_block = false; break; - case md5_implCompressMB_id: + case StubId::stubgen_md5_implCompressMB_id: multi_block = true; break; default: @@ -1458,7 +1458,7 @@ address StubGenerator::generate_md5_implCompress(StubGenStubId stub_id) { address StubGenerator::generate_upper_word_mask() { __ align64(); - StubGenStubId stub_id = StubGenStubId::upper_word_mask_id; + StubId stub_id = StubId::stubgen_upper_word_mask_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -1470,7 +1470,7 @@ address StubGenerator::generate_upper_word_mask() { address StubGenerator::generate_shuffle_byte_flip_mask() { __ align64(); - StubGenStubId stub_id = StubGenStubId::shuffle_byte_flip_mask_id; + StubId stub_id = StubId::stubgen_shuffle_byte_flip_mask_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -1482,13 +1482,13 @@ address StubGenerator::generate_shuffle_byte_flip_mask() { // ofs and limit are use for multi-block byte array. 
// int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit) -address StubGenerator::generate_sha1_implCompress(StubGenStubId stub_id) { +address StubGenerator::generate_sha1_implCompress(StubId stub_id) { bool multi_block; switch (stub_id) { - case sha1_implCompress_id: + case StubId::stubgen_sha1_implCompress_id: multi_block = false; break; - case sha1_implCompressMB_id: + case StubId::stubgen_sha1_implCompressMB_id: multi_block = true; break; default: @@ -1530,7 +1530,7 @@ address StubGenerator::generate_sha1_implCompress(StubGenStubId stub_id) { address StubGenerator::generate_pshuffle_byte_flip_mask() { __ align64(); - StubGenStubId stub_id = StubGenStubId::pshuffle_byte_flip_mask_id; + StubId stub_id = StubId::stubgen_pshuffle_byte_flip_mask_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -1558,7 +1558,7 @@ address StubGenerator::generate_pshuffle_byte_flip_mask() { //Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb. address StubGenerator::generate_pshuffle_byte_flip_mask_sha512() { __ align32(); - StubGenStubId stub_id = StubGenStubId::pshuffle_byte_flip_mask_sha512_id; + StubId stub_id = StubId::stubgen_pshuffle_byte_flip_mask_sha512_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -1578,13 +1578,13 @@ address StubGenerator::generate_pshuffle_byte_flip_mask_sha512() { // ofs and limit are use for multi-block byte array. // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit) -address StubGenerator::generate_sha256_implCompress(StubGenStubId stub_id) { +address StubGenerator::generate_sha256_implCompress(StubId stub_id) { bool multi_block; switch (stub_id) { - case sha256_implCompress_id: + case StubId::stubgen_sha256_implCompress_id: multi_block = false; break; - case sha256_implCompressMB_id: + case StubId::stubgen_sha256_implCompressMB_id: multi_block = true; break; default: @@ -1631,13 +1631,13 @@ address StubGenerator::generate_sha256_implCompress(StubGenStubId stub_id) { return start; } -address StubGenerator::generate_sha512_implCompress(StubGenStubId stub_id) { +address StubGenerator::generate_sha512_implCompress(StubId stub_id) { bool multi_block; switch (stub_id) { - case sha512_implCompress_id: + case StubId::stubgen_sha512_implCompress_id: multi_block = false; break; - case sha512_implCompressMB_id: + case StubId::stubgen_sha512_implCompressMB_id: multi_block = true; break; default: @@ -1681,7 +1681,7 @@ address StubGenerator::generate_sha512_implCompress(StubGenStubId stub_id) { address StubGenerator::base64_shuffle_addr() { __ align64(); - StubGenStubId stub_id = StubGenStubId::shuffle_base64_id; + StubId stub_id = StubId::stubgen_shuffle_base64_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -1701,7 +1701,7 @@ address StubGenerator::base64_shuffle_addr() { address StubGenerator::base64_avx2_shuffle_addr() { __ align32(); - StubGenStubId stub_id = StubGenStubId::avx2_shuffle_base64_id; + StubId stub_id = StubId::stubgen_avx2_shuffle_base64_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -1715,7 +1715,7 @@ address StubGenerator::base64_avx2_shuffle_addr() { address StubGenerator::base64_avx2_input_mask_addr() { __ align32(); - StubGenStubId stub_id = StubGenStubId::avx2_input_mask_base64_id; + StubId stub_id = StubId::stubgen_avx2_input_mask_base64_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -1729,7 +1729,7 @@ address StubGenerator::base64_avx2_input_mask_addr() { address 
StubGenerator::base64_avx2_lut_addr() { __ align32(); - StubGenStubId stub_id = StubGenStubId::avx2_lut_base64_id; + StubId stub_id = StubId::stubgen_avx2_lut_base64_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -1749,7 +1749,7 @@ address StubGenerator::base64_avx2_lut_addr() { address StubGenerator::base64_encoding_table_addr() { __ align64(); - StubGenStubId stub_id = StubGenStubId::encoding_table_base64_id; + StubId stub_id = StubId::stubgen_encoding_table_base64_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -1783,7 +1783,7 @@ address StubGenerator::base64_encoding_table_addr() { address StubGenerator::generate_base64_encodeBlock() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::base64_encodeBlock_id; + StubId stub_id = StubId::stubgen_base64_encodeBlock_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -2166,7 +2166,7 @@ address StubGenerator::generate_base64_encodeBlock() // base64 AVX512vbmi tables address StubGenerator::base64_vbmi_lookup_lo_addr() { __ align64(); - StubGenStubId stub_id = StubGenStubId::lookup_lo_base64_id; + StubId stub_id = StubId::stubgen_lookup_lo_base64_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -2186,7 +2186,7 @@ address StubGenerator::base64_vbmi_lookup_lo_addr() { address StubGenerator::base64_vbmi_lookup_hi_addr() { __ align64(); - StubGenStubId stub_id = StubGenStubId::lookup_hi_base64_id; + StubId stub_id = StubId::stubgen_lookup_hi_base64_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -2205,7 +2205,7 @@ address StubGenerator::base64_vbmi_lookup_hi_addr() { } address StubGenerator::base64_vbmi_lookup_lo_url_addr() { __ align64(); - StubGenStubId stub_id = StubGenStubId::lookup_lo_base64url_id; + StubId stub_id = StubId::stubgen_lookup_lo_base64url_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -2225,7 +2225,7 @@ address StubGenerator::base64_vbmi_lookup_lo_url_addr() { address StubGenerator::base64_vbmi_lookup_hi_url_addr() { __ align64(); - StubGenStubId stub_id = StubGenStubId::lookup_hi_base64url_id; + StubId stub_id = StubId::stubgen_lookup_hi_base64url_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -2245,7 +2245,7 @@ address StubGenerator::base64_vbmi_lookup_hi_url_addr() { address StubGenerator::base64_vbmi_pack_vec_addr() { __ align64(); - StubGenStubId stub_id = StubGenStubId::pack_vec_base64_id; + StubId stub_id = StubId::stubgen_pack_vec_base64_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -2265,7 +2265,7 @@ address StubGenerator::base64_vbmi_pack_vec_addr() { address StubGenerator::base64_vbmi_join_0_1_addr() { __ align64(); - StubGenStubId stub_id = StubGenStubId::join_0_1_base64_id; + StubId stub_id = StubId::stubgen_join_0_1_base64_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -2285,7 +2285,7 @@ address StubGenerator::base64_vbmi_join_0_1_addr() { address StubGenerator::base64_vbmi_join_1_2_addr() { __ align64(); - StubGenStubId stub_id = StubGenStubId::join_1_2_base64_id; + StubId stub_id = StubId::stubgen_join_1_2_base64_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -2305,7 +2305,7 @@ address StubGenerator::base64_vbmi_join_1_2_addr() { address StubGenerator::base64_vbmi_join_2_3_addr() { __ align64(); - StubGenStubId stub_id = StubGenStubId::join_2_3_base64_id; + StubId stub_id = StubId::stubgen_join_2_3_base64_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -2325,7 +2325,7 @@ address 
StubGenerator::base64_vbmi_join_2_3_addr() { address StubGenerator::base64_AVX2_decode_tables_addr() { __ align64(); - StubGenStubId stub_id = StubGenStubId::avx2_decode_tables_base64_id; + StubId stub_id = StubId::stubgen_avx2_decode_tables_base64_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -2360,7 +2360,7 @@ address StubGenerator::base64_AVX2_decode_tables_addr() { address StubGenerator::base64_AVX2_decode_LUT_tables_addr() { __ align64(); - StubGenStubId stub_id = StubGenStubId::avx2_decode_lut_tables_base64_id; + StubId stub_id = StubId::stubgen_avx2_decode_lut_tables_base64_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -2400,7 +2400,7 @@ address StubGenerator::base64_AVX2_decode_LUT_tables_addr() { } address StubGenerator::base64_decoding_table_addr() { - StubGenStubId stub_id = StubGenStubId::decoding_table_base64_id; + StubId stub_id = StubId::stubgen_decoding_table_base64_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -2483,7 +2483,7 @@ address StubGenerator::base64_decoding_table_addr() { // private void decodeBlock(byte[] src, int sp, int sl, byte[] dst, int dp, boolean isURL, isMIME) { address StubGenerator::generate_base64_decodeBlock() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::base64_decodeBlock_id; + StubId stub_id = StubId::stubgen_base64_decodeBlock_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -3017,7 +3017,7 @@ address StubGenerator::generate_updateBytesCRC32() { assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions"); __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::updateBytesCRC32_id; + StubId stub_id = StubId::stubgen_updateBytesCRC32_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -3074,7 +3074,7 @@ address StubGenerator::generate_updateBytesCRC32() { address StubGenerator::generate_updateBytesCRC32C(bool is_pclmulqdq_supported) { assert(UseCRC32CIntrinsics, "need SSE4_2"); __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::updateBytesCRC32C_id; + StubId stub_id = StubId::stubgen_updateBytesCRC32C_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -3155,7 +3155,7 @@ address StubGenerator::generate_updateBytesCRC32C(bool is_pclmulqdq_supported) { */ address StubGenerator::generate_multiplyToLen() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::multiplyToLen_id; + StubId stub_id = StubId::stubgen_multiplyToLen_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -3212,7 +3212,7 @@ address StubGenerator::generate_multiplyToLen() { */ address StubGenerator::generate_vectorizedMismatch() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::vectorizedMismatch_id; + StubId stub_id = StubId::stubgen_vectorizedMismatch_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -3264,7 +3264,7 @@ address StubGenerator::generate_vectorizedMismatch() { address StubGenerator::generate_squareToLen() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::squareToLen_id; + StubId stub_id = StubId::stubgen_squareToLen_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -3300,7 +3300,7 @@ address StubGenerator::generate_squareToLen() { address StubGenerator::generate_method_entry_barrier() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::method_entry_barrier_id; + StubId stub_id = StubId::stubgen_method_entry_barrier_id; StubCodeMark mark(this, stub_id); address start = __ 
pc(); @@ -3390,7 +3390,7 @@ address StubGenerator::generate_method_entry_barrier() { */ address StubGenerator::generate_mulAdd() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::mulAdd_id; + StubId stub_id = StubId::stubgen_mulAdd_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -3432,7 +3432,7 @@ address StubGenerator::generate_mulAdd() { address StubGenerator::generate_bigIntegerRightShift() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::bigIntegerRightShiftWorker_id; + StubId stub_id = StubId::stubgen_bigIntegerRightShiftWorker_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -3568,7 +3568,7 @@ address StubGenerator::generate_bigIntegerRightShift() { */ address StubGenerator::generate_bigIntegerLeftShift() { __ align(CodeEntryAlignment); - StubGenStubId stub_id = StubGenStubId::bigIntegerLeftShiftWorker_id; + StubId stub_id = StubId::stubgen_bigIntegerLeftShiftWorker_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -3720,7 +3720,7 @@ void StubGenerator::generate_libm_stubs() { * xmm0 - float */ address StubGenerator::generate_float16ToFloat() { - StubGenStubId stub_id = StubGenStubId::hf2f_id; + StubId stub_id = StubId::stubgen_hf2f_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -3746,7 +3746,7 @@ address StubGenerator::generate_float16ToFloat() { * rax - float16 jshort */ address StubGenerator::generate_floatToFloat16() { - StubGenStubId stub_id = StubGenStubId::f2hf_id; + StubId stub_id = StubId::stubgen_f2hf_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -3762,7 +3762,7 @@ address StubGenerator::generate_floatToFloat16() { return start; } -address StubGenerator::generate_cont_thaw(StubGenStubId stub_id) { +address StubGenerator::generate_cont_thaw(StubId stub_id) { if (!Continuations::enabled()) return nullptr; bool return_barrier; @@ -3770,17 +3770,17 @@ address StubGenerator::generate_cont_thaw(StubGenStubId stub_id) { Continuation::thaw_kind kind; switch (stub_id) { - case cont_thaw_id: + case StubId::stubgen_cont_thaw_id: return_barrier = false; return_barrier_exception = false; kind = Continuation::thaw_top; break; - case cont_returnBarrier_id: + case StubId::stubgen_cont_returnBarrier_id: return_barrier = true; return_barrier_exception = false; kind = Continuation::thaw_return_barrier; break; - case cont_returnBarrierExc_id: + case StubId::stubgen_cont_returnBarrierExc_id: return_barrier = true; return_barrier_exception = true; kind = Continuation::thaw_return_barrier_exception; @@ -3906,22 +3906,22 @@ address StubGenerator::generate_cont_thaw(StubGenStubId stub_id) { } address StubGenerator::generate_cont_thaw() { - return generate_cont_thaw(StubGenStubId::cont_thaw_id); + return generate_cont_thaw(StubId::stubgen_cont_thaw_id); } // TODO: will probably need multiple return barriers depending on return type address StubGenerator::generate_cont_returnBarrier() { - return generate_cont_thaw(StubGenStubId::cont_returnBarrier_id); + return generate_cont_thaw(StubId::stubgen_cont_returnBarrier_id); } address StubGenerator::generate_cont_returnBarrier_exception() { - return generate_cont_thaw(StubGenStubId::cont_returnBarrierExc_id); + return generate_cont_thaw(StubId::stubgen_cont_returnBarrierExc_id); } address StubGenerator::generate_cont_preempt_stub() { if (!Continuations::enabled()) return nullptr; - StubGenStubId stub_id = StubGenStubId::cont_preempt_id; + StubId stub_id = StubId::stubgen_cont_preempt_id; StubCodeMark mark(this, stub_id); 
address start = __ pc(); @@ -3952,7 +3952,7 @@ address StubGenerator::generate_cont_preempt_stub() { // exception handler for upcall stubs address StubGenerator::generate_upcall_stub_exception_handler() { - StubGenStubId stub_id = StubGenStubId::upcall_stub_exception_handler_id; + StubId stub_id = StubId::stubgen_upcall_stub_exception_handler_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -3973,7 +3973,7 @@ address StubGenerator::generate_upcall_stub_exception_handler() { // j_rarg0 = jobject receiver // rbx = result address StubGenerator::generate_upcall_stub_load_target() { - StubGenStubId stub_id = StubGenStubId::upcall_stub_load_target_id; + StubId stub_id = StubId::stubgen_upcall_stub_load_target_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -3993,7 +3993,7 @@ address StubGenerator::generate_upcall_stub_load_target() { } void StubGenerator::generate_lookup_secondary_supers_table_stub() { - StubGenStubId stub_id = StubGenStubId::lookup_secondary_supers_table_id; + StubId stub_id = StubId::stubgen_lookup_secondary_supers_table_id; StubCodeMark mark(this, stub_id); const Register @@ -4013,7 +4013,7 @@ void StubGenerator::generate_lookup_secondary_supers_table_stub() { // Slow path implementation for UseSecondarySupersTable. address StubGenerator::generate_lookup_secondary_supers_table_slow_path_stub() { - StubGenStubId stub_id = StubGenStubId::lookup_secondary_supers_table_slow_path_id; + StubId stub_id = StubId::stubgen_lookup_secondary_supers_table_slow_path_id; StubCodeMark mark(this, stub_id); address start = __ pc(); @@ -4089,10 +4089,10 @@ void StubGenerator::generate_initial_stubs() { StubRoutines::x86::_d2i_fixup = generate_d2i_fixup(); StubRoutines::x86::_d2l_fixup = generate_d2l_fixup(); - StubRoutines::x86::_float_sign_mask = generate_fp_mask(StubGenStubId::float_sign_mask_id, 0x7FFFFFFF7FFFFFFF); - StubRoutines::x86::_float_sign_flip = generate_fp_mask(StubGenStubId::float_sign_flip_id, 0x8000000080000000); - StubRoutines::x86::_double_sign_mask = generate_fp_mask(StubGenStubId::double_sign_mask_id, 0x7FFFFFFFFFFFFFFF); - StubRoutines::x86::_double_sign_flip = generate_fp_mask(StubGenStubId::double_sign_flip_id, 0x8000000000000000); + StubRoutines::x86::_float_sign_mask = generate_fp_mask(StubId::stubgen_float_sign_mask_id, 0x7FFFFFFF7FFFFFFF); + StubRoutines::x86::_float_sign_flip = generate_fp_mask(StubId::stubgen_float_sign_flip_id, 0x8000000080000000); + StubRoutines::x86::_double_sign_mask = generate_fp_mask(StubId::stubgen_double_sign_mask_id, 0x7FFFFFFFFFFFFFFF); + StubRoutines::x86::_double_sign_flip = generate_fp_mask(StubId::stubgen_double_sign_flip_id, 0x8000000000000000); if (UseCRC32Intrinsics) { // set table address before stub generation which use it @@ -4165,25 +4165,25 @@ void StubGenerator::generate_compiler_stubs() { // Entry points that are C2 compiler specific. 
-  StubRoutines::x86::_vector_float_sign_mask = generate_vector_mask(StubGenStubId::vector_float_sign_mask_id, 0x7FFFFFFF7FFFFFFF);
-  StubRoutines::x86::_vector_float_sign_flip = generate_vector_mask(StubGenStubId::vector_float_sign_flip_id, 0x8000000080000000);
-  StubRoutines::x86::_vector_double_sign_mask = generate_vector_mask(StubGenStubId::vector_double_sign_mask_id, 0x7FFFFFFFFFFFFFFF);
-  StubRoutines::x86::_vector_double_sign_flip = generate_vector_mask(StubGenStubId::vector_double_sign_flip_id, 0x8000000000000000);
-  StubRoutines::x86::_vector_all_bits_set = generate_vector_mask(StubGenStubId::vector_all_bits_set_id, 0xFFFFFFFFFFFFFFFF);
-  StubRoutines::x86::_vector_int_mask_cmp_bits = generate_vector_mask(StubGenStubId::vector_int_mask_cmp_bits_id, 0x0000000100000001);
-  StubRoutines::x86::_vector_short_to_byte_mask = generate_vector_mask(StubGenStubId::vector_short_to_byte_mask_id, 0x00ff00ff00ff00ff);
+  StubRoutines::x86::_vector_float_sign_mask = generate_vector_mask(StubId::stubgen_vector_float_sign_mask_id, 0x7FFFFFFF7FFFFFFF);
+  StubRoutines::x86::_vector_float_sign_flip = generate_vector_mask(StubId::stubgen_vector_float_sign_flip_id, 0x8000000080000000);
+  StubRoutines::x86::_vector_double_sign_mask = generate_vector_mask(StubId::stubgen_vector_double_sign_mask_id, 0x7FFFFFFFFFFFFFFF);
+  StubRoutines::x86::_vector_double_sign_flip = generate_vector_mask(StubId::stubgen_vector_double_sign_flip_id, 0x8000000000000000);
+  StubRoutines::x86::_vector_all_bits_set = generate_vector_mask(StubId::stubgen_vector_all_bits_set_id, 0xFFFFFFFFFFFFFFFF);
+  StubRoutines::x86::_vector_int_mask_cmp_bits = generate_vector_mask(StubId::stubgen_vector_int_mask_cmp_bits_id, 0x0000000100000001);
+  StubRoutines::x86::_vector_short_to_byte_mask = generate_vector_mask(StubId::stubgen_vector_short_to_byte_mask_id, 0x00ff00ff00ff00ff);
   StubRoutines::x86::_vector_byte_perm_mask = generate_vector_byte_perm_mask();
-  StubRoutines::x86::_vector_int_to_byte_mask = generate_vector_mask(StubGenStubId::vector_int_to_byte_mask_id, 0x000000ff000000ff);
-  StubRoutines::x86::_vector_int_to_short_mask = generate_vector_mask(StubGenStubId::vector_int_to_short_mask_id, 0x0000ffff0000ffff);
-  StubRoutines::x86::_vector_32_bit_mask = generate_vector_custom_i32(StubGenStubId::vector_32_bit_mask_id, Assembler::AVX_512bit,
+  StubRoutines::x86::_vector_int_to_byte_mask = generate_vector_mask(StubId::stubgen_vector_int_to_byte_mask_id, 0x000000ff000000ff);
+  StubRoutines::x86::_vector_int_to_short_mask = generate_vector_mask(StubId::stubgen_vector_int_to_short_mask_id, 0x0000ffff0000ffff);
+  StubRoutines::x86::_vector_32_bit_mask = generate_vector_custom_i32(StubId::stubgen_vector_32_bit_mask_id, Assembler::AVX_512bit,
                                                                       0xFFFFFFFF, 0, 0, 0);
-  StubRoutines::x86::_vector_64_bit_mask = generate_vector_custom_i32(StubGenStubId::vector_64_bit_mask_id, Assembler::AVX_512bit,
+  StubRoutines::x86::_vector_64_bit_mask = generate_vector_custom_i32(StubId::stubgen_vector_64_bit_mask_id, Assembler::AVX_512bit,
                                                                       0xFFFFFFFF, 0xFFFFFFFF, 0, 0);
-  StubRoutines::x86::_vector_int_shuffle_mask = generate_vector_mask(StubGenStubId::vector_int_shuffle_mask_id, 0x0302010003020100);
+  StubRoutines::x86::_vector_int_shuffle_mask = generate_vector_mask(StubId::stubgen_vector_int_shuffle_mask_id, 0x0302010003020100);
   StubRoutines::x86::_vector_byte_shuffle_mask = generate_vector_byte_shuffle_mask();
-  StubRoutines::x86::_vector_short_shuffle_mask = generate_vector_mask(StubGenStubId::vector_short_shuffle_mask_id, 0x0100010001000100);
-  StubRoutines::x86::_vector_long_shuffle_mask = generate_vector_mask(StubGenStubId::vector_long_shuffle_mask_id, 0x0000000100000000);
-  StubRoutines::x86::_vector_long_sign_mask = generate_vector_mask(StubGenStubId::vector_long_sign_mask_id, 0x8000000000000000);
+  StubRoutines::x86::_vector_short_shuffle_mask = generate_vector_mask(StubId::stubgen_vector_short_shuffle_mask_id, 0x0100010001000100);
+  StubRoutines::x86::_vector_long_shuffle_mask = generate_vector_mask(StubId::stubgen_vector_long_shuffle_mask_id, 0x0000000100000000);
+  StubRoutines::x86::_vector_long_sign_mask = generate_vector_mask(StubId::stubgen_vector_long_sign_mask_id, 0x8000000000000000);
   StubRoutines::x86::_vector_iota_indices = generate_iota_indices();
   StubRoutines::x86::_vector_count_leading_zeros_lut = generate_count_leading_zeros_lut();
   StubRoutines::x86::_vector_reverse_bit_lut = generate_vector_reverse_bit_lut();
@@ -4192,10 +4192,10 @@ void StubGenerator::generate_compiler_stubs() {
   StubRoutines::x86::_vector_reverse_byte_perm_mask_short = generate_vector_reverse_byte_perm_mask_short();
   if (VM_Version::supports_avx2() && !VM_Version::supports_avx512vl()) {
-    StubRoutines::x86::_compress_perm_table32 = generate_compress_perm_table(StubGenStubId::compress_perm_table32_id);
-    StubRoutines::x86::_compress_perm_table64 = generate_compress_perm_table(StubGenStubId::compress_perm_table64_id);
-    StubRoutines::x86::_expand_perm_table32 = generate_expand_perm_table(StubGenStubId::expand_perm_table32_id);
-    StubRoutines::x86::_expand_perm_table64 = generate_expand_perm_table(StubGenStubId::expand_perm_table64_id);
+    StubRoutines::x86::_compress_perm_table32 = generate_compress_perm_table(StubId::stubgen_compress_perm_table32_id);
+    StubRoutines::x86::_compress_perm_table64 = generate_compress_perm_table(StubId::stubgen_compress_perm_table64_id);
+    StubRoutines::x86::_expand_perm_table32 = generate_expand_perm_table(StubId::stubgen_expand_perm_table32_id);
+    StubRoutines::x86::_expand_perm_table64 = generate_expand_perm_table(StubId::stubgen_expand_perm_table64_id);
   }
   if (VM_Version::supports_avx2() && !VM_Version::supports_avx512_vpopcntdq()) {
@@ -4239,15 +4239,15 @@ void StubGenerator::generate_compiler_stubs() {
   }
   if (UseMD5Intrinsics) {
-    StubRoutines::_md5_implCompress = generate_md5_implCompress(StubGenStubId::md5_implCompress_id);
-    StubRoutines::_md5_implCompressMB = generate_md5_implCompress(StubGenStubId::md5_implCompressMB_id);
+    StubRoutines::_md5_implCompress = generate_md5_implCompress(StubId::stubgen_md5_implCompress_id);
+    StubRoutines::_md5_implCompressMB = generate_md5_implCompress(StubId::stubgen_md5_implCompressMB_id);
   }
   if (UseSHA1Intrinsics) {
     StubRoutines::x86::_upper_word_mask_addr = generate_upper_word_mask();
     StubRoutines::x86::_shuffle_byte_flip_mask_addr = generate_shuffle_byte_flip_mask();
-    StubRoutines::_sha1_implCompress = generate_sha1_implCompress(StubGenStubId::sha1_implCompress_id);
-    StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(StubGenStubId::sha1_implCompressMB_id);
+    StubRoutines::_sha1_implCompress = generate_sha1_implCompress(StubId::stubgen_sha1_implCompress_id);
+    StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(StubId::stubgen_sha1_implCompressMB_id);
   }
   if (UseSHA256Intrinsics) {
@@ -4260,15 +4260,15 @@ void StubGenerator::generate_compiler_stubs() {
     }
     StubRoutines::x86::_k256_W_adr = (address)StubRoutines::x86::_k256_W;
     StubRoutines::x86::_pshuffle_byte_flip_mask_addr = generate_pshuffle_byte_flip_mask();
-    StubRoutines::_sha256_implCompress = generate_sha256_implCompress(StubGenStubId::sha256_implCompress_id);
-    StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(StubGenStubId::sha256_implCompressMB_id);
+    StubRoutines::_sha256_implCompress = generate_sha256_implCompress(StubId::stubgen_sha256_implCompress_id);
+    StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(StubId::stubgen_sha256_implCompressMB_id);
   }
   if (UseSHA512Intrinsics) {
     StubRoutines::x86::_k512_W_addr = (address)StubRoutines::x86::_k512_W;
     StubRoutines::x86::_pshuffle_byte_flip_mask_addr_sha512 = generate_pshuffle_byte_flip_mask_sha512();
-    StubRoutines::_sha512_implCompress = generate_sha512_implCompress(StubGenStubId::sha512_implCompress_id);
-    StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(StubGenStubId::sha512_implCompressMB_id);
+    StubRoutines::_sha512_implCompress = generate_sha512_implCompress(StubId::stubgen_sha512_implCompress_id);
+    StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(StubId::stubgen_sha512_implCompressMB_id);
   }
   if (UseBASE64Intrinsics) {
@@ -4344,30 +4344,30 @@ void StubGenerator::generate_compiler_stubs() {
 #endif // COMPILER2_OR_JVMCI
 }
-StubGenerator::StubGenerator(CodeBuffer* code, StubGenBlobId blob_id) : StubCodeGenerator(code, blob_id) {
+StubGenerator::StubGenerator(CodeBuffer* code, BlobId blob_id) : StubCodeGenerator(code, blob_id) {
   switch(blob_id) {
-  case preuniverse_id:
+  case BlobId::stubgen_preuniverse_id:
     generate_preuniverse_stubs();
    break;
-  case initial_id:
+  case BlobId::stubgen_initial_id:
    generate_initial_stubs();
    break;
-  case continuation_id:
+  case BlobId::stubgen_continuation_id:
    generate_continuation_stubs();
    break;
-  case compiler_id:
+  case BlobId::stubgen_compiler_id:
    generate_compiler_stubs();
    break;
-  case final_id:
+  case BlobId::stubgen_final_id:
    generate_final_stubs();
    break;
   default:
-    fatal("unexpected blob id: %d", blob_id);
+    fatal("unexpected blob id: %s", StubInfo::name(blob_id));
    break;
   };
 }
-void StubGenerator_generate(CodeBuffer* code, StubGenBlobId blob_id) {
+void StubGenerator_generate(CodeBuffer* code, BlobId blob_id) {
   StubGenerator g(code, blob_id);
 }
diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64.hpp b/src/hotspot/cpu/x86/stubGenerator_x86_64.hpp
index 032e05f4b4f..de2b0ad506d 100644
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64.hpp
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.hpp
@@ -98,19 +98,19 @@ class StubGenerator: public StubCodeGenerator {
   address generate_vector_reverse_byte_perm_mask_short();
   address generate_vector_byte_shuffle_mask();
-  address generate_fp_mask(StubGenStubId stub_id, int64_t mask);
+  address generate_fp_mask(StubId stub_id, int64_t mask);
-  address generate_compress_perm_table(StubGenStubId stub_id);
+  address generate_compress_perm_table(StubId stub_id);
-  address generate_expand_perm_table(StubGenStubId stub_id);
+  address generate_expand_perm_table(StubId stub_id);
-  address generate_vector_mask(StubGenStubId stub_id, int64_t mask);
+  address generate_vector_mask(StubId stub_id, int64_t mask);
   address generate_vector_byte_perm_mask();
-  address generate_vector_fp_mask(StubGenStubId stub_id, int64_t mask);
+  address generate_vector_fp_mask(StubId stub_id, int64_t mask);
-  address generate_vector_custom_i32(StubGenStubId stub_id, Assembler::AvxVectorLen len,
+  address generate_vector_custom_i32(StubId stub_id, Assembler::AvxVectorLen len,
                                      int32_t val0, int32_t val1, int32_t val2, int32_t val3,
                                      int32_t val4 = 0, int32_t val5 = 0, int32_t val6 = 0, int32_t val7 = 0,
                                      int32_t val8 = 0, int32_t val9 = 0, int32_t val10 = 0, int32_t val11 = 0,
@@ -180,9 +180,9 @@ class StubGenerator: public StubCodeGenerator {
   // - If user sets AVX3Threshold=0, then special cases for small blocks sizes operate over
   //   64 byte vector registers (ZMMs).
-  address generate_disjoint_copy_avx3_masked(StubGenStubId stub_id, address* entry);
+  address generate_disjoint_copy_avx3_masked(StubId stub_id, address* entry);
-  address generate_conjoint_copy_avx3_masked(StubGenStubId stub_id, address* entry,
+  address generate_conjoint_copy_avx3_masked(StubId stub_id, address* entry,
                                              address nooverlap_target);
   void arraycopy_avx3_special_cases(XMMRegister xmm, KRegister mask, Register from,
@@ -230,14 +230,14 @@ class StubGenerator: public StubCodeGenerator {
   address generate_disjoint_short_copy(address *entry);
-  address generate_fill(StubGenStubId stub_id);
+  address generate_fill(StubId stub_id);
   address generate_conjoint_short_copy(address nooverlap_target, address *entry);
-  address generate_disjoint_int_oop_copy(StubGenStubId stub_id, address* entry);
-  address generate_conjoint_int_oop_copy(StubGenStubId stub_id, address nooverlap_target,
+  address generate_disjoint_int_oop_copy(StubId stub_id, address* entry);
+  address generate_conjoint_int_oop_copy(StubId stub_id, address nooverlap_target,
                                          address *entry);
-  address generate_disjoint_long_oop_copy(StubGenStubId stub_id, address* entry);
-  address generate_conjoint_long_oop_copy(StubGenStubId stub_id, address nooverlap_target,
+  address generate_disjoint_long_oop_copy(StubId stub_id, address* entry);
+  address generate_conjoint_long_oop_copy(StubId stub_id, address nooverlap_target,
                                           address *entry);
   // Helper for generating a dynamic type check.
@@ -248,7 +248,7 @@ class StubGenerator: public StubCodeGenerator {
                            Label& L_success);
   // Generate checkcasting array copy stub
-  address generate_checkcast_copy(StubGenStubId stub_id, address *entry);
+  address generate_checkcast_copy(StubId stub_id, address *entry);
   // Generate 'unsafe' array copy stub
   // Though just as safe as the other stubs, it takes an unscaled
@@ -294,19 +294,19 @@ class StubGenerator: public StubCodeGenerator {
   // ofs and limit are use for multi-block byte array.
   // int com.sun.security.provider.MD5.implCompress(byte[] b, int ofs)
-  address generate_md5_implCompress(StubGenStubId stub_id);
+  address generate_md5_implCompress(StubId stub_id);
   // SHA stubs
   // ofs and limit are use for multi-block byte array.
   // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
-  address generate_sha1_implCompress(StubGenStubId stub_id);
+  address generate_sha1_implCompress(StubId stub_id);
   // ofs and limit are use for multi-block byte array.
   // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
-  address generate_sha256_implCompress(StubGenStubId stub_id);
-  address generate_sha512_implCompress(StubGenStubId stub_id);
+  address generate_sha256_implCompress(StubId stub_id);
+  address generate_sha512_implCompress(StubId stub_id);
   // Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb.
   address generate_pshuffle_byte_flip_mask_sha512();
@@ -590,7 +590,7 @@ class StubGenerator: public StubCodeGenerator {
   void generate_string_indexof(address *fnptrs);
 #endif
-  address generate_cont_thaw(StubGenStubId stub_id);
+  address generate_cont_thaw(StubId stub_id);
   address generate_cont_thaw();
   // TODO: will probably need multiple return barriers depending on return type
@@ -641,7 +641,7 @@ class StubGenerator: public StubCodeGenerator {
   void generate_final_stubs();
  public:
-  StubGenerator(CodeBuffer* code, StubGenBlobId blob_id);
+  StubGenerator(CodeBuffer* code, BlobId blob_id);
 };
 #endif // CPU_X86_STUBGENERATOR_X86_64_HPP
diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_adler.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_adler.cpp
index 8a2aeaa5887..2799997a761 100644
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64_adler.cpp
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_adler.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2021, 2024, Intel Corporation. All rights reserved.
+* Copyright (c) 2021, 2025, Intel Corporation. All rights reserved.
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@@ -66,7 +66,7 @@ address StubGenerator::generate_updateBytesAdler32() {
   assert(UseAdler32Intrinsics, "");
   __ align(CodeEntryAlignment);
-  StubGenStubId stub_id = StubGenStubId::updateBytesAdler32_id;
+  StubId stub_id = StubId::stubgen_updateBytesAdler32_id;
   StubCodeMark mark(this, stub_id);
   address start = __ pc();
diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_aes.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_aes.cpp
index 26bf3f7d725..2cd1ed24fcd 100644
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64_aes.cpp
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_aes.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2019, 2024, Intel Corporation. All rights reserved.
+* Copyright (c) 2019, 2025, Intel Corporation. All rights reserved.
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@@ -249,7 +249,7 @@ void StubGenerator::generate_aes_stubs() {
 //   rax - number of processed bytes
 address StubGenerator::generate_galoisCounterMode_AESCrypt() {
   __ align(CodeEntryAlignment);
-  StubGenStubId stub_id = StubGenStubId::galoisCounterMode_AESCrypt_id;
+  StubId stub_id = StubId::stubgen_galoisCounterMode_AESCrypt_id;
   StubCodeMark mark(this, stub_id);
   address start = __ pc();
@@ -336,7 +336,7 @@ address StubGenerator::generate_galoisCounterMode_AESCrypt() {
 //   rax - number of processed bytes
 address StubGenerator::generate_avx2_galoisCounterMode_AESCrypt() {
   __ align(CodeEntryAlignment);
-  StubGenStubId stub_id = StubGenStubId::galoisCounterMode_AESCrypt_id;
+  StubId stub_id = StubId::stubgen_galoisCounterMode_AESCrypt_id;
   StubCodeMark mark(this, stub_id);
   address start = __ pc();
@@ -408,7 +408,7 @@ address StubGenerator::generate_avx2_galoisCounterMode_AESCrypt() {
 // Vector AES Counter implementation
 address StubGenerator::generate_counterMode_VectorAESCrypt() {
   __ align(CodeEntryAlignment);
-  StubGenStubId stub_id = StubGenStubId::counterMode_AESCrypt_id;
+  StubId stub_id = StubId::stubgen_counterMode_AESCrypt_id;
   StubCodeMark mark(this, stub_id);
   address start = __ pc();
@@ -497,7 +497,7 @@ address StubGenerator::generate_counterMode_VectorAESCrypt() {
 address StubGenerator::generate_counterMode_AESCrypt_Parallel() {
   assert(UseAES, "need AES instructions and misaligned SSE support");
   __ align(CodeEntryAlignment);
-  StubGenStubId stub_id = StubGenStubId::counterMode_AESCrypt_id;
+  StubId stub_id = StubId::stubgen_counterMode_AESCrypt_id;
   StubCodeMark mark(this, stub_id);
   address start = __ pc();
@@ -785,7 +785,7 @@ address StubGenerator::generate_counterMode_AESCrypt_Parallel() {
 address StubGenerator::generate_cipherBlockChaining_decryptVectorAESCrypt() {
   assert(VM_Version::supports_avx512_vaes(), "need AES instructions and misaligned SSE support");
   __ align(CodeEntryAlignment);
-  StubGenStubId stub_id = StubGenStubId::cipherBlockChaining_decryptAESCrypt_id;
+  StubId stub_id = StubId::stubgen_cipherBlockChaining_decryptAESCrypt_id;
   StubCodeMark mark(this, stub_id);
   address start = __ pc();
@@ -1068,7 +1068,7 @@ address StubGenerator::generate_cipherBlockChaining_decryptVectorAESCrypt() {
 address StubGenerator::generate_aescrypt_encryptBlock() {
   assert(UseAES, "need AES instructions and misaligned SSE support");
   __ align(CodeEntryAlignment);
-  StubGenStubId stub_id = StubGenStubId::aescrypt_encryptBlock_id;
+  StubId stub_id = StubId::stubgen_aescrypt_encryptBlock_id;
   StubCodeMark mark(this, stub_id);
   Label L_doLast;
   address start = __ pc();
@@ -1163,7 +1163,7 @@ address StubGenerator::generate_aescrypt_encryptBlock() {
 address StubGenerator::generate_aescrypt_decryptBlock() {
   assert(UseAES, "need AES instructions and misaligned SSE support");
   __ align(CodeEntryAlignment);
-  StubGenStubId stub_id = StubGenStubId::aescrypt_decryptBlock_id;
+  StubId stub_id = StubId::stubgen_aescrypt_decryptBlock_id;
   StubCodeMark mark(this, stub_id);
   Label L_doLast;
   address start = __ pc();
@@ -1265,7 +1265,7 @@ address StubGenerator::generate_aescrypt_decryptBlock() {
 address StubGenerator::generate_cipherBlockChaining_encryptAESCrypt() {
   assert(UseAES, "need AES instructions and misaligned SSE support");
   __ align(CodeEntryAlignment);
-  StubGenStubId stub_id = StubGenStubId::cipherBlockChaining_encryptAESCrypt_id;
+  StubId stub_id = StubId::stubgen_cipherBlockChaining_encryptAESCrypt_id;
   StubCodeMark mark(this, stub_id);
   address start = __ pc();
@@ -1417,7 +1417,7 @@ address StubGenerator::generate_cipherBlockChaining_encryptAESCrypt() {
 address StubGenerator::generate_cipherBlockChaining_decryptAESCrypt_Parallel() {
   assert(UseAES, "need AES instructions and misaligned SSE support");
   __ align(CodeEntryAlignment);
-  StubGenStubId stub_id = StubGenStubId::cipherBlockChaining_decryptAESCrypt_id;
+  StubId stub_id = StubId::stubgen_cipherBlockChaining_decryptAESCrypt_id;
   StubCodeMark mark(this, stub_id);
   address start = __ pc();
@@ -1660,7 +1660,7 @@ __ opc(xmm_result3, src_reg); \
 address StubGenerator::generate_electronicCodeBook_encryptAESCrypt() {
   __ align(CodeEntryAlignment);
-  StubGenStubId stub_id = StubGenStubId::electronicCodeBook_encryptAESCrypt_id;
+  StubId stub_id = StubId::stubgen_electronicCodeBook_encryptAESCrypt_id;
   StubCodeMark mark(this, stub_id);
   address start = __ pc();
@@ -1681,7 +1681,7 @@ address StubGenerator::generate_electronicCodeBook_encryptAESCrypt() {
 address StubGenerator::generate_electronicCodeBook_decryptAESCrypt() {
   __ align(CodeEntryAlignment);
-  StubGenStubId stub_id = StubGenStubId::electronicCodeBook_decryptAESCrypt_id;
+  StubId stub_id = StubId::stubgen_electronicCodeBook_decryptAESCrypt_id;
   StubCodeMark mark(this, stub_id);
   address start = __ pc();
diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_arraycopy.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_arraycopy.cpp
index 1e056f15213..9dac1eab002 100644
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64_arraycopy.cpp
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_arraycopy.cpp
@@ -90,25 +90,25 @@ void StubGenerator::generate_arraycopy_stubs() {
   StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(&entry);
   StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(entry, &entry_jshort_arraycopy);
-  StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(StubGenStubId::jint_disjoint_arraycopy_id, &entry);
-  StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(StubGenStubId::jint_arraycopy_id, entry, &entry_jint_arraycopy);
+  StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(StubId::stubgen_jint_disjoint_arraycopy_id, &entry);
+  StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(StubId::stubgen_jint_arraycopy_id, entry, &entry_jint_arraycopy);
-  StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(StubGenStubId::jlong_disjoint_arraycopy_id, &entry);
-  StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(StubGenStubId::jlong_arraycopy_id, entry, &entry_jlong_arraycopy);
+  StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(StubId::stubgen_jlong_disjoint_arraycopy_id, &entry);
+  StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(StubId::stubgen_jlong_arraycopy_id, entry, &entry_jlong_arraycopy);
   if (UseCompressedOops) {
-    StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(StubGenStubId::oop_disjoint_arraycopy_id, &entry);
-    StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(StubGenStubId::oop_arraycopy_id, entry, &entry_oop_arraycopy);
-    StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(StubGenStubId::oop_disjoint_arraycopy_uninit_id, &entry);
-    StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(StubGenStubId::oop_arraycopy_uninit_id, entry, nullptr);
+    StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(StubId::stubgen_oop_disjoint_arraycopy_id, &entry);
+    StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(StubId::stubgen_oop_arraycopy_id, entry, &entry_oop_arraycopy);
+    StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(StubId::stubgen_oop_disjoint_arraycopy_uninit_id, &entry);
+    StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(StubId::stubgen_oop_arraycopy_uninit_id, entry, nullptr);
   } else {
-    StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(StubGenStubId::oop_disjoint_arraycopy_id, &entry);
-    StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(StubGenStubId::oop_arraycopy_id, entry, &entry_oop_arraycopy);
-    StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(StubGenStubId::oop_disjoint_arraycopy_uninit_id, &entry);
-    StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(StubGenStubId::oop_arraycopy_uninit_id, entry, nullptr);
+    StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(StubId::stubgen_oop_disjoint_arraycopy_id, &entry);
+    StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(StubId::stubgen_oop_arraycopy_id, entry, &entry_oop_arraycopy);
+    StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(StubId::stubgen_oop_disjoint_arraycopy_uninit_id, &entry);
+    StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(StubId::stubgen_oop_arraycopy_uninit_id, entry, nullptr);
   }
-  StubRoutines::_checkcast_arraycopy = generate_checkcast_copy(StubGenStubId::checkcast_arraycopy_id, &entry_checkcast_arraycopy);
-  StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy(StubGenStubId::checkcast_arraycopy_uninit_id, nullptr);
+  StubRoutines::_checkcast_arraycopy = generate_checkcast_copy(StubId::stubgen_checkcast_arraycopy_id, &entry_checkcast_arraycopy);
+  StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy(StubId::stubgen_checkcast_arraycopy_uninit_id, nullptr);
   StubRoutines::_unsafe_arraycopy = generate_unsafe_copy(entry_jbyte_arraycopy,
                                                          entry_jshort_arraycopy,
@@ -121,12 +121,12 @@ void StubGenerator::generate_arraycopy_stubs() {
                                                          entry_jlong_arraycopy,
                                                          entry_checkcast_arraycopy);
-  StubRoutines::_jbyte_fill = generate_fill(StubGenStubId::jbyte_fill_id);
-  StubRoutines::_jshort_fill = generate_fill(StubGenStubId::jshort_fill_id);
-  StubRoutines::_jint_fill = generate_fill(StubGenStubId::jint_fill_id);
-  StubRoutines::_arrayof_jbyte_fill = generate_fill(StubGenStubId::arrayof_jbyte_fill_id);
-  StubRoutines::_arrayof_jshort_fill = generate_fill(StubGenStubId::arrayof_jshort_fill_id);
-  StubRoutines::_arrayof_jint_fill = generate_fill(StubGenStubId::arrayof_jint_fill_id);
+  StubRoutines::_jbyte_fill = generate_fill(StubId::stubgen_jbyte_fill_id);
+  StubRoutines::_jshort_fill = generate_fill(StubId::stubgen_jshort_fill_id);
+  StubRoutines::_jint_fill = generate_fill(StubId::stubgen_jint_fill_id);
+  StubRoutines::_arrayof_jbyte_fill = generate_fill(StubId::stubgen_arrayof_jbyte_fill_id);
+  StubRoutines::_arrayof_jshort_fill = generate_fill(StubId::stubgen_arrayof_jshort_fill_id);
+  StubRoutines::_arrayof_jint_fill = generate_fill(StubId::stubgen_arrayof_jint_fill_id);
   StubRoutines::_unsafe_setmemory = generate_unsafe_setmemory(StubRoutines::_jbyte_fill);
@@ -484,7 +484,7 @@ void StubGenerator::copy_bytes_backward(Register from, Register dest,
 // disjoint_copy_avx3_masked is set to the no-overlap entry point
 // used by generate_conjoint_[byte/int/short/long]_copy().
 //
-address StubGenerator::generate_disjoint_copy_avx3_masked(StubGenStubId stub_id, address* entry) {
+address StubGenerator::generate_disjoint_copy_avx3_masked(StubId stub_id, address* entry) {
   // aligned is always false -- x86_64 always uses the unaligned code
   const bool aligned = false;
   int shift;
@@ -492,32 +492,32 @@ address StubGenerator::generate_disjoint_copy_avx3_masked(StubGenStubId stub_id,
   bool dest_uninitialized;
   switch (stub_id) {
-  case jbyte_disjoint_arraycopy_id:
+  case StubId::stubgen_jbyte_disjoint_arraycopy_id:
     shift = 0;
     is_oop = false;
     dest_uninitialized = false;
     break;
-  case jshort_disjoint_arraycopy_id:
+  case StubId::stubgen_jshort_disjoint_arraycopy_id:
     shift = 1;
     is_oop = false;
     dest_uninitialized = false;
     break;
-  case jint_disjoint_arraycopy_id:
+  case StubId::stubgen_jint_disjoint_arraycopy_id:
     shift = 2;
     is_oop = false;
     dest_uninitialized = false;
     break;
-  case jlong_disjoint_arraycopy_id:
+  case StubId::stubgen_jlong_disjoint_arraycopy_id:
     shift = 3;
     is_oop = false;
     dest_uninitialized = false;
     break;
-  case oop_disjoint_arraycopy_id:
+  case StubId::stubgen_oop_disjoint_arraycopy_id:
     shift = (UseCompressedOops ? 2 : 3);
     is_oop = true;
     dest_uninitialized = false;
     break;
-  case oop_disjoint_arraycopy_uninit_id:
+  case StubId::stubgen_oop_disjoint_arraycopy_uninit_id:
     shift = (UseCompressedOops ? 2 : 3);
     is_oop = true;
     dest_uninitialized = true;
@@ -822,7 +822,7 @@ void StubGenerator::arraycopy_avx3_large(Register to, Register from, Register te
 //   c_rarg2   - element count, treated as ssize_t, can be zero
 //
 //
-address StubGenerator::generate_conjoint_copy_avx3_masked(StubGenStubId stub_id, address* entry, address nooverlap_target) {
+address StubGenerator::generate_conjoint_copy_avx3_masked(StubId stub_id, address* entry, address nooverlap_target) {
   // aligned is always false -- x86_64 always uses the unaligned code
   const bool aligned = false;
   int shift;
@@ -830,32 +830,32 @@ address StubGenerator::generate_conjoint_copy_avx3_masked(StubGenStubId stub_id,
   bool dest_uninitialized;
   switch (stub_id) {
-  case jbyte_arraycopy_id:
+  case StubId::stubgen_jbyte_arraycopy_id:
     shift = 0;
     is_oop = false;
     dest_uninitialized = false;
     break;
-  case jshort_arraycopy_id:
+  case StubId::stubgen_jshort_arraycopy_id:
     shift = 1;
     is_oop = false;
     dest_uninitialized = false;
     break;
-  case jint_arraycopy_id:
+  case StubId::stubgen_jint_arraycopy_id:
     shift = 2;
     is_oop = false;
     dest_uninitialized = false;
     break;
-  case jlong_arraycopy_id:
+  case StubId::stubgen_jlong_arraycopy_id:
     shift = 3;
     is_oop = false;
     dest_uninitialized = false;
     break;
-  case oop_arraycopy_id:
+  case StubId::stubgen_oop_arraycopy_id:
     shift = (UseCompressedOops ? 2 : 3);
     is_oop = true;
     dest_uninitialized = false;
     break;
-  case oop_arraycopy_uninit_id:
+  case StubId::stubgen_oop_arraycopy_uninit_id:
     shift = (UseCompressedOops ? 2 : 3);
     is_oop = true;
     dest_uninitialized = true;
@@ -1334,7 +1334,7 @@ void StubGenerator::copy64_avx(Register dst, Register src, Register index, XMMRe
 // used by generate_conjoint_byte_copy().
 //
 address StubGenerator::generate_disjoint_byte_copy(address* entry) {
-  StubGenStubId stub_id = StubGenStubId::jbyte_disjoint_arraycopy_id;
+  StubId stub_id = StubId::stubgen_jbyte_disjoint_arraycopy_id;
   // aligned is always false -- x86_64 always uses the unaligned code
   const bool aligned = false;
 #if COMPILER2_OR_JVMCI
@@ -1452,7 +1452,7 @@
 // and stored atomically.
 //
 address StubGenerator::generate_conjoint_byte_copy(address nooverlap_target, address* entry) {
-  StubGenStubId stub_id = StubGenStubId::jbyte_arraycopy_id;
+  StubId stub_id = StubId::stubgen_jbyte_arraycopy_id;
   // aligned is always false -- x86_64 always uses the unaligned code
   const bool aligned = false;
 #if COMPILER2_OR_JVMCI
@@ -1565,7 +1565,7 @@ address StubGenerator::generate_conjoint_byte_copy(address nooverlap_target, add
 // used by generate_conjoint_short_copy().
 //
 address StubGenerator::generate_disjoint_short_copy(address *entry) {
-  StubGenStubId stub_id = StubGenStubId::jshort_disjoint_arraycopy_id;
+  StubId stub_id = StubId::stubgen_jshort_disjoint_arraycopy_id;
   // aligned is always false -- x86_64 always uses the unaligned code
   const bool aligned = false;
 #if COMPILER2_OR_JVMCI
@@ -1662,32 +1662,32 @@ __ BIND(L_exit);
 }
-address StubGenerator::generate_fill(StubGenStubId stub_id) {
+address StubGenerator::generate_fill(StubId stub_id) {
   BasicType t;
   bool aligned;
   switch (stub_id) {
-  case jbyte_fill_id:
+  case StubId::stubgen_jbyte_fill_id:
     t = T_BYTE;
     aligned = false;
     break;
-  case jshort_fill_id:
+  case StubId::stubgen_jshort_fill_id:
    t = T_SHORT;
    aligned = false;
    break;
-  case jint_fill_id:
+  case StubId::stubgen_jint_fill_id:
    t = T_INT;
    aligned = false;
    break;
-  case arrayof_jbyte_fill_id:
+  case StubId::stubgen_arrayof_jbyte_fill_id:
    t = T_BYTE;
    aligned = true;
    break;
-  case arrayof_jshort_fill_id:
+  case StubId::stubgen_arrayof_jshort_fill_id:
    t = T_SHORT;
    aligned = true;
    break;
-  case arrayof_jint_fill_id:
+  case StubId::stubgen_arrayof_jint_fill_id:
    t = T_INT;
    aligned = true;
    break;
@@ -1737,7 +1737,7 @@ address StubGenerator::generate_fill(StubGenStubId stub_id) {
 // and stored atomically.
 //
 address StubGenerator::generate_conjoint_short_copy(address nooverlap_target, address *entry) {
-  StubGenStubId stub_id = StubGenStubId::jshort_arraycopy_id;
+  StubId stub_id = StubId::stubgen_jshort_arraycopy_id;
   // aligned is always false -- x86_64 always uses the unaligned code
   const bool aligned = false;
 #if COMPILER2_OR_JVMCI
@@ -1843,22 +1843,22 @@ address StubGenerator::generate_conjoint_short_copy(address nooverlap_target, ad
 // disjoint_int_copy_entry is set to the no-overlap entry point
 // used by generate_conjoint_int_oop_copy().
 //
-address StubGenerator::generate_disjoint_int_oop_copy(StubGenStubId stub_id, address* entry) {
+address StubGenerator::generate_disjoint_int_oop_copy(StubId stub_id, address* entry) {
   // aligned is always false -- x86_64 always uses the unaligned code
   const bool aligned = false;
   bool is_oop;
   bool dest_uninitialized;
   switch (stub_id) {
-  case StubGenStubId::jint_disjoint_arraycopy_id:
+  case StubId::stubgen_jint_disjoint_arraycopy_id:
     is_oop = false;
     dest_uninitialized = false;
     break;
-  case StubGenStubId::oop_disjoint_arraycopy_id:
+  case StubId::stubgen_oop_disjoint_arraycopy_id:
     assert(UseCompressedOops, "inconsistent oop copy size!");
     is_oop = true;
     dest_uninitialized = false;
     break;
-  case StubGenStubId::oop_disjoint_arraycopy_uninit_id:
+  case StubId::stubgen_oop_disjoint_arraycopy_uninit_id:
     assert(UseCompressedOops, "inconsistent oop copy size!");
     is_oop = true;
     dest_uninitialized = true;
@@ -1974,22 +1974,22 @@ __ BIND(L_exit);
 // the hardware handle it. The two dwords within qwords that span
 // cache line boundaries will still be loaded and stored atomically.
 //
-address StubGenerator::generate_conjoint_int_oop_copy(StubGenStubId stub_id, address nooverlap_target, address *entry) {
+address StubGenerator::generate_conjoint_int_oop_copy(StubId stub_id, address nooverlap_target, address *entry) {
   // aligned is always false -- x86_64 always uses the unaligned code
   const bool aligned = false;
   bool is_oop;
   bool dest_uninitialized;
   switch (stub_id) {
-  case StubGenStubId::jint_arraycopy_id:
+  case StubId::stubgen_jint_arraycopy_id:
     is_oop = false;
     dest_uninitialized = false;
     break;
-  case StubGenStubId::oop_arraycopy_id:
+  case StubId::stubgen_oop_arraycopy_id:
     assert(UseCompressedOops, "inconsistent oop copy size!");
     is_oop = true;
     dest_uninitialized = false;
     break;
-  case StubGenStubId::oop_arraycopy_uninit_id:
+  case StubId::stubgen_oop_arraycopy_uninit_id:
     assert(UseCompressedOops, "inconsistent oop copy size!");
     is_oop = true;
     dest_uninitialized = true;
@@ -2107,22 +2107,22 @@ __ BIND(L_exit);
 // disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the
 // no-overlap entry point used by generate_conjoint_long_oop_copy().
 //
-address StubGenerator::generate_disjoint_long_oop_copy(StubGenStubId stub_id, address *entry) {
+address StubGenerator::generate_disjoint_long_oop_copy(StubId stub_id, address *entry) {
   // aligned is always false -- x86_64 always uses the unaligned code
   const bool aligned = false;
   bool is_oop;
   bool dest_uninitialized;
   switch (stub_id) {
-  case StubGenStubId::jlong_disjoint_arraycopy_id:
+  case StubId::stubgen_jlong_disjoint_arraycopy_id:
     is_oop = false;
     dest_uninitialized = false;
     break;
-  case StubGenStubId::oop_disjoint_arraycopy_id:
+  case StubId::stubgen_oop_disjoint_arraycopy_id:
     assert(!UseCompressedOops, "inconsistent oop copy size!");
     is_oop = true;
     dest_uninitialized = false;
     break;
-  case StubGenStubId::oop_disjoint_arraycopy_uninit_id:
+  case StubId::stubgen_oop_disjoint_arraycopy_uninit_id:
     assert(!UseCompressedOops, "inconsistent oop copy size!");
     is_oop = true;
     dest_uninitialized = true;
@@ -2240,22 +2240,22 @@ address StubGenerator::generate_disjoint_long_oop_copy(StubGenStubId stub_id, ad
 //   c_rarg1   - destination array address
 //   c_rarg2   - element count, treated as ssize_t, can be zero
 //
-address StubGenerator::generate_conjoint_long_oop_copy(StubGenStubId stub_id, address nooverlap_target, address *entry) {
+address StubGenerator::generate_conjoint_long_oop_copy(StubId stub_id, address nooverlap_target, address *entry) {
   // aligned is always false -- x86_64 always uses the unaligned code
   const bool aligned = false;
   bool is_oop;
   bool dest_uninitialized;
   switch (stub_id) {
-  case StubGenStubId::jlong_arraycopy_id:
+  case StubId::stubgen_jlong_arraycopy_id:
     is_oop = false;
     dest_uninitialized = false;
     break;
-  case StubGenStubId::oop_arraycopy_id:
+  case StubId::stubgen_oop_arraycopy_id:
     assert(!UseCompressedOops, "inconsistent oop copy size!");
     is_oop = true;
     dest_uninitialized = false;
     break;
-  case StubGenStubId::oop_arraycopy_uninit_id:
+  case StubId::stubgen_oop_arraycopy_uninit_id:
     assert(!UseCompressedOops, "inconsistent oop copy size!");
     is_oop = true;
     dest_uninitialized = true;
@@ -2391,14 +2391,14 @@ void StubGenerator::generate_type_check(Register sub_klass,
 //    rax ==  0  -  success
 //    rax == -1^K - failure, where K is partial transfer count
 //
-address StubGenerator::generate_checkcast_copy(StubGenStubId stub_id, address *entry) {
+address StubGenerator::generate_checkcast_copy(StubId stub_id, address *entry) {
   bool dest_uninitialized;
   switch (stub_id) {
-  case StubGenStubId::checkcast_arraycopy_id:
+  case StubId::stubgen_checkcast_arraycopy_id:
     dest_uninitialized = false;
     break;
-  case StubGenStubId::checkcast_arraycopy_uninit_id:
+  case StubId::stubgen_checkcast_arraycopy_uninit_id:
     dest_uninitialized = true;
     break;
   default:
@@ -2623,7 +2623,7 @@ address StubGenerator::generate_unsafe_copy(address byte_copy_entry, address sho
   const Register bits = rax;  // test copy of low bits
   __ align(CodeEntryAlignment);
-  StubGenStubId stub_id = StubGenStubId::unsafe_arraycopy_id;
+  StubId stub_id = StubId::stubgen_unsafe_arraycopy_id;
   StubCodeMark mark(this, stub_id);
   address start = __ pc();
@@ -2759,7 +2759,7 @@ static void do_setmemory_atomic_loop(USM_TYPE type, Register dest,
 //
 address StubGenerator::generate_unsafe_setmemory(address unsafe_byte_fill) {
   __ align(CodeEntryAlignment);
-  StubGenStubId stub_id = StubGenStubId::unsafe_setmemory_id;
+  StubId stub_id = StubId::stubgen_unsafe_setmemory_id;
   StubCodeMark mark(this, stub_id);
   address start = __ pc();
   __ enter(); // required for proper stackwalking of RuntimeStub frame
@@ -2929,7 +2929,7 @@ address StubGenerator::generate_generic_copy(address byte_copy_entry, address sh
     if (advance < 0) advance += modulus;
     if (advance > 0) __ nop(advance);
   }
-  StubGenStubId stub_id = StubGenStubId::generic_arraycopy_id;
+  StubId stub_id = StubId::stubgen_generic_arraycopy_id;
   StubCodeMark mark(this, stub_id);
   // Short-hop target to L_failed. Makes for denser prologue code.
diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_cbrt.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_cbrt.cpp
index 6faa2081fb2..c35f1b9f65e 100644
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64_cbrt.cpp
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_cbrt.cpp
@@ -190,7 +190,7 @@ ATTRIBUTE_ALIGNED(4) static const juint _D_table[] =
 #define __ _masm->
 address StubGenerator::generate_libmCbrt() {
-  StubGenStubId stub_id = StubGenStubId::dcbrt_id;
+  StubId stub_id = StubId::stubgen_dcbrt_id;
   StubCodeMark mark(this, stub_id);
   address start = __ pc();
diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_chacha.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_chacha.cpp
index f7fb4024077..7afaf34e031 100644
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64_chacha.cpp
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_chacha.cpp
@@ -112,7 +112,7 @@ void StubGenerator::generate_chacha_stubs() {
 /* The 2-block AVX/AVX2-enabled ChaCha20 block function implementation */
 address StubGenerator::generate_chacha20Block_avx() {
   __ align(CodeEntryAlignment);
-  StubGenStubId stub_id = StubGenStubId::chacha20Block_id;
+  StubId stub_id = StubId::stubgen_chacha20Block_id;
   StubCodeMark mark(this, stub_id);
   address start = __ pc();
@@ -301,7 +301,7 @@ address StubGenerator::generate_chacha20Block_avx() {
 /* The 4-block AVX512-enabled ChaCha20 block function implementation */
 address StubGenerator::generate_chacha20Block_avx512() {
   __ align(CodeEntryAlignment);
-  StubGenStubId stub_id = StubGenStubId::chacha20Block_id;
+  StubId stub_id = StubId::stubgen_chacha20Block_id;
   StubCodeMark mark(this, stub_id);
   address start = __ pc();
diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_cos.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_cos.cpp
index 3f037a919d7..67017e7559a 100644
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64_cos.cpp
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_cos.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2024, Intel Corporation. All rights reserved.
+ * Copyright (c) 2016, 2025, Intel Corporation. All rights reserved.
 * Intel Math Library (LIBM) Source Code
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -173,7 +173,7 @@
 #define __ _masm->
 address StubGenerator::generate_libmCos() {
-  StubGenStubId stub_id = StubGenStubId::dcos_id;
+  StubId stub_id = StubId::stubgen_dcos_id;
   StubCodeMark mark(this, stub_id);
   address start = __ pc();
diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_dilithium.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_dilithium.cpp
index 7121db2ab91..9555d60c8a4 100644
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64_dilithium.cpp
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_dilithium.cpp
@@ -274,7 +274,7 @@ static address generate_dilithiumAlmostNtt_avx512(StubGenerator *stubgen,
                                                   MacroAssembler *_masm) {
   __ align(CodeEntryAlignment);
-  StubGenStubId stub_id = dilithiumAlmostNtt_id;
+  StubId stub_id = StubId::stubgen_dilithiumAlmostNtt_id;
   StubCodeMark mark(stubgen, stub_id);
   address start = __ pc();
   __ enter();
@@ -464,7 +464,7 @@ static address generate_dilithiumAlmostInverseNtt_avx512(StubGenerator *stubgen,
                                                          MacroAssembler *_masm) {
   __ align(CodeEntryAlignment);
-  StubGenStubId stub_id = dilithiumAlmostInverseNtt_id;
+  StubId stub_id = StubId::stubgen_dilithiumAlmostInverseNtt_id;
   StubCodeMark mark(stubgen, stub_id);
   address start = __ pc();
   __ enter();
@@ -645,7 +645,7 @@ static address generate_dilithiumNttMult_avx512(StubGenerator *stubgen,
                                                 MacroAssembler *_masm) {
   __ align(CodeEntryAlignment);
-  StubGenStubId stub_id = dilithiumNttMult_id;
+  StubId stub_id = StubId::stubgen_dilithiumNttMult_id;
   StubCodeMark mark(stubgen, stub_id);
   address start = __ pc();
   __ enter();
@@ -709,7 +709,7 @@ static address generate_dilithiumMontMulByConstant_avx512(StubGenerator *stubgen
                                                           MacroAssembler *_masm) {
   __ align(CodeEntryAlignment);
-  StubGenStubId stub_id = dilithiumMontMulByConstant_id;
+  StubId stub_id = StubId::stubgen_dilithiumMontMulByConstant_id;
   StubCodeMark mark(stubgen, stub_id);
   address start = __ pc();
   __ enter();
@@ -773,7 +773,7 @@ static address generate_dilithiumDecomposePoly_avx512(StubGenerator *stubgen,
                                                       MacroAssembler *_masm) {
   __ align(CodeEntryAlignment);
-  StubGenStubId stub_id = dilithiumDecomposePoly_id;
+  StubId stub_id = StubId::stubgen_dilithiumDecomposePoly_id;
   StubCodeMark mark(stubgen, stub_id);
   address start = __ pc();
   __ enter();
diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_exp.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_exp.cpp
index b48ed80788b..299a37b88fd 100644
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64_exp.cpp
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_exp.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2016, 2024, Intel Corporation. All rights reserved.
+* Copyright (c) 2016, 2025, Intel Corporation. All rights reserved.
 * Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
 * Intel Math Library (LIBM) Source Code
 *
@@ -165,7 +165,7 @@ ATTRIBUTE_ALIGNED(4) static const juint _INF[] =
 #define __ _masm->
 address StubGenerator::generate_libmExp() {
-  StubGenStubId stub_id = StubGenStubId::dexp_id;
+  StubId stub_id = StubId::stubgen_dexp_id;
   StubCodeMark mark(this, stub_id);
   address start = __ pc();
diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_fmod.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_fmod.cpp
index 958f65c883a..b1eaa4b8031 100644
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64_fmod.cpp
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_fmod.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2023, 2024, Intel Corporation. All rights reserved.
+ * Copyright (c) 2023, 2025, Intel Corporation. All rights reserved.
 * Intel Math Library (LIBM) Source Code
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -73,7 +73,7 @@ ATTRIBUTE_ALIGNED(32) static const uint64_t CONST_e307[] = {
 address StubGenerator::generate_libmFmod() {
   __ align(CodeEntryAlignment);
-  StubGenStubId stub_id = StubGenStubId::fmod_id;
+  StubId stub_id = StubId::stubgen_fmod_id;
   StubCodeMark mark(this, stub_id);
   address start = __ pc();
   __ enter(); // required for proper stackwalking of RuntimeStub frame
diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_ghash.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_ghash.cpp
index 6d1f9fbd5a1..37485bac1d1 100644
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64_ghash.cpp
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_ghash.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2019, 2024, Intel Corporation. All rights reserved.
+* Copyright (c) 2019, 2025, Intel Corporation. All rights reserved.
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@@ -82,7 +82,7 @@ void StubGenerator::generate_ghash_stubs() {
 address StubGenerator::generate_ghash_processBlocks() {
   __ align(CodeEntryAlignment);
   Label L_ghash_loop, L_exit;
-  StubGenStubId stub_id = StubGenStubId::ghash_processBlocks_id;
+  StubId stub_id = StubId::stubgen_ghash_processBlocks_id;
   StubCodeMark mark(this, stub_id);
   address start = __ pc();
@@ -219,7 +219,7 @@ address StubGenerator::generate_ghash_processBlocks() {
 address StubGenerator::generate_avx_ghash_processBlocks() {
   __ align(CodeEntryAlignment);
-  StubGenStubId stub_id = StubGenStubId::ghash_processBlocks_id;
+  StubId stub_id = StubId::stubgen_ghash_processBlocks_id;
   StubCodeMark mark(this, stub_id);
   address start = __ pc();
diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_kyber.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_kyber.cpp
index 91c005e92de..6e0af2563fa 100644
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64_kyber.cpp
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_kyber.cpp
@@ -368,7 +368,7 @@ static int xmm29_29[] = {29, 29, 29, 29};
 address generate_kyberNtt_avx512(StubGenerator *stubgen, MacroAssembler *_masm) {
   __ align(CodeEntryAlignment);
-  StubGenStubId stub_id = kyberNtt_id;
+  StubId stub_id = StubId::stubgen_kyberNtt_id;
   StubCodeMark mark(stubgen, stub_id);
   address start = __ pc();
   __ enter();
@@ -465,7 +465,7 @@ address generate_kyberInverseNtt_avx512(StubGenerator *stubgen,
                                         MacroAssembler *_masm) {
   __ align(CodeEntryAlignment);
-  StubGenStubId stub_id = kyberInverseNtt_id;
+  StubId stub_id = StubId::stubgen_kyberInverseNtt_id;
   StubCodeMark mark(stubgen, stub_id);
   address start = __ pc();
   __ enter();
@@ -590,7 +590,7 @@ address generate_kyberNttMult_avx512(StubGenerator *stubgen,
                                      MacroAssembler *_masm) {
   __ align(CodeEntryAlignment);
-  StubGenStubId stub_id = kyberNttMult_id;
+  StubId stub_id = StubId::stubgen_kyberNttMult_id;
   StubCodeMark mark(stubgen, stub_id);
   address start = __ pc();
   __ enter();
@@ -710,7 +710,7 @@ address generate_kyberAddPoly_2_avx512(StubGenerator *stubgen,
                                        MacroAssembler *_masm) {
   __ align(CodeEntryAlignment);
-  StubGenStubId stub_id = kyberAddPoly_2_id;
+  StubId stub_id = StubId::stubgen_kyberAddPoly_2_id;
   StubCodeMark mark(stubgen, stub_id);
   address start = __ pc();
   __ enter();
@@ -756,7 +756,7 @@ address generate_kyberAddPoly_3_avx512(StubGenerator *stubgen,
                                        MacroAssembler *_masm) {
   __ align(CodeEntryAlignment);
-  StubGenStubId stub_id = kyberAddPoly_3_id;
+  StubId stub_id = StubId::stubgen_kyberAddPoly_3_id;
   StubCodeMark mark(stubgen, stub_id);
   address start = __ pc();
   __ enter();
@@ -810,7 +810,7 @@ address generate_kyber12To16_avx512(StubGenerator *stubgen,
                                     MacroAssembler *_masm) {
   __ align(CodeEntryAlignment);
-  StubGenStubId stub_id = kyber12To16_id;
+  StubId stub_id = StubId::stubgen_kyber12To16_id;
   StubCodeMark mark(stubgen, stub_id);
   address start = __ pc();
   __ enter();
@@ -907,7 +907,7 @@ address generate_kyberBarrettReduce_avx512(StubGenerator *stubgen,
                                            MacroAssembler *_masm) {
   __ align(CodeEntryAlignment);
-  StubGenStubId stub_id = kyberBarrettReduce_id;
+  StubId stub_id = StubId::stubgen_kyberBarrettReduce_id;
   StubCodeMark mark(stubgen, stub_id);
   address start = __ pc();
   __ enter();
diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_log.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_log.cpp
index 6aacfaaea03..8665aec5903 100644
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64_log.cpp
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_log.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2016, 2024, Intel Corporation. All rights reserved.
+* Copyright (c) 2016, 2025, Intel Corporation. All rights reserved.
 * Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
 * Intel Math Library (LIBM) Source Code
 *
@@ -176,7 +176,7 @@ ATTRIBUTE_ALIGNED(16) static const juint _coeff[] =
 #define __ _masm->
 address StubGenerator::generate_libmLog() {
-  StubGenStubId stub_id = StubGenStubId::dlog_id;
+  StubId stub_id = StubId::stubgen_dlog_id;
   StubCodeMark mark(this, stub_id);
   address start = __ pc();
@@ -515,7 +515,7 @@ ATTRIBUTE_ALIGNED(16) static const juint _coeff_log10[] =
 };
 address StubGenerator::generate_libmLog10() {
-  StubGenStubId stub_id = StubGenStubId::dlog10_id;
+  StubId stub_id = StubId::stubgen_dlog10_id;
   StubCodeMark mark(this, stub_id);
   address start = __ pc();
diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_poly1305.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_poly1305.cpp
index dfd4ca21dbd..461422b8afd 100644
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64_poly1305.cpp
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_poly1305.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022, 2024, Intel Corporation. All rights reserved.
+ * Copyright (c) 2022, 2025, Intel Corporation. All rights reserved.
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@@ -910,7 +910,7 @@ void StubGenerator::poly1305_process_blocks_avx512(
 // and accumulator will point to the current accumulator value
 address StubGenerator::generate_poly1305_processBlocks() {
   __ align(CodeEntryAlignment);
-  StubGenStubId stub_id = StubGenStubId::poly1305_processBlocks_id;
+  StubId stub_id = StubId::stubgen_poly1305_processBlocks_id;
   StubCodeMark mark(this, stub_id);
   address start = __ pc();
   __ enter();
diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_poly_mont.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_poly_mont.cpp
index d142414be5e..d0ac050dbf9 100644
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64_poly_mont.cpp
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_poly_mont.cpp
@@ -559,7 +559,7 @@ void montgomeryMultiplyAVX2(const Register aLimbs, const Register bLimbs, const
 address StubGenerator::generate_intpoly_montgomeryMult_P256() {
   __ align(CodeEntryAlignment);
-  StubGenStubId stub_id = StubGenStubId::intpoly_montgomeryMult_P256_id;
+  StubId stub_id = StubId::stubgen_intpoly_montgomeryMult_P256_id;
   StubCodeMark mark(this, stub_id);
   address start = __ pc();
   __ enter();
@@ -681,7 +681,7 @@ address StubGenerator::generate_intpoly_assign() {
   // Special Cases 5, 10, 14, 16, 19
   __ align(CodeEntryAlignment);
-  StubGenStubId stub_id = StubGenStubId::intpoly_assign_id;
+  StubId stub_id = StubId::stubgen_intpoly_assign_id;
   StubCodeMark mark(this, stub_id);
   address start = __ pc();
   __ enter();
diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_pow.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_pow.cpp
index 4029e53d1b1..22ac8059458 100644
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64_pow.cpp
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_pow.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2016, 2024, Intel Corporation. All rights reserved.
+* Copyright (c) 2016, 2025, Intel Corporation. All rights reserved.
 * Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
 * Intel Math Library (LIBM) Source Code
 *
@@ -759,7 +759,7 @@ ATTRIBUTE_ALIGNED(8) static const juint _DOUBLE0DOT5[] = {
 #define __ _masm->
 address StubGenerator::generate_libmPow() {
-  StubGenStubId stub_id = StubGenStubId::dpow_id;
+  StubId stub_id = StubId::stubgen_dpow_id;
   StubCodeMark mark(this, stub_id);
   address start = __ pc();
diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_sha3.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_sha3.cpp
index 9f13233f1d2..1e245952118 100644
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64_sha3.cpp
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_sha3.cpp
@@ -90,15 +90,15 @@ static address permsAndRotsAddr() {
 //  c_rarg3 - int     offset
 //  c_rarg4 - int     limit
 //
-static address generate_sha3_implCompress(StubGenStubId stub_id,
+static address generate_sha3_implCompress(StubId stub_id,
                                           StubGenerator *stubgen, MacroAssembler *_masm) {
   bool multiBlock;
   switch(stub_id) {
-  case sha3_implCompress_id:
+  case StubId::stubgen_sha3_implCompress_id:
     multiBlock = false;
     break;
-  case sha3_implCompressMB_id:
+  case StubId::stubgen_sha3_implCompressMB_id:
     multiBlock = true;
     break;
   default:
@@ -327,7 +327,7 @@ static address generate_sha3_implCompress(StubGenStubId stub_id,
 // two computations are executed interleaved.
 static address generate_double_keccak(StubGenerator *stubgen, MacroAssembler *_masm) {
   __ align(CodeEntryAlignment);
-  StubGenStubId stub_id = double_keccak_id;
+  StubId stub_id = StubId::stubgen_double_keccak_id;
   StubCodeMark mark(stubgen, stub_id);
   address start = __ pc();
@@ -501,10 +501,10 @@ static address generate_double_keccak(StubGenerator *stubgen, MacroAssembler *_m
 void StubGenerator::generate_sha3_stubs() {
   if (UseSHA3Intrinsics) {
     StubRoutines::_sha3_implCompress =
-      generate_sha3_implCompress(StubGenStubId::sha3_implCompress_id, this, _masm);
+      generate_sha3_implCompress(StubId::stubgen_sha3_implCompress_id, this, _masm);
     StubRoutines::_double_keccak = generate_double_keccak(this, _masm);
     StubRoutines::_sha3_implCompressMB =
-      generate_sha3_implCompress(StubGenStubId::sha3_implCompressMB_id, this, _masm);
+      generate_sha3_implCompress(StubId::stubgen_sha3_implCompressMB_id, this, _masm);
   }
 }
diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_sin.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_sin.cpp
index 362eeb95e41..67ac2fa6b87 100644
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64_sin.cpp
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_sin.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2024, Intel Corporation. All rights reserved.
+ * Copyright (c) 2016, 2025, Intel Corporation. All rights reserved.
 * Intel Math Library (LIBM) Source Code
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -180,7 +180,7 @@ ATTRIBUTE_ALIGNED(8) static const juint _ALL_ONES[] =
 #define __ _masm->
 address StubGenerator::generate_libmSin() {
-  StubGenStubId stub_id = StubGenStubId::dsin_id;
+  StubId stub_id = StubId::stubgen_dsin_id;
   StubCodeMark mark(this, stub_id);
   address start = __ pc();
diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_tan.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_tan.cpp
index 46cb0801a81..f73885b18c2 100644
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64_tan.cpp
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_tan.cpp
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2016, 2024, Intel Corporation. All rights reserved.
+* Copyright (c) 2016, 2025, Intel Corporation. All rights reserved.
 * Intel Math Library (LIBM) Source Code
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -455,7 +455,7 @@ ATTRIBUTE_ALIGNED(8) static const juint _QQ_2_tan[] =
 #define __ _masm->
 address StubGenerator::generate_libmTan() {
-  StubGenStubId stub_id = StubGenStubId::dtan_id;
+  StubId stub_id = StubId::stubgen_dtan_id;
   StubCodeMark mark(this, stub_id);
   address start = __ pc();
diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64_tanh.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64_tanh.cpp
index 52ce2731b1f..372c4898f37 100644
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64_tanh.cpp
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64_tanh.cpp
@@ -302,7 +302,7 @@ ATTRIBUTE_ALIGNED(16) static const juint _T2_neg_f[] =
 #define __ _masm->
 address StubGenerator::generate_libmTanh() {
-  StubGenStubId stub_id = StubGenStubId::dtanh_id;
+  StubId stub_id = StubId::stubgen_dtanh_id;
   StubCodeMark mark(this, stub_id);
   address start = __ pc();
diff --git a/src/hotspot/cpu/zero/sharedRuntime_zero.cpp b/src/hotspot/cpu/zero/sharedRuntime_zero.cpp
index 60a873ab31f..2c5fb717c40 100644
--- a/src/hotspot/cpu/zero/sharedRuntime_zero.cpp
+++ b/src/hotspot/cpu/zero/sharedRuntime_zero.cpp
@@ -108,15 +108,15 @@ void SharedRuntime::generate_deopt_blob() {
   _deopt_blob = generate_empty_deopt_blob();
 }
-SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address call_ptr) {
+SafepointBlob* SharedRuntime::generate_handler_blob(StubId id, address call_ptr) {
   return generate_empty_safepoint_blob();
 }
-RuntimeStub* SharedRuntime::generate_resolve_blob(SharedStubId id, address destination) {
+RuntimeStub* SharedRuntime::generate_resolve_blob(StubId id, address destination) {
   return generate_empty_runtime_stub();
 }
-RuntimeStub* SharedRuntime::generate_throw_exception(SharedStubId id, address runtime_entry) {
+RuntimeStub* SharedRuntime::generate_throw_exception(StubId id, address runtime_entry) {
   return generate_empty_runtime_stub();
 }
diff --git a/src/hotspot/cpu/zero/stubGenerator_zero.cpp b/src/hotspot/cpu/zero/stubGenerator_zero.cpp
index 65fe33dcba4..bc273804266 100644
--- a/src/hotspot/cpu/zero/stubGenerator_zero.cpp
+++ b/src/hotspot/cpu/zero/stubGenerator_zero.cpp
@@ -215,31 +215,31 @@ class StubGenerator: public StubCodeGenerator {
   }
  public:
-  StubGenerator(CodeBuffer* code, StubGenBlobId blob_id) : StubCodeGenerator(code, blob_id) {
+  StubGenerator(CodeBuffer* code, BlobId blob_id) : StubCodeGenerator(code, blob_id) {
     switch(blob_id) {
-    case preuniverse_id:
+    case BlobId::stubgen_preuniverse_id:
       generate_preuniverse_stubs();
       break;
-    case initial_id:
+    case BlobId::stubgen_initial_id:
      generate_initial_stubs();
      break;
-    case continuation_id:
+    case BlobId::stubgen_continuation_id:
      generate_continuation_stubs();
      break;
-    case compiler_id:
+    case BlobId::stubgen_compiler_id:
      // do nothing
      break;
-    case final_id:
+    case BlobId::stubgen_final_id:
      generate_final_stubs();
      break;
    default:
-      fatal("unexpected blob id: %d", blob_id);
+      fatal("unexpected blob id: %s", StubInfo::name(blob_id));
      break;
    };
  }
};
-void StubGenerator_generate(CodeBuffer* code, StubGenBlobId blob_id) {
+void StubGenerator_generate(CodeBuffer* code, BlobId blob_id) {
  StubGenerator g(code, blob_id);
}
diff --git a/src/hotspot/share/c1/c1_CodeStubs.hpp b/src/hotspot/share/c1/c1_CodeStubs.hpp
index a885e4c0816..5d1c51bdbbf 100644
--- a/src/hotspot/share/c1/c1_CodeStubs.hpp
+++ b/src/hotspot/share/c1/c1_CodeStubs.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -264,10 +264,10 @@ class NewInstanceStub: public CodeStub {
   LIR_Opr _klass_reg;
   LIR_Opr _result;
   CodeEmitInfo* _info;
-  C1StubId _stub_id;
+  StubId _stub_id;
  public:
-  NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, C1StubId stub_id);
+  NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, StubId stub_id);
   virtual void emit_code(LIR_Assembler* e);
   virtual CodeEmitInfo* info() const { return _info; }
   virtual void visit(LIR_OpVisitState* visitor) {
@@ -515,11 +515,11 @@ public:
 class SimpleExceptionStub: public CodeStub {
  private:
   LIR_Opr _obj;
-  C1StubId _stub;
+  StubId _stub;
   CodeEmitInfo* _info;
  public:
-  SimpleExceptionStub(C1StubId stub, LIR_Opr obj, CodeEmitInfo* info):
+  SimpleExceptionStub(StubId stub, LIR_Opr obj, CodeEmitInfo* info):
     _obj(obj), _stub(stub), _info(info) {
     FrameMap* f = Compilation::current()->frame_map();
     f->update_reserved_argument_area_size(2 * BytesPerWord);
   }
@@ -546,7 +546,7 @@ class SimpleExceptionStub: public CodeStub {
 class ArrayStoreExceptionStub: public SimpleExceptionStub {
  public:
-  ArrayStoreExceptionStub(LIR_Opr obj, CodeEmitInfo* info): SimpleExceptionStub(C1StubId::throw_array_store_exception_id, obj, info) {}
+  ArrayStoreExceptionStub(LIR_Opr obj, CodeEmitInfo* info): SimpleExceptionStub(StubId::c1_throw_array_store_exception_id, obj, info) {}
 #ifndef PRODUCT
   virtual void print_name(outputStream* out) const { out->print("ArrayStoreExceptionStub"); }
 #endif // PRODUCT
diff --git a/src/hotspot/share/c1/c1_LIRGenerator.cpp b/src/hotspot/share/c1/c1_LIRGenerator.cpp
index d0d1670f3b4..b3ce7242b4f 100644
--- a/src/hotspot/share/c1/c1_LIRGenerator.cpp
+++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp
@@ -656,7 +656,7 @@ void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unr
   if (UseFastNewInstance && klass->is_loaded()
       && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {
-    C1StubId stub_id = klass->is_initialized() ? C1StubId::fast_new_instance_id : C1StubId::fast_new_instance_init_check_id;
+    StubId stub_id = klass->is_initialized() ? StubId::c1_fast_new_instance_id : StubId::c1_fast_new_instance_init_check_id;
     CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);
@@ -667,7 +667,7 @@ void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unr
     __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
                        oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
   } else {
-    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, C1StubId::new_instance_id);
+    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, StubId::c1_new_instance_id);
     __ branch(lir_cond_always, slow_path);
     __ branch_destination(slow_path->continuation());
   }
@@ -1394,7 +1394,7 @@ void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
   args->append(receiver.result());
   CodeEmitInfo* info = state_for(x, x->state());
   call_runtime(&signature, args,
-               CAST_FROM_FN_PTR(address, Runtime1::entry_for(C1StubId::register_finalizer_id)),
+               CAST_FROM_FN_PTR(address, Runtime1::entry_for(StubId::c1_register_finalizer_id)),
                voidType, info);
   set_no_result(x);
diff --git a/src/hotspot/share/c1/c1_Runtime1.cpp b/src/hotspot/share/c1/c1_Runtime1.cpp
index 57d22a38324..b230351c5e1 100644
--- a/src/hotspot/share/c1/c1_Runtime1.cpp
+++ b/src/hotspot/share/c1/c1_Runtime1.cpp
@@ -62,6 +62,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stackWatermarkSet.hpp"
+#include "runtime/stubInfo.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/vframe.inline.hpp"
 #include "runtime/vframeArray.hpp"
@@ -103,14 +104,7 @@ void StubAssembler::set_num_rt_args(int args) {
 }
 // Implementation of Runtime1
-
-CodeBlob* Runtime1::_blobs[(int)C1StubId::NUM_STUBIDS];
-
-#define C1_BLOB_NAME_DEFINE(name) "C1 Runtime " # name "_blob",
-const char *Runtime1::_blob_names[] = {
-  C1_STUBS_DO(C1_BLOB_NAME_DEFINE)
-};
-#undef C1_STUB_NAME_DEFINE
+CodeBlob* Runtime1::_blobs[StubInfo::C1_STUB_COUNT];
 #ifndef PRODUCT
 // statistics
@@ -188,19 +182,21 @@ static void deopt_caller(JavaThread* current) {
   }
 }
-class C1StubIdStubAssemblerCodeGenClosure: public StubAssemblerCodeGenClosure {
+class C1StubAssemblerCodeGenClosure: public StubAssemblerCodeGenClosure {
  private:
-  C1StubId _id;
+  StubId _id;
  public:
-  C1StubIdStubAssemblerCodeGenClosure(C1StubId id) : _id(id) {}
+  C1StubAssemblerCodeGenClosure(StubId id) : _id(id) {
+    assert(StubInfo::is_c1(_id), "not a c1 stub id %s", StubInfo::name(_id));
+  }
   virtual OopMapSet* generate_code(StubAssembler* sasm) {
     return Runtime1::generate_code_for(_id, sasm);
   }
 };
-CodeBlob* Runtime1::generate_blob(BufferBlob* buffer_blob, C1StubId id, const char* name, bool expect_oop_map, StubAssemblerCodeGenClosure* cl) {
-  if ((int)id >= 0) {
-    CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::C1Blob, (uint)id, name, 0, nullptr);
+CodeBlob* Runtime1::generate_blob(BufferBlob* buffer_blob, StubId id, const char* name, bool expect_oop_map, StubAssemblerCodeGenClosure* cl) {
+  if (id != StubId::NO_STUBID) {
+    CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::C1Blob, StubInfo::blob(id));
     if (blob != nullptr) {
       return blob;
     }
@@ -240,61 +236,63 @@ CodeBlob* Runtime1::generate_blob(BufferBlob* buffer_blob, C1StubId id, const ch
                                must_gc_arguments, false /* alloc_fail_is_fatal */ );
   if (blob != nullptr && (int)id >= 0) {
-    AOTCodeCache::store_code_blob(*blob, AOTCodeEntry::C1Blob, (uint)id, name, 0, nullptr);
+    AOTCodeCache::store_code_blob(*blob, AOTCodeEntry::C1Blob, StubInfo::blob(id));
   }
   return blob;
 }
-bool Runtime1::generate_blob_for(BufferBlob* buffer_blob, C1StubId id) { - assert(C1StubId::NO_STUBID < id && id < C1StubId::NUM_STUBIDS, "illegal stub id"); +bool Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubId id) { + assert(StubInfo::is_c1(id), "not a c1 stub %s", StubInfo::name(id)); bool expect_oop_map = true; #ifdef ASSERT // Make sure that stubs that need oopmaps have them switch (id) { // These stubs don't need to have an oopmap - case C1StubId::dtrace_object_alloc_id: - case C1StubId::slow_subtype_check_id: - case C1StubId::fpu2long_stub_id: - case C1StubId::unwind_exception_id: - case C1StubId::counter_overflow_id: - case C1StubId::is_instance_of_id: + case StubId::c1_dtrace_object_alloc_id: + case StubId::c1_slow_subtype_check_id: + case StubId::c1_fpu2long_stub_id: + case StubId::c1_unwind_exception_id: + case StubId::c1_counter_overflow_id: + case StubId::c1_is_instance_of_id: expect_oop_map = false; break; default: break; } #endif - C1StubIdStubAssemblerCodeGenClosure cl(id); + C1StubAssemblerCodeGenClosure cl(id); CodeBlob* blob = generate_blob(buffer_blob, id, name_for(id), expect_oop_map, &cl); // install blob - _blobs[(int)id] = blob; + int idx = StubInfo::c1_offset(id); // will assert on non-c1 id + _blobs[idx] = blob; return blob != nullptr; } bool Runtime1::initialize(BufferBlob* blob) { // platform-dependent initialization initialize_pd(); - // generate stubs - int limit = (int)C1StubId::NUM_STUBIDS; - for (int id = 0; id <= (int)C1StubId::forward_exception_id; id++) { - if (!generate_blob_for(blob, (C1StubId) id)) { + // iterate blobs in C1 group and generate a single stub per blob + StubId id = StubInfo::stub_base(StubGroup::C1); + StubId limit = StubInfo::next(StubInfo::stub_max(StubGroup::C1)); + for (; id != limit; id = StubInfo::next(id)) { + if (!generate_blob_for(blob, id)) { return false; } - } - AOTCodeCache::init_early_c1_table(); - for (int id = (int)C1StubId::forward_exception_id+1; id < limit; id++) { - if (!generate_blob_for(blob, (C1StubId) id)) { - return false; + if (id == StubId::c1_forward_exception_id) { + // publish early c1 stubs at this point so later stubs can refer to them + AOTCodeCache::init_early_c1_table(); } } // printing #ifndef PRODUCT if (PrintSimpleStubs) { ResourceMark rm; - for (int id = 0; id < limit; id++) { - _blobs[id]->print(); - if (_blobs[id]->oop_maps() != nullptr) { - _blobs[id]->oop_maps()->print(); + id = StubInfo::stub_base(StubGroup::C1); + for (; id != limit; id = StubInfo::next(id)) { + CodeBlob* blob = blob_for(id); + blob->print(); + if (blob->oop_maps() != nullptr) { + blob->oop_maps()->print(); } } } @@ -303,22 +301,22 @@ bool Runtime1::initialize(BufferBlob* blob) { return bs->generate_c1_runtime_stubs(blob); } -CodeBlob* Runtime1::blob_for(C1StubId id) { - assert(C1StubId::NO_STUBID < id && id < C1StubId::NUM_STUBIDS, "illegal stub id"); - return _blobs[(int)id]; +CodeBlob* Runtime1::blob_for(StubId id) { + int idx = StubInfo::c1_offset(id); // will assert on non-c1 id + return _blobs[idx]; } -const char* Runtime1::name_for(C1StubId id) { - assert(C1StubId::NO_STUBID < id && id < C1StubId::NUM_STUBIDS, "illegal stub id"); - return _blob_names[(int)id]; +const char* Runtime1::name_for(StubId id) { + return StubInfo::name(id); } const char* Runtime1::name_for_address(address entry) { - int limit = (int)C1StubId::NUM_STUBIDS; - for (int i = 0; i < limit; i++) { - C1StubId id = (C1StubId)i; - if (entry == entry_for(id)) return name_for(id); + // iterate stubs starting from C1 group base + StubId id = 
StubInfo::stub_base(StubGroup::C1); + StubId limit = StubInfo::next(StubInfo::stub_max(StubGroup::C1)); + for (; id != limit; id = StubInfo::next(id)) { + if (entry == entry_for(id)) return StubInfo::name(id); } #define FUNCTION_CASE(a, f) \ @@ -450,7 +448,7 @@ JRT_ENTRY(void, Runtime1::new_multi_array(JavaThread* current, Klass* klass, int JRT_END -JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* current, C1StubId id)) +JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* current, StubId id)) tty->print_cr("Runtime1::entry_for(%d) returned unimplemented entry point", (int)id); JRT_END @@ -550,8 +548,8 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* c // This function is called when we are about to throw an exception. Therefore, // we have to poll the stack watermark barrier to make sure that not yet safe // stack frames are made safe before returning into them. - if (current->last_frame().cb() == Runtime1::blob_for(C1StubId::handle_exception_from_callee_id)) { - // The C1StubId::handle_exception_from_callee_id handler is invoked after the + if (current->last_frame().cb() == Runtime1::blob_for(StubId::c1_handle_exception_from_callee_id)) { + // The StubId::c1_handle_exception_from_callee_id handler is invoked after the // frame has been unwound. It instead builds its own stub frame, to call the // runtime. But the throwing frame has already been unwound here. StackWatermarkSet::after_unwind(current); @@ -947,7 +945,7 @@ static Klass* resolve_field_return_klass(const methodHandle& caller, int bci, TR // Therefore, if there is any chance of a race condition, we try to // patch only naturally aligned words, as single, full-word writes. -JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, C1StubId stub_id )) +JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, StubId stub_id )) #ifndef PRODUCT if (PrintC1Statistics) { _patch_code_slowcase_cnt++; @@ -984,9 +982,9 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, C1StubId stub_id )) Handle mirror(current, nullptr); // oop needed by load_mirror_patching code Handle appendix(current, nullptr); // oop needed by appendix_patching code bool load_klass_or_mirror_patch_id = - (stub_id == C1StubId::load_klass_patching_id || stub_id == C1StubId::load_mirror_patching_id); + (stub_id == StubId::c1_load_klass_patching_id || stub_id == StubId::c1_load_mirror_patching_id); - if (stub_id == C1StubId::access_field_patching_id) { + if (stub_id == StubId::c1_access_field_patching_id) { Bytecode_field field_access(caller_method, bci); fieldDescriptor result; // initialize class if needed @@ -1069,7 +1067,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, C1StubId stub_id )) default: fatal("unexpected bytecode for load_klass_or_mirror_patch_id"); } load_klass = k; - } else if (stub_id == C1StubId::load_appendix_patching_id) { + } else if (stub_id == StubId::c1_load_appendix_patching_id) { Bytecode_invoke bytecode(caller_method, bci); Bytecodes::Code bc = bytecode.invoke_code(); @@ -1153,7 +1151,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, C1StubId stub_id )) if (TracePatching) { ttyLocker ttyl; tty->print_cr(" Patching %s at bci %d at address " INTPTR_FORMAT " (%s)", Bytecodes::name(code), bci, - p2i(instr_pc), (stub_id == C1StubId::access_field_patching_id) ? "field" : "klass"); + p2i(instr_pc), (stub_id == StubId::c1_access_field_patching_id) ? 
"field" : "klass"); nmethod* caller_code = CodeCache::find_nmethod(caller_frame.pc()); assert(caller_code != nullptr, "nmethod not found"); @@ -1169,7 +1167,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, C1StubId stub_id )) } // depending on the code below, do_patch says whether to copy the patch body back into the nmethod bool do_patch = true; - if (stub_id == C1StubId::access_field_patching_id) { + if (stub_id == StubId::c1_access_field_patching_id) { // The offset may not be correct if the class was not loaded at code generation time. // Set it now. NativeMovRegMem* n_move = nativeMovRegMem_at(copy_buff); @@ -1195,7 +1193,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, C1StubId stub_id )) assert(n_copy->data() == 0 || n_copy->data() == (intptr_t)Universe::non_oop_word(), "illegal init value"); - if (stub_id == C1StubId::load_klass_patching_id) { + if (stub_id == StubId::c1_load_klass_patching_id) { assert(load_klass != nullptr, "klass not set"); n_copy->set_data((intx) (load_klass)); } else { @@ -1207,7 +1205,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, C1StubId stub_id )) Disassembler::decode(copy_buff, copy_buff + *byte_count, tty); } } - } else if (stub_id == C1StubId::load_appendix_patching_id) { + } else if (stub_id == StubId::c1_load_appendix_patching_id) { NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff); assert(n_copy->data() == 0 || n_copy->data() == (intptr_t)Universe::non_oop_word(), @@ -1226,7 +1224,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, C1StubId stub_id )) // first replace the tail, then the call #ifdef ARM if((load_klass_or_mirror_patch_id || - stub_id == C1StubId::load_appendix_patching_id) && + stub_id == StubId::c1_load_appendix_patching_id) && nativeMovConstReg_at(copy_buff)->is_pc_relative()) { nmethod* nm = CodeCache::find_nmethod(instr_pc); address addr = nullptr; @@ -1234,13 +1232,13 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, C1StubId stub_id )) RelocIterator mds(nm, copy_buff, copy_buff + 1); while (mds.next()) { if (mds.type() == relocInfo::oop_type) { - assert(stub_id == C1StubId::load_mirror_patching_id || - stub_id == C1StubId::load_appendix_patching_id, "wrong stub id"); + assert(stub_id == StubId::c1_load_mirror_patching_id || + stub_id == StubId::c1_load_appendix_patching_id, "wrong stub id"); oop_Relocation* r = mds.oop_reloc(); addr = (address)r->oop_addr(); break; } else if (mds.type() == relocInfo::metadata_type) { - assert(stub_id == C1StubId::load_klass_patching_id, "wrong stub id"); + assert(stub_id == StubId::c1_load_klass_patching_id, "wrong stub id"); metadata_Relocation* r = mds.metadata_reloc(); addr = (address)r->metadata_addr(); break; @@ -1263,9 +1261,9 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, C1StubId stub_id )) NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff); if (load_klass_or_mirror_patch_id || - stub_id == C1StubId::load_appendix_patching_id) { + stub_id == StubId::c1_load_appendix_patching_id) { relocInfo::relocType rtype = - (stub_id == C1StubId::load_klass_patching_id) ? + (stub_id == StubId::c1_load_klass_patching_id) ? 
relocInfo::metadata_type : relocInfo::oop_type; // update relocInfo to metadata @@ -1299,9 +1297,9 @@ JRT_END #else // DEOPTIMIZE_WHEN_PATCHING -static bool is_patching_needed(JavaThread* current, C1StubId stub_id) { - if (stub_id == C1StubId::load_klass_patching_id || - stub_id == C1StubId::load_mirror_patching_id) { +static bool is_patching_needed(JavaThread* current, StubId stub_id) { + if (stub_id == StubId::c1_load_klass_patching_id || + stub_id == StubId::c1_load_mirror_patching_id) { // last java frame on stack vframeStream vfst(current, true); assert(!vfst.at_end(), "Java frame must exist"); @@ -1330,7 +1328,7 @@ static bool is_patching_needed(JavaThread* current, C1StubId stub_id) { return true; } -void Runtime1::patch_code(JavaThread* current, C1StubId stub_id) { +void Runtime1::patch_code(JavaThread* current, StubId stub_id) { #ifndef PRODUCT if (PrintC1Statistics) { _patch_code_slowcase_cnt++; @@ -1385,7 +1383,7 @@ int Runtime1::move_klass_patching(JavaThread* current) { { // Enter VM mode ResetNoHandleMark rnhm; - patch_code(current, C1StubId::load_klass_patching_id); + patch_code(current, StubId::c1_load_klass_patching_id); } // Back in JAVA, use no oops DON'T safepoint @@ -1402,7 +1400,7 @@ int Runtime1::move_mirror_patching(JavaThread* current) { { // Enter VM mode ResetNoHandleMark rnhm; - patch_code(current, C1StubId::load_mirror_patching_id); + patch_code(current, StubId::c1_load_mirror_patching_id); } // Back in JAVA, use no oops DON'T safepoint @@ -1419,7 +1417,7 @@ int Runtime1::move_appendix_patching(JavaThread* current) { { // Enter VM mode ResetNoHandleMark rnhm; - patch_code(current, C1StubId::load_appendix_patching_id); + patch_code(current, StubId::c1_load_appendix_patching_id); } // Back in JAVA, use no oops DON'T safepoint @@ -1446,7 +1444,7 @@ int Runtime1::access_field_patching(JavaThread* current) { { // Enter VM mode ResetNoHandleMark rnhm; - patch_code(current, C1StubId::access_field_patching_id); + patch_code(current, StubId::c1_access_field_patching_id); } // Back in JAVA, use no oops DON'T safepoint diff --git a/src/hotspot/share/c1/c1_Runtime1.hpp b/src/hotspot/share/c1/c1_Runtime1.hpp index 9912b6b515e..6fa17e53c19 100644 --- a/src/hotspot/share/c1/c1_Runtime1.hpp +++ b/src/hotspot/share/c1/c1_Runtime1.hpp @@ -30,6 +30,7 @@ #include "interpreter/interpreter.hpp" #include "memory/allStatic.hpp" #include "runtime/stubDeclarations.hpp" +#include "runtime/stubInfo.hpp" class StubAssembler; @@ -42,16 +43,6 @@ class StubAssemblerCodeGenClosure: public Closure { virtual OopMapSet* generate_code(StubAssembler* sasm) = 0; }; -// define C1StubId enum tags: unwind_exception_id etc - -#define C1_STUB_ID_ENUM_DECLARE(name) STUB_ID_NAME(name), -enum class C1StubId :int { - NO_STUBID = -1, - C1_STUBS_DO(C1_STUB_ID_ENUM_DECLARE) - NUM_STUBIDS -}; -#undef C1_STUB_ID_ENUM_DECLARE - class Runtime1: public AllStatic { friend class ArrayCopyStub; friend class AOTCodeAddressTable; @@ -80,17 +71,16 @@ public: #endif private: - static CodeBlob* _blobs[(int)C1StubId::NUM_STUBIDS]; - static const char* _blob_names[]; + static CodeBlob* _blobs[(int)StubInfo::C1_STUB_COUNT]; // stub generation public: - static CodeBlob* generate_blob(BufferBlob* buffer_blob, C1StubId id, const char* name, bool expect_oop_map, StubAssemblerCodeGenClosure *cl); - static bool generate_blob_for(BufferBlob* blob, C1StubId id); - static OopMapSet* generate_code_for(C1StubId id, StubAssembler* sasm); + static CodeBlob* generate_blob(BufferBlob* buffer_blob, StubId id, const char* name, bool 
expect_oop_map, StubAssemblerCodeGenClosure *cl); + static bool generate_blob_for(BufferBlob* blob, StubId id); + static OopMapSet* generate_code_for(StubId id, StubAssembler* sasm); private: static OopMapSet* generate_exception_throw(StubAssembler* sasm, address target, bool has_argument); - static OopMapSet* generate_handle_exception(C1StubId id, StubAssembler* sasm); + static OopMapSet* generate_handle_exception(StubId id, StubAssembler* sasm); static void generate_unwind_exception(StubAssembler *sasm); static OopMapSet* generate_patching(StubAssembler* sasm, address target); @@ -105,7 +95,7 @@ public: static address counter_overflow(JavaThread* current, int bci, Method* method); - static void unimplemented_entry(JavaThread* current, C1StubId id); + static void unimplemented_entry(JavaThread* current, StubId id); static address exception_handler_for_pc(JavaThread* current); @@ -127,7 +117,7 @@ public: static int move_mirror_patching(JavaThread* current); static int move_appendix_patching(JavaThread* current); - static void patch_code(JavaThread* current, C1StubId stub_id); + static void patch_code(JavaThread* current, StubId stub_id); public: // initialization @@ -138,9 +128,9 @@ public: static uint runtime_blob_current_thread_offset(frame f); // stubs - static CodeBlob* blob_for (C1StubId id); - static address entry_for(C1StubId id) { return blob_for(id)->code_begin(); } - static const char* name_for (C1StubId id); + static CodeBlob* blob_for (StubId id); + static address entry_for(StubId id) { return blob_for(id)->code_begin(); } + static const char* name_for (StubId id); static const char* name_for_address(address entry); // platform might add runtime names. diff --git a/src/hotspot/share/code/aotCodeCache.cpp b/src/hotspot/share/code/aotCodeCache.cpp index 08cd05a64ed..4c080b90acc 100644 --- a/src/hotspot/share/code/aotCodeCache.cpp +++ b/src/hotspot/share/code/aotCodeCache.cpp @@ -42,6 +42,7 @@ #include "runtime/mutexLocker.hpp" #include "runtime/os.inline.hpp" #include "runtime/sharedRuntime.hpp" +#include "runtime/stubInfo.hpp" #include "runtime/stubRoutines.hpp" #include "utilities/copy.hpp" #ifdef COMPILER1 @@ -147,12 +148,15 @@ static uint32_t encode_id(AOTCodeEntry::Kind kind, int id) { if (kind == AOTCodeEntry::Adapter) { return id; } else if (kind == AOTCodeEntry::SharedBlob) { + assert(StubInfo::is_shared(static_cast<BlobId>(id)), "not a shared blob id %d", id); return id; } else if (kind == AOTCodeEntry::C1Blob) { - return (int)SharedStubId::NUM_STUBIDS + id; + assert(StubInfo::is_c1(static_cast<BlobId>(id)), "not a c1 blob id %d", id); + return id; } else { // kind must be AOTCodeEntry::C2Blob - return (int)SharedStubId::NUM_STUBIDS + COMPILER1_PRESENT((int)C1StubId::NUM_STUBIDS) + id; + assert(StubInfo::is_c2(static_cast<BlobId>(id)), "not a c2 blob id %d", id); + return id; } } @@ -899,6 +903,12 @@ bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind return true; } +bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, BlobId id, int entry_offset_count, int* entry_offsets) { + assert(AOTCodeEntry::is_blob(entry_kind), + "wrong entry kind for blob id %s", StubInfo::name(id)); + return store_code_blob(blob, entry_kind, (uint)id, StubInfo::name(id), entry_offset_count, entry_offsets); +} + CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, const char* name, int entry_offset_count, int* entry_offsets) { AOTCodeCache* cache = open_for_use(); if (cache == nullptr) { @@ -926,6 +936,12 @@ CodeBlob*
AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, uint id, c return blob; } +CodeBlob* AOTCodeCache::load_code_blob(AOTCodeEntry::Kind entry_kind, BlobId id, int entry_offset_count, int* entry_offsets) { + assert(AOTCodeEntry::is_blob(entry_kind), + "wrong entry kind for blob id %s", StubInfo::name(id)); + return load_code_blob(entry_kind, (uint)id, StubInfo::name(id), entry_offset_count, entry_offsets); +} + CodeBlob* AOTCodeReader::compile_code_blob(const char* name, int entry_offset_count, int* entry_offsets) { uint entry_position = _entry->offset(); @@ -1298,6 +1314,7 @@ void AOTCodeAddressTable::init_extrs() { SET_ADDRESS(_extrs, SharedRuntime::resolve_opt_virtual_call_C); SET_ADDRESS(_extrs, SharedRuntime::resolve_virtual_call_C); SET_ADDRESS(_extrs, SharedRuntime::resolve_static_call_C); + SET_ADDRESS(_extrs, SharedRuntime::throw_StackOverflowError); SET_ADDRESS(_extrs, SharedRuntime::throw_delayed_StackOverflowError); SET_ADDRESS(_extrs, SharedRuntime::throw_AbstractMethodError); SET_ADDRESS(_extrs, SharedRuntime::throw_IncompatibleClassChangeError); @@ -1459,8 +1476,10 @@ void AOTCodeAddressTable::init_shared_blobs() { void AOTCodeAddressTable::init_early_c1() { #ifdef COMPILER1 // Runtime1 Blobs - for (int i = 0; i <= (int)C1StubId::forward_exception_id; i++) { - C1StubId id = (C1StubId)i; + StubId id = StubInfo::stub_base(StubGroup::C1); + // include forward_exception in range we publish + StubId limit = StubInfo::next(StubId::c1_forward_exception_id); + for (; id != limit; id = StubInfo::next(id)) { if (Runtime1::blob_for(id) == nullptr) { log_info(aot, codecache, init)("C1 blob %s is missing", Runtime1::name_for(id)); continue; diff --git a/src/hotspot/share/code/aotCodeCache.hpp b/src/hotspot/share/code/aotCodeCache.hpp index fb96a580764..702ce2565df 100644 --- a/src/hotspot/share/code/aotCodeCache.hpp +++ b/src/hotspot/share/code/aotCodeCache.hpp @@ -25,6 +25,8 @@ #ifndef SHARE_CODE_AOTCODECACHE_HPP #define SHARE_CODE_AOTCODECACHE_HPP +#include "runtime/stubInfo.hpp" + /* * AOT Code Cache collects code from Code Cache and corresponding metadata * during application training run. 
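For illustration: both the Runtime1::initialize loop and the init_early_c1 loop above walk a stub group as the half-open range from StubInfo::stub_base(group) up to StubInfo::next(stub_max(group)), using a successor function on the global enum. A standalone sketch of that iteration idiom, with toy names rather than the real StubInfo API:

#include <cstdio>

enum class ToyStubId : int {
  c1_new_instance_id,       // first stub in the "c1" group
  c1_counter_overflow_id,
  c1_forward_exception_id,  // last stub in the "c1" group
  c2_uncommon_trap_id,      // first stub of the next group
  NUM_STUBIDS
};

static ToyStubId next(ToyStubId id) {
  return static_cast<ToyStubId>(static_cast<int>(id) + 1);
}
static ToyStubId stub_base() { return ToyStubId::c1_new_instance_id; }
static ToyStubId stub_max()  { return ToyStubId::c1_forward_exception_id; }

int main() {
  // Half-open range: the limit is the successor of the last group member.
  ToyStubId limit = next(stub_max());
  for (ToyStubId id = stub_base(); id != limit; id = next(id)) {
    printf("generate stub %d\n", static_cast<int>(id));
    if (id == ToyStubId::c1_forward_exception_id) {
      // the point at which the early c1 stubs could be published
      printf("publish early stubs\n");
    }
  }
  return 0;
}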
@@ -327,6 +329,7 @@ public: bool write_dbg_strings(CodeBlob& cb); #endif // PRODUCT + // save and restore API for non-enumerable code blobs static bool store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind, uint id, const char* name, @@ -338,6 +341,18 @@ public: int entry_offset_count = 0, int* entry_offsets = nullptr) NOT_CDS_RETURN_(nullptr); + // save and restore API for enumerable code blobs + static bool store_code_blob(CodeBlob& blob, + AOTCodeEntry::Kind entry_kind, + BlobId id, + int entry_offset_count = 0, + int* entry_offsets = nullptr) NOT_CDS_RETURN_(false); + + static CodeBlob* load_code_blob(AOTCodeEntry::Kind kind, + BlobId id, + int entry_offset_count = 0, + int* entry_offsets = nullptr) NOT_CDS_RETURN_(nullptr); + static uint store_entries_cnt() { if (is_on_for_dump()) { return cache()->_store_entries_cnt; diff --git a/src/hotspot/share/code/codeBlob.hpp b/src/hotspot/share/code/codeBlob.hpp index 118594c9ea1..98cc13b3859 100644 --- a/src/hotspot/share/code/codeBlob.hpp +++ b/src/hotspot/share/code/codeBlob.hpp @@ -457,6 +457,7 @@ class RuntimeStub: public RuntimeBlob { void* operator new(size_t s, unsigned size) throw(); public: + static const int ENTRY_COUNT = 1; // Creation static RuntimeStub* new_runtime_stub( const char* stub_name, @@ -559,6 +560,7 @@ class DeoptimizationBlob: public SingletonBlob { ); public: + static const int ENTRY_COUNT = 4 JVMTI_ONLY(+ 2); // Creation static DeoptimizationBlob* create( CodeBuffer* cb, @@ -689,6 +691,7 @@ class SafepointBlob: public SingletonBlob { ); public: + static const int ENTRY_COUNT = 1; // Creation static SafepointBlob* create( CodeBuffer* cb, diff --git a/src/hotspot/share/gc/g1/c1/g1BarrierSetC1.cpp b/src/hotspot/share/gc/g1/c1/g1BarrierSetC1.cpp index c4791311e9b..425be474602 100644 --- a/src/hotspot/share/gc/g1/c1/g1BarrierSetC1.cpp +++ b/src/hotspot/share/gc/g1/c1/g1BarrierSetC1.cpp @@ -218,9 +218,9 @@ class C1G1PostBarrierCodeGenClosure : public StubAssemblerCodeGenClosure { bool G1BarrierSetC1::generate_c1_runtime_stubs(BufferBlob* buffer_blob) { C1G1PreBarrierCodeGenClosure pre_code_gen_cl; C1G1PostBarrierCodeGenClosure post_code_gen_cl; - _pre_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, C1StubId::NO_STUBID, "g1_pre_barrier_slow", + _pre_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, StubId::NO_STUBID, "g1_pre_barrier_slow", false, &pre_code_gen_cl); - _post_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, C1StubId::NO_STUBID, "g1_post_barrier_slow", + _post_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, StubId::NO_STUBID, "g1_post_barrier_slow", false, &post_code_gen_cl); return _pre_barrier_c1_runtime_code_blob != nullptr && _post_barrier_c1_runtime_code_blob != nullptr; } diff --git a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp index 9f58016a6f1..ad1dca47503 100644 --- a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp +++ b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp @@ -274,7 +274,7 @@ public: bool ShenandoahBarrierSetC1::generate_c1_runtime_stubs(BufferBlob* buffer_blob) { C1ShenandoahPreBarrierCodeGenClosure pre_code_gen_cl; - _pre_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, C1StubId::NO_STUBID, + _pre_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, StubId::NO_STUBID, "shenandoah_pre_barrier_slow", false, &pre_code_gen_cl); if (_pre_barrier_c1_runtime_code_blob 
== nullptr) { @@ -282,7 +282,7 @@ bool ShenandoahBarrierSetC1::generate_c1_runtime_stubs(BufferBlob* buffer_blob) } if (ShenandoahLoadRefBarrier) { C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_strong_code_gen_cl(ON_STRONG_OOP_REF); - _load_reference_barrier_strong_rt_code_blob = Runtime1::generate_blob(buffer_blob, C1StubId::NO_STUBID, + _load_reference_barrier_strong_rt_code_blob = Runtime1::generate_blob(buffer_blob, StubId::NO_STUBID, "shenandoah_load_reference_barrier_strong_slow", false, &lrb_strong_code_gen_cl); if (_load_reference_barrier_strong_rt_code_blob == nullptr) { @@ -290,7 +290,7 @@ bool ShenandoahBarrierSetC1::generate_c1_runtime_stubs(BufferBlob* buffer_blob) } C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_strong_native_code_gen_cl(ON_STRONG_OOP_REF | IN_NATIVE); - _load_reference_barrier_strong_native_rt_code_blob = Runtime1::generate_blob(buffer_blob, C1StubId::NO_STUBID, + _load_reference_barrier_strong_native_rt_code_blob = Runtime1::generate_blob(buffer_blob, StubId::NO_STUBID, "shenandoah_load_reference_barrier_strong_native_slow", false, &lrb_strong_native_code_gen_cl); if (_load_reference_barrier_strong_native_rt_code_blob == nullptr) { @@ -298,7 +298,7 @@ bool ShenandoahBarrierSetC1::generate_c1_runtime_stubs(BufferBlob* buffer_blob) } C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_weak_code_gen_cl(ON_WEAK_OOP_REF); - _load_reference_barrier_weak_rt_code_blob = Runtime1::generate_blob(buffer_blob, C1StubId::NO_STUBID, + _load_reference_barrier_weak_rt_code_blob = Runtime1::generate_blob(buffer_blob, StubId::NO_STUBID, "shenandoah_load_reference_barrier_weak_slow", false, &lrb_weak_code_gen_cl); if (_load_reference_barrier_weak_rt_code_blob == nullptr) { @@ -306,7 +306,7 @@ bool ShenandoahBarrierSetC1::generate_c1_runtime_stubs(BufferBlob* buffer_blob) } C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_phantom_code_gen_cl(ON_PHANTOM_OOP_REF | IN_NATIVE); - _load_reference_barrier_phantom_rt_code_blob = Runtime1::generate_blob(buffer_blob, C1StubId::NO_STUBID, + _load_reference_barrier_phantom_rt_code_blob = Runtime1::generate_blob(buffer_blob, StubId::NO_STUBID, "shenandoah_load_reference_barrier_phantom_slow", false, &lrb_phantom_code_gen_cl); return (_load_reference_barrier_phantom_rt_code_blob != nullptr); diff --git a/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp b/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp index 7bd24955910..8d5d8662ebc 100644 --- a/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp +++ b/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp @@ -506,7 +506,7 @@ public: static address generate_c1_load_runtime_stub(BufferBlob* blob, DecoratorSet decorators, const char* name) { ZLoadBarrierRuntimeStubCodeGenClosure cl(decorators); - CodeBlob* const code_blob = Runtime1::generate_blob(blob, C1StubId::NO_STUBID /* stub_id */, name, false /* expect_oop_map*/, &cl); + CodeBlob* const code_blob = Runtime1::generate_blob(blob, StubId::NO_STUBID /* stub_id */, name, false /* expect_oop_map*/, &cl); return (code_blob != nullptr) ? code_blob->code_begin() : nullptr; } @@ -526,7 +526,7 @@ public: static address generate_c1_store_runtime_stub(BufferBlob* blob, bool self_healing, const char* name) { ZStoreBarrierRuntimeStubCodeGenClosure cl(self_healing); - CodeBlob* const code_blob = Runtime1::generate_blob(blob, C1StubId::NO_STUBID /* stub_id */, name, false /* expect_oop_map*/, &cl); + CodeBlob* const code_blob = Runtime1::generate_blob(blob, StubId::NO_STUBID /* stub_id */, name, false /* expect_oop_map*/, &cl); return (code_blob != nullptr) ? 
code_blob->code_begin() : nullptr; } diff --git a/src/hotspot/share/opto/compile.cpp b/src/hotspot/share/opto/compile.cpp index e524249a6cf..edb6ddef4f4 100644 --- a/src/hotspot/share/opto/compile.cpp +++ b/src/hotspot/share/opto/compile.cpp @@ -636,7 +636,7 @@ Compile::Compile(ciEnv* ci_env, ciMethod* target, int osr_bci, _ilt(nullptr), _stub_function(nullptr), _stub_name(nullptr), - _stub_id(-1), + _stub_id(StubId::NO_STUBID), _stub_entry_point(nullptr), _max_node_limit(MaxNodeLimit), _post_loop_opts_phase(false), @@ -898,7 +898,7 @@ Compile::Compile(ciEnv* ci_env, TypeFunc_generator generator, address stub_function, const char* stub_name, - int stub_id, + StubId stub_id, int is_fancy_jump, bool pass_tls, bool return_pc, @@ -964,7 +964,8 @@ Compile::Compile(ciEnv* ci_env, // try to reuse an existing stub { - CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::C2Blob, _stub_id, stub_name); + BlobId blob_id = StubInfo::blob(_stub_id); + CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::C2Blob, blob_id); if (blob != nullptr) { RuntimeStub* rs = blob->as_runtime_stub(); _stub_entry_point = rs->entry_point(); diff --git a/src/hotspot/share/opto/compile.hpp b/src/hotspot/share/opto/compile.hpp index 69244a6d178..2cada9c04c9 100644 --- a/src/hotspot/share/opto/compile.hpp +++ b/src/hotspot/share/opto/compile.hpp @@ -308,7 +308,7 @@ class Compile : public Phase { InlineTree* _ilt; // Ditto (temporary). address _stub_function; // VM entry for stub being compiled, or null const char* _stub_name; // Name of stub or adapter being compiled, or null - int _stub_id; // unique id for stub or -1 + StubId _stub_id; // unique id for stub or NO_STUBID address _stub_entry_point; // Compile code entry for generated stub, or null // Control of this compilation. @@ -572,7 +572,7 @@ public: InlineTree* ilt() const { return _ilt; } address stub_function() const { return _stub_function; } const char* stub_name() const { return _stub_name; } - int stub_id() const { return _stub_id; } + StubId stub_id() const { return _stub_id; } address stub_entry_point() const { return _stub_entry_point; } void set_stub_entry_point(address z) { _stub_entry_point = z; } @@ -1145,7 +1145,7 @@ public: // convention. 
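For illustration: replacing the raw int _stub_id field with the scoped StubId enum, as the Compile changes above do, rules out the accidental arithmetic and implicit conversions that the old -1 sentinel invited. A minimal standalone sketch under toy names (not the real Compile class):

#include <cassert>

// Toy version of the field change: a scoped enum with an explicit
// NO_STUBID sentinel replaces a raw int that used -1 as "no stub".
enum class ToyStubId : int { NO_STUBID = -1, new_instance_id, rethrow_id };

class ToyCompile {
  ToyStubId _stub_id;  // was: int _stub_id; // unique id for stub or -1
public:
  ToyCompile() : _stub_id(ToyStubId::NO_STUBID) {}
  void set_stub_id(ToyStubId id) { _stub_id = id; }
  ToyStubId stub_id() const { return _stub_id; }
};

int main() {
  ToyCompile c;
  assert(c.stub_id() == ToyStubId::NO_STUBID);
  c.set_stub_id(ToyStubId::rethrow_id);
  // c.set_stub_id(1);        // rejected: no implicit int -> enum conversion
  // int n = c.stub_id() + 1; // rejected: no implicit enum -> int conversion
  return 0;
}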
Compile(ciEnv* ci_env, const TypeFunc *(*gen)(), address stub_function, const char *stub_name, - int stub_id, int is_fancy_jump, bool pass_tls, + StubId stub_id, int is_fancy_jump, bool pass_tls, bool return_pc, DirectiveSet* directive); ~Compile(); diff --git a/src/hotspot/share/opto/output.cpp b/src/hotspot/share/opto/output.cpp index 5cf99891316..124b00a6549 100644 --- a/src/hotspot/share/opto/output.cpp +++ b/src/hotspot/share/opto/output.cpp @@ -3489,7 +3489,8 @@ void PhaseOutput::install_stub(const char* stub_name) { } else { assert(rs->is_runtime_stub(), "sanity check"); C->set_stub_entry_point(rs->entry_point()); - AOTCodeCache::store_code_blob(*rs, AOTCodeEntry::C2Blob, C->stub_id(), stub_name); + BlobId blob_id = StubInfo::blob(C->stub_id()); + AOTCodeCache::store_code_blob(*rs, AOTCodeEntry::C2Blob, blob_id); } } } diff --git a/src/hotspot/share/opto/runtime.cpp b/src/hotspot/share/opto/runtime.cpp index 89b698bfc3f..3cee7653455 100644 --- a/src/hotspot/share/opto/runtime.cpp +++ b/src/hotspot/share/opto/runtime.cpp @@ -88,7 +88,7 @@ #define C2_BLOB_FIELD_DEFINE(name, type) \ - type OptoRuntime:: BLOB_FIELD_NAME(name) = nullptr; + type* OptoRuntime:: BLOB_FIELD_NAME(name) = nullptr; #define C2_STUB_FIELD_NAME(name) _ ## name ## _Java #define C2_STUB_FIELD_DEFINE(name, f, t, r) \ address OptoRuntime:: C2_STUB_FIELD_NAME(name) = nullptr; @@ -99,16 +99,6 @@ C2_STUBS_DO(C2_BLOB_FIELD_DEFINE, C2_STUB_FIELD_DEFINE, C2_JVMTI_STUB_FIELD_DEFI #undef C2_STUB_FIELD_DEFINE #undef C2_JVMTI_STUB_FIELD_DEFINE -#define C2_BLOB_NAME_DEFINE(name, type) "C2 Runtime " # name "_blob", -#define C2_STUB_NAME_DEFINE(name, f, t, r) "C2 Runtime " # name, -#define C2_JVMTI_STUB_NAME_DEFINE(name) "C2 Runtime " # name, -const char* OptoRuntime::_stub_names[] = { - C2_STUBS_DO(C2_BLOB_NAME_DEFINE, C2_STUB_NAME_DEFINE, C2_JVMTI_STUB_NAME_DEFINE) -}; -#undef C2_BLOB_NAME_DEFINE -#undef C2_STUB_NAME_DEFINE -#undef C2_JVMTI_STUB_NAME_DEFINE - // This should be called in an assertion at the start of OptoRuntime routines // which are entered from compiled code (all of them) #ifdef ASSERT @@ -139,8 +129,8 @@ static bool check_compiled_frame(JavaThread* thread) { #define C2_STUB_FIELD_NAME(name) _ ## name ## _Java #define C2_STUB_TYPEFUNC(name) name ## _Type #define C2_STUB_C_FUNC(name) CAST_FROM_FN_PTR(address, name ## _C) -#define C2_STUB_NAME(name) stub_name(OptoStubId::name ## _id) -#define C2_STUB_ID(name) OptoStubId::name ## _id +#define C2_STUB_ID(name) StubId:: JOIN3(c2, name, id) +#define C2_STUB_NAME(name) stub_name(C2_STUB_ID(name)) // Almost all the C functions targeted from the generated stubs are // implemented locally to OptoRuntime with names that can be generated @@ -157,7 +147,7 @@ static bool check_compiled_frame(JavaThread* thread) { C2_STUB_TYPEFUNC(name), \ C2_STUB_C_FUNC(name), \ C2_STUB_NAME(name), \ - (int)C2_STUB_ID(name), \ + C2_STUB_ID(name), \ fancy_jump, \ pass_tls, \ pass_retpc); \ @@ -171,7 +161,7 @@ static bool check_compiled_frame(JavaThread* thread) { notify_jvmti_vthread_Type, \ C2_JVMTI_STUB_C_FUNC(name), \ C2_STUB_NAME(name), \ - (int)C2_STUB_ID(name), \ + C2_STUB_ID(name), \ 0, \ true, \ false); \ @@ -279,7 +269,7 @@ const TypeFunc* OptoRuntime::_dtrace_object_alloc_Type = nullptr; // Helper method to do generation of RunTimeStub's address OptoRuntime::generate_stub(ciEnv* env, TypeFunc_generator gen, address C_function, - const char *name, int stub_id, + const char *name, StubId stub_id, int is_fancy_jump, bool pass_tls, bool return_pc) { diff --git 
a/src/hotspot/share/opto/runtime.hpp b/src/hotspot/share/opto/runtime.hpp index 761527a694b..40e436c0d5c 100644 --- a/src/hotspot/share/opto/runtime.hpp +++ b/src/hotspot/share/opto/runtime.hpp @@ -29,6 +29,7 @@ #include "opto/machnode.hpp" #include "opto/optoreg.hpp" #include "runtime/stubDeclarations.hpp" +#include "runtime/stubInfo.hpp" #include "runtime/vframe.hpp" //------------------------------OptoRuntime------------------------------------ @@ -97,20 +98,6 @@ private: typedef const TypeFunc*(*TypeFunc_generator)(); -// define OptoStubId enum tags: uncommon_trap_id etc - -#define C2_BLOB_ID_ENUM_DECLARE(name, type) STUB_ID_NAME(name), -#define C2_STUB_ID_ENUM_DECLARE(name, f, t, r) STUB_ID_NAME(name), -#define C2_JVMTI_STUB_ID_ENUM_DECLARE(name) STUB_ID_NAME(name), -enum class OptoStubId :int { - NO_STUBID = -1, - C2_STUBS_DO(C2_BLOB_ID_ENUM_DECLARE, C2_STUB_ID_ENUM_DECLARE, C2_JVMTI_STUB_ID_ENUM_DECLARE) - NUM_STUBIDS -}; -#undef C2_BLOB_ID_ENUM_DECLARE -#undef C2_STUB_ID_ENUM_DECLARE -#undef C2_JVMTI_STUB_ID_ENUM_DECLARE - class OptoRuntime : public AllStatic { friend class Matcher; // allow access to stub names friend class AOTCodeAddressTable; @@ -118,7 +105,7 @@ class OptoRuntime : public AllStatic { private: // declare opto stub address/blob holder static fields #define C2_BLOB_FIELD_DECLARE(name, type) \ - static type BLOB_FIELD_NAME(name); + static type* BLOB_FIELD_NAME(name); #define C2_STUB_FIELD_NAME(name) _ ## name ## _Java #define C2_STUB_FIELD_DECLARE(name, f, t, r) \ static address C2_STUB_FIELD_NAME(name) ; @@ -213,11 +200,8 @@ class OptoRuntime : public AllStatic { static const TypeFunc* _dtrace_method_entry_exit_Type; static const TypeFunc* _dtrace_object_alloc_Type; - // Stub names indexed by sharedStubId - static const char *_stub_names[]; - // define stubs - static address generate_stub(ciEnv* ci_env, TypeFunc_generator gen, address C_function, const char* name, int stub_id, int is_fancy_jump, bool pass_tls, bool return_pc); + static address generate_stub(ciEnv* ci_env, TypeFunc_generator gen, address C_function, const char* name, StubId stub_id, int is_fancy_jump, bool pass_tls, bool return_pc); // // Implementation of runtime methods @@ -283,9 +267,9 @@ private: static const char* stub_name(address entry); // Returns the name associated with a given stub id - static const char* stub_name(OptoStubId id) { - assert(id > OptoStubId::NO_STUBID && id < OptoStubId::NUM_STUBIDS, "stub id out of range"); - return _stub_names[(int)id]; + static const char* stub_name(StubId id) { + assert(StubInfo::is_c2(id), "not a C2 stub %s", StubInfo::name(id)); + return StubInfo::name(id); } // access to runtime stubs entry points for java code diff --git a/src/hotspot/share/runtime/init.cpp b/src/hotspot/share/runtime/init.cpp index b54afd9f735..73292a7b6d9 100644 --- a/src/hotspot/share/runtime/init.cpp +++ b/src/hotspot/share/runtime/init.cpp @@ -68,6 +68,7 @@ void compilationPolicy_init(); void codeCache_init(); void VM_Version_init(); void icache_init2(); +void initialize_stub_info(); // must precede all blob/stub generation void preuniverse_stubs_init(); void initial_stubs_init(); @@ -130,6 +131,8 @@ jint init_globals() { codeCache_init(); VM_Version_init(); // depends on codeCache_init for emitting code icache_init2(); // depends on VM_Version for choosing the mechanism + // ensure we know about all blobs, stubs and entries + initialize_stub_info(); // initialize stubs needed before we can init the universe preuniverse_stubs_init(); jint status = universe_init(); // dependent 
on codeCache_init and preuniverse_stubs_init @@ -146,10 +149,10 @@ jint init_globals() { AOTCodeCache::init2(); // depends on universe_init, must be before initial_stubs_init AsyncLogWriter::initialize(); - initial_stubs_init(); // initial stub routines + initial_stubs_init(); // stubgen initial stub routines // stack overflow exception blob is referenced by the interpreter + AOTCodeCache::init_early_stubs_table(); // need this after stubgen initial stubs and before shared runtime initial stubs SharedRuntime::generate_initial_stubs(); - AOTCodeCache::init_early_stubs_table(); // need this after initial_stubs gc_barrier_stubs_init(); // depends on universe_init, must be before interpreter_init continuations_init(); // must precede continuation stub generation continuation_stubs_init(); // depends on continuations_init diff --git a/src/hotspot/share/runtime/sharedRuntime.cpp b/src/hotspot/share/runtime/sharedRuntime.cpp index 9664084a2f2..554b94b56c6 100644 --- a/src/hotspot/share/runtime/sharedRuntime.cpp +++ b/src/hotspot/share/runtime/sharedRuntime.cpp @@ -95,59 +95,62 @@ #define SHARED_STUB_FIELD_DEFINE(name, type) \ - type SharedRuntime::BLOB_FIELD_NAME(name); + type* SharedRuntime::BLOB_FIELD_NAME(name); SHARED_STUBS_DO(SHARED_STUB_FIELD_DEFINE) #undef SHARED_STUB_FIELD_DEFINE nmethod* SharedRuntime::_cont_doYield_stub; +#if 0 +// TODO tweak global stub name generation to match this #define SHARED_STUB_NAME_DECLARE(name, type) "Shared Runtime " # name "_blob", const char *SharedRuntime::_stub_names[] = { SHARED_STUBS_DO(SHARED_STUB_NAME_DECLARE) }; +#endif //----------------------------generate_stubs----------------------------------- void SharedRuntime::generate_initial_stubs() { // Build this early so it's available for the interpreter. _throw_StackOverflowError_blob = - generate_throw_exception(SharedStubId::throw_StackOverflowError_id, + generate_throw_exception(StubId::shared_throw_StackOverflowError_id, CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError)); } void SharedRuntime::generate_stubs() { _wrong_method_blob = - generate_resolve_blob(SharedStubId::wrong_method_id, + generate_resolve_blob(StubId::shared_wrong_method_id, CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method)); _wrong_method_abstract_blob = - generate_resolve_blob(SharedStubId::wrong_method_abstract_id, + generate_resolve_blob(StubId::shared_wrong_method_abstract_id, CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_abstract)); _ic_miss_blob = - generate_resolve_blob(SharedStubId::ic_miss_id, + generate_resolve_blob(StubId::shared_ic_miss_id, CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss)); _resolve_opt_virtual_call_blob = - generate_resolve_blob(SharedStubId::resolve_opt_virtual_call_id, + generate_resolve_blob(StubId::shared_resolve_opt_virtual_call_id, CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C)); _resolve_virtual_call_blob = - generate_resolve_blob(SharedStubId::resolve_virtual_call_id, + generate_resolve_blob(StubId::shared_resolve_virtual_call_id, CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C)); _resolve_static_call_blob = - generate_resolve_blob(SharedStubId::resolve_static_call_id, + generate_resolve_blob(StubId::shared_resolve_static_call_id, CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C)); _throw_delayed_StackOverflowError_blob = - generate_throw_exception(SharedStubId::throw_delayed_StackOverflowError_id, + generate_throw_exception(StubId::shared_throw_delayed_StackOverflowError_id, 
CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError)); _throw_AbstractMethodError_blob = - generate_throw_exception(SharedStubId::throw_AbstractMethodError_id, + generate_throw_exception(StubId::shared_throw_AbstractMethodError_id, CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError)); _throw_IncompatibleClassChangeError_blob = - generate_throw_exception(SharedStubId::throw_IncompatibleClassChangeError_id, + generate_throw_exception(StubId::shared_throw_IncompatibleClassChangeError_id, CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError)); _throw_NullPointerException_at_call_blob = - generate_throw_exception(SharedStubId::throw_NullPointerException_at_call_id, + generate_throw_exception(StubId::shared_throw_NullPointerException_at_call_id, CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call)); #if COMPILER2_OR_JVMCI @@ -155,15 +158,15 @@ void SharedRuntime::generate_stubs() { bool support_wide = is_wide_vector(MaxVectorSize); if (support_wide) { _polling_page_vectors_safepoint_handler_blob = - generate_handler_blob(SharedStubId::polling_page_vectors_safepoint_handler_id, + generate_handler_blob(StubId::shared_polling_page_vectors_safepoint_handler_id, CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception)); } #endif // COMPILER2_OR_JVMCI _polling_page_safepoint_handler_blob = - generate_handler_blob(SharedStubId::polling_page_safepoint_handler_id, + generate_handler_blob(StubId::shared_polling_page_safepoint_handler_id, CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception)); _polling_page_return_handler_blob = - generate_handler_blob(SharedStubId::polling_page_return_handler_id, + generate_handler_blob(StubId::shared_polling_page_return_handler_id, CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception)); generate_deopt_blob(); diff --git a/src/hotspot/share/runtime/sharedRuntime.hpp b/src/hotspot/share/runtime/sharedRuntime.hpp index be391c3b0d5..41e4fc1fb3f 100644 --- a/src/hotspot/share/runtime/sharedRuntime.hpp +++ b/src/hotspot/share/runtime/sharedRuntime.hpp @@ -32,7 +32,7 @@ #include "memory/allStatic.hpp" #include "memory/metaspaceClosure.hpp" #include "memory/resourceArea.hpp" -#include "runtime/stubDeclarations.hpp" +#include "runtime/stubInfo.hpp" #include "utilities/macros.hpp" class AdapterHandlerEntry; @@ -45,44 +45,34 @@ class vframeStream; // Java exceptions), locking/unlocking mechanisms, statistical // information, etc. 
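For illustration: with the per-group name arrays gone (note the disabled name table above), names are served out of StubInfo, generated from the same declaration lists that produce the enum tags. That id/name pairing rests on the usual X-macro technique, sketched here standalone with a toy list rather than the real SHARED_STUBS_DO:

#include <cstdio>

// Toy declaration list in the style of SHARED_STUBS_DO (illustrative
// stub names only).
#define TOY_SHARED_STUBS_DO(do_blob) \
  do_blob(deopt)                     \
  do_blob(ic_miss)                   \
  do_blob(throw_StackOverflowError)

// One expansion generates the enum tags...
#define TOY_ID(name) shared_##name##_id,
enum class ToyStubId : int { TOY_SHARED_STUBS_DO(TOY_ID) NUM_STUBIDS };
#undef TOY_ID

// ...a second generates the matching names, kept in sync by construction.
#define TOY_NAME(name) "Shared Runtime " #name "_blob",
static const char* toy_names[] = { TOY_SHARED_STUBS_DO(TOY_NAME) };
#undef TOY_NAME

static const char* toy_name(ToyStubId id) {
  return toy_names[static_cast<int>(id)];
}

int main() {
  printf("%s\n", toy_name(ToyStubId::shared_ic_miss_id)); // Shared Runtime ic_miss_blob
  return 0;
}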
-// define SharedStubId enum tags: wrong_method_id, etc - -#define SHARED_STUB_ID_ENUM_DECLARE(name, type) STUB_ID_NAME(name), -enum class SharedStubId :int { - NO_STUBID = -1, - SHARED_STUBS_DO(SHARED_STUB_ID_ENUM_DECLARE) - NUM_STUBIDS -}; -#undef SHARED_STUB_ID_ENUM_DECLARE - class SharedRuntime: AllStatic { private: // Declare shared stub fields #define SHARED_STUB_FIELD_DECLARE(name, type) \ - static type BLOB_FIELD_NAME(name); + static type* BLOB_FIELD_NAME(name); SHARED_STUBS_DO(SHARED_STUB_FIELD_DECLARE) #undef SHARED_STUB_FIELD_DECLARE #ifdef ASSERT - static bool is_resolve_id(SharedStubId id) { - return (id == SharedStubId::wrong_method_id || - id == SharedStubId::wrong_method_abstract_id || - id == SharedStubId::ic_miss_id || - id == SharedStubId::resolve_opt_virtual_call_id || - id == SharedStubId::resolve_virtual_call_id || - id == SharedStubId::resolve_static_call_id); + static bool is_resolve_id(StubId id) { + return (id == StubId::shared_wrong_method_id || + id == StubId::shared_wrong_method_abstract_id || + id == StubId::shared_ic_miss_id || + id == StubId::shared_resolve_opt_virtual_call_id || + id == StubId::shared_resolve_virtual_call_id || + id == StubId::shared_resolve_static_call_id); } - static bool is_polling_page_id(SharedStubId id) { - return (id == SharedStubId::polling_page_vectors_safepoint_handler_id || - id == SharedStubId::polling_page_safepoint_handler_id || - id == SharedStubId::polling_page_return_handler_id); + static bool is_polling_page_id(StubId id) { + return (id == StubId::shared_polling_page_vectors_safepoint_handler_id || + id == StubId::shared_polling_page_safepoint_handler_id || + id == StubId::shared_polling_page_return_handler_id); } - static bool is_throw_id(SharedStubId id) { - return (id == SharedStubId::throw_AbstractMethodError_id || - id == SharedStubId::throw_IncompatibleClassChangeError_id || - id == SharedStubId::throw_NullPointerException_at_call_id || - id == SharedStubId::throw_StackOverflowError_id || - id == SharedStubId::throw_delayed_StackOverflowError_id); + static bool is_throw_id(StubId id) { + return (id == StubId::shared_throw_AbstractMethodError_id || + id == StubId::shared_throw_IncompatibleClassChangeError_id || + id == StubId::shared_throw_NullPointerException_at_call_id || + id == StubId::shared_throw_StackOverflowError_id || + id == StubId::shared_throw_delayed_StackOverflowError_id); } #endif @@ -92,18 +82,15 @@ class SharedRuntime: AllStatic { // counterpart the continuation do_enter method. 
static nmethod* _cont_doYield_stub; - // Stub names indexed by SharedStubId - static const char *_stub_names[]; - #ifndef PRODUCT // Counters static int64_t _nof_megamorphic_calls; // total # of megamorphic calls (through vtable) #endif // !PRODUCT private: - static SafepointBlob* generate_handler_blob(SharedStubId id, address call_ptr); - static RuntimeStub* generate_resolve_blob(SharedStubId id, address destination); - static RuntimeStub* generate_throw_exception(SharedStubId id, address runtime_entry); + static SafepointBlob* generate_handler_blob(StubId id, address call_ptr); + static RuntimeStub* generate_resolve_blob(StubId id, address destination); + static RuntimeStub* generate_throw_exception(StubId id, address runtime_entry); public: static void generate_initial_stubs(void); static void generate_stubs(void); @@ -118,9 +105,9 @@ class SharedRuntime: AllStatic { #endif static void init_adapter_library(); - static const char *stub_name(SharedStubId id) { - assert(id > SharedStubId::NO_STUBID && id < SharedStubId::NUM_STUBIDS, "stub id out of range"); - return _stub_names[(int)id]; + static const char *stub_name(StubId id) { + assert(StubInfo::is_shared(id), "not a shared stub %s", StubInfo::name(id)); + return StubInfo::name(id); } // max bytes for each dtrace string parameter diff --git a/src/hotspot/share/runtime/stubCodeGenerator.cpp b/src/hotspot/share/runtime/stubCodeGenerator.cpp index eafd2ad572e..4e5f1635e42 100644 --- a/src/hotspot/share/runtime/stubCodeGenerator.cpp +++ b/src/hotspot/share/runtime/stubCodeGenerator.cpp @@ -69,11 +69,13 @@ void StubCodeDesc::print() const { print_on(tty); } StubCodeGenerator::StubCodeGenerator(CodeBuffer* code, bool print_code) { _masm = new MacroAssembler(code); - _blob_id = StubGenBlobId::NO_BLOBID; + _blob_id = BlobId::NO_BLOBID; _print_code = PrintStubCode || print_code; } -StubCodeGenerator::StubCodeGenerator(CodeBuffer* code, StubGenBlobId blob_id, bool print_code) { +StubCodeGenerator::StubCodeGenerator(CodeBuffer* code, BlobId blob_id, bool print_code) { + assert(StubInfo::is_stubgen(blob_id), + "not a stubgen blob %s", StubInfo::name(blob_id)); _masm = new MacroAssembler(code); _blob_id = blob_id; _print_code = PrintStubCode || print_code; @@ -119,7 +121,7 @@ void StubCodeGenerator::stub_epilog(StubCodeDesc* cdesc) { } #ifdef ASSERT -void StubCodeGenerator::verify_stub(StubGenStubId stub_id) { +void StubCodeGenerator::verify_stub(StubId stub_id) { assert(StubRoutines::stub_to_blob(stub_id) == blob_id(), "wrong blob %s for generation of stub %s", StubRoutines::get_blob_name(blob_id()), StubRoutines::get_stub_name(stub_id)); } #endif @@ -134,7 +136,7 @@ StubCodeMark::StubCodeMark(StubCodeGenerator* cgen, const char* group, const cha _cdesc->set_begin(_cgen->assembler()->pc()); } -StubCodeMark::StubCodeMark(StubCodeGenerator* cgen, StubGenStubId stub_id) : StubCodeMark(cgen, "StubRoutines", StubRoutines::get_stub_name(stub_id)) { +StubCodeMark::StubCodeMark(StubCodeGenerator* cgen, StubId stub_id) : StubCodeMark(cgen, "StubRoutines", StubRoutines::get_stub_name(stub_id)) { #ifdef ASSERT cgen->verify_stub(stub_id); #endif diff --git a/src/hotspot/share/runtime/stubCodeGenerator.hpp b/src/hotspot/share/runtime/stubCodeGenerator.hpp index 41bd7e49b31..7d8944c85ea 100644 --- a/src/hotspot/share/runtime/stubCodeGenerator.hpp +++ b/src/hotspot/share/runtime/stubCodeGenerator.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2025, Oracle and/or its affiliates. 
All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ #include "asm/assembler.hpp" #include "memory/allocation.hpp" +#include "runtime/stubInfo.hpp" // All the basic framework for stub code generation/debugging/printing. @@ -98,32 +99,29 @@ class StubCodeDesc: public CHeapObj<mtCode> { // forward declare blob and stub id enums -enum StubGenBlobId : int; -enum StubGenStubId : int; - // The base class for all stub-generating code generators. // Provides utility functions. class StubCodeGenerator: public StackObj { private: bool _print_code; - StubGenBlobId _blob_id; + BlobId _blob_id; protected: MacroAssembler* _masm; public: StubCodeGenerator(CodeBuffer* code, bool print_code = false); - StubCodeGenerator(CodeBuffer* code, StubGenBlobId blob_id, bool print_code = false); + StubCodeGenerator(CodeBuffer* code, BlobId blob_id, bool print_code = false); ~StubCodeGenerator(); MacroAssembler* assembler() const { return _masm; } - StubGenBlobId blob_id() { return _blob_id; } + BlobId blob_id() { return _blob_id; } virtual void stub_prolog(StubCodeDesc* cdesc); // called by StubCodeMark constructor virtual void stub_epilog(StubCodeDesc* cdesc); // called by StubCodeMark destructor #ifdef ASSERT - void verify_stub(StubGenStubId stub_id); + void verify_stub(StubId stub_id); #endif }; @@ -139,7 +137,7 @@ class StubCodeMark: public StackObj { public: StubCodeMark(StubCodeGenerator* cgen, const char* group, const char* name); - StubCodeMark(StubCodeGenerator* cgen, StubGenStubId stub_id); + StubCodeMark(StubCodeGenerator* cgen, StubId stub_id); ~StubCodeMark(); }; diff --git a/src/hotspot/share/runtime/stubDeclarations.hpp b/src/hotspot/share/runtime/stubDeclarations.hpp index 8c01434b73a..bea27d8255a 100644 --- a/src/hotspot/share/runtime/stubDeclarations.hpp +++ b/src/hotspot/share/runtime/stubDeclarations.hpp @@ -26,22 +26,60 @@ #ifndef SHARE_RUNTIME_STUBDECLARATIONS_HPP #define SHARE_RUNTIME_STUBDECLARATIONS_HPP +#include "code/codeBlob.hpp" +#include "oops/klass.hpp" #include "utilities/macros.hpp" -// macros for generating definitions and declarations for shared, c1 -// and opto blob fields and associated stub ids - -// Different shared stubs can have different blob types and may -// include some JFR stubs +// Macros for generating definitions and declarations for shared, c1, +// opto and stubgen blobs and associated stub and entry ids. +// +// The template macros that follow define blobs, stubs and entries in +// each stub group. Invocations of the macros with different macro +// arguments can be used to generate definitions and declarations of +// types, data and methods/functions which support blob, stub and +// entry management. +// +// In particular, they are used to generate 3 global enums that list +// all blobs, stubs and entries across stub groups. They are also used +// to generate local (per-stub group) enums listing every stub in the +// group. The former are provided to allow systematic management of +// blobs, stubs and entries by generic code. The latter are used by +// code which generates and consumes stubs in a specific group. An API +// is provided to convert between global and local ids where needed +// (see class StubInfo). + +// Shared stub declarations +// +// Every shared stub has a unique associated blob whose type must be +// defined as part of the stub declaration. The blob type determines +// how many entries are associated with the stub, normally 1.
A build +// may optionally include some JFR stubs. +// +// n.b. resolve, handler and throw stubs must remain grouped +// contiguously and in the same order so that id values can be range +// checked +// +// Alongside the global and local enums, shared declarations are used to +// generate the following code elements in class SharedRuntime: +// +// Shared Stub blob fields +// +// Static field declarations/definitions for fields of class +// SharedRuntime are generated to store shared blobs +// +// static <type>* _<name>; +// +// Shared stub field names +// +// Stubs are provided with names in the format "Shared Runtime +// <name>_blob". // -// n.b resolve, handler and throw stubs must remain grouped in the -// same order to allow id values to be range checked #if INCLUDE_JFR // do_blob(name, type) #define SHARED_JFR_STUBS_DO(do_blob) \ - do_blob(jfr_write_checkpoint, RuntimeStub*) \ - do_blob(jfr_return_lease, RuntimeStub*) \ + do_blob(jfr_write_checkpoint, RuntimeStub) \ + do_blob(jfr_return_lease, RuntimeStub) \ #else #define SHARED_JFR_STUBS_DO(do_blob) @@ -51,28 +89,33 @@ // // do_blob(name, type) #define SHARED_STUBS_DO(do_blob) \ - do_blob(deopt, DeoptimizationBlob*) \ + do_blob(deopt, DeoptimizationBlob) \ /* resolve stubs */ \ - do_blob(wrong_method, RuntimeStub*) \ - do_blob(wrong_method_abstract, RuntimeStub*) \ - do_blob(ic_miss, RuntimeStub*) \ - do_blob(resolve_opt_virtual_call, RuntimeStub*) \ - do_blob(resolve_virtual_call, RuntimeStub*) \ - do_blob(resolve_static_call, RuntimeStub*) \ + do_blob(wrong_method, RuntimeStub) \ + do_blob(wrong_method_abstract, RuntimeStub) \ + do_blob(ic_miss, RuntimeStub) \ + do_blob(resolve_opt_virtual_call, RuntimeStub) \ + do_blob(resolve_virtual_call, RuntimeStub) \ + do_blob(resolve_static_call, RuntimeStub) \ /* handler stubs */ \ - do_blob(polling_page_vectors_safepoint_handler, SafepointBlob*) \ - do_blob(polling_page_safepoint_handler, SafepointBlob*) \ - do_blob(polling_page_return_handler, SafepointBlob*) \ + do_blob(polling_page_vectors_safepoint_handler, SafepointBlob) \ + do_blob(polling_page_safepoint_handler, SafepointBlob) \ + do_blob(polling_page_return_handler, SafepointBlob) \ /* throw stubs */ \ - do_blob(throw_AbstractMethodError, RuntimeStub*) \ - do_blob(throw_IncompatibleClassChangeError, RuntimeStub*) \ - do_blob(throw_NullPointerException_at_call, RuntimeStub*) \ - do_blob(throw_StackOverflowError, RuntimeStub*) \ - do_blob(throw_delayed_StackOverflowError, RuntimeStub*) \ + do_blob(throw_AbstractMethodError, RuntimeStub) \ + do_blob(throw_IncompatibleClassChangeError, RuntimeStub) \ + do_blob(throw_NullPointerException_at_call, RuntimeStub) \ + do_blob(throw_StackOverflowError, RuntimeStub) \ + do_blob(throw_delayed_StackOverflowError, RuntimeStub) \ /* other stubs */ \ SHARED_JFR_STUBS_DO(do_blob) \ -// C1 stubs are always generated in a generic CodeBlob +// C1 stub declarations +// +// C1 stubs are always generated in a unique associated generic +// CodeBlob with a single entry. C1 stubs are stored in an array +// indexed by local enum. So, no other code elements need to be +// generated via this macro. #ifdef COMPILER1 // client macro to operate on c1 stubs @@ -118,14 +161,42 @@ #define C1_STUBS_DO(do_blob) #endif -// Opto stubs can be stored as entries with just an address or as -// blobs of different types. The former may include some JVMTI stubs. +// C2 stub declarations // -// n.b.
blobs and stub defines are generated in the order defined by +// C2 stubs are always generated in a unique associated generic +// CodeBlob and have a single entry. In some cases, including JVMTI +// stubs, a standard code blob is employed and only the stub entry +// address is retained. In others a specialized code blob with +// stub-specific properties (e.g. frame size) is required so the blob +// address needs to be stored. In these latter cases the declaration +// includes the relevant storage type. +// +// n.b. blob and stub enum tags are generated in the order defined by // C2_STUBS_DO, allowing dependencies from any given stub on its // predecessors to be guaranteed. That explains the initial placement // of the blob declarations and intermediate placement of the jvmti // stubs. +// +// Alongside the local and global enums, C2 declarations are used to +// generate several elements of class OptoRuntime. +// +// C2 Stub blob/address fields +// +// Static field declarations/definitions for fields of class +// OptoRuntime are generated to store either C2 blob or C2 blob entry +// addresses: +// +// static <type>* _<name>_Java; +// static address _<name>; +// +// C2 stub blob/field names +// +// C2 stubs are provided with names in the format "C2 Runtime +// <name>_blob". +// +// A stub creation method OptoRuntime::generate(ciEnv* env) is +// generated which invokes the C2 compiler to generate each stub in +// declaration order. #ifdef COMPILER2 // do_jvmti_stub(name) @@ -146,12 +217,20 @@ // do_stub(name, fancy_jump, pass_tls, return_pc) // do_jvmti_stub(name) // -// n.b. non-jvmti stubs may employ a special type of jump (0, 1 or 2) -// and require access to TLS and the return pc. jvmti stubs always -// employ jump 0, and require no special access +// do_blob is used for stubs that are generated via direct invocation +// of the assembler to write into a blob of the appropriate type +// +// do_stub is used for stubs that are generated as C2 compiler IR +// intrinsics, using the supplied arguments to determine whether nodes +// in the IR graph employ a special type of jump (0, 1 or 2) or +// provide access to TLS and the return pc. +// +// do_jvmti_stub generates a JVMTI stub as an IR intrinsic which +// employs jump 0, and requires no special access + #define C2_STUBS_DO(do_blob, do_stub, do_jvmti_stub) \ - do_blob(uncommon_trap, UncommonTrapBlob*) \ - do_blob(exception, ExceptionBlob*) \ + do_blob(uncommon_trap, UncommonTrapBlob) \ + do_blob(exception, ExceptionBlob) \ do_stub(new_instance, 0, true, false) \ do_stub(new_array, 0, true, false) \ do_stub(new_array_nozero, 0, true, false) \ @@ -172,7 +251,9 @@ #define C2_STUBS_DO(do_blob, do_stub, do_jvmti_stub) #endif -// Stub Generator Blobs and Stubs Overview +// Stubgen stub declarations +// +// Stub Generator Blobs, Stubs and Entries Overview // // StubGenerator stubs do not require their own individual blob. They // are generated in batches into one of five distinct BufferBlobs: @@ -183,75 +264,52 @@ // 4) Compiler stubs // 5) Final stubs // -// Creation of each successive BufferBlobs is staged to ensure that +// Most StubGen stubs have a single entry point. However, in some +// cases there are additional entry points. +// +// Creation of each successive BufferBlob is staged to ensure that // specific VM subsystems required by those stubs are suitably -// initialized before generated code attempt to reference data or +// initialized before generated code attempts to reference data or // addresses exported by those subsystems.
 // initialization must be taken into account when adding a new stub
 // declaration.
 //
-// StubGenerator stubs are declared using template macros, one set of
-// declarations per blob (see below), with arch-specific stubs for any
-// gven blob declared after generic stubs for that blob. Blobs are
-// created in a fixed order during startup, which is reflected in the
-// order of the declaration set. Stubs within a blob are currently
-// created in an order determined by the arch-specific generator code
-// which may not reflect the order of stub declarations. It is not
-// straightforward to enforce a strict ordering. not least because
-// arch-specific stub creation may need to be interleaved with generic
-// stub creation.
+// StubGen blobs, stubs and entries are declared using template
+// macros, grouped hierarchically by blob and stub, with arch-specific
+// stubs for any given blob declared after generic stubs for that
+// blob. Stub declarations must follow the blob start (do_blob)
+// declaration for their containing blob. Entry declarations must
+// follow the stub start (do_stub) declaration for their
+// containing stub.
 //
-// Blob and stub declaration templates are used to generate a variety
-// of C++ code elements needed to manage stubs.
+// Blob and stub declarations are used to generate a variety of C++
+// code elements needed to manage stubs, including the global and
+// local blob, stub and entry enum types mentioned above. The blob
+// declaration order must reflect the order in which blob create
+// operations are invoked during startup. Stubs within a blob are
+// currently generated in an order determined by the arch-specific
+// generator code which may not always reflect the order of stub
+// declarations (it is not straightforward to enforce a strict
+// ordering, not least because arch-specific stub creation may need to
+// be interleaved with generic stub creation).
 //
-// Blob identifiers:
-//
-// public enum StubGenBlobId is generated to identify each of the
-// StubGenerator blobs in blob declaration order. This enum is
-// provided for use by client code to identify a specific blob. For a
-// blob declared with name <blob_name> the associated enum value is
-// StubGenBlobId::<blob_name>_id.
-//
-// Global stub identifiers:
-//
-// public enum StubGenStubId is generated to identify all declared
-// stubs across all blobs, sorted first by blob declaration order and
-// then within a blob by stub declaration order, generic stubs before
-// arch-specific stubs. This enum is provided for use by client code
-// to identify a specific stub, independent of the blob it belongs to.
-// For a stub declared with name <stub_name> the associated enum value
-// is StubGenStubId::<stub_name>_id.
-//
-// Blob-local stub identifiers:
-//
-// For each blob <blob_name>, public enum StubGenStubId_<blob_name> is
-// generated to enumerate all stubs within the blob in stub
-// declaration order, generic stubs before arch-specific stubs. This
-// enum is provided only in a non-product build and is intended for
-// internal use by class StubRoutines to validate stub declarations.
-// For a stub declared with name <stub_name> belonging to blob
-// <blob_name> the associated enum value is
-// StubGenStubId::<blob_name>_<stub_name>_id.
+// Alongside the global enums, the stubgen declarations are used to
+// define the following elements of class StubRoutines:
 //
 // Stub names and associated getters:
 //
-// Two private static fields are generated to hold the names of the
-// four generated blobs and all the generated stubs.
+// Name strings are generated for each blob where a blob declared with
+// name argument <blob_name> will be named using string "<blob_name>".
 //
-// const char* StubRoutines::_blob_names[];
-// const char* StubRoutines::_stub_names[];
-//
-// The entry in _blob_names for a blob declared with name <blob_name>
-// will be "<blob_name>".
-//
-// The entry in _stub_names for a stub declared with name <stub_name>
-// will be "<stub_name>".
+// Name strings are also generated for each stub where a stub declared
+// with name argument <stub_name> will be named using string
+// "<stub_name>".
 //
 // Corresponding public static lookup methods are generated to allow
 // names to be looked up by blob or global stub id.
 //
-// const char* StubRoutines::get_blob_name(StubGenBlobId id)
-// const char* StubRoutines::get_stub_name(StubGenStubId id)
+// const char* StubRoutines::get_blob_name(BlobId id)
+// const char* StubRoutines::get_stub_name(StubId id)
 //
 // These name lookup methods should be used by generic and
 // cpu-specific client code to ensure that blobs and stubs are
@@ -349,7 +407,6 @@
 //   static void set_f2i_fixup(address a) { _f2i_fixup = a; }
 //
-
 //--------------------------------------------------
 // Stub Generator Blob, Stub and Entry Declarations
 // -------------------------------------------------
@@ -415,15 +472,17 @@
 // given blob. This enum is private to the stub management code and
 // used to validate correct use of stubs within a given blob.
 //
-// The do_stub template receives a blob name and stub name as argument.
+// The do_stub template receives a blob name and stub name as
+// argument.
 //
 // do_stub(blob_name, stub_name)
 //
-// do_stub is primarily used to define a global enum tag for a stub
-// and a constant string name, both for use by client code. It is also
-// used to declare a tag within the blob-local enum type used to
-// validate correct use of stubs within their declared blob. Finally,
-// it is also used to declare a name for each stub.
+// do_stub is primarily used to define values associated with the stub
+// with name stub_name: a global enum tag for it and a constant string
+// name, both for use by client code. It is also used to declare a tag
+// within the blob-local enum type used to validate correct use of
+// stubs within their declared blob. Finally, it is also used to
+// declare a name string for the stub.
 //
 // The do_entry and do_entry_array templates receive 4 or 5 arguments
 //
@@ -453,6 +512,14 @@
 // its own named getter. In the latter case multiple do_entry or
 // do_entry_init declarations are associated with the stub.
 //
+// All the above entry macros are used to declare enum tags that
+// identify the entry. Three different enums are generated via these
+// macros: a per-stub enum that indexes and provides a count for the
+// entries associated with the owning stub; a per-blob enum that
+// indexes and provides a count for the entries associated with the
+// owning blob; and a global enum that indexes and provides a count
+// for all entries associated with generated stubs.
+//
 // blob_name and stub_name are the names of the blob and stub to which
 // the entry belongs.
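+//
+// As an illustration (a hedged sketch, not the literal generated
+// text -- the blob, stub and entry names here are made up), a pair of
+// declarations such as
+//
+//   do_stub(my_blob, my_stub)
+//   do_entry(my_blob, my_stub, my_entry, my_entry)
+//
+// would contribute a StubRoutines entry field and getter along the
+// lines of
+//
+//   static address _my_entry;
+//   static address my_entry() { return _my_entry; }
+//
+// together with tags for the entry in the per-stub, per-blob and
+// global entry enums described above.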
// @@ -634,12 +701,13 @@ do_entry, do_entry_init, \ do_entry_array, \ do_arch_blob, \ - do_arch_entry, do_arch_entry_init) \ + do_arch_entry, \ + do_arch_entry_init) \ do_blob(continuation) \ do_stub(continuation, cont_thaw) \ do_entry(continuation, cont_thaw, cont_thaw, cont_thaw) \ do_stub(continuation, cont_preempt) \ - do_entry(continuation, cont_prempt, cont_preempt_stub, \ + do_entry(continuation, cont_preempt, cont_preempt_stub, \ cont_preempt_stub) \ do_stub(continuation, cont_returnBarrier) \ do_entry(continuation, cont_returnBarrier, cont_returnBarrier, \ @@ -648,7 +716,7 @@ do_entry(continuation, cont_returnBarrierExc, cont_returnBarrierExc, \ cont_returnBarrierExc) \ /* merge in stubs and entries declared in arch header */ \ - STUBGEN_CONTINUATION_BLOBS_ARCH_DO(do_stub, do_arch_blob, \ + STUBGEN_CONTINUATION_BLOBS_ARCH_DO(do_stub, do_arch_blob, \ do_arch_entry, do_arch_entry_init) \ end_blob(continuation) \ @@ -802,7 +870,7 @@ do_entry(compiler, bigIntegerLeftShiftWorker, \ bigIntegerLeftShiftWorker, bigIntegerLeftShift) \ /* merge in stubs and entries declared in arch header */ \ - STUBGEN_COMPILER_BLOBS_ARCH_DO(do_stub, do_arch_blob, \ + STUBGEN_COMPILER_BLOBS_ARCH_DO(do_stub, do_arch_blob, \ do_arch_entry, do_arch_entry_init) \ end_blob(compiler) \ @@ -971,55 +1039,6 @@ end_blob(final) \ -// Convenience macros for use by template implementations - -#define STUB_ID_NAME(base) base##_id - -// emit a runtime or stubgen stub field name - -#define STUB_FIELD_NAME(base) _##base - -// emit a runtime blob field name - -#define BLOB_FIELD_NAME(base) _##base##_blob - -// emit a stubgen blob field name - -#define STUBGEN_BLOB_FIELD_NAME(base) _ ## base ## _stubs_code - -// Convenience templates that emit nothing - -// ignore do_blob(blob_name, type) declarations -#define DO_BLOB_EMPTY2(blob_name, type) - -// ignore do_blob(blob_name) and end_blob(blob_name) declarations -#define DO_BLOB_EMPTY1(blob_name) - -// ignore do_stub(name, fancy_jump, pass_tls, return_pc) declarations -#define DO_STUB_EMPTY4(name, fancy_jump, pass_tls, return_pc) - -// ignore do_jvmti_stub(name) declarations -#define DO_JVMTI_STUB_EMPTY1(stub_name) - -// ignore do_stub(blob_name, stub_name) declarations -#define DO_STUB_EMPTY2(blob_name, stub_name) - -// ignore do_entry(blob_name, stub_name, fieldname, getter_name) declarations -#define DO_ENTRY_EMPTY4(blob_name, stub_name, fieldname, getter_name) - -// ignore do_entry(blob_name, stub_name, fieldname, getter_name, init_function) and -// do_entry_array(blob_name, stub_name, fieldname, getter_name, count) declarations -#define DO_ENTRY_EMPTY5(blob_name, stub_name, fieldname, getter_name, init_function) - -// ignore do_arch_blob(blob_name, size) declarations -#define DO_ARCH_BLOB_EMPTY2(arch, size) - -// ignore do_arch_entry(arch, blob_name, stub_name, fieldname, getter_name) declarations -#define DO_ARCH_ENTRY_EMPTY5(arch, blob_name, stub_name, field_name, getter_name) - -// ignore do_arch_entry(arch, blob_name, stub_name, fieldname, getter_name, init_function) declarations -#define DO_ARCH_ENTRY_EMPTY6(arch, blob_name, stub_name, field_name, getter_name, init_function) - // The whole shebang! 
 //
 // client macro for emitting StubGenerator blobs, stubs and entries
@@ -1061,6 +1080,87 @@
                     do_arch_blob,                                     \
                     do_arch_entry, do_arch_entry_init)                \

+// Convenience macros for use by template implementations
+
+#define JOIN2(name, suffix)                     \
+  name ## _ ## suffix
+
+#define JOIN3(prefix, name, suffix)             \
+  prefix ## _ ## name ## _ ## suffix
+
+#define JOIN4(prefix, prefix2, name, suffix)            \
+  prefix ## _ ## prefix2 ## _ ## name ## _ ## suffix
+
+#define STUB_ID_NAME(base) JOIN2(base, id)
+
+// emit a runtime or stubgen stub field name
+
+#define STUB_FIELD_NAME(base) _##base
+
+// emit a runtime blob field name
+
+#define BLOB_FIELD_NAME(base) _## base ## _blob
+
+// emit a stubgen blob field name
+
+#define STUBGEN_BLOB_FIELD_NAME(base) _ ## base ## _stubs_code
+
+// first some macros that add an increment
+
+#define COUNT1(_1)                              \
+  + 1
+
+#define COUNT2(_1, _2)                          \
+  + 1
+
+#define COUNT4(_1, _2, _3, _4)                  \
+  + 1
+
+#define COUNT5(_1, _2, _3, _4, _5)              \
+  + 1
+
+#define COUNT6(_1, _2, _3, _4, _5, _6)          \
+  + 1
+
+#define SHARED_COUNT2(_1, type)                 \
+  + type :: ENTRY_COUNT
+
+#define STUBGEN_COUNT5(_1, _2, _3, _4, count)   \
+  + count
+
+// Convenience templates that emit nothing
+
+// ignore do_blob(blob_name, type) declarations
+#define DO_BLOB_EMPTY2(blob_name, type)
+
+// ignore do_blob(blob_name) and end_blob(blob_name) declarations
+#define DO_BLOB_EMPTY1(blob_name)
+
+// ignore do_stub(name, fancy_jump, pass_tls, return_pc) declarations
+#define DO_STUB_EMPTY4(name, fancy_jump, pass_tls, return_pc)
+
+// ignore do_jvmti_stub(name) declarations
+#define DO_JVMTI_STUB_EMPTY1(stub_name)
+
+// ignore do_stub(blob_name, stub_name) declarations
+#define DO_STUB_EMPTY2(blob_name, stub_name)
+
+// ignore do_entry(blob_name, stub_name, fieldname, getter_name) declarations
+#define DO_ENTRY_EMPTY4(blob_name, stub_name, fieldname, getter_name)
+
+// ignore do_entry(blob_name, stub_name, fieldname, getter_name, init_function) and
+// do_entry_array(blob_name, stub_name, fieldname, getter_name, count) declarations
+#define DO_ENTRY_EMPTY5(blob_name, stub_name, fieldname, getter_name, init_function)
+
+// ignore do_arch_blob(blob_name, size) declarations
+#define DO_ARCH_BLOB_EMPTY2(arch, size)
+
+// ignore do_arch_entry(arch, blob_name, stub_name, fieldname, getter_name) declarations
+#define DO_ARCH_ENTRY_EMPTY5(arch, blob_name, stub_name, field_name, getter_name)
+
+// ignore do_arch_entry(arch, blob_name, stub_name, fieldname, getter_name, init_function) declarations
+#define DO_ARCH_ENTRY_EMPTY6(arch, blob_name, stub_name, field_name, getter_name, init_function)
+
 // client macro to operate only on StubGenerator blobs

 #define STUBGEN_BLOBS_DO(do_blob)                                     \
@@ -1081,7 +1181,7 @@
                  DO_ARCH_BLOB_EMPTY2,                                 \
                  DO_ARCH_ENTRY_EMPTY5, DO_ARCH_ENTRY_EMPTY6)          \

-// client macro to operate only on StubGenerator blobs and stubs
+// client macros to operate only on StubGenerator blobs and stubs

 #define STUBGEN_BLOBS_STUBS_DO(do_blob, end_blob, do_stub)            \
   STUBGEN_ALL_DO(do_blob, end_blob,                                   \
@@ -1091,6 +1191,17 @@
                  DO_ARCH_BLOB_EMPTY2,                                 \
                  DO_ARCH_ENTRY_EMPTY5, DO_ARCH_ENTRY_EMPTY6)          \

+// client macro to operate only on StubGenerator generic and arch entries
+
+#define STUBGEN_ALL_ENTRIES_DO(do_entry, do_entry_init, do_entry_array, \
+                               do_arch_entry, do_arch_entry_init)     \
+  STUBGEN_ALL_DO(DO_BLOB_EMPTY1, DO_BLOB_EMPTY1,                      \
+                 DO_STUB_EMPTY2,                                      \
+                 do_entry, do_entry_init,                             \
+                 do_entry_array,                                      \
+                 DO_ARCH_BLOB_EMPTY2,                                 \
+                 do_arch_entry, do_arch_entry_init)                   \
+
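+// For example, the counting templates above combine with these client
+// macros to compute totals at compile time. A minimal sketch (the
+// constant name here is illustrative only):
+//
+//   static const int BLOB_TOTAL = 0 STUBGEN_BLOBS_DO(COUNT1);
+//
+// Each do_blob(blob_name) occurrence expands COUNT1(blob_name) to
+// "+ 1", so the initializer reduces to 0 + 1 + 1 ... with one
+// increment per declared blob.
+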
 // client macro to operate only on StubGenerator entries

 #define STUBGEN_ENTRIES_DO(do_entry, do_entry_init, do_entry_array)   \
diff --git a/src/hotspot/share/runtime/stubInfo.cpp b/src/hotspot/share/runtime/stubInfo.cpp
new file mode 100644
index 00000000000..c3bcab58ea8
--- /dev/null
+++ b/src/hotspot/share/runtime/stubInfo.cpp
@@ -0,0 +1,1080 @@
+/*
+ * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "code/codeBlob.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/stubDeclarations.hpp"
+#include "runtime/stubInfo.hpp"
+
+// define static data fields of class StubInfo
+
+struct StubInfo::GroupDetails StubInfo::_group_table[StubInfo::GROUP_TABLE_SIZE];
+struct StubInfo::BlobDetails StubInfo::_blob_table[StubInfo::BLOB_TABLE_SIZE];
+struct StubInfo::StubDetails StubInfo::_stub_table[StubInfo::STUB_TABLE_SIZE];
+struct StubInfo::EntryDetails StubInfo::_entry_table[StubInfo::ENTRY_TABLE_SIZE];
+
+// helpers to access table elements using enums as indices
+
+struct StubInfo::GroupDetails& StubInfo::group_details(StubGroup g) {
+  int idx = static_cast<int>(g);
+  assert(idx >= 0 && idx < GROUP_TABLE_SIZE, "invalid stub group index %d", idx);
+  return _group_table[idx];
+}
+
+struct StubInfo::BlobDetails& StubInfo::blob_details(BlobId b) {
+  int idx = static_cast<int>(b);
+  assert(idx >= 0 && idx < BLOB_TABLE_SIZE, "invalid blob index %d", idx);
+  return _blob_table[idx];
+}
+
+struct StubInfo::StubDetails& StubInfo::stub_details(StubId s) {
+  int idx = static_cast<int>(s);
+  assert(idx >= 0 && idx < STUB_TABLE_SIZE, "invalid stub index %d", idx);
+  return _stub_table[idx];
+}
+
+struct StubInfo::EntryDetails& StubInfo::entry_details(EntryId e) {
+  int idx = static_cast<int>(e);
+  assert(idx >= 0 && idx < ENTRY_TABLE_SIZE, "invalid entry index %d", idx);
+  return _entry_table[idx];
+}
+
+// helpers to step through blob, stub or entry enum sequences
+
+BlobId StubInfo::next(BlobId id) {
+  int idx = static_cast<int>(id);
+  // allow for id to be NO_BLOBID but not NUM_BLOBIDS
+  assert(idx >= -1 && idx < BLOB_TABLE_SIZE, "invalid blob index %d", idx);
+  return static_cast<BlobId>(idx + 1);
+}
+
+StubId StubInfo::next(StubId id) {
+  int idx = static_cast<int>(id);
+  // allow for id to be NO_STUBID but not NUM_STUBIDS
+  assert(idx >= -1 && idx < STUB_TABLE_SIZE, "invalid stub index %d", idx);
+  return static_cast<StubId>(idx + 1);
+}
+
+EntryId StubInfo::next(EntryId id) {
+  int idx = static_cast<int>(id);
+  // allow for id to be NO_ENTRYID but not NUM_ENTRYIDS
+  assert(idx >= -1 && idx < ENTRY_TABLE_SIZE, "invalid entry index %d", idx);
+  return static_cast<EntryId>(idx + 1);
+}
+
+BlobId StubInfo::next_in_group(StubGroup stub_group, BlobId blob_id) {
+  int idx = static_cast<int>(blob_id);
+  // id must be strictly between NO_BLOBID and NUM_BLOBIDS
+  assert(idx > -1 && idx < BLOB_TABLE_SIZE, "invalid blob index %d", idx);
+  assert(blob_details(blob_id)._group == stub_group, "blob does not belong to stub group!");
+  struct GroupDetails& group = group_details(stub_group);
+  if (blob_id == group._max) {
+    return BlobId::NO_BLOBID;
+  } else {
+    return static_cast<BlobId>(idx + 1);
+  }
+}
+
+StubId StubInfo::next_in_blob(BlobId blob_id, StubId stub_id) {
+  int idx = static_cast<int>(stub_id);
+  // id must be strictly between NO_STUBID and NUM_STUBIDS
+  assert(idx > -1 && idx < STUB_TABLE_SIZE, "invalid stub index %d", idx);
+  assert(stub_details(stub_id)._blob == blob_id, "stub does not belong to blob!");
+  struct BlobDetails& blob = blob_details(blob_id);
+  if (stub_id == blob._max) {
+    return StubId::NO_STUBID;
+  } else {
+    return static_cast<StubId>(idx + 1);
+  }
+}
+
+EntryId StubInfo::next_in_stub(StubId stub_id, EntryId entry_id) {
+  int idx = static_cast<int>(entry_id);
+  // id must be strictly between NO_ENTRYID and NUM_ENTRYIDS
+  assert(idx > -1 && idx < ENTRY_TABLE_SIZE, "invalid entry index %d", idx);
+  assert(entry_details(entry_id)._stub == stub_id, "entry does not belong to stub!");
+  struct StubDetails& stub = stub_details(stub_id);
+  if (entry_id == stub._max) {
+    return EntryId::NO_ENTRYID;
+  } else {
+    return static_cast<EntryId>(idx + 1);
+  }
+}
+
+// name retrieval
+
+const char* StubInfo::name(StubGroup stub_group) {
+  return group_details(stub_group)._name;
+}
+
+const char* StubInfo::name(BlobId id) {
+  return blob_details(id)._name;
+}
+
+const char* StubInfo::name(StubId id) {
+  return stub_details(id)._name;
+}
+
+const char* StubInfo::name(EntryId id) {
+  return entry_details(id)._name;
+}
+
+int StubInfo::span(EntryId second, EntryId first) {
+  // normally when the two ids are equal the entry span is 1 but we
+  // have a special case when the base and max are both NO_ENTRYID in
+  // which case the entry count is 0
+  int idx1 = static_cast<int>(first);
+  int idx2 = static_cast<int>(second);
+  assert((idx1 < 0 && idx2 < 0) || (idx1 >= 0 && idx2 >= idx1), "bad entry ids first %d and second %d", idx1, idx2);
+  if (idx1 < 0) {
+    return 0;
+  }
+  // span is inclusive of first and second
+  return idx2 + 1 - idx1;
+}
+
+int StubInfo::span(StubId second, StubId first) {
+  int idx1 = static_cast<int>(first);
+  int idx2 = static_cast<int>(second);
+  assert(idx2 >= 0 && idx2 >= idx1, "bad stub ids first %d and second %d", idx1, idx2);
+  // span is inclusive of first and second
+  return idx2 + 1 - idx1;
+}
+
+int StubInfo::span(BlobId second, BlobId first) {
+  int idx1 = static_cast<int>(first);
+  int idx2 = static_cast<int>(second);
+  assert(idx2 >= 0 && idx2 >= idx1, "bad blob ids first %d and second %d", idx1, idx2);
+  // span is inclusive of first and second
+  return idx2 + 1 - idx1;
+}
+
+#ifdef ASSERT
+// helpers to check sequencing of blobs, stubs and entries
+bool StubInfo::is_next(BlobId second, BlobId first) {
+  return next(first) == second;
+}
+
+bool StubInfo::is_next(StubId second, StubId first) {
+  return next(first) == second;
+}
+
+bool StubInfo::is_next(EntryId second, EntryId first) {
+  return next(first) == second;
+}
+#endif // ASSERT
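+
+// As a usage sketch (illustrative only, not part of this file),
+// client code can walk every stub in a blob with the traversal
+// helpers in this file:
+//
+//   for (StubId s = StubInfo::stub_base(blob_id);
+//        s != StubId::NO_STUBID;
+//        s = StubInfo::next_in_blob(blob_id, s)) {
+//     tty->print_cr("stub %s", StubInfo::name(s));
+//   }
+//
+// next_in_blob returns NO_STUBID once the blob's _max stub has been
+// visited, so the loop needs no separate count check.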
+
+// implementation of the counting methods used to populate the
+// stubgroup, blob, stub and entry tables
+
+void StubInfo::process_shared_blob(StubGroup& group_cursor,
+                                   BlobId& blob_cursor,
+                                   StubId& stub_cursor,
+                                   EntryId& entry_cursor,
+                                   const char* name,
+                                   BlobId declaredBlob,
+                                   StubId declaredStub,
+                                   EntryId declaredEntry,
+                                   EntryId declaredMax) {
+  // for shared declarations we update the blob, stub and entry tables
+  // all in one go based on each unique blob declaration. We may need
+  // to write more than one entry table element if the stub has
+  // multiple entries
+  assert(group_cursor == StubGroup::SHARED, "must be");
+  assert(is_next(declaredBlob, blob_cursor), "Out of order declaration for shared blob %s", name);
+  assert(is_next(declaredStub, stub_cursor), "Out of order declaration for shared stub %s", name);
+  assert(is_next(declaredEntry, entry_cursor), "Out of order declaration for shared entry %s", name);
+  assert(span(declaredMax, declaredEntry) > 0, "Invalid entry count %d for entry %s", span(declaredMax, declaredEntry), name);
+  // if this is the first shared blob then record it as the base id
+  // and also update entry base
+  if (group_details(group_cursor)._base == BlobId::NO_BLOBID) {
+    group_details(group_cursor)._base = declaredBlob;
+    group_details(group_cursor)._entry_base = declaredEntry;
+  }
+  // update the high water mark for blobs and entries in the stub
+  // group unconditionally
+  group_details(group_cursor)._max = declaredBlob;
+  group_details(group_cursor)._entry_max = declaredMax;
+  // move forward to this blob
+  blob_cursor = declaredBlob;
+  // link the blob to its group and its unique stub
+  blob_details(blob_cursor)._group = group_cursor;
+  blob_details(blob_cursor)._base = declaredStub;
+  blob_details(blob_cursor)._max = declaredStub;
+  blob_details(blob_cursor)._name = name;
+  // move forward to this stub
+  stub_cursor = declaredStub;
+  // link the stub to its blob and its entries
+  stub_details(stub_cursor)._blob = declaredBlob;
+  stub_details(stub_cursor)._base = declaredEntry;
+  stub_details(stub_cursor)._max = declaredMax;
+  stub_details(stub_cursor)._is_entry_array = false;
+  stub_details(stub_cursor)._name = name;
+  // move forward to last entry
+  entry_cursor = declaredMax;
+  // fill out the entry table for the declared entry up to last entry
+  EntryId id = declaredEntry;
+  entry_details(id)._stub = declaredStub;
+  entry_details(id)._array_base = EntryId::NO_ENTRYID;
+  entry_details(id)._name = name;
+  // fill any subsequent entries
+  while (id != declaredMax) {
+    id = next(id);
+    entry_details(id)._stub = declaredStub;
+    entry_details(id)._array_base = EntryId::NO_ENTRYID;
+    entry_details(id)._name = name;
+  }
+}
+
+void StubInfo::process_c1_blob(StubGroup& group_cursor,
+                               BlobId& blob_cursor,
+                               StubId& stub_cursor,
+                               EntryId& entry_cursor,
+                               const char* name,
+                               BlobId declaredBlob,
+                               StubId declaredStub,
+                               EntryId declaredEntry) {
+  // for c1 declarations we update the blob, stub and entry tables all
+  // in one go based on each unique blob declaration
+  assert(group_cursor == StubGroup::C1, "must be");
+  assert(is_next(declaredBlob, blob_cursor), "Out of order declaration for c1 blob %s", name);
+  assert(is_next(declaredStub, stub_cursor), "Out of order declaration for c1 stub %s", name);
+  assert(is_next(declaredEntry, entry_cursor), "Out of order declaration for c1 entry %s", name);
+  // if this is the first c1 blob then record it and the entry
+  if (group_details(group_cursor)._base == BlobId::NO_BLOBID) {
+    group_details(group_cursor)._base = declaredBlob;
+    group_details(group_cursor)._entry_base = declaredEntry;
+  }
+  // update the high water mark for blobs and entries in the stub
+  // group unconditionally
+  group_details(group_cursor)._max = declaredBlob;
+  group_details(group_cursor)._entry_max = declaredEntry;
+  // move forward to this blob
+  blob_cursor = declaredBlob;
+  // link the blob to its group and its unique stub
+  blob_details(blob_cursor)._group = group_cursor;
+  blob_details(blob_cursor)._base = declaredStub;
+  blob_details(blob_cursor)._max = declaredStub;
+  blob_details(blob_cursor)._name = name;
+  // move forward to this stub
+  stub_cursor = declaredStub;
+  // link the stub to its blob and its entries
+  stub_details(stub_cursor)._blob = declaredBlob;
+  stub_details(stub_cursor)._base = declaredEntry;
+  stub_details(stub_cursor)._max = declaredEntry;
+  stub_details(stub_cursor)._is_entry_array = false;
+  stub_details(stub_cursor)._name = name;
+  // move forward to entry
+  entry_cursor = declaredEntry;
+  // fill out the entry table element
+  entry_details(entry_cursor)._stub = declaredStub;
+  entry_details(entry_cursor)._array_base = EntryId::NO_ENTRYID;
+  entry_details(entry_cursor)._name = name;
+}
+
+void StubInfo::process_c2_blob(StubGroup& group_cursor,
+                               BlobId& blob_cursor,
+                               StubId& stub_cursor,
+                               EntryId& entry_cursor,
+                               const char* name,
+                               BlobId declaredBlob,
+                               StubId declaredStub,
+                               EntryId declaredEntry) {
+  // for c2 declarations we update the blob, stub and entry tables all
+  // in one go based on the same details garnered from each unique
+  // blob, stub or jvmti stub declaration
+  assert(group_cursor == StubGroup::C2, "must be");
+  assert(is_next(declaredBlob, blob_cursor), "Out of order declaration for c2 blob %s", name);
+  assert(is_next(declaredStub, stub_cursor), "Out of order declaration for c2 stub %s", name);
+  assert(is_next(declaredEntry, entry_cursor), "Out of order declaration for c2 entry %s", name);
+  // if this is the first c2 blob then record it and the entry
+  if (group_details(group_cursor)._base == BlobId::NO_BLOBID) {
+    group_details(group_cursor)._base = declaredBlob;
+    group_details(group_cursor)._entry_base = declaredEntry;
+  }
+  // update the high water mark for blobs and entries in the stub
+  // group unconditionally
+  group_details(group_cursor)._max = declaredBlob;
+  group_details(group_cursor)._entry_max = declaredEntry;
+  // move forward to this blob
+  blob_cursor = declaredBlob;
+  // link the blob to its group and its unique stub
+  blob_details(blob_cursor)._group = group_cursor;
+  blob_details(blob_cursor)._base = declaredStub;
+  blob_details(blob_cursor)._max = declaredStub;
+  blob_details(blob_cursor)._name = name;
+  // move forward to this stub
+  stub_cursor = declaredStub;
+  // link the stub to its blob and its entries
+  stub_details(stub_cursor)._blob = declaredBlob;
+  stub_details(stub_cursor)._base = declaredEntry;
+  stub_details(stub_cursor)._max = declaredEntry;
+  stub_details(stub_cursor)._is_entry_array = false;
+  stub_details(stub_cursor)._name = name;
+  // move forward to entry
+  entry_cursor = declaredEntry;
+  // fill out the entry table element
+  entry_details(entry_cursor)._stub = declaredStub;
+  entry_details(entry_cursor)._array_base = EntryId::NO_ENTRYID;
+  entry_details(entry_cursor)._name = name;
+}
+
+void StubInfo::process_stubgen_blob(StubGroup& group_cursor,
+                                    BlobId& blob_cursor,
+                                    StubId& stub_cursor,
+                                    EntryId& entry_cursor,
+                                    const char* name,
+                                    BlobId declaredBlob) {
+  // for stubgen blob declarations we update the blob table, allowing
+  // us to link subsequent stubs to that blob
+  assert(group_cursor == StubGroup::STUBGEN, "must be");
+  assert(is_next(declaredBlob, blob_cursor), "Out of order declaration for stubgen blob %s", name);
+  // if this is the first stubgen blob then record it
+  if (group_details(group_cursor)._base == BlobId::NO_BLOBID) {
+    group_details(group_cursor)._base = declaredBlob;
+  }
+  // update the high water mark for blobs in the stub group unconditionally
+  group_details(group_cursor)._max = declaredBlob;
+  // move forward to this blob
+  blob_cursor = declaredBlob;
+  // link the blob to its group
+  blob_details(blob_cursor)._group = group_cursor;
+  // clear the blob table base and max - they are set when we first
+  // encounter a stub. likewise the blob table entry base and entry
+  // max -- they are set when we first encounter an entry
+  blob_details(blob_cursor)._base = StubId::NO_STUBID;
+  blob_details(blob_cursor)._max = StubId::NO_STUBID;
+  blob_details(blob_cursor)._entry_base = EntryId::NO_ENTRYID;
+  blob_details(blob_cursor)._entry_max = EntryId::NO_ENTRYID;
+  blob_details(blob_cursor)._name = name;
+}
+
+void StubInfo::process_stubgen_stub(StubGroup& group_cursor,
+                                    BlobId& blob_cursor,
+                                    StubId& stub_cursor,
+                                    EntryId& entry_cursor,
+                                    const char* name,
+                                    BlobId declaredBlob,
+                                    StubId declaredStub) {
+  // for stubgen stub declarations we update the stub table, allowing
+  // us to link subsequent entries to that stub
+  assert(group_cursor == StubGroup::STUBGEN, "must be");
+  // FIXME use stub name here
+  assert(declaredBlob == blob_cursor, "Stubgen stub %s in scope of incorrect blob %s", name, blob_details(blob_cursor)._name);
+  assert(is_next(declaredStub, stub_cursor), "Out of order declaration for stubgen stub %s", name);
+  // if this is the first stubgen stub then record it
+  if (blob_details(blob_cursor)._base == StubId::NO_STUBID) {
+    blob_details(blob_cursor)._base = declaredStub;
+  }
+  // update the high water mark for stubs in the blob unconditionally
+  blob_details(blob_cursor)._max = declaredStub;
+  // move forward to this stub
+  stub_cursor = declaredStub;
+  // link the stub to its blob
+  stub_details(stub_cursor)._blob = blob_cursor;
+  // clear the stub table base and max - they are set when we
+  // encounter an entry
+  stub_details(stub_cursor)._base = EntryId::NO_ENTRYID;
+  stub_details(stub_cursor)._max = EntryId::NO_ENTRYID;
+  stub_details(stub_cursor)._name = name;
+}
+
+void StubInfo::process_stubgen_entry(StubGroup& group_cursor,
+                                     BlobId& blob_cursor,
+                                     StubId& stub_cursor,
+                                     EntryId& entry_cursor,
+                                     const char* name,
+                                     BlobId declaredBlob,
+                                     StubId declaredStub,
+                                     EntryId declaredEntry,
+                                     int arrayCount) {
+  // for stubgen entry declarations we update the entry table
+  assert(group_cursor == StubGroup::STUBGEN, "must be");
+  assert(declaredBlob == blob_cursor, "Stubgen entry %s in scope of wrong blob %s", name, blob_details(blob_cursor)._name);
+  assert(declaredStub == stub_cursor, "Stubgen entry %s declares stub in scope of wrong stub %s", name, stub_details(stub_cursor)._name);
+  assert(is_next(declaredEntry, entry_cursor), "Out of order declaration for stubgen entry %s", name);
+  assert(arrayCount >= 0, "Invalid array count %d", arrayCount);
+  // if this is the first stubgen entry in the group then record it
+  if (group_details(group_cursor)._entry_base == EntryId::NO_ENTRYID) {
+    group_details(group_cursor)._entry_base = declaredEntry;
+  }
+  // update the high water mark for entries in the group unconditionally
+  group_details(group_cursor)._entry_max = declaredEntry;
+  // if this is the first stubgen entry in the blob then record it
+  if (blob_details(blob_cursor)._entry_base == EntryId::NO_ENTRYID) {
+    blob_details(blob_cursor)._entry_base = declaredEntry;
+  }
+  // update the high water mark for entries in the blob unconditionally
+  blob_details(blob_cursor)._entry_max = declaredEntry;
+  // if this is the first stubgen entry in the stub then record it
+  if (stub_details(stub_cursor)._base == EntryId::NO_ENTRYID) {
+    stub_details(stub_cursor)._base = declaredEntry;
+  }
+  // move forward to this entry
+  if (arrayCount == 0) {
+    // simply link the entry to its stub
+    entry_cursor = declaredEntry;
+    entry_details(entry_cursor)._stub = stub_cursor;
+    entry_details(entry_cursor)._array_base = EntryId::NO_ENTRYID;
+    entry_details(entry_cursor)._name = name;
+  } else {
+    // populate multiple entries and link them all to the first entry
+    for (int i = 0; i < arrayCount; i++) {
+      entry_cursor = next(entry_cursor);
+      entry_details(entry_cursor)._stub = stub_cursor;
+      entry_details(entry_cursor)._array_base = declaredEntry;
+      // TODO: consider allocating names labelled with index
+      entry_details(entry_cursor)._name = name;
+    }
+  }
+  // update the high water mark for entries in the stub unconditionally
+  stub_details(stub_cursor)._max = entry_cursor;
+}
+
+// The stubgroup, blob, stub and entry tables defined above are
+// populated by iterating over all blob, stub and entry declarations
+// and incrementally updating the associated table entries. The
+// following macros invoke static methods of StubInfo that receive
+// and, where appropriate, update cursors identifying current
+// positions in each table.
+
+#define PROCESS_SHARED_BLOB(name, type)                               \
+  process_shared_blob(_group_cursor, _blob_cursor,                    \
+                      _stub_cursor, _entry_cursor,                    \
+                      "Shared Runtime " # name "_blob",               \
+                      BlobId:: JOIN3(shared, name, id),               \
+                      StubId:: JOIN3(shared, name, id),               \
+                      EntryId:: JOIN3(shared, name, id),              \
+                      EntryId:: JOIN3(shared, name, max));            \
+
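+// For example (an illustrative expansion), applying this template to
+// the shared declaration do_blob(deopt, DeoptimizationBlob) produces
+// a call equivalent to:
+//
+//   process_shared_blob(_group_cursor, _blob_cursor,
+//                       _stub_cursor, _entry_cursor,
+//                       "Shared Runtime deopt_blob",
+//                       BlobId::shared_deopt_id,
+//                       StubId::shared_deopt_id,
+//                       EntryId::shared_deopt_id,
+//                       EntryId::shared_deopt_max);
+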
+#define PROCESS_C1_BLOB(name)                                         \
+  process_c1_blob(_group_cursor, _blob_cursor,                        \
+                  _stub_cursor, _entry_cursor,                        \
+                  "C1 Runtime " # name "_blob",                       \
+                  BlobId:: JOIN3(c1, name, id),                       \
+                  StubId:: JOIN3(c1, name, id),                       \
+                  EntryId:: JOIN3(c1, name, id));                     \
+
+#define PROCESS_C2_BLOB(name, type)                                   \
+  process_c2_blob(_group_cursor, _blob_cursor,                        \
+                  _stub_cursor, _entry_cursor,                        \
+                  "C2 Runtime " # name "_blob",                       \
+                  BlobId:: JOIN3(c2, name, id),                       \
+                  StubId:: JOIN3(c2, name, id),                       \
+                  EntryId:: JOIN3(c2, name, id));                     \
+
+#define PROCESS_C2_STUB(name, fancy_jump, pass_tls, return_pc)        \
+  process_c2_blob(_group_cursor, _blob_cursor,                        \
+                  _stub_cursor, _entry_cursor,                        \
+                  "C2 Runtime " # name "_blob",                       \
+                  BlobId:: JOIN3(c2, name, id),                       \
+                  StubId:: JOIN3(c2, name, id),                       \
+                  EntryId:: JOIN3(c2, name, id));                     \
+
+#define PROCESS_C2_JVMTI_STUB(name)                                   \
+  process_c2_blob(_group_cursor, _blob_cursor,                        \
+                  _stub_cursor, _entry_cursor,                        \
+                  "C2 Runtime " # name "_blob",                       \
+                  BlobId:: JOIN3(c2, name, id),                       \
+                  StubId:: JOIN3(c2, name, id),                       \
+                  EntryId:: JOIN3(c2, name, id));                     \
+
+#define PROCESS_STUBGEN_BLOB(blob)                                    \
+  process_stubgen_blob(_group_cursor, _blob_cursor,                   \
+                       _stub_cursor, _entry_cursor,                   \
+                       "Stub Generator " # blob "_blob",              \
+                       BlobId:: JOIN3(stubgen, blob, id));            \
+
+#define PROCESS_STUBGEN_STUB(blob, stub)                              \
+  process_stubgen_stub(_group_cursor, _blob_cursor,                   \
+                       _stub_cursor, _entry_cursor,                   \
+                       "Stub Generator " # stub "_stub",              \
+                       BlobId:: JOIN3(stubgen, blob, id),             \
+                       StubId:: JOIN3(stubgen, stub, id));            \
+
+#define PROCESS_STUBGEN_ENTRY(blob, stub, field_name, getter_name)    \
+  process_stubgen_entry(_group_cursor, _blob_cursor,                  \
+                        _stub_cursor, _entry_cursor,                  \
+                        "Stub Generator " # field_name "_entry",      \
+                        BlobId:: JOIN3(stubgen, blob, id),            \
+                        StubId:: JOIN3(stubgen, stub, id),            \
+                        EntryId:: JOIN3(stubgen, field_name, id),     \
+                        0);                                           \
+
+#define PROCESS_STUBGEN_ENTRY_INIT(blob, stub, field_name, getter_name, \
+                                   init_function)                     \
+  process_stubgen_entry(_group_cursor, _blob_cursor,                  \
+                        _stub_cursor, _entry_cursor,                  \
+                        "Stub Generator " # field_name "_entry",      \
+                        BlobId:: JOIN3(stubgen, blob, id),            \
+                        StubId:: JOIN3(stubgen, stub, id),            \
+                        EntryId:: JOIN3(stubgen, field_name, id),     \
+                        0);                                           \
+
+#define PROCESS_STUBGEN_ENTRY_ARRAY(blob, stub, field_name, getter_name, \
+                                    count)                            \
+  process_stubgen_entry(_group_cursor, _blob_cursor,                  \
+                        _stub_cursor, _entry_cursor,                  \
+                        "Stub Generator " # field_name "_entry",      \
+                        BlobId:: JOIN3(stubgen, blob, id),            \
+                        StubId:: JOIN3(stubgen, stub, id),            \
+                        EntryId:: JOIN3(stubgen, field_name, id),     \
+                        count);                                       \
+
+#define PROCESS_STUBGEN_ENTRY_ARCH(arch_name, blob, stub, field_name, \
+                                   getter_name)                       \
+  process_stubgen_entry(_group_cursor, _blob_cursor,                  \
+                        _stub_cursor, _entry_cursor,                  \
+                        #arch_name "_" # field_name,                  \
+                        BlobId:: JOIN3(stubgen, blob, id),            \
+                        StubId:: JOIN3(stubgen, stub, id),            \
+                        EntryId:: JOIN4(stubgen, arch_name,           \
+                                        field_name, id),              \
+                        0);                                           \
+
+#define PROCESS_STUBGEN_ENTRY_ARCH_INIT(arch_name, blob, stub,        \
+                                        field_name, getter_name,      \
+                                        init_function)                \
+  process_stubgen_entry(_group_cursor, _blob_cursor,                  \
+                        _stub_cursor, _entry_cursor,                  \
+                        "Stub Generator " # arch_name "_" # field_name "_entry", \
+                        BlobId:: JOIN3(stubgen, blob, id),            \
+                        StubId:: JOIN3(stubgen, stub, id),            \
+                        EntryId:: JOIN4(stubgen, arch_name,           \
+                                        field_name, id),              \
+                        0);                                           \
+
+void StubInfo::populate_stub_tables() {
+  StubGroup _group_cursor;
+  BlobId _blob_cursor = BlobId::NO_BLOBID;
+  StubId _stub_cursor = StubId::NO_STUBID;
+  EntryId _entry_cursor = EntryId::NO_ENTRYID;
+
+  _group_cursor = StubGroup::SHARED;
+  group_details(_group_cursor)._name = "Shared Stubs";
+  group_details(_group_cursor)._base = BlobId::NO_BLOBID;
+  group_details(_group_cursor)._max = BlobId::NO_BLOBID;
+  group_details(_group_cursor)._entry_base = EntryId::NO_ENTRYID;
+  group_details(_group_cursor)._entry_max = EntryId::NO_ENTRYID;
+  SHARED_STUBS_DO(PROCESS_SHARED_BLOB);
+
+  _group_cursor = StubGroup::C1;
+  group_details(_group_cursor)._name = "C1 Stubs";
+  group_details(_group_cursor)._base = BlobId::NO_BLOBID;
+  group_details(_group_cursor)._max = BlobId::NO_BLOBID;
+  group_details(_group_cursor)._entry_base = EntryId::NO_ENTRYID;
+  group_details(_group_cursor)._entry_max = EntryId::NO_ENTRYID;
+  C1_STUBS_DO(PROCESS_C1_BLOB);
+
+  _group_cursor = StubGroup::C2;
+  group_details(_group_cursor)._name = "C2 Stubs";
+  group_details(_group_cursor)._base = BlobId::NO_BLOBID;
+  group_details(_group_cursor)._max = BlobId::NO_BLOBID;
+  group_details(_group_cursor)._entry_base = EntryId::NO_ENTRYID;
+  group_details(_group_cursor)._entry_max = EntryId::NO_ENTRYID;
+  C2_STUBS_DO(PROCESS_C2_BLOB, PROCESS_C2_STUB, PROCESS_C2_JVMTI_STUB);
+
+  _group_cursor = StubGroup::STUBGEN;
+  group_details(_group_cursor)._name = "StubGen Stubs";
+  group_details(_group_cursor)._base = BlobId::NO_BLOBID;
+  group_details(_group_cursor)._max = BlobId::NO_BLOBID;
+  group_details(_group_cursor)._entry_base = EntryId::NO_ENTRYID;
+  group_details(_group_cursor)._entry_max = EntryId::NO_ENTRYID;
+  STUBGEN_ALL_DO(PROCESS_STUBGEN_BLOB, DO_BLOB_EMPTY1,
+                 PROCESS_STUBGEN_STUB,
+                 PROCESS_STUBGEN_ENTRY, PROCESS_STUBGEN_ENTRY_INIT,
+                 PROCESS_STUBGEN_ENTRY_ARRAY,
+                 DO_ARCH_BLOB_EMPTY2,
+                 PROCESS_STUBGEN_ENTRY_ARCH, PROCESS_STUBGEN_ENTRY_ARCH_INIT);
+  assert(next(_blob_cursor) == BlobId::NUM_BLOBIDS, "should have exhausted all blob ids!");
+  assert(next(_stub_cursor) == StubId::NUM_STUBIDS, "should have exhausted all stub ids!");
+  assert(next(_entry_cursor) == EntryId::NUM_ENTRYIDS, "should have exhausted all entry ids!");
+#ifdef ASSERT
+  // run further sanity checks
+  verify_stub_tables();
+#endif // ASSERT
+}
+
+#undef PROCESS_SHARED_BLOB
+#undef PROCESS_C1_BLOB
+#undef PROCESS_C2_BLOB
+#undef PROCESS_C2_STUB
+#undef PROCESS_C2_JVMTI_STUB
+#undef PROCESS_STUBGEN_BLOB
+#undef PROCESS_STUBGEN_STUB
+#undef PROCESS_STUBGEN_ENTRY
+#undef PROCESS_STUBGEN_ENTRY_INIT
+#undef PROCESS_STUBGEN_ENTRY_ARRAY
+#undef PROCESS_STUBGEN_ENTRY_ARCH
+#undef PROCESS_STUBGEN_ENTRY_ARCH_INIT
+
+#ifdef ASSERT
+
+void StubInfo::verify_stub_tables() {
+  // exercise the traversal and interconversion APIs
+  const int NUM_STUBGROUPS = static_cast<int>(StubGroup::NUM_STUBGROUPS);
+  StubGroup groups[NUM_STUBGROUPS] = {
+    StubGroup::SHARED,
+    StubGroup::C1,
+    StubGroup::C2,
+    StubGroup::STUBGEN };
+
+  // check that the statically defined blob, stub and entry counts
+  // match the computed totals
+  assert(blob_count(StubGroup::SHARED) == StubInfo::SHARED_STUB_COUNT,
+         "miscounted number of shared blobs %d vs %d",
+         blob_count(StubGroup::SHARED), StubInfo::SHARED_STUB_COUNT);
+
+  assert(stub_count(StubGroup::SHARED) == StubInfo::SHARED_STUB_COUNT,
+         "miscounted number of shared stubs %d vs %d",
+         stub_count(StubGroup::SHARED), StubInfo::SHARED_STUB_COUNT);
+
+  assert(entry_count(StubGroup::SHARED) == StubInfo::SHARED_ENTRY_COUNT,
+         "miscounted number of shared entries %d vs %d",
+         entry_count(StubGroup::SHARED), StubInfo::SHARED_ENTRY_COUNT);
+
+  assert(blob_count(StubGroup::C1) == StubInfo::C1_STUB_COUNT,
+         "miscounted number of c1 blobs %d vs %d",
+         blob_count(StubGroup::C1), StubInfo::C1_STUB_COUNT);
+
+  assert(stub_count(StubGroup::C1) == StubInfo::C1_STUB_COUNT,
+         "miscounted number of c1 stubs %d vs %d",
+         stub_count(StubGroup::C1), StubInfo::C1_STUB_COUNT);
+
+  assert(entry_count(StubGroup::C1) == StubInfo::C1_STUB_COUNT,
+         "miscounted number of c1 entries %d vs %d",
+         entry_count(StubGroup::C1), StubInfo::C1_STUB_COUNT);
+
+  assert(blob_count(StubGroup::C2) == StubInfo::C2_STUB_COUNT,
+         "miscounted number of c2 blobs %d vs %d",
+         blob_count(StubGroup::C2), StubInfo::C2_STUB_COUNT);
+
+  assert(stub_count(StubGroup::C2) == StubInfo::C2_STUB_COUNT,
+         "miscounted number of c2 stubs %d vs %d",
+         stub_count(StubGroup::C2), StubInfo::C2_STUB_COUNT);
+
+  assert(entry_count(StubGroup::C2) == StubInfo::C2_STUB_COUNT,
+         "miscounted number of c2 entries %d vs %d",
+         entry_count(StubGroup::C2), StubInfo::C2_STUB_COUNT);
+
+  assert(blob_count(StubGroup::STUBGEN) == StubInfo::STUBGEN_BLOB_COUNT,
+         "miscounted number of stubgen blobs %d vs %d",
+         blob_count(StubGroup::STUBGEN), StubInfo::STUBGEN_BLOB_COUNT);
+
+  assert(stub_count(StubGroup::STUBGEN) == StubInfo::STUBGEN_STUB_COUNT,
+         "miscounted number of stubgen stubs %d vs %d",
+         stub_count(StubGroup::STUBGEN), StubInfo::STUBGEN_STUB_COUNT);
+
+  assert(entry_count(StubGroup::STUBGEN) == StubInfo::STUBGEN_ENTRY_COUNT,
+         "miscounted number of stubgen entries %d vs %d",
+         entry_count(StubGroup::STUBGEN), StubInfo::STUBGEN_ENTRY_COUNT);
+
+  // 1) check that the per-group blob counts add up
+  for (int gidx = 0; gidx < NUM_STUBGROUPS; gidx++) {
+    StubGroup group = groups[gidx];
+    BlobId blob = blob_base(group);
+    int group_blob_total = blob_count(group);
+    while (blob != BlobId::NO_BLOBID) {
+      // predecrement total
+      group_blob_total--;
+      assert(group_blob_total > 0 || blob == blob_max(group), "must be!");
+      assert(stubgroup(blob) == group, "iterated out of group %s to blob %s", name(group), name(blob));
+      blob = next_in_group(group, blob);
+    }
+    assert(group_blob_total == 0, "must be!");
+  }
+
+  // 2) check that the per-group and per-blob stub counts add up
+  for (int gidx = 0; gidx < NUM_STUBGROUPS; gidx++) {
+    StubGroup group = groups[gidx];
+    BlobId blob = blob_base(group);
+    StubId group_stub = stub_base(group);
+    int group_stub_total = stub_count(group);
+    while (blob != BlobId::NO_BLOBID) {
+      StubId stub = stub_base(blob);
+      int stub_total = stub_count(blob);
+      while (stub != StubId::NO_STUBID) {
+        // iterations via group and blob should proceed in parallel
+        assert(stub == group_stub, "must be!");
+        // predecrement totals
+        group_stub_total--;
+        stub_total--;
+        assert(stub_total > 0 || stub == stub_max(blob), "must be!");
+        assert(group_stub_total > 0 || stub == stub_max(group), "must be!");
+        assert(stubgroup(stub) == group, "iterated out of group %s to stub %s", name(group), name(stub));
+        stub = next_in_blob(blob, stub);
+        group_stub = next(group_stub);
+      }
+      assert(stub_total == 0, "must be!");
+      blob = next_in_group(group, blob);
+    }
+    assert(group_stub_total == 0, "must be!");
+  }
+
+  // 3) check that the per-group, per-blob and per-stub entry counts add up
+  for (int gidx = 0; gidx < NUM_STUBGROUPS; gidx++) {
+    StubGroup group = groups[gidx];
+    BlobId blob = blob_base(group);
+    StubId group_stub = stub_base(group);
+    EntryId group_entry = entry_base(group);
+    int group_entry_total = entry_count(group);
+    while (blob != BlobId::NO_BLOBID) {
+      StubId stub = stub_base(blob);
+      while (stub != StubId::NO_STUBID) {
+        EntryId entry = entry_base(stub);
+        int entry_total = entry_count(stub);
+        while (entry != EntryId::NO_ENTRYID) {
+          // iterations via group and blob should proceed in parallel
+          assert(entry == group_entry, "must be!");
+          // predecrement totals
+          group_entry_total--;
+          entry_total--;
+          assert(entry_total > 0 || entry == entry_max(stub), "must be!");
+          assert(group_entry_total > 0 || entry == entry_max(group), "must be!");
+          assert(stubgroup(entry) == group, "iterated out of group %s to entry %s", name(group), name(entry));
+          entry = next_in_stub(stub, entry);
+          group_entry = next(group_entry);
+        }
+        assert(entry_total == 0, "must be!");
+        stub = next_in_blob(blob, stub);
+        group_stub = next(group_stub);
+      }
+      blob = next_in_group(group, blob);
+    }
+    assert(group_entry_total == 0, "must be!");
+  }
+}
+
+#endif // ASSERT
+
+// info support
+
+void StubInfo::dump_group_table(LogStream& ls) {
+  ls.print_cr("STUB GROUP TABLE");
+  for (int i = 0; i < GROUP_TABLE_SIZE; i++) {
+    GroupDetails& g = _group_table[i];
+    ls.print_cr("%1d: %-8s", i, g._name);
+    if (g._base == g._max) {
+      ls.print_cr("  blobs: %s(%d)",
+                  blob_details(g._base)._name,
+                  static_cast<int>(g._base));
+    } else {
+      ls.print_cr("  blobs: %s(%d) ... %s(%d)",
+                  blob_details(g._base)._name,
+                  static_cast<int>(g._base),
+                  blob_details(g._max)._name,
+                  static_cast<int>(g._max));
+    }
+  }
+}
+
+void StubInfo::dump_blob_table(LogStream& ls) {
+  ls.print_cr("BLOB TABLE");
+  for (int i = 0; i < BLOB_TABLE_SIZE; i++) {
+    BlobDetails& b = _blob_table[i];
+    ls.print_cr("%-3d: %s", i, b._name);
+    if (b._base == b._max) {
+      ls.print_cr("  stubs: %s(%d)",
+                  stub_details(b._base)._name,
+                  static_cast<int>(b._base));
+    } else {
+      ls.print_cr("  stubs: %s(%d) ... %s(%d)",
+                  stub_details(b._base)._name,
+                  static_cast<int>(b._base),
+                  stub_details(b._max)._name,
+                  static_cast<int>(b._max));
+    }
+  }
+}
%s(%d)", + stub_details(b._base)._name, + static_cast(b._base), + stub_details(b._max)._name, + static_cast(b._max)); + } + } +} + +void StubInfo::dump_stub_table(LogStream& ls) { + ls.print_cr("STUB TABLE"); + for (int i = 0; i < STUB_TABLE_SIZE; i++) { + StubDetails& s = _stub_table[i]; + ls.print_cr("%-3d: %s %s", i, s._name, + (s._is_entry_array ? "array" : "")); + ls.print_cr(" blob: %d", static_cast(s._blob)); + if (s._base == s._max) { + // some stubs don't have an entry + if (s._base == EntryId::NO_ENTRYID) { + ls.print_cr(" entries: %s(%d)", + "no_entry", + static_cast(s._base)); + } else { + ls.print_cr(" entries: %s(%d)", + entry_details(s._base)._name, + static_cast(s._base)); + } + } else { + ls.print_cr(" entries: %s(%d) ... %s(%d)", + entry_details(s._base)._name, + static_cast(s._base), + entry_details(s._max)._name, + static_cast(s._max)); + } + } +} + +void StubInfo::dump_entry_table(LogStream& ls) { + ls.print_cr("ENTRY TABLE"); + for (int i = 0; i < ENTRY_TABLE_SIZE; i++) { + EntryDetails& e = _entry_table[i]; + ls.print_cr("%-3d: %s", i, e._name); + if (e._array_base != EntryId::NO_ENTRYID) { + ls.print_cr(" array base: %d", static_cast(e._array_base)); + } + ls.print_cr(" stub: %d", static_cast(e._stub)); + } +} + +void StubInfo::dump_tables(LogStream& ls) { + dump_group_table(ls); + ls.print_cr(""); + dump_blob_table(ls); + ls.print_cr(""); + dump_stub_table(ls); + ls.print_cr(""); + dump_entry_table(ls); +} + +// Global Group/Blob/Stub/Entry Id Hierarchy Traversal: + +// traverse up + +StubGroup StubInfo::stubgroup(EntryId id) { + // delegate + return stubgroup(stub(id)); +} + +StubGroup StubInfo::stubgroup(BlobId id) { + return blob_details(id)._group; +} + +StubGroup StubInfo::stubgroup(StubId id) { + // delegate + return stubgroup(blob(id)); +} + +StubId StubInfo::stub(EntryId id) { + return entry_details(id)._stub; +} + +BlobId StubInfo::blob(EntryId id) { + // delegate + return blob(stub(id)); +} + +BlobId StubInfo::blob(StubId id) { + return stub_details(id)._blob; +} + +// traverse down + +BlobId StubInfo::blob_base(StubGroup stub_group) { + return group_details(stub_group)._base; +} + +BlobId StubInfo::blob_max(StubGroup stub_group) { + return group_details(stub_group)._max; +} + +int StubInfo::blob_count(StubGroup stub_group) { + return span(blob_max(stub_group), blob_base(stub_group)); +} + +StubId StubInfo::stub_base(StubGroup stub_group) { + // delegate + return stub_base(blob_base(stub_group)); +} + +StubId StubInfo::stub_max(StubGroup stub_group) { + // delegate + return stub_max(blob_max(stub_group)); +} + +int StubInfo::stub_count(StubGroup stub_group) { + return span(stub_max(stub_group), stub_base(stub_group)); +} + +EntryId StubInfo::entry_base(StubGroup stub_group) { + return group_details(stub_group)._entry_base; +} + +EntryId StubInfo::entry_max(StubGroup stub_group) { + return group_details(stub_group)._entry_max; +} + +int StubInfo::entry_count(StubGroup stub_group) { + return span(entry_max(stub_group), entry_base(stub_group)); +} + +StubId StubInfo::stub_base(BlobId id) { + return blob_details(id)._base; +} + +StubId StubInfo::stub_max(BlobId id) { + return blob_details(id)._max; +} + +int StubInfo::stub_count(BlobId id) { + return span(stub_max(id), stub_base(id)); +} + +EntryId StubInfo::entry_base(StubId id) { + return stub_details(id)._base; +} + +EntryId StubInfo::entry_max(StubId id) { + return stub_details(id)._max; +} + +int StubInfo::entry_count(StubId id) { + return span(entry_max(id), entry_base(id)); +} + +EntryId 
+
+// Global Group/Blob/Stub/Entry Id Hierarchy Traversal:
+
+// traverse up
+
+StubGroup StubInfo::stubgroup(EntryId id) {
+  // delegate
+  return stubgroup(stub(id));
+}
+
+StubGroup StubInfo::stubgroup(BlobId id) {
+  return blob_details(id)._group;
+}
+
+StubGroup StubInfo::stubgroup(StubId id) {
+  // delegate
+  return stubgroup(blob(id));
+}
+
+StubId StubInfo::stub(EntryId id) {
+  return entry_details(id)._stub;
+}
+
+BlobId StubInfo::blob(EntryId id) {
+  // delegate
+  return blob(stub(id));
+}
+
+BlobId StubInfo::blob(StubId id) {
+  return stub_details(id)._blob;
+}
+
+// traverse down
+
+BlobId StubInfo::blob_base(StubGroup stub_group) {
+  return group_details(stub_group)._base;
+}
+
+BlobId StubInfo::blob_max(StubGroup stub_group) {
+  return group_details(stub_group)._max;
+}
+
+int StubInfo::blob_count(StubGroup stub_group) {
+  return span(blob_max(stub_group), blob_base(stub_group));
+}
+
+StubId StubInfo::stub_base(StubGroup stub_group) {
+  // delegate
+  return stub_base(blob_base(stub_group));
+}
+
+StubId StubInfo::stub_max(StubGroup stub_group) {
+  // delegate
+  return stub_max(blob_max(stub_group));
+}
+
+int StubInfo::stub_count(StubGroup stub_group) {
+  return span(stub_max(stub_group), stub_base(stub_group));
+}
+
+EntryId StubInfo::entry_base(StubGroup stub_group) {
+  return group_details(stub_group)._entry_base;
+}
+
+EntryId StubInfo::entry_max(StubGroup stub_group) {
+  return group_details(stub_group)._entry_max;
+}
+
+int StubInfo::entry_count(StubGroup stub_group) {
+  return span(entry_max(stub_group), entry_base(stub_group));
+}
+
+StubId StubInfo::stub_base(BlobId id) {
+  return blob_details(id)._base;
+}
+
+StubId StubInfo::stub_max(BlobId id) {
+  return blob_details(id)._max;
+}
+
+int StubInfo::stub_count(BlobId id) {
+  return span(stub_max(id), stub_base(id));
+}
+
+EntryId StubInfo::entry_base(StubId id) {
+  return stub_details(id)._base;
+}
+
+EntryId StubInfo::entry_max(StubId id) {
+  return stub_details(id)._max;
+}
+
+int StubInfo::entry_count(StubId id) {
+  return span(entry_max(id), entry_base(id));
+}
+
+EntryId StubInfo::entry_base(BlobId id) {
+  return blob_details(id)._entry_base;
+}
+
+EntryId StubInfo::entry_max(BlobId id) {
+  return blob_details(id)._entry_max;
+}
+
+int StubInfo::entry_count(BlobId id) {
+  return span(entry_max(id), entry_base(id));
+}
+
+// Global <-> Local Id Management:
+
+// private helpers
+
+bool StubInfo::has_group(BlobId id, StubGroup group) {
+  return stubgroup(id) == group;
+}
+
+bool StubInfo::has_group(StubId id, StubGroup group) {
+  return stubgroup(id) == group;
+}
+
+bool StubInfo::has_group(EntryId id, StubGroup group) {
+  return stubgroup(id) == group;
+}
+
+// Convert a blob, entry or stub id to a unique, zero-based offset in
+// the range of blob/stub/entry ids for a given stub group.
+
+int StubInfo::local_offset(StubGroup group, BlobId id) {
+  assert(has_group(id, group), "id %s is not a %s blob!", name(id), name(group));
+  BlobId base = blob_base(group);
+  int s = span(id, base);
+  assert(s >= 1, "must be");
+  return s - 1;
+}
+
+int StubInfo::local_offset(StubGroup group, StubId id) {
+  assert(has_group(id, group), "id %s is not a %s stub!", name(id), name(group));
+  StubId base = stub_base(group);
+  int s = span(id, base);
+  assert(s >= 1, "must be");
+  return s - 1;
+}
+
+int StubInfo::local_offset(StubGroup group, EntryId id) {
+  assert(has_group(id, group), "id %s is not a %s entry!", name(id), name(group));
+  EntryId base = entry_base(group);
+  int s = span(id, base);
+  assert(s >= 1, "must be");
+  return s - 1;
+}
+
+// public API
+
+// check that a stub belongs to an expected stub group
+
+bool StubInfo::is_shared(StubId id) {
+  return has_group(id, StubGroup::SHARED);
+}
+
+bool StubInfo::is_c1(StubId id) {
+  return has_group(id, StubGroup::C1);
+}
+
+bool StubInfo::is_c2(StubId id) {
+  return has_group(id, StubGroup::C2);
+}
+
+bool StubInfo::is_stubgen(StubId id) {
+  return has_group(id, StubGroup::STUBGEN);
+}
+
+// check that a blob belongs to an expected stub group
+
+bool StubInfo::is_shared(BlobId id) {
+  return has_group(id, StubGroup::SHARED);
+}
+
+bool StubInfo::is_c1(BlobId id) {
+  return has_group(id, StubGroup::C1);
+}
+
+bool StubInfo::is_c2(BlobId id) {
+  return has_group(id, StubGroup::C2);
+}
+
+bool StubInfo::is_stubgen(BlobId id) {
+  return has_group(id, StubGroup::STUBGEN);
+}
+
+// Convert a stub id to a unique, zero-based offset in the range of
+// stub ids for a given stub group.
+
+int StubInfo::shared_offset(StubId id) {
+  return local_offset(StubGroup::SHARED, id);
+}
+
+int StubInfo::c1_offset(StubId id) {
+  return local_offset(StubGroup::C1, id);
+}
+
+int StubInfo::c2_offset(StubId id) {
+  return local_offset(StubGroup::C2, id);
+}
+
+int StubInfo::stubgen_offset(StubId id) {
+  return local_offset(StubGroup::STUBGEN, id);
+}
+
+// initialization function called to populate blob, stub and entry
+// tables. This must be called before any stubs are generated
+void initialize_stub_info() {
+  ResourceMark rm;
+  StubInfo::populate_stub_tables();
+
+  LogTarget(Debug, stubs) lt;
+  if (lt.is_enabled()) {
+    LogStream ls(lt);
+    StubInfo::dump_tables(ls);
+  }
+}
diff --git a/src/hotspot/share/runtime/stubInfo.hpp b/src/hotspot/share/runtime/stubInfo.hpp
new file mode 100644
index 00000000000..eaea07f7e6c
--- /dev/null
+++ b/src/hotspot/share/runtime/stubInfo.hpp
@@ -0,0 +1,684 @@
+/*
+ * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_RUNTIME_STUBINFO_HPP
+#define SHARE_RUNTIME_STUBINFO_HPP
+
+#include "logging/logStream.hpp"
+#include "runtime/stubDeclarations.hpp"
+
+// class StubInfo records details of the global stubgroup, blob, stub
+// and entry hierarchy and provides APIs that
+//
+// 1) allow relationships between blobs, stubs and their entries to be
+// identified.
+//
+// 2) support conversion from a global blob/stub/entry id to a
+// corresponding, unique, group-local blob/stub/entry offset from the
+// first blob/stub/entry in the same stubgroup
+
+// We have four distinct stub groups, each of which includes multiple
+// blobs, stubs and entries.
+
+enum class StubGroup : int {
+  SHARED,
+  C1,
+  C2,
+  STUBGEN,
+  NUM_STUBGROUPS
+};
+
+// Generated code elements are used to implement the following enums:
+//
+// Global and Stub Group Local Blob/Stub/Entry Enumerations:
+//
+// The following enums uniquely list every generated blob, stub and
+// entry across all four stub groups.
+//
+// enum StubId;  // unique id for every stub in the above groups
+// enum BlobId;  // unique id for every blob in the above groups
+// enum EntryId; // unique id for every entry in the above groups
+//
+//
+// Management APIs for these enums are defined in class StubInfo. The
+// API methods rely on a small amount of code and data generated from
+// the blob, stub and entry declarations.
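+//
+// As a usage sketch (the side table and function here are
+// hypothetical, for illustration only), the group-local offsets let
+// client code index its own per-group tables:
+//
+//   static const char* _shared_notes[StubInfo::SHARED_STUB_COUNT];
+//
+//   const char* note_for(StubId id) {
+//     assert(StubInfo::is_shared(id), "must be a shared stub");
+//     return _shared_notes[StubInfo::shared_offset(id)];
+//   }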
+//
+// Global Group/Blob/Stub/Entry Id Hierarchy Traversal:
+//
+// traverse up
+//
+// StubGroup StubInfo::stubgroup(EntryId);
+// StubGroup StubInfo::stubgroup(BlobId);
+// StubGroup StubInfo::stubgroup(StubId);
+//
+// StubId StubInfo::stub(EntryId);
+// BlobId StubInfo::blob(EntryId);
+// BlobId StubInfo::blob(StubId);
+//
+// traverse down
+//
+// BlobId StubInfo::blob_base(StubGroup)
+// BlobId StubInfo::blob_max(StubGroup)
+// int StubInfo::blob_count(StubGroup)
+//
+// StubId StubInfo::stub_base(StubGroup)
+// StubId StubInfo::stub_max(StubGroup)
+// int StubInfo::stub_count(StubGroup)
+//
+// EntryId StubInfo::entry_base(StubGroup)
+// EntryId StubInfo::entry_max(StubGroup)
+// int StubInfo::entry_count(StubGroup)
+//
+// StubId StubInfo::stub_base(BlobId);
+// StubId StubInfo::stub_max(BlobId);
+// int StubInfo::stub_count(BlobId);
+//
+// EntryId StubInfo::entry_base(StubId);
+// EntryId StubInfo::entry_max(StubId);
+// int StubInfo::entry_count(StubId);
+//
+// EntryId StubInfo::entry_base(BlobId);
+// EntryId StubInfo::entry_max(BlobId);
+// int StubInfo::entry_count(BlobId);
+//
+//
+// Global <-> Local Id Management:
+//
+// check that a stub belongs to an expected stub group
+//
+// bool StubInfo::is_shared(StubId id);
+// bool StubInfo::is_c1(StubId id);
+// bool StubInfo::is_c2(StubId id);
+// bool StubInfo::is_stubgen(StubId id);
+//
+// Convert a stub id to a unique, zero-based offset in the range of
+// stub ids for a given stub group.
+//
+// int StubInfo::shared_offset(StubId id);
+// int StubInfo::c1_offset(StubId id);
+// int StubInfo::c2_offset(StubId id);
+// int StubInfo::stubgen_offset(StubId id);
+//
+// Convert a blob id to a unique, zero-based offset in the range of
+// blob ids for a given stub group. we only need this for stubgen
+// blobs as for all other stub groups the stub indices and blob
+// indices are identical.
+//
+// int StubInfo::stubgen_offset(BlobId id);
+//
+// Convert an entry id to a unique, zero-based offset in the range
+// of entry ids for a given stub group. we only need this for shared
+// and stubgen blobs as for all other stub groups the stub indices
+// and entry indices are identical.
+//
+// int StubInfo::shared_offset(EntryId id);
+// int StubInfo::stubgen_offset(EntryId id);
+//
+// n.b. invalid interconversions from a global id to the wrong type of
+// group id are caught by asserts
+
+
+// Generate global blob, stub and entry enums from blob, stub and
+// entry declarations
+
+// Global enumeration for all blobs
+//
+// n.b. the stubgroup is included in tag because the same name may be
+// reused across groups (e.g. c1 and c2 both use new_instance)
+//
+// enum BlobId {
+//   shared_deopt_id,
+//   . . .
+//   c1_unwind_exception_id,
+//   . . .
+//   c2_uncommon_trap_id,
+//   . . .
+//   stubgen_initial_id,
+//   . . .
+//   NUM_BLOBIDS,
+// };
+
+
+#define SHARED_DECLARE_TAG(name, type) JOIN3(shared, name, id) ,
+#define C1_DECLARE_TAG(name) JOIN3(c1, name, id) ,
+#define C2_DECLARE_TAG1(name) JOIN3(c2, name, id) ,
+#define C2_DECLARE_TAG2(name, _1) JOIN3(c2, name, id) ,
+#define C2_DECLARE_TAG4(name, _1, _2, _3) JOIN3(c2, name, id) ,
+#define STUBGEN_DECLARE_TAG(name) JOIN3(stubgen, name, id) ,
+
+enum class BlobId : int {
+  NO_BLOBID = -1,
+  // declare an enum tag for each shared runtime blob
+  SHARED_STUBS_DO(SHARED_DECLARE_TAG)
+  // declare an enum tag for each c1 runtime blob
+  C1_STUBS_DO(C1_DECLARE_TAG)
+  // declare an enum tag for each opto runtime blob or stub
+  C2_STUBS_DO(C2_DECLARE_TAG2,
+              C2_DECLARE_TAG4,
+              C2_DECLARE_TAG1)
+  // declare an enum tag for each stubgen blob
+  STUBGEN_BLOBS_DO(STUBGEN_DECLARE_TAG)
+  NUM_BLOBIDS
+};
+
+#undef SHARED_DECLARE_TAG
+#undef C1_DECLARE_TAG
+#undef C2_DECLARE_TAG1
+#undef C2_DECLARE_TAG2
+#undef C2_DECLARE_TAG4
+#undef STUBGEN_DECLARE_TAG
+
+// Global enumeration for all stubs
+//
+// n.b. the stubgroup is included in the tag because the same name may
+// be reused across groups (e.g. c1 and c2 both use new_instance). For
+// stubgen stubs the blob name is omitted from the tag because stub
+// names may not be reused across different stubgen blobs.
+//
+// enum StubId {
+//   shared_deopt_id,
+//   . . .
+//   c1_unwind_exception_id,
+//   . . .
+//   c2_uncommon_trap_id,
+//   . . .
+//   stubgen_call_stub_id,
+//   stubgen_forward_exception_id,
+//   . . .
+//   NUM_STUBIDS,
+// };
+//
+
+#define SHARED_DECLARE_TAG(name, type) JOIN3(shared, name, id) ,
+#define C1_DECLARE_TAG(name) JOIN3(c1, name, id) ,
+#define C2_DECLARE_TAG1(name) JOIN3(c2, name, id) ,
+#define C2_DECLARE_TAG2(name, _1) JOIN3(c2, name, id) ,
+#define C2_DECLARE_TAG4(name, _1, _2, _3) JOIN3(c2, name, id) ,
+#define STUBGEN_DECLARE_TAG(blob, name) JOIN3(stubgen, name, id) ,
+
+enum class StubId : int {
+  NO_STUBID = -1,
+  // declare an enum tag for each shared runtime stub
+  SHARED_STUBS_DO(SHARED_DECLARE_TAG)
+  // declare an enum tag for each c1 runtime stub
+  C1_STUBS_DO(C1_DECLARE_TAG)
+  // declare an enum tag for each opto runtime blob or stub
+  C2_STUBS_DO(C2_DECLARE_TAG2,
+              C2_DECLARE_TAG4,
+              C2_DECLARE_TAG1)
+  // declare an enum tag for each stubgen runtime stub
+  STUBGEN_STUBS_DO(STUBGEN_DECLARE_TAG)
+  NUM_STUBIDS
+};
+
+#undef SHARED_DECLARE_TAG
+#undef C1_DECLARE_TAG
+#undef C2_DECLARE_TAG1
+#undef C2_DECLARE_TAG2
+#undef C2_DECLARE_TAG4
+#undef STUBGEN_DECLARE_TAG
+
+
+//
+// Global enumeration for all entries
+//
+// n.b. the stubgroup is included in the tag because the same name may
+// be reused across groups (e.g. c1 and c2 both use new_instance)
+//
+// enum EntryId : int {
+//   NO_ENTRYID = -1,
+//   shared_deopt_id,
+//   shared_deopt_max =
+//     shared_deopt_id + DeoptimizationBlob::NUM_ENTRIES - 1,
+//   . . .
+//   c1_unwind_exception_id,
+//   . . .
+//   c2_uncommon_trap_id,
+//   . . .
+//   stubgen_call_stub_id,
+//   stubgen_call_stub_return_address_id,
+//   stubgen_forward_exception_id,
+//   . . .
+//   stubgen_aarch64_large_array_equals_id,
+//   . . .
+//   stubgen_lookup_secondary_supers_table_stubs_id,
+//   stubgen_lookup_secondary_supers_table_stubs_max =
+//     stubgen_lookup_secondary_supers_table_stubs_id +
+//     Klass::SECONDARY_SUPERS_TABLE_SIZE - 1,
+//   . . .
+//   NUM_ENTRYIDS,
+// };
+//
+// - global id tags include a stub group prefix because some of the
+// stub names are used in more than one group (e.g. new_instance,
+// forward_exception).
+// Arch-specific stubgen stubs also include the arch name in the tag.
+//
+// - for shared stub entries we only need to allocate a single enum
+// tag for most blobs since they have only one entry. However, we need
+// to bump up the index by an extra 3 (or 5 with JVMCI included) when
+// we are generating the deoptimization blob because it has 4
+// (respectively, 6) entries. So, in that case we allocate a single
+// enum tag identifying the index of the first entry and a max tag
+// identifying the index of the last entry.
+//
+// - for stubgen stubs which employ an array of entries we allocate a
+// single enum tag identifying the index of the first entry and a max
+// tag identifying the index of the last entry, e.g. for
+// lookup_secondary_supers_table we generate
+//
+//   . . .
+//   stubgen_lookup_secondary_supers_table_stubs_id,
+//   stubgen_lookup_secondary_supers_table_stubs_max =
+//     stubgen_lookup_secondary_supers_table_stubs_id +
+//     Klass::SECONDARY_SUPERS_TABLE_SIZE - 1,
+//   . . .
+//
+
+// macro to declare tags for shared entries with a base id for the
+// first (and usually only) entry and a max id that identifies the
+// last (usually same as first) entry in the blob, ensuring the entry
+// for the next stub has the correct index.
+
+#define SHARED_DECLARE_TAG(name, type)                                 \
+  JOIN3(shared, name, id),                                             \
+  JOIN3(shared, name, max) = JOIN3(shared, name, id) +                 \
+    type ::ENTRY_COUNT - 1,                                            \
+
+// macros to declare a tag for a C1 generated blob or a C2 generated
+// blob, stub or JVMTI stub, all of which have a single unique entry
+
+#define C1_DECLARE_TAG(name)                                           \
+  JOIN3(c1, name, id),                                                 \
+
+#define C2_DECLARE_BLOB_TAG(name, type)                                \
+  JOIN3(c2, name, id),                                                 \
+
+#define C2_DECLARE_STUB_TAG(name, fancy_jump, pass_tls, return_pc)     \
+  JOIN3(c2, name, id),                                                 \
+
+#define C2_DECLARE_JVMTI_STUB_TAG(name)                                \
+  JOIN3(c2, name, id),                                                 \
+
+// macros to declare a tag for a StubGen normal entry or initialized
+// entry
+
+#define STUBGEN_DECLARE_TAG(blob_name, stub_name,                      \
+                            field_name, getter_name)                   \
+  JOIN3(stubgen, field_name, id),                                      \
+
+#define STUBGEN_DECLARE_INIT_TAG(blob_name, stub_name,                 \
+                                 field_name, getter_name,              \
+                                 init_function)                        \
+  JOIN3(stubgen, field_name, id),                                      \
+
+// macro to declare a tag for a StubGen entry array. This macro
+// declares a base id for the first entry then a max id that
+// identifies the last entry in the array, ensuring the entry for the
+// next stub has the correct index.
+
+#define STUBGEN_DECLARE_ARRAY_TAG(blob_name, stub_name,                \
+                                  field_name, getter_name,             \
+                                  count)                               \
+  JOIN3(stubgen, field_name, id),                                      \
+  JOIN3(stubgen, field_name, max) = JOIN3(stubgen, field_name, id) +   \
+    count - 1,                                                         \
+
+// macros to declare a tag for StubGen arch entries
+
+#define STUBGEN_DECLARE_ARCH_TAG(arch_name, blob_name, stub_name,      \
+                                 field_name, getter_name)              \
+  JOIN4(stubgen, arch_name, field_name, id),                           \
+
+#define STUBGEN_DECLARE_ARCH_INIT_TAG(arch_name, blob_name, stub_name, \
+                                      field_name, getter_name,         \
+                                      init_function)                   \
+  JOIN4(stubgen, arch_name, field_name, id),                           \
+
+// the above macros are enough to declare the enum
+
+enum class EntryId : int {
+  NO_ENTRYID = -1,
+  // declare an enum tag for each shared runtime blob
+  SHARED_STUBS_DO(SHARED_DECLARE_TAG)
+  // declare an enum tag for each c1 runtime blob
+  C1_STUBS_DO(C1_DECLARE_TAG)
+  // declare an enum tag for each opto runtime blob or stub
+  C2_STUBS_DO(C2_DECLARE_BLOB_TAG,
+              C2_DECLARE_STUB_TAG,
+              C2_DECLARE_JVMTI_STUB_TAG)
+  // declare an enum tag for each stubgen entry or, in the case of an
+  // array of entries, for the first and last entries
+  STUBGEN_ALL_ENTRIES_DO(STUBGEN_DECLARE_TAG,
+                         STUBGEN_DECLARE_INIT_TAG,
+                         STUBGEN_DECLARE_ARRAY_TAG,
+                         STUBGEN_DECLARE_ARCH_TAG,
+                         STUBGEN_DECLARE_ARCH_INIT_TAG)
+  NUM_ENTRYIDS
+};
+
+#undef SHARED_DECLARE_TAG
+#undef C1_DECLARE_TAG
+#undef C2_DECLARE_BLOB_TAG
+#undef C2_DECLARE_STUB_TAG
+#undef C2_DECLARE_JVMTI_STUB_TAG
+#undef STUBGEN_DECLARE_TAG
+#undef STUBGEN_DECLARE_INIT_TAG
+#undef STUBGEN_DECLARE_ARRAY_TAG
+#undef STUBGEN_DECLARE_ARCH_TAG
+#undef STUBGEN_DECLARE_ARCH_INIT_TAG
+
+// we need static init expressions for blob, stub and entry counts in
+// each stubgroup
+
+#define SHARED_STUB_COUNT_INITIALIZER           \
+  0 SHARED_STUBS_DO(COUNT2)
+
+#define SHARED_ENTRY_COUNT_INITIALIZER          \
+  0 SHARED_STUBS_DO(SHARED_COUNT2)
+
+#define C1_STUB_COUNT_INITIALIZER               \
+  0 C1_STUBS_DO(COUNT1)
+
+#define C2_STUB_COUNT_INITIALIZER               \
+  0 C2_STUBS_DO(COUNT2, COUNT4, COUNT1)
+
+#define STUBGEN_BLOB_COUNT_INITIALIZER          \
+  0 STUBGEN_BLOBS_DO(COUNT1)
+
+#define STUBGEN_STUB_COUNT_INITIALIZER          \
+  0 STUBGEN_STUBS_DO(COUNT2)
+
+#define STUBGEN_ENTRY_COUNT_INITIALIZER         \
+  0 STUBGEN_ALL_ENTRIES_DO(COUNT4, COUNT5,      \
+                           STUBGEN_COUNT5,      \
+                           COUNT5, COUNT6)
+
+// Declare management class StubInfo
+
+class StubInfo: AllStatic {
+private:
+  // element types for tables recording stubgroup, blob, stub and
+  // entry properties and relationships
+
+  // map each stubgroup to its initial and final blobs
+  struct GroupDetails {
+    BlobId _base;         // first blob id belonging to stub group
+    BlobId _max;          // last blob id belonging to stub group
+    // some stubs have no entries so we have to explicitly track the
+    // first and last entry associated with the group rather than
+    // deriving it from the first and last blob/stub pair
+    EntryId _entry_base;  // first entry id belonging to group
+    EntryId _entry_max;   // last entry id belonging to group
+    const char* _name;    // name of stubgroup
+  };
+
+  // a blob table element enables the stub group of a given blob to be
+  // identified and all stubs within the blob to be identified
+  //
+  // invariant: the number of stubs in a blob must be 1 unless the
+  // blob belongs to the StubGen stub group
+
+  struct BlobDetails {
+    StubGroup _group;     // stub group to which blob belongs
+    StubId _base;         // first stub id belonging to blob
+    StubId _max;          // last stub id belonging to blob
+    // some stubs have no entries so we have to explicitly track the
+    // first and last entry associated with the blob rather than
+    // deriving it from the first and last stub
+    EntryId _entry_base;  // first entry id belonging to blob
+    EntryId _entry_max;   // last entry id belonging to blob
+    const char* _name;    // name of blob
+  };
+
+  // a stub table element enables the blob of a given stub to be
+  // identified and all entries within the stub to be identified
+  //
+  // invariant: the number of entries in a stub must be 1 unless the
+  // stub belongs to the StubGen group or the Shared stub group
+
+  struct StubDetails {
+    BlobId _blob;          // blob to which stub belongs
+    EntryId _base;         // first entry id belonging to stub
+    EntryId _max;          // last entry id belonging to stub
+    bool _is_entry_array;  // true iff stub has array of entries
+    const char* _name;     // name of stub
+  };
+
+  // an entry table element enables the stub of a given entry to be
+  // identified and, for an entry belonging to an entry array, the
+  // base entry of that array to be identified
+
+  struct EntryDetails {
+    StubId _stub;          // stub to which the entry belongs
+    EntryId _array_base;   // base entry id for entry array stubs
+    const char* _name;     // name of entry
+  };
+
+  // tables are sized and indexed using the global ids
+  static const int GROUP_TABLE_SIZE = static_cast<int>(StubGroup::NUM_STUBGROUPS);
+  static const int BLOB_TABLE_SIZE = static_cast<int>(BlobId::NUM_BLOBIDS);
+  static const int STUB_TABLE_SIZE = static_cast<int>(StubId::NUM_STUBIDS);
+  static const int ENTRY_TABLE_SIZE = static_cast<int>(EntryId::NUM_ENTRYIDS);
+
+  static struct GroupDetails _group_table[GROUP_TABLE_SIZE];
+  static struct BlobDetails _blob_table[BLOB_TABLE_SIZE];
+  static struct StubDetails _stub_table[STUB_TABLE_SIZE];
+  static struct EntryDetails _entry_table[ENTRY_TABLE_SIZE];
+
+  // helpers to access table elements using enums as indices
+  static struct GroupDetails& group_details(StubGroup g);
+  static struct BlobDetails& blob_details(BlobId b);
+  static struct StubDetails& stub_details(StubId s);
+  static struct EntryDetails& entry_details(EntryId e);
+
+  // helpers for counting entries/stubs in a given stub/blob
+
+  static int span(EntryId second, EntryId first);
+  static int span(StubId second, StubId first);
+  static int span(BlobId second, BlobId first);
+
+  // helpers for testing whether a blob, stub or entry lies in a
+  // specific stubgroup
+  static bool has_group(BlobId id, StubGroup group);
+  static bool has_group(StubId id, StubGroup group);
+  static bool has_group(EntryId id, StubGroup group);
+
+  // helpers for computing blob, stub or entry offsets within
+  // a specific stub group
+
+  static int local_offset(StubGroup group, BlobId id);
+  static int local_offset(StubGroup group, StubId id);
+  static int local_offset(StubGroup group, EntryId id);
+
+  // implementation of methods used to populate the stubgroup, blob,
+  // stub and entry tables
+  static void process_shared_blob(StubGroup& group_cursor,
+                                  BlobId& blob_cursor,
+                                  StubId& stub_cursor,
+                                  EntryId& entry_cursor,
+                                  const char* name,
+                                  BlobId declaredBlob,
+                                  StubId declaredStub,
+                                  EntryId declaredEntry,
+                                  EntryId declaredMax);
+  static void process_c1_blob(StubGroup& group_cursor,
+                              BlobId& blob_cursor,
+                              StubId& stub_cursor,
+                              EntryId& entry_cursor,
+                              const char* name,
+                              BlobId declaredBlob,
+                              StubId declaredStub,
+                              EntryId declaredEntry);
+  static void process_c2_blob(StubGroup& group_cursor,
+                              BlobId& blob_cursor,
+                              StubId& stub_cursor,
+                              EntryId& entry_cursor,
+                              const char* name,
+                              BlobId declaredBlob,
+                              StubId declaredStub,
+                              EntryId declaredEntry);
+  static void process_stubgen_blob(StubGroup& group_cursor,
+                                   BlobId& blob_cursor,
+                                   StubId& stub_cursor,
+                                   EntryId& entry_cursor,
+                                   const char* name,
+                                   BlobId declaredBlob);
+  static void process_stubgen_stub(StubGroup& group_cursor,
+                                   BlobId& blob_cursor,
+                                   StubId& stub_cursor,
+                                   EntryId& entry_cursor,
+                                   const char* name,
+                                   BlobId declaredBlob,
+                                   StubId declaredStub);
+  static void process_stubgen_entry(StubGroup& group_cursor,
+                                    BlobId& blob_cursor,
+                                    StubId& stub_cursor,
+                                    EntryId& entry_cursor,
+                                    const char* name,
+                                    BlobId declaredBlob,
+                                    StubId declaredStub,
+                                    EntryId declaredEntry,
+                                    int arrayCount);
+
+  static void dump_group_table(LogStream& ls);
+  static void dump_blob_table(LogStream& ls);
+  static void dump_stub_table(LogStream& ls);
+  static void dump_entry_table(LogStream& ls);
+
+  static void verify_stub_tables();
+
+public:
+
+  // Define statically sized counts for blobs, stubs and entries in
+  // each stub group. n.b. we omit cases where the blob or entry count
+  // equals the stub count.
+  static const int SHARED_STUB_COUNT = SHARED_STUB_COUNT_INITIALIZER;
+  static const int SHARED_ENTRY_COUNT = SHARED_ENTRY_COUNT_INITIALIZER;
+
+  static const int C1_STUB_COUNT = C1_STUB_COUNT_INITIALIZER;
+
+  static const int C2_STUB_COUNT = C2_STUB_COUNT_INITIALIZER;
+
+  static const int STUBGEN_STUB_COUNT = STUBGEN_STUB_COUNT_INITIALIZER;
+  static const int STUBGEN_BLOB_COUNT = STUBGEN_BLOB_COUNT_INITIALIZER;
+  static const int STUBGEN_ENTRY_COUNT = STUBGEN_ENTRY_COUNT_INITIALIZER;
+
+  // init method called from a static initializer
+  static void populate_stub_tables();
+  // for logging
+  static void dump_tables(LogStream& ls);
+
+  // helpers to step through blob, stub or entry enum sequences. The
+  // input id may be NO_BLOB/STUB/ENTRYID; the returned id may be
+  // NUM_BLOB/STUB/ENTRYIDS.
+  static BlobId next(BlobId id);
+  static StubId next(StubId id);
+  static EntryId next(EntryId id);
+
+  // helpers to step through the blob/stub/entry enum sequence within
+  // (respectively) the enclosing group/blob/stub. The returned id
+  // will be a valid blob/stub/entry id or NO_BLOB/STUB/ENTRYID if the
+  // group/blob/stub contains no more blobs/stubs/entries.
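+  //
+  // For illustration only, a client might use these helpers to walk
+  // all entries of a given stub (hypothetical loop, relying on the
+  // contract described above):
+  //
+  //   for (EntryId e = StubInfo::entry_base(stub);
+  //        e != EntryId::NO_ENTRYID;
+  //        e = StubInfo::next_in_stub(stub, e)) {
+  //     // ... process entry e ...
+  //   }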
+  static BlobId next_in_group(StubGroup stub_group, BlobId blob_id);
+  static StubId next_in_blob(BlobId blob_id, StubId stub_id);
+  static EntryId next_in_stub(StubId stub_id, EntryId entry_id);
+
+#ifdef ASSERT
+  // helpers to check sequencing of blobs, stubs and entries
+  static bool is_next(BlobId second, BlobId first);
+  static bool is_next(StubId second, StubId first);
+  static bool is_next(EntryId second, EntryId first);
+#endif // ASSERT
+
+  // name retrieval
+  static const char* name(StubGroup stub_group);
+  static const char* name(BlobId id);
+  static const char* name(StubId id);
+  static const char* name(EntryId id);
+
+  // Global Group/Blob/Stub/Entry Id Hierarchy Traversal:
+
+  // traverse up
+
+  static StubGroup stubgroup(EntryId id);
+  static StubGroup stubgroup(BlobId id);
+  static StubGroup stubgroup(StubId id);
+
+  static StubId stub(EntryId id);
+  static BlobId blob(EntryId id);
+  static BlobId blob(StubId id);
+
+  // traverse down
+
+  static BlobId blob_base(StubGroup stub_group);
+  static BlobId blob_max(StubGroup stub_group);
+  static int blob_count(StubGroup stub_group);
+
+  static StubId stub_base(StubGroup stub_group);
+  static StubId stub_max(StubGroup stub_group);
+  static int stub_count(StubGroup stub_group);
+
+  static EntryId entry_base(StubGroup stub_group);
+  static EntryId entry_max(StubGroup stub_group);
+  static int entry_count(StubGroup stub_group);
+
+  static StubId stub_base(BlobId id);
+  static StubId stub_max(BlobId id);
+  static int stub_count(BlobId id);
+
+  static EntryId entry_base(BlobId id);
+  static EntryId entry_max(BlobId id);
+  static int entry_count(BlobId id);
+
+  static EntryId entry_base(StubId id);
+  static EntryId entry_max(StubId id);
+  static int entry_count(StubId id);
+
+  // Global <-> Local Id Management:
+
+  // check that a blob/stub belongs to an expected stub group
+
+  static bool is_shared(StubId id);
+  static bool is_c1(StubId id);
+  static bool is_c2(StubId id);
+  static bool is_stubgen(StubId id);
+
+  static bool is_shared(BlobId id);
+  static bool is_c1(BlobId id);
+  static bool is_c2(BlobId id);
+  static bool is_stubgen(BlobId id);
+
+  // Convert a stub id to a unique, zero-based offset in the range of
+  // stub ids for a given stub group.
+
+  static int shared_offset(StubId id);
+  static int c1_offset(StubId id);
+  static int c2_offset(StubId id);
+  static int stubgen_offset(StubId id);
+};
+
+
+#endif // SHARE_RUNTIME_STUBINFO_HPP
diff --git a/src/hotspot/share/runtime/stubRoutines.cpp b/src/hotspot/share/runtime/stubRoutines.cpp
index 256d51bf5c2..2c50fe50915 100644
--- a/src/hotspot/share/runtime/stubRoutines.cpp
+++ b/src/hotspot/share/runtime/stubRoutines.cpp
@@ -48,141 +48,61 @@ address UnsafeMemoryAccess::_common_exit_stub_pc = nullptr;
 
 // Implementation of StubRoutines - for a description of how to
 // declare new blobs, stubs and entries, see stubDefinitions.hpp.
 
-// define arrays to hold stub and blob names
-
-// use a template to generate the initializer for the blob names array
-
-#define DEFINE_BLOB_NAME(blob_name) \
-  # blob_name,
-
-const char* StubRoutines::_blob_names[StubGenBlobId::NUM_BLOBIDS] = {
-  STUBGEN_BLOBS_DO(DEFINE_BLOB_NAME)
-};
-
-#undef DEFINE_BLOB_NAME
-
-#define DEFINE_STUB_NAME(blob_name, stub_name) \
-  # stub_name , \
-
-// use a template to generate the initializer for the stub names array
-const char* StubRoutines::_stub_names[StubGenStubId::NUM_STUBIDS] = {
-  STUBGEN_STUBS_DO(DEFINE_STUB_NAME)
-};
-
-#undef DEFINE_STUB_NAME
-
 // Define fields used to store blobs
 
-#define DEFINE_BLOB_FIELD(blob_name) \
+#define DEFINE_STUBGEN_BLOB_FIELD(blob_name) \
   BufferBlob* StubRoutines:: STUBGEN_BLOB_FIELD_NAME(blob_name) = nullptr;
 
-STUBGEN_BLOBS_DO(DEFINE_BLOB_FIELD)
+STUBGEN_BLOBS_DO(DEFINE_STUBGEN_BLOB_FIELD)
 
-#undef DEFINE_BLOB_FIELD
+#undef DEFINE_STUBGEN_BLOB_FIELD
 
-// Define fields used to store stub entries
+// Define fields used to store stubgen stub entries
 
-#define DEFINE_ENTRY_FIELD(blob_name, stub_name, field_name, getter_name) \
+#define DEFINE_STUBGEN_ENTRY_FIELD(blob_name, stub_name, field_name, getter_name) \
   address StubRoutines:: STUB_FIELD_NAME(field_name) = nullptr;
 
-#define DEFINE_ENTRY_FIELD_INIT(blob_name, stub_name, field_name, getter_name, init_function) \
+#define DEFINE_STUBGEN_ENTRY_FIELD_INIT(blob_name, stub_name, field_name, getter_name, init_function) \
   address StubRoutines:: STUB_FIELD_NAME(field_name) = CAST_FROM_FN_PTR(address, init_function);
 
-#define DEFINE_ENTRY_FIELD_ARRAY(blob_name, stub_name, field_name, getter_name, count) \
+#define DEFINE_STUBGEN_ENTRY_FIELD_ARRAY(blob_name, stub_name, field_name, getter_name, count) \
   address StubRoutines:: STUB_FIELD_NAME(field_name)[count] = { nullptr };
 
-STUBGEN_ENTRIES_DO(DEFINE_ENTRY_FIELD, DEFINE_ENTRY_FIELD_INIT, DEFINE_ENTRY_FIELD_ARRAY)
+STUBGEN_ENTRIES_DO(DEFINE_STUBGEN_ENTRY_FIELD, DEFINE_STUBGEN_ENTRY_FIELD_INIT, DEFINE_STUBGEN_ENTRY_FIELD_ARRAY)
 
-#undef DEFINE_ENTRY_FIELD_ARRAY
-#undef DEFINE_ENTRY_FIELD_INIT
-#undef DEFINE_ENTRY_FIELD
+#undef DEFINE_STUBGEN_ENTRY_FIELD_ARRAY
+#undef DEFINE_STUBGEN_ENTRY_FIELD_INIT
+#undef DEFINE_STUBGEN_ENTRY_FIELD
 
 jint StubRoutines::_verify_oop_count = 0;
 
 address StubRoutines::_string_indexof_array[4] = { nullptr };
 
-const char* StubRoutines::get_blob_name(StubGenBlobId id) {
-  assert(0 <= id && id < StubGenBlobId::NUM_BLOBIDS, "invalid blob id");
-  return _blob_names[id];
+const char* StubRoutines::get_blob_name(BlobId id) {
+  assert(StubInfo::is_stubgen(id), "not a stubgen blob %s", StubInfo::name(id));
+  return StubInfo::name(id);
 }
 
-const char* StubRoutines::get_stub_name(StubGenStubId id) {
-  assert(0 <= id && id < StubGenStubId::NUM_STUBIDS, "invalid stub id");
-  return _stub_names[id];
+const char* StubRoutines::get_stub_name(StubId id) {
+  assert(StubInfo::is_stubgen(id), "not a stubgen stub %s", StubInfo::name(id));
+  return StubInfo::name(id);
 }
 
 #ifdef ASSERT
+// translate a stub id to an associated blob id while checking that it
+// is a stubgen stub
 
-// array holding start and end indices for stub ids associated with a
-// given blob. Given a blob with id (StubGenBlobId) blob_id for any
-// stub with id (StubGenStubId) stub_id declared within the blob:
-//   _blob_offsets[blob_id] <= stub_id < _blob_offsets[blob_id+1]
-
-static int _blob_limits[StubGenBlobId::NUM_BLOBIDS + 1];
-
-// macro used to compute blob limits
-#define BLOB_COUNT(blob_name)                                           \
-  counter += StubGenStubId_ ## blob_name :: NUM_STUBIDS_ ## blob_name;  \
-  _blob_limits[++index] = counter;                                      \
-
-// macro that checks stubs are associated with the correct blobs
-#define STUB_VERIFY(blob_name, stub_name)                               \
-  localStubId = (int) (StubGenStubId_ ## blob_name :: blob_name ## _ ## stub_name ## _id); \
-  globalStubId = (int) (StubGenStubId:: stub_name ## _id);              \
-  blobId = (int) (StubGenBlobId:: blob_name ## _id);                    \
-  assert((globalStubId >= _blob_limits[blobId] &&                       \
-          globalStubId < _blob_limits[blobId+1]),                       \
-         "stub " # stub_name " uses incorrect blob name " # blob_name); \
-  assert(globalStubId == _blob_limits[blobId] + localStubId,            \
-         "stub " # stub_name " id found at wrong offset!");             \
-
-bool verifyStubIds() {
-  // first compute the blob limits
-  int counter = 0;
-  int index = 0;
-  // populate offsets table with cumulative total of local enum counts
-  STUBGEN_BLOBS_DO(BLOB_COUNT);
-
-  // ensure 1) global stub ids lie in the range of the associated blob
-  // and 2) each blob's base + local stub id == global stub id
-  int globalStubId, blobId, localStubId;
-  STUBGEN_STUBS_DO(STUB_VERIFY);
-  return true;
-}
-
-#undef BLOB_COUNT
-#undef STUB_VERIFY
-
-// ensure we verify the blob ids when this compile unit is first entered
-bool _verified_stub_ids = verifyStubIds();
-
-
-// macro used by stub to blob translation
-
-#define BLOB_CHECK_OFFSET(blob_name)                                    \
-  if (id < _blob_limits[((int)blobId) + 1]) { return blobId; }          \
-  blobId = StubGenBlobId:: blob_name ## _id;                            \
-
-// translate a global stub id to an associated blob id based on the
-// computed blob limits
-
-StubGenBlobId StubRoutines::stub_to_blob(StubGenStubId stubId) {
-  int id = (int)stubId;
-  assert(id > ((int)StubGenStubId::NO_STUBID) && id < ((int)StubGenStubId::NUM_STUBIDS), "stub id out of range!");
-  // start with no blob to catch stub id == -1
-  StubGenBlobId blobId = StubGenBlobId::NO_BLOBID;
-  STUBGEN_BLOBS_DO(BLOB_CHECK_OFFSET);
-  // if we reach here we should have the last blob id
-  assert(blobId == StubGenBlobId::NUM_BLOBIDS - 1, "unexpected blob id");
-  return blobId;
+BlobId StubRoutines::stub_to_blob(StubId id) {
+  assert(StubInfo::is_stubgen(id), "not a stubgen stub %s", StubInfo::name(id));
+  return StubInfo::blob(id);
 }
 
 #endif // ASSERT
 
 // Initialization
 
-extern void StubGenerator_generate(CodeBuffer* code, StubGenBlobId blob_id); // only interface to generators
+extern void StubGenerator_generate(CodeBuffer* code, BlobId blob_id); // only interface to generators
 
 void UnsafeMemoryAccess::create_table(int max_size) {
   UnsafeMemoryAccess::_table = new UnsafeMemoryAccess[max_size];
@@ -212,11 +132,12 @@ address UnsafeMemoryAccess::page_error_continue_pc(address pc) {
 }
 
-static BufferBlob* initialize_stubs(StubGenBlobId blob_id,
+static BufferBlob* initialize_stubs(BlobId blob_id,
                                     int code_size, int max_aligned_stubs,
                                     const char* timer_msg,
                                     const char* buffer_name,
                                     const char* assert_msg) {
+  assert(StubInfo::is_stubgen(blob_id), "not a stubgen blob %s", StubInfo::name(blob_id));
   ResourceMark rm;
   if (code_size == 0) {
     LogTarget(Info, stubs) lt;
@@ -252,11 +173,11 @@ static BufferBlob* initialize_stubs(StubGenBlobId blob_id,
 
 #define DEFINE_BLOB_INIT_METHOD(blob_name)                              \
  void StubRoutines::initialize_ ## blob_name ## _stubs() {             \
    if (STUBGEN_BLOB_FIELD_NAME(blob_name) == nullptr) {                \
-     StubGenBlobId blob_id = StubGenBlobId:: STUB_ID_NAME(blob_name);  \
+     BlobId blob_id = BlobId:: JOIN3(stubgen, blob_name, id);          \
      int size = _ ## blob_name ## _code_size;                          \
      int max_aligned_size = 10;                                        \
      const char* timer_msg = "StubRoutines generation " # blob_name " stubs"; \
-     const char* name = "StubRoutines (" # blob_name "stubs)";         \
+     const char* name = "StubRoutines (" # blob_name " stubs)";        \
      const char* assert_msg = "_" # blob_name "_code_size";            \
      STUBGEN_BLOB_FIELD_NAME(blob_name) =                              \
        initialize_stubs(blob_id, size, max_aligned_size, timer_msg,    \
@@ -271,9 +192,9 @@ STUBGEN_BLOBS_DO(DEFINE_BLOB_INIT_METHOD)
 
 
 #define DEFINE_BLOB_INIT_FUNCTION(blob_name) \
-void blob_name ## _stubs_init() {            \
-  StubRoutines::initialize_ ## blob_name ## _stubs(); \
-}
+  void blob_name ## _stubs_init() {                    \
+    StubRoutines::initialize_ ## blob_name ## _stubs(); \
+  }
 
 STUBGEN_BLOBS_DO(DEFINE_BLOB_INIT_FUNCTION)
 
@@ -283,7 +204,7 @@ STUBGEN_BLOBS_DO(DEFINE_BLOB_INIT_FUNCTION)
  * we generate the underlying driver method but this wrapper is needed
  * to perform special handling depending on where the compiler init
  * gets called from. it ought to be possible to remove this at some
- * point and have adeterminate ordered init.
+ * point and have a determinate ordered init.
 */
 
 void compiler_stubs_init(bool in_compiler_thread) {
@@ -301,7 +222,6 @@ void compiler_stubs_init(bool in_compiler_thread) {
   }
 }
 
-
 //
 // Default versions of arraycopy functions
 //
diff --git a/src/hotspot/share/runtime/stubRoutines.hpp b/src/hotspot/share/runtime/stubRoutines.hpp
index bd60b931f0c..b333d6d74b9 100644
--- a/src/hotspot/share/runtime/stubRoutines.hpp
+++ b/src/hotspot/share/runtime/stubRoutines.hpp
@@ -31,7 +31,7 @@
 #include "runtime/frame.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/stubCodeGenerator.hpp"
-#include "runtime/stubDeclarations.hpp"
+#include "runtime/stubInfo.hpp"
 #include "runtime/threadWXSetters.inline.hpp"
 #include "utilities/macros.hpp"
 
@@ -151,51 +151,6 @@ class UnsafeMemoryAccessMark : public StackObj {
   ~UnsafeMemoryAccessMark();
 };
 
-// declare stubgen blob id enum
-
-#define BLOB_ENUM_DECLARE(blob_name) \
-  STUB_ID_NAME(blob_name),
-
-enum StubGenBlobId : int {
-  NO_BLOBID = -1,
-  STUBGEN_BLOBS_DO(BLOB_ENUM_DECLARE)
-  NUM_BLOBIDS
-};
-
-#undef BLOB_ENUM_DECLARE
-
-// declare blob local stub id enums
-
-#define BLOB_LOCAL_ENUM_START(blob_name) \
-  enum StubGenStubId_ ## blob_name {     \
-    NO_STUBID_ ## blob_name = -1,
-
-#define BLOB_LOCAL_ENUM_END(blob_name) \
-    NUM_STUBIDS_ ## blob_name          \
-  };
-
-#define BLOB_LOCAL_STUB_ENUM_DECLARE(blob_name, stub_name) \
-  blob_name ## _ ## stub_name ## _id,
-
-STUBGEN_BLOBS_STUBS_DO(BLOB_LOCAL_ENUM_START, BLOB_LOCAL_ENUM_END, BLOB_LOCAL_STUB_ENUM_DECLARE)
-
-#undef BLOB_LOCAL_ENUM_START
-#undef BLOB_LOCAL_ENUM_END
-#undef BLOB_LOCAL_STUB_ENUM_DECLARE
-
-// declare global stub id enum
-
-#define STUB_ENUM_DECLARE(blob_name, stub_name) \
-  STUB_ID_NAME(stub_name) ,
-
-enum StubGenStubId : int {
-  NO_STUBID = -1,
-  STUBGEN_STUBS_DO(STUB_ENUM_DECLARE)
-  NUM_STUBIDS
-};
-
-#undef STUB_ENUM_DECLARE
-
 class StubRoutines: AllStatic {
 
 public:
@@ -208,17 +163,8 @@ public:
 
 #include CPU_HEADER(stubRoutines)
 
-// declare blob and stub name storage and associated lookup methods
-
-private:
-  static bool _inited_names;
-  static const char* _blob_names[StubGenBlobId::NUM_BLOBIDS];
-  static const char* _stub_names[StubGenStubId::NUM_STUBIDS];
-
-public:
-  static bool init_names();
-  static const char* get_blob_name(StubGenBlobId id);
-  static const char* get_stub_name(StubGenStubId id);
+  static const char* get_blob_name(BlobId id);
+  static const char* get_stub_name(StubId id);
 
 // declare blob fields
 
@@ -329,8 +275,7 @@ public:
 #undef DEFINE_BLOB_GETTER
 
 #ifdef ASSERT
-  // provide a translation from stub id to its associated blob id
-  static StubGenBlobId stub_to_blob(StubGenStubId stubId);
+  static BlobId stub_to_blob(StubId id);
 #endif
 
 // Debugging
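+
+  // For illustration only, a hypothetical debug-time query using the
+  // revised API (the id below is one of the generated stubgen tags;
+  // stub_to_blob is only available under ASSERT):
+  //
+  //   StubId id = StubId::stubgen_call_stub_id;
+  //   BlobId blob = StubRoutines::stub_to_blob(id);
+  //   const char* name = StubRoutines::get_stub_name(id);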