diff --git a/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.cpp b/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.cpp index 32a7011ac26..405ac4b2310 100644 --- a/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.cpp +++ b/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.cpp @@ -333,9 +333,9 @@ int SaveLiveRegisters::iterate_over_register_mask(IterationAction action, int of } } else if (vm_reg->is_ConditionRegister()) { // NOP. Conditions registers are covered by save_LR_CR - } else if (vm_reg->is_VectorSRegister()) { + } else if (vm_reg->is_VectorRegister()) { assert(SuperwordUseVSX, "or should not reach here"); - VectorSRegister vs_reg = vm_reg->as_VectorSRegister(); + VectorSRegister vs_reg = (vm_reg->as_VectorRegister()).to_vsr(); if (vs_reg->encoding() >= VSR32->encoding() && vs_reg->encoding() <= VSR51->encoding()) { reg_save_index += (2 + (reg_save_index & 1)); // 2 slots + alignment if needed diff --git a/src/hotspot/cpu/ppc/ppc.ad b/src/hotspot/cpu/ppc/ppc.ad index 1504c977fc7..80a39610a4c 100644 --- a/src/hotspot/cpu/ppc/ppc.ad +++ b/src/hotspot/cpu/ppc/ppc.ad @@ -255,329 +255,168 @@ register %{ reg_def SR_PPR( SOC, SOC, Op_RegP, 5, SR_PPR->as_VMReg()); // v // ---------------------------- -// Vector-Scalar Registers +// Vector Registers // ---------------------------- - // 1st 32 VSRs are aliases for the FPRs which are already defined above. 
- reg_def VSR0 (SOC, SOC, Op_RegF, 0, VMRegImpl::Bad()); - reg_def VSR0_H (SOC, SOC, Op_RegF, 0, VMRegImpl::Bad()); - reg_def VSR0_J (SOC, SOC, Op_RegF, 0, VMRegImpl::Bad()); - reg_def VSR0_K (SOC, SOC, Op_RegF, 0, VMRegImpl::Bad()); - reg_def VSR1 (SOC, SOC, Op_RegF, 1, VMRegImpl::Bad()); - reg_def VSR1_H (SOC, SOC, Op_RegF, 1, VMRegImpl::Bad()); - reg_def VSR1_J (SOC, SOC, Op_RegF, 1, VMRegImpl::Bad()); - reg_def VSR1_K (SOC, SOC, Op_RegF, 1, VMRegImpl::Bad()); + reg_def VR0 (SOC, SOC, Op_RegF, 0, VR0->as_VMReg() ); + reg_def VR0_H(SOC, SOC, Op_RegF, 0, VR0->as_VMReg()->next() ); + reg_def VR0_J(SOC, SOC, Op_RegF, 0, VR0->as_VMReg()->next(2)); + reg_def VR0_K(SOC, SOC, Op_RegF, 0, VR0->as_VMReg()->next(3)); - reg_def VSR2 (SOC, SOC, Op_RegF, 2, VMRegImpl::Bad()); - reg_def VSR2_H (SOC, SOC, Op_RegF, 2, VMRegImpl::Bad()); - reg_def VSR2_J (SOC, SOC, Op_RegF, 2, VMRegImpl::Bad()); - reg_def VSR2_K (SOC, SOC, Op_RegF, 2, VMRegImpl::Bad()); + reg_def VR1 (SOC, SOC, Op_RegF, 1, VR1->as_VMReg() ); + reg_def VR1_H(SOC, SOC, Op_RegF, 1, VR1->as_VMReg()->next() ); + reg_def VR1_J(SOC, SOC, Op_RegF, 1, VR1->as_VMReg()->next(2)); + reg_def VR1_K(SOC, SOC, Op_RegF, 1, VR1->as_VMReg()->next(3)); - reg_def VSR3 (SOC, SOC, Op_RegF, 3, VMRegImpl::Bad()); - reg_def VSR3_H (SOC, SOC, Op_RegF, 3, VMRegImpl::Bad()); - reg_def VSR3_J (SOC, SOC, Op_RegF, 3, VMRegImpl::Bad()); - reg_def VSR3_K (SOC, SOC, Op_RegF, 3, VMRegImpl::Bad()); + reg_def VR2 (SOC, SOC, Op_RegF, 2, VR2->as_VMReg() ); + reg_def VR2_H(SOC, SOC, Op_RegF, 2, VR2->as_VMReg()->next() ); + reg_def VR2_J(SOC, SOC, Op_RegF, 2, VR2->as_VMReg()->next(2)); + reg_def VR2_K(SOC, SOC, Op_RegF, 2, VR2->as_VMReg()->next(3)); - reg_def VSR4 (SOC, SOC, Op_RegF, 4, VMRegImpl::Bad()); - reg_def VSR4_H (SOC, SOC, Op_RegF, 4, VMRegImpl::Bad()); - reg_def VSR4_J (SOC, SOC, Op_RegF, 4, VMRegImpl::Bad()); - reg_def VSR4_K (SOC, SOC, Op_RegF, 4, VMRegImpl::Bad()); + reg_def VR3 (SOC, SOC, Op_RegF, 3, VR3->as_VMReg() ); + reg_def VR3_H(SOC, 
SOC, Op_RegF, 3, VR3->as_VMReg()->next() ); + reg_def VR3_J(SOC, SOC, Op_RegF, 3, VR3->as_VMReg()->next(2)); + reg_def VR3_K(SOC, SOC, Op_RegF, 3, VR3->as_VMReg()->next(3)); - reg_def VSR5 (SOC, SOC, Op_RegF, 5, VMRegImpl::Bad()); - reg_def VSR5_H (SOC, SOC, Op_RegF, 5, VMRegImpl::Bad()); - reg_def VSR5_J (SOC, SOC, Op_RegF, 5, VMRegImpl::Bad()); - reg_def VSR5_K (SOC, SOC, Op_RegF, 5, VMRegImpl::Bad()); + reg_def VR4 (SOC, SOC, Op_RegF, 4, VR4->as_VMReg() ); + reg_def VR4_H(SOC, SOC, Op_RegF, 4, VR4->as_VMReg()->next() ); + reg_def VR4_J(SOC, SOC, Op_RegF, 4, VR4->as_VMReg()->next(2)); + reg_def VR4_K(SOC, SOC, Op_RegF, 4, VR4->as_VMReg()->next(3)); - reg_def VSR6 (SOC, SOC, Op_RegF, 6, VMRegImpl::Bad()); - reg_def VSR6_H (SOC, SOC, Op_RegF, 6, VMRegImpl::Bad()); - reg_def VSR6_J (SOC, SOC, Op_RegF, 6, VMRegImpl::Bad()); - reg_def VSR6_K (SOC, SOC, Op_RegF, 6, VMRegImpl::Bad()); + reg_def VR5 (SOC, SOC, Op_RegF, 5, VR5->as_VMReg() ); + reg_def VR5_H(SOC, SOC, Op_RegF, 5, VR5->as_VMReg()->next() ); + reg_def VR5_J(SOC, SOC, Op_RegF, 5, VR5->as_VMReg()->next(2)); + reg_def VR5_K(SOC, SOC, Op_RegF, 5, VR5->as_VMReg()->next(3)); - reg_def VSR7 (SOC, SOC, Op_RegF, 7, VMRegImpl::Bad()); - reg_def VSR7_H (SOC, SOC, Op_RegF, 7, VMRegImpl::Bad()); - reg_def VSR7_J (SOC, SOC, Op_RegF, 7, VMRegImpl::Bad()); - reg_def VSR7_K (SOC, SOC, Op_RegF, 7, VMRegImpl::Bad()); + reg_def VR6 (SOC, SOC, Op_RegF, 6, VR6->as_VMReg() ); + reg_def VR6_H(SOC, SOC, Op_RegF, 6, VR6->as_VMReg()->next() ); + reg_def VR6_J(SOC, SOC, Op_RegF, 6, VR6->as_VMReg()->next(2)); + reg_def VR6_K(SOC, SOC, Op_RegF, 6, VR6->as_VMReg()->next(3)); - reg_def VSR8 (SOC, SOC, Op_RegF, 8, VMRegImpl::Bad()); - reg_def VSR8_H (SOC, SOC, Op_RegF, 8, VMRegImpl::Bad()); - reg_def VSR8_J (SOC, SOC, Op_RegF, 8, VMRegImpl::Bad()); - reg_def VSR8_K (SOC, SOC, Op_RegF, 8, VMRegImpl::Bad()); + reg_def VR7 (SOC, SOC, Op_RegF, 7, VR7->as_VMReg() ); + reg_def VR7_H(SOC, SOC, Op_RegF, 7, VR7->as_VMReg()->next() ); + reg_def 
VR7_J(SOC, SOC, Op_RegF, 7, VR7->as_VMReg()->next(2)); + reg_def VR7_K(SOC, SOC, Op_RegF, 7, VR7->as_VMReg()->next(3)); - reg_def VSR9 (SOC, SOC, Op_RegF, 9, VMRegImpl::Bad()); - reg_def VSR9_H (SOC, SOC, Op_RegF, 9, VMRegImpl::Bad()); - reg_def VSR9_J (SOC, SOC, Op_RegF, 9, VMRegImpl::Bad()); - reg_def VSR9_K (SOC, SOC, Op_RegF, 9, VMRegImpl::Bad()); + reg_def VR8 (SOC, SOC, Op_RegF, 8, VR8->as_VMReg() ); + reg_def VR8_H(SOC, SOC, Op_RegF, 8, VR8->as_VMReg()->next() ); + reg_def VR8_J(SOC, SOC, Op_RegF, 8, VR8->as_VMReg()->next(2)); + reg_def VR8_K(SOC, SOC, Op_RegF, 8, VR8->as_VMReg()->next(3)); - reg_def VSR10 (SOC, SOC, Op_RegF, 10, VMRegImpl::Bad()); - reg_def VSR10_H(SOC, SOC, Op_RegF, 10, VMRegImpl::Bad()); - reg_def VSR10_J(SOC, SOC, Op_RegF, 10, VMRegImpl::Bad()); - reg_def VSR10_K(SOC, SOC, Op_RegF, 10, VMRegImpl::Bad()); + reg_def VR9 (SOC, SOC, Op_RegF, 9, VR9->as_VMReg() ); + reg_def VR9_H(SOC, SOC, Op_RegF, 9, VR9->as_VMReg()->next() ); + reg_def VR9_J(SOC, SOC, Op_RegF, 9, VR9->as_VMReg()->next(2)); + reg_def VR9_K(SOC, SOC, Op_RegF, 9, VR9->as_VMReg()->next(3)); - reg_def VSR11 (SOC, SOC, Op_RegF, 11, VMRegImpl::Bad()); - reg_def VSR11_H(SOC, SOC, Op_RegF, 11, VMRegImpl::Bad()); - reg_def VSR11_J(SOC, SOC, Op_RegF, 11, VMRegImpl::Bad()); - reg_def VSR11_K(SOC, SOC, Op_RegF, 11, VMRegImpl::Bad()); + reg_def VR10 (SOC, SOC, Op_RegF, 10, VR10->as_VMReg() ); + reg_def VR10_H(SOC, SOC, Op_RegF, 10, VR10->as_VMReg()->next() ); + reg_def VR10_J(SOC, SOC, Op_RegF, 10, VR10->as_VMReg()->next(2)); + reg_def VR10_K(SOC, SOC, Op_RegF, 10, VR10->as_VMReg()->next(3)); - reg_def VSR12 (SOC, SOC, Op_RegF, 12, VMRegImpl::Bad()); - reg_def VSR12_H(SOC, SOC, Op_RegF, 12, VMRegImpl::Bad()); - reg_def VSR12_J(SOC, SOC, Op_RegF, 12, VMRegImpl::Bad()); - reg_def VSR12_K(SOC, SOC, Op_RegF, 12, VMRegImpl::Bad()); + reg_def VR11 (SOC, SOC, Op_RegF, 11, VR11->as_VMReg() ); + reg_def VR11_H(SOC, SOC, Op_RegF, 11, VR11->as_VMReg()->next() ); + reg_def VR11_J(SOC, SOC, Op_RegF, 
11, VR11->as_VMReg()->next(2)); + reg_def VR11_K(SOC, SOC, Op_RegF, 11, VR11->as_VMReg()->next(3)); - reg_def VSR13 (SOC, SOC, Op_RegF, 13, VMRegImpl::Bad()); - reg_def VSR13_H(SOC, SOC, Op_RegF, 13, VMRegImpl::Bad()); - reg_def VSR13_J(SOC, SOC, Op_RegF, 13, VMRegImpl::Bad()); - reg_def VSR13_K(SOC, SOC, Op_RegF, 13, VMRegImpl::Bad()); + reg_def VR12 (SOC, SOC, Op_RegF, 12, VR12->as_VMReg() ); + reg_def VR12_H(SOC, SOC, Op_RegF, 12, VR12->as_VMReg()->next() ); + reg_def VR12_J(SOC, SOC, Op_RegF, 12, VR12->as_VMReg()->next(2)); + reg_def VR12_K(SOC, SOC, Op_RegF, 12, VR12->as_VMReg()->next(3)); - reg_def VSR14 (SOC, SOC, Op_RegF, 14, VMRegImpl::Bad()); - reg_def VSR14_H(SOC, SOC, Op_RegF, 14, VMRegImpl::Bad()); - reg_def VSR14_J(SOC, SOC, Op_RegF, 14, VMRegImpl::Bad()); - reg_def VSR14_K(SOC, SOC, Op_RegF, 14, VMRegImpl::Bad()); + reg_def VR13 (SOC, SOC, Op_RegF, 13, VR13->as_VMReg() ); + reg_def VR13_H(SOC, SOC, Op_RegF, 13, VR13->as_VMReg()->next() ); + reg_def VR13_J(SOC, SOC, Op_RegF, 13, VR13->as_VMReg()->next(2)); + reg_def VR13_K(SOC, SOC, Op_RegF, 13, VR13->as_VMReg()->next(3)); - reg_def VSR15 (SOC, SOC, Op_RegF, 15, VMRegImpl::Bad()); - reg_def VSR15_H(SOC, SOC, Op_RegF, 15, VMRegImpl::Bad()); - reg_def VSR15_J(SOC, SOC, Op_RegF, 15, VMRegImpl::Bad()); - reg_def VSR15_K(SOC, SOC, Op_RegF, 15, VMRegImpl::Bad()); + reg_def VR14 (SOC, SOC, Op_RegF, 14, VR14->as_VMReg() ); + reg_def VR14_H(SOC, SOC, Op_RegF, 14, VR14->as_VMReg()->next() ); + reg_def VR14_J(SOC, SOC, Op_RegF, 14, VR14->as_VMReg()->next(2)); + reg_def VR14_K(SOC, SOC, Op_RegF, 14, VR14->as_VMReg()->next(3)); - reg_def VSR16 (SOC, SOC, Op_RegF, 16, VMRegImpl::Bad()); - reg_def VSR16_H(SOC, SOC, Op_RegF, 16, VMRegImpl::Bad()); - reg_def VSR16_J(SOC, SOC, Op_RegF, 16, VMRegImpl::Bad()); - reg_def VSR16_K(SOC, SOC, Op_RegF, 16, VMRegImpl::Bad()); + reg_def VR15 (SOC, SOC, Op_RegF, 15, VR15->as_VMReg() ); + reg_def VR15_H(SOC, SOC, Op_RegF, 15, VR15->as_VMReg()->next() ); + reg_def VR15_J(SOC, SOC, 
Op_RegF, 15, VR15->as_VMReg()->next(2)); + reg_def VR15_K(SOC, SOC, Op_RegF, 15, VR15->as_VMReg()->next(3)); - reg_def VSR17 (SOC, SOC, Op_RegF, 17, VMRegImpl::Bad()); - reg_def VSR17_H(SOC, SOC, Op_RegF, 17, VMRegImpl::Bad()); - reg_def VSR17_J(SOC, SOC, Op_RegF, 17, VMRegImpl::Bad()); - reg_def VSR17_K(SOC, SOC, Op_RegF, 17, VMRegImpl::Bad()); + reg_def VR16 (SOC, SOC, Op_RegF, 16, VR16->as_VMReg() ); + reg_def VR16_H(SOC, SOC, Op_RegF, 16, VR16->as_VMReg()->next() ); + reg_def VR16_J(SOC, SOC, Op_RegF, 16, VR16->as_VMReg()->next(2)); + reg_def VR16_K(SOC, SOC, Op_RegF, 16, VR16->as_VMReg()->next(3)); - reg_def VSR18 (SOC, SOC, Op_RegF, 18, VMRegImpl::Bad()); - reg_def VSR18_H(SOC, SOC, Op_RegF, 18, VMRegImpl::Bad()); - reg_def VSR18_J(SOC, SOC, Op_RegF, 18, VMRegImpl::Bad()); - reg_def VSR18_K(SOC, SOC, Op_RegF, 18, VMRegImpl::Bad()); + reg_def VR17 (SOC, SOC, Op_RegF, 17, VR17->as_VMReg() ); + reg_def VR17_H(SOC, SOC, Op_RegF, 17, VR17->as_VMReg()->next() ); + reg_def VR17_J(SOC, SOC, Op_RegF, 17, VR17->as_VMReg()->next(2)); + reg_def VR17_K(SOC, SOC, Op_RegF, 17, VR17->as_VMReg()->next(3)); - reg_def VSR19 (SOC, SOC, Op_RegF, 19, VMRegImpl::Bad()); - reg_def VSR19_H(SOC, SOC, Op_RegF, 19, VMRegImpl::Bad()); - reg_def VSR19_J(SOC, SOC, Op_RegF, 19, VMRegImpl::Bad()); - reg_def VSR19_K(SOC, SOC, Op_RegF, 19, VMRegImpl::Bad()); + reg_def VR18 (SOC, SOC, Op_RegF, 18, VR18->as_VMReg() ); + reg_def VR18_H(SOC, SOC, Op_RegF, 18, VR18->as_VMReg()->next() ); + reg_def VR18_J(SOC, SOC, Op_RegF, 18, VR18->as_VMReg()->next(2)); + reg_def VR18_K(SOC, SOC, Op_RegF, 18, VR18->as_VMReg()->next(3)); - reg_def VSR20 (SOC, SOC, Op_RegF, 20, VMRegImpl::Bad()); - reg_def VSR20_H(SOC, SOC, Op_RegF, 20, VMRegImpl::Bad()); - reg_def VSR20_J(SOC, SOC, Op_RegF, 20, VMRegImpl::Bad()); - reg_def VSR20_K(SOC, SOC, Op_RegF, 20, VMRegImpl::Bad()); + reg_def VR19 (SOC, SOC, Op_RegF, 19, VR19->as_VMReg() ); + reg_def VR19_H(SOC, SOC, Op_RegF, 19, VR19->as_VMReg()->next() ); + reg_def 
VR19_J(SOC, SOC, Op_RegF, 19, VR19->as_VMReg()->next(2)); + reg_def VR19_K(SOC, SOC, Op_RegF, 19, VR19->as_VMReg()->next(3)); - reg_def VSR21 (SOC, SOC, Op_RegF, 21, VMRegImpl::Bad()); - reg_def VSR21_H(SOC, SOC, Op_RegF, 21, VMRegImpl::Bad()); - reg_def VSR21_J(SOC, SOC, Op_RegF, 21, VMRegImpl::Bad()); - reg_def VSR21_K(SOC, SOC, Op_RegF, 21, VMRegImpl::Bad()); + reg_def VR20 (SOC, SOE, Op_RegF, 20, VR20->as_VMReg() ); + reg_def VR20_H(SOC, SOE, Op_RegF, 20, VR20->as_VMReg()->next() ); + reg_def VR20_J(SOC, SOE, Op_RegF, 20, VR20->as_VMReg()->next(2)); + reg_def VR20_K(SOC, SOE, Op_RegF, 20, VR20->as_VMReg()->next(3)); - reg_def VSR22 (SOC, SOC, Op_RegF, 22, VMRegImpl::Bad()); - reg_def VSR22_H(SOC, SOC, Op_RegF, 22, VMRegImpl::Bad()); - reg_def VSR22_J(SOC, SOC, Op_RegF, 22, VMRegImpl::Bad()); - reg_def VSR22_K(SOC, SOC, Op_RegF, 22, VMRegImpl::Bad()); + reg_def VR21 (SOC, SOE, Op_RegF, 21, VR21->as_VMReg() ); + reg_def VR21_H(SOC, SOE, Op_RegF, 21, VR21->as_VMReg()->next() ); + reg_def VR21_J(SOC, SOE, Op_RegF, 21, VR21->as_VMReg()->next(2)); + reg_def VR21_K(SOC, SOE, Op_RegF, 21, VR21->as_VMReg()->next(3)); - reg_def VSR23 (SOC, SOC, Op_RegF, 23, VMRegImpl::Bad()); - reg_def VSR23_H(SOC, SOC, Op_RegF, 23, VMRegImpl::Bad()); - reg_def VSR23_J(SOC, SOC, Op_RegF, 23, VMRegImpl::Bad()); - reg_def VSR23_K(SOC, SOC, Op_RegF, 23, VMRegImpl::Bad()); + reg_def VR22 (SOC, SOE, Op_RegF, 22, VR22->as_VMReg() ); + reg_def VR22_H(SOC, SOE, Op_RegF, 22, VR22->as_VMReg()->next() ); + reg_def VR22_J(SOC, SOE, Op_RegF, 22, VR22->as_VMReg()->next(2)); + reg_def VR22_K(SOC, SOE, Op_RegF, 22, VR22->as_VMReg()->next(3)); - reg_def VSR24 (SOC, SOC, Op_RegF, 24, VMRegImpl::Bad()); - reg_def VSR24_H(SOC, SOC, Op_RegF, 24, VMRegImpl::Bad()); - reg_def VSR24_J(SOC, SOC, Op_RegF, 24, VMRegImpl::Bad()); - reg_def VSR24_K(SOC, SOC, Op_RegF, 24, VMRegImpl::Bad()); + reg_def VR23 (SOC, SOE, Op_RegF, 23, VR23->as_VMReg() ); + reg_def VR23_H(SOC, SOE, Op_RegF, 23, VR23->as_VMReg()->next() ); + 
reg_def VR23_J(SOC, SOE, Op_RegF, 23, VR23->as_VMReg()->next(2)); + reg_def VR23_K(SOC, SOE, Op_RegF, 23, VR23->as_VMReg()->next(3)); - reg_def VSR25 (SOC, SOC, Op_RegF, 25, VMRegImpl::Bad()); - reg_def VSR25_H(SOC, SOC, Op_RegF, 25, VMRegImpl::Bad()); - reg_def VSR25_J(SOC, SOC, Op_RegF, 25, VMRegImpl::Bad()); - reg_def VSR25_K(SOC, SOC, Op_RegF, 25, VMRegImpl::Bad()); + reg_def VR24 (SOC, SOE, Op_RegF, 24, VR24->as_VMReg() ); + reg_def VR24_H(SOC, SOE, Op_RegF, 24, VR24->as_VMReg()->next() ); + reg_def VR24_J(SOC, SOE, Op_RegF, 24, VR24->as_VMReg()->next(2)); + reg_def VR24_K(SOC, SOE, Op_RegF, 24, VR24->as_VMReg()->next(3)); - reg_def VSR26 (SOC, SOC, Op_RegF, 26, VMRegImpl::Bad()); - reg_def VSR26_H(SOC, SOC, Op_RegF, 26, VMRegImpl::Bad()); - reg_def VSR26_J(SOC, SOC, Op_RegF, 26, VMRegImpl::Bad()); - reg_def VSR26_K(SOC, SOC, Op_RegF, 26, VMRegImpl::Bad()); + reg_def VR25 (SOC, SOE, Op_RegF, 25, VR25->as_VMReg() ); + reg_def VR25_H(SOC, SOE, Op_RegF, 25, VR25->as_VMReg()->next() ); + reg_def VR25_J(SOC, SOE, Op_RegF, 25, VR25->as_VMReg()->next(2)); + reg_def VR25_K(SOC, SOE, Op_RegF, 25, VR25->as_VMReg()->next(3)); - reg_def VSR27 (SOC, SOC, Op_RegF, 27, VMRegImpl::Bad()); - reg_def VSR27_H(SOC, SOC, Op_RegF, 27, VMRegImpl::Bad()); - reg_def VSR27_J(SOC, SOC, Op_RegF, 27, VMRegImpl::Bad()); - reg_def VSR27_K(SOC, SOC, Op_RegF, 27, VMRegImpl::Bad()); + reg_def VR26 (SOC, SOE, Op_RegF, 26, VR26->as_VMReg() ); + reg_def VR26_H(SOC, SOE, Op_RegF, 26, VR26->as_VMReg()->next() ); + reg_def VR26_J(SOC, SOE, Op_RegF, 26, VR26->as_VMReg()->next(2)); + reg_def VR26_K(SOC, SOE, Op_RegF, 26, VR26->as_VMReg()->next(3)); - reg_def VSR28 (SOC, SOC, Op_RegF, 28, VMRegImpl::Bad()); - reg_def VSR28_H(SOC, SOC, Op_RegF, 28, VMRegImpl::Bad()); - reg_def VSR28_J(SOC, SOC, Op_RegF, 28, VMRegImpl::Bad()); - reg_def VSR28_K(SOC, SOC, Op_RegF, 28, VMRegImpl::Bad()); + reg_def VR27 (SOC, SOE, Op_RegF, 27, VR27->as_VMReg() ); + reg_def VR27_H(SOC, SOE, Op_RegF, 27, 
VR27->as_VMReg()->next() ); + reg_def VR27_J(SOC, SOE, Op_RegF, 27, VR27->as_VMReg()->next(2)); + reg_def VR27_K(SOC, SOE, Op_RegF, 27, VR27->as_VMReg()->next(3)); - reg_def VSR29 (SOC, SOC, Op_RegF, 29, VMRegImpl::Bad()); - reg_def VSR29_H(SOC, SOC, Op_RegF, 29, VMRegImpl::Bad()); - reg_def VSR29_J(SOC, SOC, Op_RegF, 29, VMRegImpl::Bad()); - reg_def VSR29_K(SOC, SOC, Op_RegF, 29, VMRegImpl::Bad()); + reg_def VR28 (SOC, SOE, Op_RegF, 28, VR28->as_VMReg() ); + reg_def VR28_H(SOC, SOE, Op_RegF, 28, VR28->as_VMReg()->next() ); + reg_def VR28_J(SOC, SOE, Op_RegF, 28, VR28->as_VMReg()->next(2)); + reg_def VR28_K(SOC, SOE, Op_RegF, 28, VR28->as_VMReg()->next(3)); - reg_def VSR30 (SOC, SOC, Op_RegF, 30, VMRegImpl::Bad()); - reg_def VSR30_H(SOC, SOC, Op_RegF, 30, VMRegImpl::Bad()); - reg_def VSR30_J(SOC, SOC, Op_RegF, 30, VMRegImpl::Bad()); - reg_def VSR30_K(SOC, SOC, Op_RegF, 30, VMRegImpl::Bad()); + reg_def VR29 (SOC, SOE, Op_RegF, 29, VR29->as_VMReg() ); + reg_def VR29_H(SOC, SOE, Op_RegF, 29, VR29->as_VMReg()->next() ); + reg_def VR29_J(SOC, SOE, Op_RegF, 29, VR29->as_VMReg()->next(2)); + reg_def VR29_K(SOC, SOE, Op_RegF, 29, VR29->as_VMReg()->next(3)); - reg_def VSR31 (SOC, SOC, Op_RegF, 31, VMRegImpl::Bad()); - reg_def VSR31_H(SOC, SOC, Op_RegF, 31, VMRegImpl::Bad()); - reg_def VSR31_J(SOC, SOC, Op_RegF, 31, VMRegImpl::Bad()); - reg_def VSR31_K(SOC, SOC, Op_RegF, 31, VMRegImpl::Bad()); + reg_def VR30 (SOC, SOE, Op_RegF, 30, VR30->as_VMReg() ); + reg_def VR30_H(SOC, SOE, Op_RegF, 30, VR30->as_VMReg()->next() ); + reg_def VR30_J(SOC, SOE, Op_RegF, 30, VR30->as_VMReg()->next(2)); + reg_def VR30_K(SOC, SOE, Op_RegF, 30, VR30->as_VMReg()->next(3)); - // 2nd 32 VSRs are aliases for the VRs which are only defined here. 
- reg_def VSR32 (SOC, SOC, Op_RegF, 32, VSR32->as_VMReg() ); - reg_def VSR32_H(SOC, SOC, Op_RegF, 32, VSR32->as_VMReg()->next() ); - reg_def VSR32_J(SOC, SOC, Op_RegF, 32, VSR32->as_VMReg()->next(2)); - reg_def VSR32_K(SOC, SOC, Op_RegF, 32, VSR32->as_VMReg()->next(3)); - - reg_def VSR33 (SOC, SOC, Op_RegF, 33, VSR33->as_VMReg() ); - reg_def VSR33_H(SOC, SOC, Op_RegF, 33, VSR33->as_VMReg()->next() ); - reg_def VSR33_J(SOC, SOC, Op_RegF, 33, VSR33->as_VMReg()->next(2)); - reg_def VSR33_K(SOC, SOC, Op_RegF, 33, VSR33->as_VMReg()->next(3)); - - reg_def VSR34 (SOC, SOC, Op_RegF, 34, VSR34->as_VMReg() ); - reg_def VSR34_H(SOC, SOC, Op_RegF, 34, VSR34->as_VMReg()->next() ); - reg_def VSR34_J(SOC, SOC, Op_RegF, 34, VSR34->as_VMReg()->next(2)); - reg_def VSR34_K(SOC, SOC, Op_RegF, 34, VSR34->as_VMReg()->next(3)); - - reg_def VSR35 (SOC, SOC, Op_RegF, 35, VSR35->as_VMReg() ); - reg_def VSR35_H(SOC, SOC, Op_RegF, 35, VSR35->as_VMReg()->next() ); - reg_def VSR35_J(SOC, SOC, Op_RegF, 35, VSR35->as_VMReg()->next(2)); - reg_def VSR35_K(SOC, SOC, Op_RegF, 35, VSR35->as_VMReg()->next(3)); - - reg_def VSR36 (SOC, SOC, Op_RegF, 36, VSR36->as_VMReg() ); - reg_def VSR36_H(SOC, SOC, Op_RegF, 36, VSR36->as_VMReg()->next() ); - reg_def VSR36_J(SOC, SOC, Op_RegF, 36, VSR36->as_VMReg()->next(2)); - reg_def VSR36_K(SOC, SOC, Op_RegF, 36, VSR36->as_VMReg()->next(3)); - - reg_def VSR37 (SOC, SOC, Op_RegF, 37, VSR37->as_VMReg() ); - reg_def VSR37_H(SOC, SOC, Op_RegF, 37, VSR37->as_VMReg()->next() ); - reg_def VSR37_J(SOC, SOC, Op_RegF, 37, VSR37->as_VMReg()->next(2)); - reg_def VSR37_K(SOC, SOC, Op_RegF, 37, VSR37->as_VMReg()->next(3)); - - reg_def VSR38 (SOC, SOC, Op_RegF, 38, VSR38->as_VMReg() ); - reg_def VSR38_H(SOC, SOC, Op_RegF, 38, VSR38->as_VMReg()->next() ); - reg_def VSR38_J(SOC, SOC, Op_RegF, 38, VSR38->as_VMReg()->next(2)); - reg_def VSR38_K(SOC, SOC, Op_RegF, 38, VSR38->as_VMReg()->next(3)); - - reg_def VSR39 (SOC, SOC, Op_RegF, 39, VSR39->as_VMReg() ); - reg_def VSR39_H(SOC, SOC, 
Op_RegF, 39, VSR39->as_VMReg()->next() ); - reg_def VSR39_J(SOC, SOC, Op_RegF, 39, VSR39->as_VMReg()->next(2)); - reg_def VSR39_K(SOC, SOC, Op_RegF, 39, VSR39->as_VMReg()->next(3)); - - reg_def VSR40 (SOC, SOC, Op_RegF, 40, VSR40->as_VMReg() ); - reg_def VSR40_H(SOC, SOC, Op_RegF, 40, VSR40->as_VMReg()->next() ); - reg_def VSR40_J(SOC, SOC, Op_RegF, 40, VSR40->as_VMReg()->next(2)); - reg_def VSR40_K(SOC, SOC, Op_RegF, 40, VSR40->as_VMReg()->next(3)); - - reg_def VSR41 (SOC, SOC, Op_RegF, 41, VSR41->as_VMReg() ); - reg_def VSR41_H(SOC, SOC, Op_RegF, 41, VSR41->as_VMReg()->next() ); - reg_def VSR41_J(SOC, SOC, Op_RegF, 41, VSR41->as_VMReg()->next(2)); - reg_def VSR41_K(SOC, SOC, Op_RegF, 41, VSR41->as_VMReg()->next(3)); - - reg_def VSR42 (SOC, SOC, Op_RegF, 42, VSR42->as_VMReg() ); - reg_def VSR42_H(SOC, SOC, Op_RegF, 42, VSR42->as_VMReg()->next() ); - reg_def VSR42_J(SOC, SOC, Op_RegF, 42, VSR42->as_VMReg()->next(2)); - reg_def VSR42_K(SOC, SOC, Op_RegF, 42, VSR42->as_VMReg()->next(3)); - - reg_def VSR43 (SOC, SOC, Op_RegF, 43, VSR43->as_VMReg() ); - reg_def VSR43_H(SOC, SOC, Op_RegF, 43, VSR43->as_VMReg()->next() ); - reg_def VSR43_J(SOC, SOC, Op_RegF, 43, VSR43->as_VMReg()->next(2)); - reg_def VSR43_K(SOC, SOC, Op_RegF, 43, VSR43->as_VMReg()->next(3)); - - reg_def VSR44 (SOC, SOC, Op_RegF, 44, VSR44->as_VMReg() ); - reg_def VSR44_H(SOC, SOC, Op_RegF, 44, VSR44->as_VMReg()->next() ); - reg_def VSR44_J(SOC, SOC, Op_RegF, 44, VSR44->as_VMReg()->next(2)); - reg_def VSR44_K(SOC, SOC, Op_RegF, 44, VSR44->as_VMReg()->next(3)); - - reg_def VSR45 (SOC, SOC, Op_RegF, 45, VSR45->as_VMReg() ); - reg_def VSR45_H(SOC, SOC, Op_RegF, 45, VSR45->as_VMReg()->next() ); - reg_def VSR45_J(SOC, SOC, Op_RegF, 45, VSR45->as_VMReg()->next(2)); - reg_def VSR45_K(SOC, SOC, Op_RegF, 45, VSR45->as_VMReg()->next(3)); - - reg_def VSR46 (SOC, SOC, Op_RegF, 46, VSR46->as_VMReg() ); - reg_def VSR46_H(SOC, SOC, Op_RegF, 46, VSR46->as_VMReg()->next() ); - reg_def VSR46_J(SOC, SOC, Op_RegF, 46, 
VSR46->as_VMReg()->next(2)); - reg_def VSR46_K(SOC, SOC, Op_RegF, 46, VSR46->as_VMReg()->next(3)); - - reg_def VSR47 (SOC, SOC, Op_RegF, 47, VSR47->as_VMReg() ); - reg_def VSR47_H(SOC, SOC, Op_RegF, 47, VSR47->as_VMReg()->next() ); - reg_def VSR47_J(SOC, SOC, Op_RegF, 47, VSR47->as_VMReg()->next(2)); - reg_def VSR47_K(SOC, SOC, Op_RegF, 47, VSR47->as_VMReg()->next(3)); - - reg_def VSR48 (SOC, SOC, Op_RegF, 48, VSR48->as_VMReg() ); - reg_def VSR48_H(SOC, SOC, Op_RegF, 48, VSR48->as_VMReg()->next() ); - reg_def VSR48_J(SOC, SOC, Op_RegF, 48, VSR48->as_VMReg()->next(2)); - reg_def VSR48_K(SOC, SOC, Op_RegF, 48, VSR48->as_VMReg()->next(3)); - - reg_def VSR49 (SOC, SOC, Op_RegF, 49, VSR49->as_VMReg() ); - reg_def VSR49_H(SOC, SOC, Op_RegF, 49, VSR49->as_VMReg()->next() ); - reg_def VSR49_J(SOC, SOC, Op_RegF, 49, VSR49->as_VMReg()->next(2)); - reg_def VSR49_K(SOC, SOC, Op_RegF, 49, VSR49->as_VMReg()->next(3)); - - reg_def VSR50 (SOC, SOC, Op_RegF, 50, VSR50->as_VMReg() ); - reg_def VSR50_H(SOC, SOC, Op_RegF, 50, VSR50->as_VMReg()->next() ); - reg_def VSR50_J(SOC, SOC, Op_RegF, 50, VSR50->as_VMReg()->next(2)); - reg_def VSR50_K(SOC, SOC, Op_RegF, 50, VSR50->as_VMReg()->next(3)); - - reg_def VSR51 (SOC, SOC, Op_RegF, 51, VSR51->as_VMReg() ); - reg_def VSR51_H(SOC, SOC, Op_RegF, 51, VSR51->as_VMReg()->next() ); - reg_def VSR51_J(SOC, SOC, Op_RegF, 51, VSR51->as_VMReg()->next(2)); - reg_def VSR51_K(SOC, SOC, Op_RegF, 51, VSR51->as_VMReg()->next(3)); - - reg_def VSR52 (SOC, SOE, Op_RegF, 52, VSR52->as_VMReg() ); - reg_def VSR52_H(SOC, SOE, Op_RegF, 52, VSR52->as_VMReg()->next() ); - reg_def VSR52_J(SOC, SOE, Op_RegF, 52, VSR52->as_VMReg()->next(2)); - reg_def VSR52_K(SOC, SOE, Op_RegF, 52, VSR52->as_VMReg()->next(3)); - - reg_def VSR53 (SOC, SOE, Op_RegF, 53, VSR53->as_VMReg() ); - reg_def VSR53_H(SOC, SOE, Op_RegF, 53, VSR53->as_VMReg()->next() ); - reg_def VSR53_J(SOC, SOE, Op_RegF, 53, VSR53->as_VMReg()->next(2)); - reg_def VSR53_K(SOC, SOE, Op_RegF, 53, 
VSR53->as_VMReg()->next(3)); - - reg_def VSR54 (SOC, SOE, Op_RegF, 54, VSR54->as_VMReg() ); - reg_def VSR54_H(SOC, SOE, Op_RegF, 54, VSR54->as_VMReg()->next() ); - reg_def VSR54_J(SOC, SOE, Op_RegF, 54, VSR54->as_VMReg()->next(2)); - reg_def VSR54_K(SOC, SOE, Op_RegF, 54, VSR54->as_VMReg()->next(3)); - - reg_def VSR55 (SOC, SOE, Op_RegF, 55, VSR55->as_VMReg() ); - reg_def VSR55_H(SOC, SOE, Op_RegF, 55, VSR55->as_VMReg()->next() ); - reg_def VSR55_J(SOC, SOE, Op_RegF, 55, VSR55->as_VMReg()->next(2)); - reg_def VSR55_K(SOC, SOE, Op_RegF, 55, VSR55->as_VMReg()->next(3)); - - reg_def VSR56 (SOC, SOE, Op_RegF, 56, VSR56->as_VMReg() ); - reg_def VSR56_H(SOC, SOE, Op_RegF, 56, VSR56->as_VMReg()->next() ); - reg_def VSR56_J(SOC, SOE, Op_RegF, 56, VSR56->as_VMReg()->next(2)); - reg_def VSR56_K(SOC, SOE, Op_RegF, 56, VSR56->as_VMReg()->next(3)); - - reg_def VSR57 (SOC, SOE, Op_RegF, 57, VSR57->as_VMReg() ); - reg_def VSR57_H(SOC, SOE, Op_RegF, 57, VSR57->as_VMReg()->next() ); - reg_def VSR57_J(SOC, SOE, Op_RegF, 57, VSR57->as_VMReg()->next(2)); - reg_def VSR57_K(SOC, SOE, Op_RegF, 57, VSR57->as_VMReg()->next(3)); - - reg_def VSR58 (SOC, SOE, Op_RegF, 58, VSR58->as_VMReg() ); - reg_def VSR58_H(SOC, SOE, Op_RegF, 58, VSR58->as_VMReg()->next() ); - reg_def VSR58_J(SOC, SOE, Op_RegF, 58, VSR58->as_VMReg()->next(2)); - reg_def VSR58_K(SOC, SOE, Op_RegF, 58, VSR58->as_VMReg()->next(3)); - - reg_def VSR59 (SOC, SOE, Op_RegF, 59, VSR59->as_VMReg() ); - reg_def VSR59_H(SOC, SOE, Op_RegF, 59, VSR59->as_VMReg()->next() ); - reg_def VSR59_J(SOC, SOE, Op_RegF, 59, VSR59->as_VMReg()->next(2)); - reg_def VSR59_K(SOC, SOE, Op_RegF, 59, VSR59->as_VMReg()->next(3)); - - reg_def VSR60 (SOC, SOE, Op_RegF, 60, VSR60->as_VMReg() ); - reg_def VSR60_H(SOC, SOE, Op_RegF, 60, VSR60->as_VMReg()->next() ); - reg_def VSR60_J(SOC, SOE, Op_RegF, 60, VSR60->as_VMReg()->next(2)); - reg_def VSR60_K(SOC, SOE, Op_RegF, 60, VSR60->as_VMReg()->next(3)); - - reg_def VSR61 (SOC, SOE, Op_RegF, 61, VSR61->as_VMReg() 
); - reg_def VSR61_H(SOC, SOE, Op_RegF, 61, VSR61->as_VMReg()->next() ); - reg_def VSR61_J(SOC, SOE, Op_RegF, 61, VSR61->as_VMReg()->next(2)); - reg_def VSR61_K(SOC, SOE, Op_RegF, 61, VSR61->as_VMReg()->next(3)); - - reg_def VSR62 (SOC, SOE, Op_RegF, 62, VSR62->as_VMReg() ); - reg_def VSR62_H(SOC, SOE, Op_RegF, 62, VSR62->as_VMReg()->next() ); - reg_def VSR62_J(SOC, SOE, Op_RegF, 62, VSR62->as_VMReg()->next(2)); - reg_def VSR62_K(SOC, SOE, Op_RegF, 62, VSR62->as_VMReg()->next(3)); - - reg_def VSR63 (SOC, SOE, Op_RegF, 63, VSR63->as_VMReg() ); - reg_def VSR63_H(SOC, SOE, Op_RegF, 63, VSR63->as_VMReg()->next() ); - reg_def VSR63_J(SOC, SOE, Op_RegF, 63, VSR63->as_VMReg()->next(2)); - reg_def VSR63_K(SOC, SOE, Op_RegF, 63, VSR63->as_VMReg()->next(3)); + reg_def VR31 (SOC, SOE, Op_RegF, 31, VR31->as_VMReg() ); + reg_def VR31_H(SOC, SOE, Op_RegF, 31, VR31->as_VMReg()->next() ); + reg_def VR31_J(SOC, SOE, Op_RegF, 31, VR31->as_VMReg()->next(2)); + reg_def VR31_K(SOC, SOE, Op_RegF, 31, VR31->as_VMReg()->next(3)); // ---------------------------- // Specify priority of register selection within phases of register @@ -696,70 +535,38 @@ alloc_class chunk1 ( ); alloc_class chunk2 ( - VSR0 , VSR0_H , VSR0_J , VSR0_K , - VSR1 , VSR1_H , VSR1_J , VSR1_K , - VSR2 , VSR2_H , VSR2_J , VSR2_K , - VSR3 , VSR3_H , VSR3_J , VSR3_K , - VSR4 , VSR4_H , VSR4_J , VSR4_K , - VSR5 , VSR5_H , VSR5_J , VSR5_K , - VSR6 , VSR6_H , VSR6_J , VSR6_K , - VSR7 , VSR7_H , VSR7_J , VSR7_K , - VSR8 , VSR8_H , VSR8_J , VSR8_K , - VSR9 , VSR9_H , VSR9_J , VSR9_K , - VSR10, VSR10_H, VSR10_J, VSR10_K, - VSR11, VSR11_H, VSR11_J, VSR11_K, - VSR12, VSR12_H, VSR12_J, VSR12_K, - VSR13, VSR13_H, VSR13_J, VSR13_K, - VSR14, VSR14_H, VSR14_J, VSR14_K, - VSR15, VSR15_H, VSR15_J, VSR15_K, - VSR16, VSR16_H, VSR16_J, VSR16_K, - VSR17, VSR17_H, VSR17_J, VSR17_K, - VSR18, VSR18_H, VSR18_J, VSR18_K, - VSR19, VSR19_H, VSR19_J, VSR19_K, - VSR20, VSR20_H, VSR20_J, VSR20_K, - VSR21, VSR21_H, VSR21_J, VSR21_K, - VSR22, VSR22_H, 
VSR22_J, VSR22_K, - VSR23, VSR23_H, VSR23_J, VSR23_K, - VSR24, VSR24_H, VSR24_J, VSR24_K, - VSR25, VSR25_H, VSR25_J, VSR25_K, - VSR26, VSR26_H, VSR26_J, VSR26_K, - VSR27, VSR27_H, VSR27_J, VSR27_K, - VSR28, VSR28_H, VSR28_J, VSR28_K, - VSR29, VSR29_H, VSR29_J, VSR29_K, - VSR30, VSR30_H, VSR30_J, VSR30_K, - VSR31, VSR31_H, VSR31_J, VSR31_K, - VSR32, VSR32_H, VSR32_J, VSR32_K, - VSR33, VSR33_H, VSR33_J, VSR33_K, - VSR34, VSR34_H, VSR34_J, VSR34_K, - VSR35, VSR35_H, VSR35_J, VSR35_K, - VSR36, VSR36_H, VSR36_J, VSR36_K, - VSR37, VSR37_H, VSR37_J, VSR37_K, - VSR38, VSR38_H, VSR38_J, VSR38_K, - VSR39, VSR39_H, VSR39_J, VSR39_K, - VSR40, VSR40_H, VSR40_J, VSR40_K, - VSR41, VSR41_H, VSR41_J, VSR41_K, - VSR42, VSR42_H, VSR42_J, VSR42_K, - VSR43, VSR43_H, VSR43_J, VSR43_K, - VSR44, VSR44_H, VSR44_J, VSR44_K, - VSR45, VSR45_H, VSR45_J, VSR45_K, - VSR46, VSR46_H, VSR46_J, VSR46_K, - VSR47, VSR47_H, VSR47_J, VSR47_K, - VSR48, VSR48_H, VSR48_J, VSR48_K, - VSR49, VSR49_H, VSR49_J, VSR49_K, - VSR50, VSR50_H, VSR50_J, VSR50_K, - VSR51, VSR51_H, VSR51_J, VSR51_K, - VSR52, VSR52_H, VSR52_J, VSR52_K, - VSR53, VSR53_H, VSR53_J, VSR53_K, - VSR54, VSR54_H, VSR54_J, VSR54_K, - VSR55, VSR55_H, VSR55_J, VSR55_K, - VSR56, VSR56_H, VSR56_J, VSR56_K, - VSR57, VSR57_H, VSR57_J, VSR57_K, - VSR58, VSR58_H, VSR58_J, VSR58_K, - VSR59, VSR59_H, VSR59_J, VSR59_K, - VSR60, VSR60_H, VSR60_J, VSR60_K, - VSR61, VSR61_H, VSR61_J, VSR61_K, - VSR62, VSR62_H, VSR62_J, VSR62_K, - VSR63, VSR63_H, VSR63_J, VSR63_K + VR0 , VR0_H , VR0_J , VR0_K , + VR1 , VR1_H , VR1_J , VR1_K , + VR2 , VR2_H , VR2_J , VR2_K , + VR3 , VR3_H , VR3_J , VR3_K , + VR4 , VR4_H , VR4_J , VR4_K , + VR5 , VR5_H , VR5_J , VR5_K , + VR6 , VR6_H , VR6_J , VR6_K , + VR7 , VR7_H , VR7_J , VR7_K , + VR8 , VR8_H , VR8_J , VR8_K , + VR9 , VR9_H , VR9_J , VR9_K , + VR10, VR10_H, VR10_J, VR10_K, + VR11, VR11_H, VR11_J, VR11_K, + VR12, VR12_H, VR12_J, VR12_K, + VR13, VR13_H, VR13_J, VR13_K, + VR14, VR14_H, VR14_J, VR14_K, + VR15, VR15_H, VR15_J, 
VR15_K, + VR16, VR16_H, VR16_J, VR16_K, + VR17, VR17_H, VR17_J, VR17_K, + VR18, VR18_H, VR18_J, VR18_K, + VR19, VR19_H, VR19_J, VR19_K, + VR20, VR20_H, VR20_J, VR20_K, + VR21, VR21_H, VR21_J, VR21_K, + VR22, VR22_H, VR22_J, VR22_K, + VR23, VR23_H, VR23_J, VR23_K, + VR24, VR24_H, VR24_J, VR24_K, + VR25, VR25_H, VR25_J, VR25_K, + VR26, VR26_H, VR26_J, VR26_K, + VR27, VR27_H, VR27_J, VR27_K, + VR28, VR28_H, VR28_J, VR28_K, + VR29, VR29_H, VR29_J, VR29_K, + VR30, VR30_H, VR30_J, VR30_K, + VR31, VR31_H, VR31_J, VR31_K ); alloc_class chunk3 ( @@ -1163,39 +970,39 @@ reg_class dbl_reg( // Vector-Scalar Register Class // ---------------------------- -reg_class vs_reg( - VSR32, VSR32_H, VSR32_J, VSR32_K, - VSR33, VSR33_H, VSR33_J, VSR33_K, - VSR34, VSR34_H, VSR34_J, VSR34_K, - VSR35, VSR35_H, VSR35_J, VSR35_K, - VSR36, VSR36_H, VSR36_J, VSR36_K, - VSR37, VSR37_H, VSR37_J, VSR37_K, - VSR38, VSR38_H, VSR38_J, VSR38_K, - VSR39, VSR39_H, VSR39_J, VSR39_K, - VSR40, VSR40_H, VSR40_J, VSR40_K, - VSR41, VSR41_H, VSR41_J, VSR41_K, - VSR42, VSR42_H, VSR42_J, VSR42_K, - VSR43, VSR43_H, VSR43_J, VSR43_K, - VSR44, VSR44_H, VSR44_J, VSR44_K, - VSR45, VSR45_H, VSR45_J, VSR45_K, - VSR46, VSR46_H, VSR46_J, VSR46_K, - VSR47, VSR47_H, VSR47_J, VSR47_K, - VSR48, VSR48_H, VSR48_J, VSR48_K, - VSR49, VSR49_H, VSR49_J, VSR49_K, - VSR50, VSR50_H, VSR50_J, VSR50_K, - VSR51, VSR51_H, VSR51_J, VSR51_K, - VSR52, VSR52_H, VSR52_J, VSR52_K, // non-volatile - VSR53, VSR53_H, VSR53_J, VSR53_K, // non-volatile - VSR54, VSR54_H, VSR54_J, VSR54_K, // non-volatile - VSR55, VSR55_H, VSR55_J, VSR55_K, // non-volatile - VSR56, VSR56_H, VSR56_J, VSR56_K, // non-volatile - VSR57, VSR57_H, VSR57_J, VSR57_K, // non-volatile - VSR58, VSR58_H, VSR58_J, VSR58_K, // non-volatile - VSR59, VSR59_H, VSR59_J, VSR59_K, // non-volatile - VSR60, VSR60_H, VSR60_J, VSR60_K, // non-volatile - VSR61, VSR61_H, VSR61_J, VSR61_K, // non-volatile - VSR62, VSR62_H, VSR62_J, VSR62_K, // non-volatile - VSR63, VSR63_H, VSR63_J, VSR63_K // 
non-volatile +reg_class v_reg( + VR0 , VR0_H , VR0_J , VR0_K , + VR1 , VR1_H , VR1_J , VR1_K , + VR2 , VR2_H , VR2_J , VR2_K , + VR3 , VR3_H , VR3_J , VR3_K , + VR4 , VR4_H , VR4_J , VR4_K , + VR5 , VR5_H , VR5_J , VR5_K , + VR6 , VR6_H , VR6_J , VR6_K , + VR7 , VR7_H , VR7_J , VR7_K , + VR8 , VR8_H , VR8_J , VR8_K , + VR9 , VR9_H , VR9_J , VR9_K , + VR10, VR10_H, VR10_J, VR10_K, + VR11, VR11_H, VR11_J, VR11_K, + VR12, VR12_H, VR12_J, VR12_K, + VR13, VR13_H, VR13_J, VR13_K, + VR14, VR14_H, VR14_J, VR14_K, + VR15, VR15_H, VR15_J, VR15_K, + VR16, VR16_H, VR16_J, VR16_K, + VR17, VR17_H, VR17_J, VR17_K, + VR18, VR18_H, VR18_J, VR18_K, + VR19, VR19_H, VR19_J, VR19_K, + VR20, VR20_H, VR20_J, VR20_K, + VR21, VR21_H, VR21_J, VR21_K, + VR22, VR22_H, VR22_J, VR22_K, + VR23, VR23_H, VR23_J, VR23_K, + VR24, VR24_H, VR24_J, VR24_K, + VR25, VR25_H, VR25_J, VR25_K, + VR26, VR26_H, VR26_J, VR26_K, + VR27, VR27_H, VR27_J, VR27_K, + VR28, VR28_H, VR28_J, VR28_K, + VR29, VR29_H, VR29_J, VR29_K, + VR30, VR30_H, VR30_J, VR30_K, + VR31, VR31_H, VR31_J, VR31_K ); %} @@ -1908,9 +1715,9 @@ const Pipeline * MachEpilogNode::pipeline() const { // ============================================================================= -// Figure out which register class each belongs in: rc_int, rc_float, rc_vs or +// Figure out which register class each belongs in: rc_int, rc_float, rc_vec or // rc_stack. -enum RC { rc_bad, rc_int, rc_float, rc_vs, rc_stack }; +enum RC { rc_bad, rc_int, rc_float, rc_vec, rc_stack }; static enum RC rc_class(OptoReg::Name reg) { // Return the register class for the given register. The given register @@ -1924,12 +1731,12 @@ static enum RC rc_class(OptoReg::Name reg) { if (reg < ConcreteRegisterImpl::max_gpr) return rc_int; // We have 64 floating-point register halves, starting at index 64. 
- STATIC_ASSERT((int)ConcreteRegisterImpl::max_fpr == (int)MachRegisterNumbers::VSR0_num); + STATIC_ASSERT((int)ConcreteRegisterImpl::max_fpr == (int)MachRegisterNumbers::VR0_num); if (reg < ConcreteRegisterImpl::max_fpr) return rc_float; // We have 64 vector-scalar registers, starting at index 128. - STATIC_ASSERT((int)ConcreteRegisterImpl::max_vsr == (int)MachRegisterNumbers::CR0_num); - if (reg < ConcreteRegisterImpl::max_vsr) return rc_vs; + STATIC_ASSERT((int)ConcreteRegisterImpl::max_vr == (int)MachRegisterNumbers::CR0_num); + if (reg < ConcreteRegisterImpl::max_vr) return rc_vec; // Condition and special purpose registers are not allocated. We only accept stack from here. assert(OptoReg::is_stack(reg), "what else is it?"); @@ -2005,9 +1812,9 @@ uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *r } size += 16; } - // VectorSRegister->Memory Spill. - else if (src_lo_rc == rc_vs && dst_lo_rc == rc_stack) { - VectorSRegister Rsrc = as_VectorSRegister(Matcher::_regEncode[src_lo]); + // VectorRegister->Memory Spill. + else if (src_lo_rc == rc_vec && dst_lo_rc == rc_stack) { + VectorSRegister Rsrc = as_VectorRegister(Matcher::_regEncode[src_lo]).to_vsr(); int dst_offset = ra_->reg2offset(dst_lo); if (PowerArchitecturePPC64 >= 9) { if (is_aligned(dst_offset, 16)) { @@ -2032,9 +1839,9 @@ uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *r size += 8; } } - // Memory->VectorSRegister Spill. - else if (src_lo_rc == rc_stack && dst_lo_rc == rc_vs) { - VectorSRegister Rdst = as_VectorSRegister(Matcher::_regEncode[dst_lo]); + // Memory->VectorRegister Spill. 
+ else if (src_lo_rc == rc_stack && dst_lo_rc == rc_vec) { + VectorSRegister Rdst = as_VectorRegister(Matcher::_regEncode[dst_lo]).to_vsr(); int src_offset = ra_->reg2offset(src_lo); if (PowerArchitecturePPC64 >= 9) { if (is_aligned(src_offset, 16)) { @@ -2057,17 +1864,17 @@ uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *r size += 8; } } - // VectorSRegister->VectorSRegister. - else if (src_lo_rc == rc_vs && dst_lo_rc == rc_vs) { - VectorSRegister Rsrc = as_VectorSRegister(Matcher::_regEncode[src_lo]); - VectorSRegister Rdst = as_VectorSRegister(Matcher::_regEncode[dst_lo]); + // VectorRegister->VectorRegister. + else if (src_lo_rc == rc_vec && dst_lo_rc == rc_vec) { + VectorSRegister Rsrc = as_VectorRegister(Matcher::_regEncode[src_lo]).to_vsr(); + VectorSRegister Rdst = as_VectorRegister(Matcher::_regEncode[dst_lo]).to_vsr(); if (masm) { __ xxlor(Rdst, Rsrc, Rsrc); } size += 4; } else { - ShouldNotReachHere(); // No VSR spill. + ShouldNotReachHere(); // No VR spill. } return size; } @@ -4048,7 +3855,7 @@ ins_attrib ins_is_late_expanded_null_check_candidate(false); // Formats are generated automatically for constants and base registers. 
operand vecX() %{ - constraint(ALLOC_IN_RC(vs_reg)); + constraint(ALLOC_IN_RC(v_reg)); match(VecX); format %{ %} @@ -5624,7 +5431,7 @@ instruct loadV16_Power8(vecX dst, indirect mem) %{ format %{ "LXVD2X $dst, $mem \t// load 16-byte Vector" %} size(4); ins_encode %{ - __ lxvd2x($dst$$VectorSRegister, $mem$$Register); + __ lxvd2x($dst$$VectorRegister.to_vsr(), $mem$$Register); %} ins_pipe(pipe_class_default); %} @@ -5637,7 +5444,7 @@ instruct loadV16_Power9(vecX dst, memoryAlg16 mem) %{ format %{ "LXV $dst, $mem \t// load 16-byte Vector" %} size(4); ins_encode %{ - __ lxv($dst$$VectorSRegister, $mem$$disp, $mem$$Register); + __ lxv($dst$$VectorRegister.to_vsr(), $mem$$disp, $mem$$Register); %} ins_pipe(pipe_class_default); %} @@ -6664,7 +6471,7 @@ instruct storeV16_Power8(indirect mem, vecX src) %{ format %{ "STXVD2X $mem, $src \t// store 16-byte Vector" %} size(4); ins_encode %{ - __ stxvd2x($src$$VectorSRegister, $mem$$Register); + __ stxvd2x($src$$VectorRegister.to_vsr(), $mem$$Register); %} ins_pipe(pipe_class_default); %} @@ -6677,7 +6484,7 @@ instruct storeV16_Power9(memoryAlg16 mem, vecX src) %{ format %{ "STXV $mem, $src \t// store 16-byte Vector" %} size(4); ins_encode %{ - __ stxv($src$$VectorSRegister, $mem$$disp, $mem$$Register); + __ stxv($src$$VectorRegister.to_vsr(), $mem$$disp, $mem$$Register); %} ins_pipe(pipe_class_default); %} @@ -12634,9 +12441,9 @@ instruct bytes_reverse_int_vec(iRegIdst dst, iRegIsrc src, vecX tmpV) %{ "\tMFVSRWZ $dst, $tmpV" %} ins_encode %{ - __ mtvsrwz($tmpV$$VectorSRegister, $src$$Register); - __ xxbrw($tmpV$$VectorSRegister, $tmpV$$VectorSRegister); - __ mfvsrwz($dst$$Register, $tmpV$$VectorSRegister); + __ mtvsrwz($tmpV$$VectorRegister.to_vsr(), $src$$Register); + __ xxbrw($tmpV$$VectorRegister.to_vsr(), $tmpV$$VectorRegister->to_vsr()); + __ mfvsrwz($dst$$Register, $tmpV$$VectorRegister->to_vsr()); %} ins_pipe(pipe_class_default); %} @@ -12706,9 +12513,9 @@ instruct bytes_reverse_long_vec(iRegLdst dst, iRegLsrc src, vecX 
tmpV) %{ "\tMFVSRD $dst, $tmpV" %} ins_encode %{ - __ mtvsrd($tmpV$$VectorSRegister, $src$$Register); - __ xxbrd($tmpV$$VectorSRegister, $tmpV$$VectorSRegister); - __ mfvsrd($dst$$Register, $tmpV$$VectorSRegister); + __ mtvsrd($tmpV$$VectorRegister->to_vsr(), $src$$Register); + __ xxbrd($tmpV$$VectorRegister->to_vsr(), $tmpV$$VectorRegister->to_vsr()); + __ mfvsrd($dst$$Register, $tmpV$$VectorRegister->to_vsr()); %} ins_pipe(pipe_class_default); %} @@ -12947,7 +12754,7 @@ instruct mtvsrwz(vecX temp1, iRegIsrc src) %{ format %{ "MTVSRWZ $temp1, $src \t// Move to 16-byte register" %} size(4); ins_encode %{ - __ mtvsrwz($temp1$$VectorSRegister, $src$$Register); + __ mtvsrwz($temp1$$VectorRegister->to_vsr(), $src$$Register); %} ins_pipe(pipe_class_default); %} @@ -12958,7 +12765,7 @@ instruct xxspltw(vecX dst, vecX src, immI8 imm1) %{ format %{ "XXSPLTW $dst, $src, $imm1 \t// Splat word" %} size(4); ins_encode %{ - __ xxspltw($dst$$VectorSRegister, $src$$VectorSRegister, $imm1$$constant); + __ xxspltw($dst$$VectorRegister->to_vsr(), $src$$VectorRegister->to_vsr(), $imm1$$constant); %} ins_pipe(pipe_class_default); %} @@ -12969,7 +12776,7 @@ instruct xscvdpspn_regF(vecX dst, regF src) %{ format %{ "XSCVDPSPN $dst, $src \t// Convert scalar single precision to vector single precision" %} size(4); ins_encode %{ - __ xscvdpspn($dst$$VectorSRegister, $src$$FloatRegister->to_vsr()); + __ xscvdpspn($dst$$VectorRegister->to_vsr(), $src$$FloatRegister->to_vsr()); %} ins_pipe(pipe_class_default); %} @@ -13076,7 +12883,7 @@ instruct repl16B_immI0(vecX dst, immI_0 zero) %{ format %{ "XXLXOR $dst, $zero \t// replicate16B" %} size(4); ins_encode %{ - __ xxlxor($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister); + __ xxlxor($dst$$VectorRegister->to_vsr(), $dst$$VectorRegister->to_vsr(), $dst$$VectorRegister->to_vsr()); %} ins_pipe(pipe_class_default); %} @@ -13089,7 +12896,7 @@ instruct repl16B_immIminus1(vecX dst, immI_minus1 src) %{ format %{ "XXLEQV $dst, $src 
\t// replicate16B" %} size(4); ins_encode %{ - __ xxleqv($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister); + __ xxleqv($dst$$VectorRegister->to_vsr(), $dst$$VectorRegister->to_vsr(), $dst$$VectorRegister->to_vsr()); %} ins_pipe(pipe_class_default); %} @@ -13154,7 +12961,7 @@ instruct repl8S_immI0(vecX dst, immI_0 zero) %{ format %{ "XXLXOR $dst, $zero \t// replicate8S" %} size(4); ins_encode %{ - __ xxlxor($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister); + __ xxlxor($dst$$VectorRegister->to_vsr(), $dst$$VectorRegister->to_vsr(), $dst$$VectorRegister->to_vsr()); %} ins_pipe(pipe_class_default); %} @@ -13167,7 +12974,7 @@ instruct repl8S_immIminus1(vecX dst, immI_minus1 src) %{ format %{ "XXLEQV $dst, $src \t// replicate8S" %} size(4); ins_encode %{ - __ xxleqv($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister); + __ xxleqv($dst$$VectorRegister->to_vsr(), $dst$$VectorRegister->to_vsr(), $dst$$VectorRegister->to_vsr()); %} ins_pipe(pipe_class_default); %} @@ -13232,7 +13039,7 @@ instruct repl4I_immI0(vecX dst, immI_0 zero) %{ format %{ "XXLXOR $dst, $zero \t// replicate4I" %} size(4); ins_encode %{ - __ xxlxor($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister); + __ xxlxor($dst$$VectorRegister->to_vsr(), $dst$$VectorRegister->to_vsr(), $dst$$VectorRegister->to_vsr()); %} ins_pipe(pipe_class_default); %} @@ -13245,7 +13052,7 @@ instruct repl4I_immIminus1(vecX dst, immI_minus1 src) %{ format %{ "XXLEQV $dst, $dst, $dst \t// replicate4I" %} size(4); ins_encode %{ - __ xxleqv($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister); + __ xxleqv($dst$$VectorRegister->to_vsr(), $dst$$VectorRegister->to_vsr(), $dst$$VectorRegister->to_vsr()); %} ins_pipe(pipe_class_default); %} @@ -13302,7 +13109,7 @@ instruct vadd16B_reg(vecX dst, vecX src1, vecX src2) %{ format %{ "VADDUBM $dst,$src1,$src2\t// add packed16B" %} size(4); ins_encode %{ - __ vaddubm($dst$$VectorSRegister->to_vr(), 
$src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr()); + __ vaddubm($dst$$VectorRegister, $src1$$VectorRegister, $src2$$VectorRegister); %} ins_pipe(pipe_class_default); %} @@ -13313,7 +13120,7 @@ instruct vadd8S_reg(vecX dst, vecX src1, vecX src2) %{ format %{ "VADDUHM $dst,$src1,$src2\t// add packed8S" %} size(4); ins_encode %{ - __ vadduhm($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr()); + __ vadduhm($dst$$VectorRegister, $src1$$VectorRegister, $src2$$VectorRegister); %} ins_pipe(pipe_class_default); %} @@ -13324,7 +13131,7 @@ instruct vadd4I_reg(vecX dst, vecX src1, vecX src2) %{ format %{ "VADDUWM $dst,$src1,$src2\t// add packed4I" %} size(4); ins_encode %{ - __ vadduwm($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr()); + __ vadduwm($dst$$VectorRegister, $src1$$VectorRegister, $src2$$VectorRegister); %} ins_pipe(pipe_class_default); %} @@ -13335,7 +13142,7 @@ instruct vadd4F_reg(vecX dst, vecX src1, vecX src2) %{ format %{ "VADDFP $dst,$src1,$src2\t// add packed4F" %} size(4); ins_encode %{ - __ vaddfp($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr()); + __ vaddfp($dst$$VectorRegister, $src1$$VectorRegister, $src2$$VectorRegister); %} ins_pipe(pipe_class_default); %} @@ -13346,7 +13153,7 @@ instruct vadd2L_reg(vecX dst, vecX src1, vecX src2) %{ format %{ "VADDUDM $dst,$src1,$src2\t// add packed2L" %} size(4); ins_encode %{ - __ vaddudm($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr()); + __ vaddudm($dst$$VectorRegister, $src1$$VectorRegister, $src2$$VectorRegister); %} ins_pipe(pipe_class_default); %} @@ -13357,7 +13164,7 @@ instruct vadd2D_reg(vecX dst, vecX src1, vecX src2) %{ format %{ "XVADDDP $dst,$src1,$src2\t// add packed2D" %} size(4); ins_encode %{ - __ xvadddp($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister); + __ 
xvadddp($dst$$VectorRegister->to_vsr(), $src1$$VectorRegister->to_vsr(), $src2$$VectorRegister->to_vsr()); %} ins_pipe(pipe_class_default); %} @@ -13370,7 +13177,7 @@ instruct vsub16B_reg(vecX dst, vecX src1, vecX src2) %{ format %{ "VSUBUBM $dst,$src1,$src2\t// sub packed16B" %} size(4); ins_encode %{ - __ vsububm($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr()); + __ vsububm($dst$$VectorRegister, $src1$$VectorRegister, $src2$$VectorRegister); %} ins_pipe(pipe_class_default); %} @@ -13381,7 +13188,7 @@ instruct vsub8S_reg(vecX dst, vecX src1, vecX src2) %{ format %{ "VSUBUHM $dst,$src1,$src2\t// sub packed8S" %} size(4); ins_encode %{ - __ vsubuhm($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr()); + __ vsubuhm($dst$$VectorRegister, $src1$$VectorRegister, $src2$$VectorRegister); %} ins_pipe(pipe_class_default); %} @@ -13392,7 +13199,7 @@ instruct vsub4I_reg(vecX dst, vecX src1, vecX src2) %{ format %{ "VSUBUWM $dst,$src1,$src2\t// sub packed4I" %} size(4); ins_encode %{ - __ vsubuwm($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr()); + __ vsubuwm($dst$$VectorRegister, $src1$$VectorRegister, $src2$$VectorRegister); %} ins_pipe(pipe_class_default); %} @@ -13403,7 +13210,7 @@ instruct vsub4F_reg(vecX dst, vecX src1, vecX src2) %{ format %{ "VSUBFP $dst,$src1,$src2\t// sub packed4F" %} size(4); ins_encode %{ - __ vsubfp($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr()); + __ vsubfp($dst$$VectorRegister, $src1$$VectorRegister, $src2$$VectorRegister); %} ins_pipe(pipe_class_default); %} @@ -13414,7 +13221,7 @@ instruct vsub2L_reg(vecX dst, vecX src1, vecX src2) %{ format %{ "VSUBUDM $dst,$src1,$src2\t// sub packed2L" %} size(4); ins_encode %{ - __ vsubudm($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr()); + __ 
vsubudm($dst$$VectorRegister, $src1$$VectorRegister, $src2$$VectorRegister); %} ins_pipe(pipe_class_default); %} @@ -13425,7 +13232,7 @@ instruct vsub2D_reg(vecX dst, vecX src1, vecX src2) %{ format %{ "XVSUBDP $dst,$src1,$src2\t// sub packed2D" %} size(4); ins_encode %{ - __ xvsubdp($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister); + __ xvsubdp($dst$$VectorRegister->to_vsr(), $src1$$VectorRegister->to_vsr(), $src2$$VectorRegister->to_vsr()); %} ins_pipe(pipe_class_default); %} @@ -13440,8 +13247,8 @@ instruct vmul8S_reg(vecX dst, vecX src1, vecX src2, vecX tmp) %{ format %{ "VMLADDUHM $dst,$src1,$src2\t// mul packed8S" %} size(8); ins_encode %{ - __ vspltish($tmp$$VectorSRegister->to_vr(), 0); - __ vmladduhm($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr(), $tmp$$VectorSRegister->to_vr()); + __ vspltish($tmp$$VectorRegister, 0); + __ vmladduhm($dst$$VectorRegister, $src1$$VectorRegister, $src2$$VectorRegister, $tmp$$VectorRegister); %} ins_pipe(pipe_class_default); %} @@ -13452,7 +13259,7 @@ instruct vmul4I_reg(vecX dst, vecX src1, vecX src2) %{ format %{ "VMULUWM $dst,$src1,$src2\t// mul packed4I" %} size(4); ins_encode %{ - __ vmuluwm($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr()); + __ vmuluwm($dst$$VectorRegister, $src1$$VectorRegister, $src2$$VectorRegister); %} ins_pipe(pipe_class_default); %} @@ -13463,7 +13270,7 @@ instruct vmul4F_reg(vecX dst, vecX src1, vecX src2) %{ format %{ "XVMULSP $dst,$src1,$src2\t// mul packed4F" %} size(4); ins_encode %{ - __ xvmulsp($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister); + __ xvmulsp($dst$$VectorRegister->to_vsr(), $src1$$VectorRegister->to_vsr(), $src2$$VectorRegister->to_vsr()); %} ins_pipe(pipe_class_default); %} @@ -13474,7 +13281,7 @@ instruct vmul2D_reg(vecX dst, vecX src1, vecX src2) %{ format %{ "XVMULDP $dst,$src1,$src2\t// mul packed2D" %} size(4); ins_encode %{ - __ 
xvmuldp($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister); + __ xvmuldp($dst$$VectorRegister->to_vsr(), $src1$$VectorRegister->to_vsr(), $src2$$VectorRegister->to_vsr()); %} ins_pipe(pipe_class_default); %} @@ -13487,7 +13294,7 @@ instruct vdiv4F_reg(vecX dst, vecX src1, vecX src2) %{ format %{ "XVDIVSP $dst,$src1,$src2\t// div packed4F" %} size(4); ins_encode %{ - __ xvdivsp($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister); + __ xvdivsp($dst$$VectorRegister->to_vsr(), $src1$$VectorRegister->to_vsr(), $src2$$VectorRegister->to_vsr()); %} ins_pipe(pipe_class_default); %} @@ -13498,7 +13305,7 @@ instruct vdiv2D_reg(vecX dst, vecX src1, vecX src2) %{ format %{ "XVDIVDP $dst,$src1,$src2\t// div packed2D" %} size(4); ins_encode %{ - __ xvdivdp($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister); + __ xvdivdp($dst$$VectorRegister->to_vsr(), $src1$$VectorRegister->to_vsr(), $src2$$VectorRegister->to_vsr()); %} ins_pipe(pipe_class_default); %} @@ -13513,10 +13320,10 @@ instruct vmin_reg(vecX dst, vecX src1, vecX src2) %{ BasicType bt = Matcher::vector_element_basic_type(this); switch (bt) { case T_INT: - __ vminsw($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr()); + __ vminsw($dst$$VectorRegister, $src1$$VectorRegister, $src2$$VectorRegister); break; case T_LONG: - __ vminsd($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr()); + __ vminsd($dst$$VectorRegister, $src1$$VectorRegister, $src2$$VectorRegister); break; default: ShouldNotReachHere(); @@ -13533,10 +13340,10 @@ instruct vmax_reg(vecX dst, vecX src1, vecX src2) %{ BasicType bt = Matcher::vector_element_basic_type(this); switch (bt) { case T_INT: - __ vmaxsw($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr()); + __ vmaxsw($dst$$VectorRegister, $src1$$VectorRegister, $src2$$VectorRegister); break; case T_LONG: - __ 
vmaxsd($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr()); + __ vmaxsd($dst$$VectorRegister, $src1$$VectorRegister, $src2$$VectorRegister); break; default: ShouldNotReachHere(); @@ -13550,7 +13357,7 @@ instruct vand(vecX dst, vecX src1, vecX src2) %{ size(4); format %{ "VAND $dst,$src1,$src2\t// and vectors" %} ins_encode %{ - __ vand($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr()); + __ vand($dst$$VectorRegister, $src1$$VectorRegister, $src2$$VectorRegister); %} ins_pipe(pipe_class_default); %} @@ -13560,7 +13367,7 @@ instruct vor(vecX dst, vecX src1, vecX src2) %{ size(4); format %{ "VOR $dst,$src1,$src2\t// or vectors" %} ins_encode %{ - __ vor($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr()); + __ vor($dst$$VectorRegister, $src1$$VectorRegister, $src2$$VectorRegister); %} ins_pipe(pipe_class_default); %} @@ -13570,7 +13377,7 @@ instruct vxor(vecX dst, vecX src1, vecX src2) %{ size(4); format %{ "VXOR $dst,$src1,$src2\t// xor vectors" %} ins_encode %{ - __ vxor($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr()); + __ vxor($dst$$VectorRegister, $src1$$VectorRegister, $src2$$VectorRegister); %} ins_pipe(pipe_class_default); %} @@ -13588,8 +13395,8 @@ instruct reductionI_arith_logic(iRegIdst dst, iRegIsrc srcInt, vecX srcVec, vecX size(24); ins_encode %{ int opcode = this->ideal_Opcode(); - __ reduceI(opcode, $dst$$Register, $srcInt$$Register, $srcVec$$VectorSRegister->to_vr(), - $tmp1$$VectorSRegister->to_vr(), $tmp2$$VectorSRegister->to_vr()); + __ reduceI(opcode, $dst$$Register, $srcInt$$Register, $srcVec$$VectorRegister, + $tmp1$$VectorRegister, $tmp2$$VectorRegister); %} ins_pipe(pipe_class_default); %} @@ -13604,8 +13411,8 @@ instruct reductionI_min_max(iRegIdst dst, iRegIsrc srcInt, vecX srcVec, vecX tmp size(28); ins_encode %{ int opcode = this->ideal_Opcode(); - __ 
reduceI(opcode, $dst$$Register, $srcInt$$Register, $srcVec$$VectorSRegister->to_vr(), - $tmp1$$VectorSRegister->to_vr(), $tmp2$$VectorSRegister->to_vr()); + __ reduceI(opcode, $dst$$Register, $srcInt$$Register, $srcVec$$VectorRegister, + $tmp1$$VectorRegister, $tmp2$$VectorRegister); %} ins_pipe(pipe_class_default); %} @@ -13618,7 +13425,7 @@ instruct vabs4F_reg(vecX dst, vecX src) %{ format %{ "XVABSSP $dst,$src\t// absolute packed4F" %} size(4); ins_encode %{ - __ xvabssp($dst$$VectorSRegister, $src$$VectorSRegister); + __ xvabssp($dst$$VectorRegister->to_vsr(), $src$$VectorRegister->to_vsr()); %} ins_pipe(pipe_class_default); %} @@ -13629,7 +13436,7 @@ instruct vabs2D_reg(vecX dst, vecX src) %{ format %{ "XVABSDP $dst,$src\t// absolute packed2D" %} size(4); ins_encode %{ - __ xvabsdp($dst$$VectorSRegister, $src$$VectorSRegister); + __ xvabsdp($dst$$VectorRegister->to_vsr(), $src$$VectorRegister->to_vsr()); %} ins_pipe(pipe_class_default); %} @@ -13666,13 +13473,13 @@ instruct vround2D_reg(vecX dst, vecX src, immI8 rmode) %{ ins_encode %{ switch ($rmode$$constant) { case RoundDoubleModeNode::rmode_rint: - __ xvrdpic($dst$$VectorSRegister, $src$$VectorSRegister); + __ xvrdpic($dst$$VectorRegister->to_vsr(), $src$$VectorRegister->to_vsr()); break; case RoundDoubleModeNode::rmode_floor: - __ xvrdpim($dst$$VectorSRegister, $src$$VectorSRegister); + __ xvrdpim($dst$$VectorRegister->to_vsr(), $src$$VectorRegister->to_vsr()); break; case RoundDoubleModeNode::rmode_ceil: - __ xvrdpip($dst$$VectorSRegister, $src$$VectorSRegister); + __ xvrdpip($dst$$VectorRegister->to_vsr(), $src$$VectorRegister->to_vsr()); break; default: ShouldNotReachHere(); @@ -13689,7 +13496,7 @@ instruct vneg4F_reg(vecX dst, vecX src) %{ format %{ "XVNEGSP $dst,$src\t// negate packed4F" %} size(4); ins_encode %{ - __ xvnegsp($dst$$VectorSRegister, $src$$VectorSRegister); + __ xvnegsp($dst$$VectorRegister->to_vsr(), $src$$VectorRegister->to_vsr()); %} ins_pipe(pipe_class_default); %} @@ -13700,7 
+13507,7 @@ instruct vneg2D_reg(vecX dst, vecX src) %{ format %{ "XVNEGDP $dst,$src\t// negate packed2D" %} size(4); ins_encode %{ - __ xvnegdp($dst$$VectorSRegister, $src$$VectorSRegister); + __ xvnegdp($dst$$VectorRegister->to_vsr(), $src$$VectorRegister->to_vsr()); %} ins_pipe(pipe_class_default); %} @@ -13713,7 +13520,7 @@ instruct vsqrt4F_reg(vecX dst, vecX src) %{ format %{ "XVSQRTSP $dst,$src\t// sqrt packed4F" %} size(4); ins_encode %{ - __ xvsqrtsp($dst$$VectorSRegister, $src$$VectorSRegister); + __ xvsqrtsp($dst$$VectorRegister->to_vsr(), $src$$VectorRegister->to_vsr()); %} ins_pipe(pipe_class_default); %} @@ -13724,7 +13531,7 @@ instruct vsqrt2D_reg(vecX dst, vecX src) %{ format %{ "XVSQRTDP $dst,$src\t// sqrt packed2D" %} size(4); ins_encode %{ - __ xvsqrtdp($dst$$VectorSRegister, $src$$VectorSRegister); + __ xvsqrtdp($dst$$VectorRegister->to_vsr(), $src$$VectorRegister->to_vsr()); %} ins_pipe(pipe_class_default); %} @@ -13740,16 +13547,16 @@ instruct vpopcnt_reg(vecX dst, vecX src) %{ BasicType bt = Matcher::vector_element_basic_type(this); switch (bt) { case T_BYTE: - __ vpopcntb($dst$$VectorSRegister->to_vr(), $src$$VectorSRegister->to_vr()); + __ vpopcntb($dst$$VectorRegister, $src$$VectorRegister); break; case T_SHORT: - __ vpopcnth($dst$$VectorSRegister->to_vr(), $src$$VectorSRegister->to_vr()); + __ vpopcnth($dst$$VectorRegister, $src$$VectorRegister); break; case T_INT: - __ vpopcntw($dst$$VectorSRegister->to_vr(), $src$$VectorSRegister->to_vr()); + __ vpopcntw($dst$$VectorRegister, $src$$VectorRegister); break; case T_LONG: - __ vpopcntd($dst$$VectorSRegister->to_vr(), $src$$VectorSRegister->to_vr()); + __ vpopcntd($dst$$VectorRegister, $src$$VectorRegister); break; default: ShouldNotReachHere(); @@ -13766,16 +13573,16 @@ instruct vcount_leading_zeros_reg(vecX dst, vecX src) %{ BasicType bt = Matcher::vector_element_basic_type(this); switch (bt) { case T_BYTE: - __ vclzb($dst$$VectorSRegister->to_vr(), $src$$VectorSRegister->to_vr()); + __ 
vclzb($dst$$VectorRegister, $src$$VectorRegister); break; case T_SHORT: - __ vclzh($dst$$VectorSRegister->to_vr(), $src$$VectorSRegister->to_vr()); + __ vclzh($dst$$VectorRegister, $src$$VectorRegister); break; case T_INT: - __ vclzw($dst$$VectorSRegister->to_vr(), $src$$VectorSRegister->to_vr()); + __ vclzw($dst$$VectorRegister, $src$$VectorRegister); break; case T_LONG: - __ vclzd($dst$$VectorSRegister->to_vr(), $src$$VectorSRegister->to_vr()); + __ vclzd($dst$$VectorRegister, $src$$VectorRegister); break; default: ShouldNotReachHere(); @@ -13792,16 +13599,16 @@ instruct vcount_trailing_zeros_reg(vecX dst, vecX src) %{ BasicType bt = Matcher::vector_element_basic_type(this); switch (bt) { case T_BYTE: - __ vctzb($dst$$VectorSRegister->to_vr(), $src$$VectorSRegister->to_vr()); + __ vctzb($dst$$VectorRegister, $src$$VectorRegister); break; case T_SHORT: - __ vctzh($dst$$VectorSRegister->to_vr(), $src$$VectorSRegister->to_vr()); + __ vctzh($dst$$VectorRegister, $src$$VectorRegister); break; case T_INT: - __ vctzw($dst$$VectorSRegister->to_vr(), $src$$VectorSRegister->to_vr()); + __ vctzw($dst$$VectorRegister, $src$$VectorRegister); break; case T_LONG: - __ vctzd($dst$$VectorSRegister->to_vr(), $src$$VectorSRegister->to_vr()); + __ vctzd($dst$$VectorRegister, $src$$VectorRegister); break; default: ShouldNotReachHere(); @@ -13821,7 +13628,7 @@ instruct vfma4F(vecX dst, vecX src1, vecX src2) %{ size(4); ins_encode %{ assert(UseFMA, "Needs FMA instructions support."); - __ xvmaddasp($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister); + __ xvmaddasp($dst$$VectorRegister->to_vsr(), $src1$$VectorRegister->to_vsr(), $src2$$VectorRegister->to_vsr()); %} ins_pipe(pipe_class_default); %} @@ -13837,7 +13644,7 @@ instruct vfma4F_neg1(vecX dst, vecX src1, vecX src2) %{ size(4); ins_encode %{ assert(UseFMA, "Needs FMA instructions support."); - __ xvnmsubasp($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister); + __ 
xvnmsubasp($dst$$VectorRegister->to_vsr(), $src1$$VectorRegister->to_vsr(), $src2$$VectorRegister->to_vsr()); %} ins_pipe(pipe_class_default); %} @@ -13852,7 +13659,7 @@ instruct vfma4F_neg2(vecX dst, vecX src1, vecX src2) %{ size(4); ins_encode %{ assert(UseFMA, "Needs FMA instructions support."); - __ xvmsubasp($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister); + __ xvmsubasp($dst$$VectorRegister->to_vsr(), $src1$$VectorRegister->to_vsr(), $src2$$VectorRegister->to_vsr()); %} ins_pipe(pipe_class_default); %} @@ -13867,7 +13674,7 @@ instruct vfma2D(vecX dst, vecX src1, vecX src2) %{ size(4); ins_encode %{ assert(UseFMA, "Needs FMA instructions support."); - __ xvmaddadp($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister); + __ xvmaddadp($dst$$VectorRegister->to_vsr(), $src1$$VectorRegister->to_vsr(), $src2$$VectorRegister->to_vsr()); %} ins_pipe(pipe_class_default); %} @@ -13883,7 +13690,7 @@ instruct vfma2D_neg1(vecX dst, vecX src1, vecX src2) %{ size(4); ins_encode %{ assert(UseFMA, "Needs FMA instructions support."); - __ xvnmsubadp($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister); + __ xvnmsubadp($dst$$VectorRegister->to_vsr(), $src1$$VectorRegister->to_vsr(), $src2$$VectorRegister->to_vsr()); %} ins_pipe(pipe_class_default); %} @@ -13898,7 +13705,7 @@ instruct vfma2D_neg2(vecX dst, vecX src1, vecX src2) %{ size(4); ins_encode %{ assert(UseFMA, "Needs FMA instructions support."); - __ xvmsubadp($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister); + __ xvmsubadp($dst$$VectorRegister->to_vsr(), $src1$$VectorRegister->to_vsr(), $src2$$VectorRegister->to_vsr()); %} ins_pipe(pipe_class_default); %} @@ -13993,7 +13800,7 @@ instruct repl4F_immF0(vecX dst, immF_0 zero) %{ format %{ "XXLXOR $dst, $zero \t// replicate4F" %} size(4); ins_encode %{ - __ xxlxor($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister); + __ xxlxor($dst$$VectorRegister->to_vsr(), 
$dst$$VectorRegister->to_vsr(), $dst$$VectorRegister->to_vsr()); %} ins_pipe(pipe_class_default); %} @@ -14006,7 +13813,7 @@ instruct repl2D_reg_Ex(vecX dst, regD src) %{ format %{ "XXPERMDI $dst, $src, $src, 0 \t// Splat doubleword" %} size(4); ins_encode %{ - __ xxpermdi($dst$$VectorSRegister, $src$$FloatRegister->to_vsr(), $src$$FloatRegister->to_vsr(), 0); + __ xxpermdi($dst$$VectorRegister->to_vsr(), $src$$FloatRegister->to_vsr(), $src$$FloatRegister->to_vsr(), 0); %} ins_pipe(pipe_class_default); %} @@ -14019,7 +13826,7 @@ instruct repl2D_immD0(vecX dst, immD_0 zero) %{ format %{ "XXLXOR $dst, $zero \t// replicate2D" %} size(4); ins_encode %{ - __ xxlxor($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister); + __ xxlxor($dst$$VectorRegister->to_vsr(), $dst$$VectorRegister->to_vsr(), $dst$$VectorRegister->to_vsr()); %} ins_pipe(pipe_class_default); %} @@ -14031,7 +13838,7 @@ instruct mtvsrd(vecX dst, iRegLsrc src) %{ format %{ "MTVSRD $dst, $src \t// Move to 16-byte register" %} size(4); ins_encode %{ - __ mtvsrd($dst$$VectorSRegister, $src$$Register); + __ mtvsrd($dst$$VectorRegister->to_vsr(), $src$$Register); %} ins_pipe(pipe_class_default); %} @@ -14042,7 +13849,7 @@ instruct xxspltd(vecX dst, vecX src, immI8 zero) %{ format %{ "XXSPLATD $dst, $src, $zero \t// Splat doubleword" %} size(4); ins_encode %{ - __ xxpermdi($dst$$VectorSRegister, $src$$VectorSRegister, $src$$VectorSRegister, $zero$$constant); + __ xxpermdi($dst$$VectorRegister->to_vsr(), $src$$VectorRegister->to_vsr(), $src$$VectorRegister->to_vsr(), $zero$$constant); %} ins_pipe(pipe_class_default); %} @@ -14053,7 +13860,7 @@ instruct xxpermdi(vecX dst, vecX src1, vecX src2, immI8 zero) %{ format %{ "XXPERMDI $dst, $src1, $src2, $zero \t// Splat doubleword" %} size(4); ins_encode %{ - __ xxpermdi($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister, $zero$$constant); + __ xxpermdi($dst$$VectorRegister->to_vsr(), $src1$$VectorRegister->to_vsr(), 
$src2$$VectorRegister->to_vsr(), $zero$$constant); %} ins_pipe(pipe_class_default); %} @@ -14078,7 +13885,7 @@ instruct repl2L_immI0(vecX dst, immI_0 zero) %{ format %{ "XXLXOR $dst, $zero \t// replicate2L" %} size(4); ins_encode %{ - __ xxlxor($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister); + __ xxlxor($dst$$VectorRegister->to_vsr(), $dst$$VectorRegister->to_vsr(), $dst$$VectorRegister->to_vsr()); %} ins_pipe(pipe_class_default); %} @@ -14091,7 +13898,7 @@ instruct repl2L_immIminus1(vecX dst, immI_minus1 src) %{ format %{ "XXLEQV $dst, $src \t// replicate2L" %} size(4); ins_encode %{ - __ xxleqv($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister); + __ xxleqv($dst$$VectorRegister->to_vsr(), $dst$$VectorRegister->to_vsr(), $dst$$VectorRegister->to_vsr()); %} ins_pipe(pipe_class_default); %} diff --git a/src/hotspot/cpu/ppc/register_ppc.hpp b/src/hotspot/cpu/ppc/register_ppc.hpp index b7949750dcc..2613d6c9822 100644 --- a/src/hotspot/cpu/ppc/register_ppc.hpp +++ b/src/hotspot/cpu/ppc/register_ppc.hpp @@ -321,6 +321,7 @@ class VectorRegister { // accessors constexpr int encoding() const { assert(is_valid(), "invalid register"); return _encoding; } + inline VMReg as_VMReg() const; // testers constexpr bool is_valid() const { return (0 <= _encoding && _encoding < number_of_registers); } @@ -392,7 +393,6 @@ class VectorSRegister { // accessors constexpr int encoding() const { assert(is_valid(), "invalid register"); return _encoding; } - inline VMReg as_VMReg() const; VectorSRegister successor() const { return VectorSRegister(encoding() + 1); } // testers @@ -484,8 +484,8 @@ class ConcreteRegisterImpl : public AbstractRegisterImpl { enum { max_gpr = Register::number_of_registers * 2, max_fpr = max_gpr + FloatRegister::number_of_registers * 2, - max_vsr = max_fpr + VectorSRegister::number_of_registers * 4, - max_cnd = max_vsr + ConditionRegister::number_of_registers, + max_vr = max_fpr + VectorRegister::number_of_registers * 4, + 
max_cnd = max_vr + ConditionRegister::number_of_registers, max_spr = max_cnd + SpecialRegister::number_of_registers, // This number must be large enough to cover REG_COUNT (defined by c2) registers. // There is no requirement that any ordering here matches any ordering c2 gives diff --git a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp index 4ec2483b267..37d6c9e6d51 100644 --- a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp +++ b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp @@ -111,13 +111,13 @@ class RegisterSaver { int_reg, float_reg, special_reg, - vs_reg + vec_reg } RegisterType; typedef enum { reg_size = 8, half_reg_size = reg_size / 2, - vs_reg_size = 16 + vec_reg_size = 16 } RegisterConstants; typedef struct { @@ -137,8 +137,8 @@ class RegisterSaver { #define RegisterSaver_LiveSpecialReg(regname) \ { RegisterSaver::special_reg, regname->encoding(), regname->as_VMReg() } -#define RegisterSaver_LiveVSReg(regname) \ - { RegisterSaver::vs_reg, regname->encoding(), regname->as_VMReg() } +#define RegisterSaver_LiveVecReg(regname) \ + { RegisterSaver::vec_reg, regname->encoding(), regname->as_VMReg() } static const RegisterSaver::LiveRegType RegisterSaver_LiveRegs[] = { // Live registers which get spilled to the stack. 
Register @@ -220,42 +220,42 @@ static const RegisterSaver::LiveRegType RegisterSaver_LiveRegs[] = { RegisterSaver_LiveIntReg( R31 ) // must be the last register (see save/restore functions below) }; -static const RegisterSaver::LiveRegType RegisterSaver_LiveVSRegs[] = { +static const RegisterSaver::LiveRegType RegisterSaver_LiveVecRegs[] = { // - // live vector scalar registers (optional, only these ones are used by C2): + // live vector registers (optional, only these ones are used by C2): // - RegisterSaver_LiveVSReg( VSR32 ), - RegisterSaver_LiveVSReg( VSR33 ), - RegisterSaver_LiveVSReg( VSR34 ), - RegisterSaver_LiveVSReg( VSR35 ), - RegisterSaver_LiveVSReg( VSR36 ), - RegisterSaver_LiveVSReg( VSR37 ), - RegisterSaver_LiveVSReg( VSR38 ), - RegisterSaver_LiveVSReg( VSR39 ), - RegisterSaver_LiveVSReg( VSR40 ), - RegisterSaver_LiveVSReg( VSR41 ), - RegisterSaver_LiveVSReg( VSR42 ), - RegisterSaver_LiveVSReg( VSR43 ), - RegisterSaver_LiveVSReg( VSR44 ), - RegisterSaver_LiveVSReg( VSR45 ), - RegisterSaver_LiveVSReg( VSR46 ), - RegisterSaver_LiveVSReg( VSR47 ), - RegisterSaver_LiveVSReg( VSR48 ), - RegisterSaver_LiveVSReg( VSR49 ), - RegisterSaver_LiveVSReg( VSR50 ), - RegisterSaver_LiveVSReg( VSR51 ), - RegisterSaver_LiveVSReg( VSR52 ), - RegisterSaver_LiveVSReg( VSR53 ), - RegisterSaver_LiveVSReg( VSR54 ), - RegisterSaver_LiveVSReg( VSR55 ), - RegisterSaver_LiveVSReg( VSR56 ), - RegisterSaver_LiveVSReg( VSR57 ), - RegisterSaver_LiveVSReg( VSR58 ), - RegisterSaver_LiveVSReg( VSR59 ), - RegisterSaver_LiveVSReg( VSR60 ), - RegisterSaver_LiveVSReg( VSR61 ), - RegisterSaver_LiveVSReg( VSR62 ), - RegisterSaver_LiveVSReg( VSR63 ) + RegisterSaver_LiveVecReg( VR0 ), + RegisterSaver_LiveVecReg( VR1 ), + RegisterSaver_LiveVecReg( VR2 ), + RegisterSaver_LiveVecReg( VR3 ), + RegisterSaver_LiveVecReg( VR4 ), + RegisterSaver_LiveVecReg( VR5 ), + RegisterSaver_LiveVecReg( VR6 ), + RegisterSaver_LiveVecReg( VR7 ), + RegisterSaver_LiveVecReg( VR8 ), + RegisterSaver_LiveVecReg( VR9 ), 
+ RegisterSaver_LiveVecReg( VR10 ), + RegisterSaver_LiveVecReg( VR11 ), + RegisterSaver_LiveVecReg( VR12 ), + RegisterSaver_LiveVecReg( VR13 ), + RegisterSaver_LiveVecReg( VR14 ), + RegisterSaver_LiveVecReg( VR15 ), + RegisterSaver_LiveVecReg( VR16 ), + RegisterSaver_LiveVecReg( VR17 ), + RegisterSaver_LiveVecReg( VR18 ), + RegisterSaver_LiveVecReg( VR19 ), + RegisterSaver_LiveVecReg( VR20 ), + RegisterSaver_LiveVecReg( VR21 ), + RegisterSaver_LiveVecReg( VR22 ), + RegisterSaver_LiveVecReg( VR23 ), + RegisterSaver_LiveVecReg( VR24 ), + RegisterSaver_LiveVecReg( VR25 ), + RegisterSaver_LiveVecReg( VR26 ), + RegisterSaver_LiveVecReg( VR27 ), + RegisterSaver_LiveVecReg( VR28 ), + RegisterSaver_LiveVecReg( VR29 ), + RegisterSaver_LiveVecReg( VR30 ), + RegisterSaver_LiveVecReg( VR31 ) }; @@ -277,10 +277,10 @@ OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssemble // calculate frame size const int regstosave_num = sizeof(RegisterSaver_LiveRegs) / sizeof(RegisterSaver::LiveRegType); - const int vsregstosave_num = save_vectors ? (sizeof(RegisterSaver_LiveVSRegs) / + const int vecregstosave_num = save_vectors ? (sizeof(RegisterSaver_LiveVecRegs) / sizeof(RegisterSaver::LiveRegType)) : 0; - const int register_save_size = regstosave_num * reg_size + vsregstosave_num * vs_reg_size; + const int register_save_size = regstosave_num * reg_size + vecregstosave_num * vec_reg_size; const int frame_size_in_bytes = align_up(register_save_size, frame::alignment_in_bytes) + frame::native_abi_reg_args_size; @@ -298,8 +298,8 @@ OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssemble // Save some registers in the last (non-vector) slots of the new frame so we // can use them as scratch regs or to determine the return pc. 
- __ std(R31, frame_size_in_bytes - reg_size - vsregstosave_num * vs_reg_size, R1_SP); - __ std(R30, frame_size_in_bytes - 2*reg_size - vsregstosave_num * vs_reg_size, R1_SP); + __ std(R31, frame_size_in_bytes - reg_size - vecregstosave_num * vec_reg_size, R1_SP); + __ std(R30, frame_size_in_bytes - 2*reg_size - vecregstosave_num * vec_reg_size, R1_SP); // save the flags // Do the save_LR by hand and adjust the return pc if requested. @@ -360,37 +360,37 @@ OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssemble // the utilized instructions (PowerArchitecturePPC64). assert(is_aligned(offset, StackAlignmentInBytes), "should be"); if (PowerArchitecturePPC64 >= 10) { - assert(is_even(vsregstosave_num), "expectation"); - for (int i = 0; i < vsregstosave_num; i += 2) { - int reg_num = RegisterSaver_LiveVSRegs[i].reg_num; - assert(RegisterSaver_LiveVSRegs[i + 1].reg_num == reg_num + 1, "or use other instructions!"); + assert(is_even(vecregstosave_num), "expectation"); + for (int i = 0; i < vecregstosave_num; i += 2) { + int reg_num = RegisterSaver_LiveVecRegs[i].reg_num; + assert(RegisterSaver_LiveVecRegs[i + 1].reg_num == reg_num + 1, "or use other instructions!"); - __ stxvp(as_VectorSRegister(reg_num), offset, R1_SP); + __ stxvp(as_VectorRegister(reg_num).to_vsr(), offset, R1_SP); // Note: The contents were read in the same order (see loadV16_Power9 node in ppc.ad). 
if (generate_oop_map) { map->set_callee_saved(VMRegImpl::stack2reg(offset >> 2), - RegisterSaver_LiveVSRegs[i LITTLE_ENDIAN_ONLY(+1) ].vmreg); - map->set_callee_saved(VMRegImpl::stack2reg((offset + vs_reg_size) >> 2), - RegisterSaver_LiveVSRegs[i BIG_ENDIAN_ONLY(+1) ].vmreg); + RegisterSaver_LiveVecRegs[i LITTLE_ENDIAN_ONLY(+1) ].vmreg); + map->set_callee_saved(VMRegImpl::stack2reg((offset + vec_reg_size) >> 2), + RegisterSaver_LiveVecRegs[i BIG_ENDIAN_ONLY(+1) ].vmreg); } - offset += (2 * vs_reg_size); + offset += (2 * vec_reg_size); } } else { - for (int i = 0; i < vsregstosave_num; i++) { - int reg_num = RegisterSaver_LiveVSRegs[i].reg_num; + for (int i = 0; i < vecregstosave_num; i++) { + int reg_num = RegisterSaver_LiveVecRegs[i].reg_num; if (PowerArchitecturePPC64 >= 9) { - __ stxv(as_VectorSRegister(reg_num), offset, R1_SP); + __ stxv(as_VectorRegister(reg_num).to_vsr(), offset, R1_SP); } else { __ li(R31, offset); - __ stxvd2x(as_VectorSRegister(reg_num), R31, R1_SP); + __ stxvd2x(as_VectorRegister(reg_num).to_vsr(), R31, R1_SP); } // Note: The contents were read in the same order (see loadV16_Power8 / loadV16_Power9 node in ppc.ad). if (generate_oop_map) { - VMReg vsr = RegisterSaver_LiveVSRegs[i].vmreg; + VMReg vsr = RegisterSaver_LiveVecRegs[i].vmreg; map->set_callee_saved(VMRegImpl::stack2reg(offset >> 2), vsr); } - offset += vs_reg_size; + offset += vec_reg_size; } } @@ -411,10 +411,10 @@ void RegisterSaver::restore_live_registers_and_pop_frame(MacroAssembler* masm, bool save_vectors) { const int regstosave_num = sizeof(RegisterSaver_LiveRegs) / sizeof(RegisterSaver::LiveRegType); - const int vsregstosave_num = save_vectors ? (sizeof(RegisterSaver_LiveVSRegs) / + const int vecregstosave_num = save_vectors ?
(sizeof(RegisterSaver_LiveVecRegs) / sizeof(RegisterSaver::LiveRegType)) : 0; - const int register_save_size = regstosave_num * reg_size + vsregstosave_num * vs_reg_size; + const int register_save_size = regstosave_num * reg_size + vecregstosave_num * vec_reg_size; const int register_save_offset = frame_size_in_bytes - register_save_size; @@ -456,26 +456,26 @@ void RegisterSaver::restore_live_registers_and_pop_frame(MacroAssembler* masm, assert(is_aligned(offset, StackAlignmentInBytes), "should be"); if (PowerArchitecturePPC64 >= 10) { - for (int i = 0; i < vsregstosave_num; i += 2) { - int reg_num = RegisterSaver_LiveVSRegs[i].reg_num; - assert(RegisterSaver_LiveVSRegs[i + 1].reg_num == reg_num + 1, "or use other instructions!"); + for (int i = 0; i < vecregstosave_num; i += 2) { + int reg_num = RegisterSaver_LiveVecRegs[i].reg_num; + assert(RegisterSaver_LiveVecRegs[i + 1].reg_num == reg_num + 1, "or use other instructions!"); - __ lxvp(as_VectorSRegister(reg_num), offset, R1_SP); + __ lxvp(as_VectorRegister(reg_num).to_vsr(), offset, R1_SP); - offset += (2 * vs_reg_size); + offset += (2 * vec_reg_size); } } else { - for (int i = 0; i < vsregstosave_num; i++) { - int reg_num = RegisterSaver_LiveVSRegs[i].reg_num; + for (int i = 0; i < vecregstosave_num; i++) { + int reg_num = RegisterSaver_LiveVecRegs[i].reg_num; if (PowerArchitecturePPC64 >= 9) { - __ lxv(as_VectorSRegister(reg_num), offset, R1_SP); + __ lxv(as_VectorRegister(reg_num).to_vsr(), offset, R1_SP); } else { __ li(R31, offset); - __ lxvd2x(as_VectorSRegister(reg_num), R31, R1_SP); + __ lxvd2x(as_VectorRegister(reg_num).to_vsr(), R31, R1_SP); } - offset += vs_reg_size; + offset += vec_reg_size; } } @@ -486,7 +486,7 @@ void RegisterSaver::restore_live_registers_and_pop_frame(MacroAssembler* masm, __ mtlr(R31); // restore scratch register's value - __ ld(R31, frame_size_in_bytes - reg_size - vsregstosave_num * vs_reg_size, R1_SP); + __ ld(R31, frame_size_in_bytes - reg_size - vecregstosave_num * 
vec_reg_size, R1_SP); // pop the frame __ addi(R1_SP, R1_SP, frame_size_in_bytes); diff --git a/src/hotspot/cpu/ppc/vmreg_ppc.cpp b/src/hotspot/cpu/ppc/vmreg_ppc.cpp index 2ed68578a80..0edbf700ec6 100644 --- a/src/hotspot/cpu/ppc/vmreg_ppc.cpp +++ b/src/hotspot/cpu/ppc/vmreg_ppc.cpp @@ -47,7 +47,7 @@ void VMRegImpl::set_regName() { } VectorSRegister vsreg = ::as_VectorSRegister(0); - for ( ; i < ConcreteRegisterImpl::max_vsr; ) { + for ( ; i < ConcreteRegisterImpl::max_vr; ) { regName[i++] = vsreg->name(); regName[i++] = vsreg->name(); regName[i++] = vsreg->name(); diff --git a/src/hotspot/cpu/ppc/vmreg_ppc.hpp b/src/hotspot/cpu/ppc/vmreg_ppc.hpp index 4e25c8b3cea..194b5fd93ef 100644 --- a/src/hotspot/cpu/ppc/vmreg_ppc.hpp +++ b/src/hotspot/cpu/ppc/vmreg_ppc.hpp @@ -35,13 +35,13 @@ inline bool is_FloatRegister() { value() < ConcreteRegisterImpl::max_fpr; } -inline bool is_VectorSRegister() { +inline bool is_VectorRegister() { return value() >= ConcreteRegisterImpl::max_fpr && - value() < ConcreteRegisterImpl::max_vsr; + value() < ConcreteRegisterImpl::max_vr; } inline bool is_ConditionRegister() { - return value() >= ConcreteRegisterImpl::max_vsr && + return value() >= ConcreteRegisterImpl::max_vr && value() < ConcreteRegisterImpl::max_cnd; } @@ -60,15 +60,15 @@ inline FloatRegister as_FloatRegister() { return ::as_FloatRegister((value() - ConcreteRegisterImpl::max_gpr) >> 1); } -inline VectorSRegister as_VectorSRegister() { - assert(is_VectorSRegister(), "must be"); - return ::as_VectorSRegister((value() - ConcreteRegisterImpl::max_fpr) >> 2); +inline VectorRegister as_VectorRegister() { + assert(is_VectorRegister(), "must be"); + return ::as_VectorRegister((value() - ConcreteRegisterImpl::max_fpr) >> 2); } inline bool is_concrete() { assert(is_reg(), "must be"); if (is_Register() || is_FloatRegister()) return is_even(value()); - if (is_VectorSRegister()) { + if (is_VectorRegister()) { int base = value() - ConcreteRegisterImpl::max_fpr; return (base & 3) == 0; } 
diff --git a/src/hotspot/cpu/ppc/vmreg_ppc.inline.hpp b/src/hotspot/cpu/ppc/vmreg_ppc.inline.hpp index 2424df8da01..a7810266b89 100644 --- a/src/hotspot/cpu/ppc/vmreg_ppc.inline.hpp +++ b/src/hotspot/cpu/ppc/vmreg_ppc.inline.hpp @@ -40,13 +40,13 @@ inline VMReg FloatRegister::as_VMReg() const { return VMRegImpl::as_VMReg((encoding() << 1) + ConcreteRegisterImpl::max_gpr); } -inline VMReg VectorSRegister::as_VMReg() const { +inline VMReg VectorRegister::as_VMReg() const { // Four halves, multiply by 4. return VMRegImpl::as_VMReg((encoding() << 2) + ConcreteRegisterImpl::max_fpr); } inline VMReg ConditionRegister::as_VMReg() const { - return VMRegImpl::as_VMReg((encoding()) + ConcreteRegisterImpl::max_vsr); + return VMRegImpl::as_VMReg((encoding()) + ConcreteRegisterImpl::max_vr); } inline VMReg SpecialRegister::as_VMReg() const {