From 2af4d20abfda4113a2bfcf34dfad87187c0f584d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Daniel=20Lund=C3=A9n?= Date: Tue, 21 Oct 2025 13:17:14 +0000 Subject: [PATCH] 8370031: Make RegMask copy constructor explicit and replace RegMask operator= with named function Reviewed-by: mhaessig, rcastanedalo --- src/hotspot/cpu/aarch64/aarch64.ad | 30 ++-- src/hotspot/cpu/arm/arm.ad | 16 +- src/hotspot/cpu/ppc/ppc.ad | 16 +- src/hotspot/cpu/riscv/riscv.ad | 30 ++-- src/hotspot/cpu/s390/s390.ad | 8 +- src/hotspot/cpu/x86/x86_64.ad | 42 +++--- src/hotspot/share/opto/chaitin.hpp | 2 +- src/hotspot/share/opto/divnode.cpp | 16 +- src/hotspot/share/opto/ifg.cpp | 4 +- src/hotspot/share/opto/matcher.cpp | 179 ++++++++++++----------- src/hotspot/share/opto/matcher.hpp | 8 +- src/hotspot/share/opto/postaloc.cpp | 2 +- src/hotspot/share/opto/regmask.hpp | 82 +++++------ test/hotspot/gtest/opto/test_regmask.cpp | 8 +- 14 files changed, 221 insertions(+), 222 deletions(-) diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad index 0cdf3c1b8b5..5734519301e 100644 --- a/src/hotspot/cpu/aarch64/aarch64.ad +++ b/src/hotspot/cpu/aarch64/aarch64.ad @@ -1266,20 +1266,20 @@ source %{ // adlc register classes to make AArch64 rheapbase (r27) and rfp (r29) // registers conditionally reserved. - _ANY_REG32_mask = _ALL_REG32_mask; + _ANY_REG32_mask.assignFrom(_ALL_REG32_mask); _ANY_REG32_mask.remove(OptoReg::as_OptoReg(r31_sp->as_VMReg())); - _ANY_REG_mask = _ALL_REG_mask; + _ANY_REG_mask.assignFrom(_ALL_REG_mask); - _PTR_REG_mask = _ALL_REG_mask; + _PTR_REG_mask.assignFrom(_ALL_REG_mask); - _NO_SPECIAL_REG32_mask = _ALL_REG32_mask; + _NO_SPECIAL_REG32_mask.assignFrom(_ALL_REG32_mask); _NO_SPECIAL_REG32_mask.subtract(_NON_ALLOCATABLE_REG32_mask); - _NO_SPECIAL_REG_mask = _ALL_REG_mask; + _NO_SPECIAL_REG_mask.assignFrom(_ALL_REG_mask); _NO_SPECIAL_REG_mask.subtract(_NON_ALLOCATABLE_REG_mask); - _NO_SPECIAL_PTR_REG_mask = _ALL_REG_mask; + _NO_SPECIAL_PTR_REG_mask.assignFrom(_ALL_REG_mask); _NO_SPECIAL_PTR_REG_mask.subtract(_NON_ALLOCATABLE_REG_mask); // r27 is not allocatable when compressed oops is on and heapbase is not @@ -1297,7 +1297,7 @@ source %{ _NO_SPECIAL_PTR_REG_mask.remove(OptoReg::as_OptoReg(r29->as_VMReg())); } - _NO_SPECIAL_NO_RFP_PTR_REG_mask = _NO_SPECIAL_PTR_REG_mask; + _NO_SPECIAL_NO_RFP_PTR_REG_mask.assignFrom(_NO_SPECIAL_PTR_REG_mask); _NO_SPECIAL_NO_RFP_PTR_REG_mask.remove(OptoReg::as_OptoReg(r29->as_VMReg())); } @@ -2545,27 +2545,27 @@ bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) { return false; } -RegMask Matcher::divI_proj_mask() { +const RegMask& Matcher::divI_proj_mask() { ShouldNotReachHere(); - return RegMask(); + return RegMask::EMPTY; } // Register for MODI projection of divmodI. -RegMask Matcher::modI_proj_mask() { +const RegMask& Matcher::modI_proj_mask() { ShouldNotReachHere(); - return RegMask(); + return RegMask::EMPTY; } // Register for DIVL projection of divmodL. -RegMask Matcher::divL_proj_mask() { +const RegMask& Matcher::divL_proj_mask() { ShouldNotReachHere(); - return RegMask(); + return RegMask::EMPTY; } // Register for MODL projection of divmodL. 
-RegMask Matcher::modL_proj_mask() { +const RegMask& Matcher::modL_proj_mask() { ShouldNotReachHere(); - return RegMask(); + return RegMask::EMPTY; } bool size_fits_all_mem_uses(AddPNode* addp, int shift) { diff --git a/src/hotspot/cpu/arm/arm.ad b/src/hotspot/cpu/arm/arm.ad index 68fece5263d..31a442be624 100644 --- a/src/hotspot/cpu/arm/arm.ad +++ b/src/hotspot/cpu/arm/arm.ad @@ -1131,27 +1131,27 @@ bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) { } // Register for DIVI projection of divmodI -RegMask Matcher::divI_proj_mask() { +const RegMask& Matcher::divI_proj_mask() { ShouldNotReachHere(); - return RegMask(); + return RegMask::EMPTY; } // Register for MODI projection of divmodI -RegMask Matcher::modI_proj_mask() { +const RegMask& Matcher::modI_proj_mask() { ShouldNotReachHere(); - return RegMask(); + return RegMask::EMPTY; } // Register for DIVL projection of divmodL -RegMask Matcher::divL_proj_mask() { +const RegMask& Matcher::divL_proj_mask() { ShouldNotReachHere(); - return RegMask(); + return RegMask::EMPTY; } // Register for MODL projection of divmodL -RegMask Matcher::modL_proj_mask() { +const RegMask& Matcher::modL_proj_mask() { ShouldNotReachHere(); - return RegMask(); + return RegMask::EMPTY; } bool maybe_far_call(const CallNode *n) { diff --git a/src/hotspot/cpu/ppc/ppc.ad b/src/hotspot/cpu/ppc/ppc.ad index 2c83b2d5765..03dbd0e780b 100644 --- a/src/hotspot/cpu/ppc/ppc.ad +++ b/src/hotspot/cpu/ppc/ppc.ad @@ -2450,27 +2450,27 @@ bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) { } // Register for DIVI projection of divmodI. -RegMask Matcher::divI_proj_mask() { +const RegMask& Matcher::divI_proj_mask() { ShouldNotReachHere(); - return RegMask(); + return RegMask::EMPTY; } // Register for MODI projection of divmodI. -RegMask Matcher::modI_proj_mask() { +const RegMask& Matcher::modI_proj_mask() { ShouldNotReachHere(); - return RegMask(); + return RegMask::EMPTY; } // Register for DIVL projection of divmodL. -RegMask Matcher::divL_proj_mask() { +const RegMask& Matcher::divL_proj_mask() { ShouldNotReachHere(); - return RegMask(); + return RegMask::EMPTY; } // Register for MODL projection of divmodL. 
-RegMask Matcher::modL_proj_mask() { +const RegMask& Matcher::modL_proj_mask() { ShouldNotReachHere(); - return RegMask(); + return RegMask::EMPTY; } %} diff --git a/src/hotspot/cpu/riscv/riscv.ad b/src/hotspot/cpu/riscv/riscv.ad index 00364d7dab7..83c59af9113 100644 --- a/src/hotspot/cpu/riscv/riscv.ad +++ b/src/hotspot/cpu/riscv/riscv.ad @@ -1092,22 +1092,22 @@ RegMask _NO_SPECIAL_NO_FP_PTR_REG_mask; void reg_mask_init() { - _ANY_REG32_mask = _ALL_REG32_mask; + _ANY_REG32_mask.assignFrom(_ALL_REG32_mask); _ANY_REG32_mask.remove(OptoReg::as_OptoReg(x0->as_VMReg())); - _ANY_REG_mask = _ALL_REG_mask; + _ANY_REG_mask.assignFrom(_ALL_REG_mask); _ANY_REG_mask.subtract(_ZR_REG_mask); - _PTR_REG_mask = _ALL_REG_mask; + _PTR_REG_mask.assignFrom(_ALL_REG_mask); _PTR_REG_mask.subtract(_ZR_REG_mask); - _NO_SPECIAL_REG32_mask = _ALL_REG32_mask; + _NO_SPECIAL_REG32_mask.assignFrom(_ALL_REG32_mask); _NO_SPECIAL_REG32_mask.subtract(_NON_ALLOCATABLE_REG32_mask); - _NO_SPECIAL_REG_mask = _ALL_REG_mask; + _NO_SPECIAL_REG_mask.assignFrom(_ALL_REG_mask); _NO_SPECIAL_REG_mask.subtract(_NON_ALLOCATABLE_REG_mask); - _NO_SPECIAL_PTR_REG_mask = _ALL_REG_mask; + _NO_SPECIAL_PTR_REG_mask.assignFrom(_ALL_REG_mask); _NO_SPECIAL_PTR_REG_mask.subtract(_NON_ALLOCATABLE_REG_mask); // x27 is not allocatable when compressed oops is on @@ -1124,7 +1124,7 @@ void reg_mask_init() { _NO_SPECIAL_PTR_REG_mask.remove(OptoReg::as_OptoReg(x8->as_VMReg())); } - _NO_SPECIAL_NO_FP_PTR_REG_mask = _NO_SPECIAL_PTR_REG_mask; + _NO_SPECIAL_NO_FP_PTR_REG_mask.assignFrom(_NO_SPECIAL_PTR_REG_mask); _NO_SPECIAL_NO_FP_PTR_REG_mask.remove(OptoReg::as_OptoReg(x8->as_VMReg())); } @@ -2129,27 +2129,27 @@ bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) { return false; } -RegMask Matcher::divI_proj_mask() { +const RegMask& Matcher::divI_proj_mask() { ShouldNotReachHere(); - return RegMask(); + return RegMask::EMPTY; } // Register for MODI projection of divmodI. -RegMask Matcher::modI_proj_mask() { +const RegMask& Matcher::modI_proj_mask() { ShouldNotReachHere(); - return RegMask(); + return RegMask::EMPTY; } // Register for DIVL projection of divmodL. -RegMask Matcher::divL_proj_mask() { +const RegMask& Matcher::divL_proj_mask() { ShouldNotReachHere(); - return RegMask(); + return RegMask::EMPTY; } // Register for MODL projection of divmodL. 
-RegMask Matcher::modL_proj_mask() { +const RegMask& Matcher::modL_proj_mask() { ShouldNotReachHere(); - return RegMask(); + return RegMask::EMPTY; } bool size_fits_all_mem_uses(AddPNode* addp, int shift) { diff --git a/src/hotspot/cpu/s390/s390.ad b/src/hotspot/cpu/s390/s390.ad index cfc8b19534b..ab991896b53 100644 --- a/src/hotspot/cpu/s390/s390.ad +++ b/src/hotspot/cpu/s390/s390.ad @@ -1961,22 +1961,22 @@ bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) { } // Register for DIVI projection of divmodI -RegMask Matcher::divI_proj_mask() { +const RegMask& Matcher::divI_proj_mask() { return _Z_RARG4_INT_REG_mask; } // Register for MODI projection of divmodI -RegMask Matcher::modI_proj_mask() { +const RegMask& Matcher::modI_proj_mask() { return _Z_RARG3_INT_REG_mask; } // Register for DIVL projection of divmodL -RegMask Matcher::divL_proj_mask() { +const RegMask& Matcher::divL_proj_mask() { return _Z_RARG4_LONG_REG_mask; } // Register for MODL projection of divmodL -RegMask Matcher::modL_proj_mask() { +const RegMask& Matcher::modL_proj_mask() { return _Z_RARG3_LONG_REG_mask; } diff --git a/src/hotspot/cpu/x86/x86_64.ad b/src/hotspot/cpu/x86/x86_64.ad index 27daa51b39e..62306b562d6 100644 --- a/src/hotspot/cpu/x86/x86_64.ad +++ b/src/hotspot/cpu/x86/x86_64.ad @@ -497,7 +497,7 @@ void reg_mask_init() { // _ALL_REG_mask is generated by adlc from the all_reg register class below. // We derive a number of subsets from it. - _ANY_REG_mask = _ALL_REG_mask; + _ANY_REG_mask.assignFrom(_ALL_REG_mask); if (PreserveFramePointer) { _ANY_REG_mask.remove(OptoReg::as_OptoReg(rbp->as_VMReg())); @@ -508,7 +508,7 @@ void reg_mask_init() { _ANY_REG_mask.remove(OptoReg::as_OptoReg(r12->as_VMReg()->next())); } - _PTR_REG_mask = _ANY_REG_mask; + _PTR_REG_mask.assignFrom(_ANY_REG_mask); _PTR_REG_mask.remove(OptoReg::as_OptoReg(rsp->as_VMReg())); _PTR_REG_mask.remove(OptoReg::as_OptoReg(rsp->as_VMReg()->next())); _PTR_REG_mask.remove(OptoReg::as_OptoReg(r15->as_VMReg())); @@ -520,43 +520,43 @@ void reg_mask_init() { } } - _STACK_OR_PTR_REG_mask = _PTR_REG_mask; + _STACK_OR_PTR_REG_mask.assignFrom(_PTR_REG_mask); _STACK_OR_PTR_REG_mask.or_with(STACK_OR_STACK_SLOTS_mask()); - _PTR_REG_NO_RBP_mask = _PTR_REG_mask; + _PTR_REG_NO_RBP_mask.assignFrom(_PTR_REG_mask); _PTR_REG_NO_RBP_mask.remove(OptoReg::as_OptoReg(rbp->as_VMReg())); _PTR_REG_NO_RBP_mask.remove(OptoReg::as_OptoReg(rbp->as_VMReg()->next())); - _PTR_NO_RAX_REG_mask = _PTR_REG_mask; + _PTR_NO_RAX_REG_mask.assignFrom(_PTR_REG_mask); _PTR_NO_RAX_REG_mask.remove(OptoReg::as_OptoReg(rax->as_VMReg())); _PTR_NO_RAX_REG_mask.remove(OptoReg::as_OptoReg(rax->as_VMReg()->next())); - _PTR_NO_RAX_RBX_REG_mask = _PTR_NO_RAX_REG_mask; + _PTR_NO_RAX_RBX_REG_mask.assignFrom(_PTR_NO_RAX_REG_mask); _PTR_NO_RAX_RBX_REG_mask.remove(OptoReg::as_OptoReg(rbx->as_VMReg())); _PTR_NO_RAX_RBX_REG_mask.remove(OptoReg::as_OptoReg(rbx->as_VMReg()->next())); - _LONG_REG_mask = _PTR_REG_mask; - _STACK_OR_LONG_REG_mask = _LONG_REG_mask; + _LONG_REG_mask.assignFrom(_PTR_REG_mask); + _STACK_OR_LONG_REG_mask.assignFrom(_LONG_REG_mask); _STACK_OR_LONG_REG_mask.or_with(STACK_OR_STACK_SLOTS_mask()); - _LONG_NO_RAX_RDX_REG_mask = _LONG_REG_mask; + _LONG_NO_RAX_RDX_REG_mask.assignFrom(_LONG_REG_mask); _LONG_NO_RAX_RDX_REG_mask.remove(OptoReg::as_OptoReg(rax->as_VMReg())); _LONG_NO_RAX_RDX_REG_mask.remove(OptoReg::as_OptoReg(rax->as_VMReg()->next())); _LONG_NO_RAX_RDX_REG_mask.remove(OptoReg::as_OptoReg(rdx->as_VMReg())); 
_LONG_NO_RAX_RDX_REG_mask.remove(OptoReg::as_OptoReg(rdx->as_VMReg()->next())); - _LONG_NO_RCX_REG_mask = _LONG_REG_mask; + _LONG_NO_RCX_REG_mask.assignFrom(_LONG_REG_mask); _LONG_NO_RCX_REG_mask.remove(OptoReg::as_OptoReg(rcx->as_VMReg())); _LONG_NO_RCX_REG_mask.remove(OptoReg::as_OptoReg(rcx->as_VMReg()->next())); - _LONG_NO_RBP_R13_REG_mask = _LONG_REG_mask; + _LONG_NO_RBP_R13_REG_mask.assignFrom(_LONG_REG_mask); _LONG_NO_RBP_R13_REG_mask.remove(OptoReg::as_OptoReg(rbp->as_VMReg())); _LONG_NO_RBP_R13_REG_mask.remove(OptoReg::as_OptoReg(rbp->as_VMReg()->next())); _LONG_NO_RBP_R13_REG_mask.remove(OptoReg::as_OptoReg(r13->as_VMReg())); _LONG_NO_RBP_R13_REG_mask.remove(OptoReg::as_OptoReg(r13->as_VMReg()->next())); - _INT_REG_mask = _ALL_INT_REG_mask; + _INT_REG_mask.assignFrom(_ALL_INT_REG_mask); if (!UseAPX) { for (uint i = 0; i < sizeof(egprs)/sizeof(Register); i++) { _INT_REG_mask.remove(OptoReg::as_OptoReg(egprs[i]->as_VMReg())); @@ -570,23 +570,23 @@ void reg_mask_init() { _INT_REG_mask.remove(OptoReg::as_OptoReg(r12->as_VMReg())); } - _STACK_OR_INT_REG_mask = _INT_REG_mask; + _STACK_OR_INT_REG_mask.assignFrom(_INT_REG_mask); _STACK_OR_INT_REG_mask.or_with(STACK_OR_STACK_SLOTS_mask()); - _INT_NO_RAX_RDX_REG_mask = _INT_REG_mask; + _INT_NO_RAX_RDX_REG_mask.assignFrom(_INT_REG_mask); _INT_NO_RAX_RDX_REG_mask.remove(OptoReg::as_OptoReg(rax->as_VMReg())); _INT_NO_RAX_RDX_REG_mask.remove(OptoReg::as_OptoReg(rdx->as_VMReg())); - _INT_NO_RCX_REG_mask = _INT_REG_mask; + _INT_NO_RCX_REG_mask.assignFrom(_INT_REG_mask); _INT_NO_RCX_REG_mask.remove(OptoReg::as_OptoReg(rcx->as_VMReg())); - _INT_NO_RBP_R13_REG_mask = _INT_REG_mask; + _INT_NO_RBP_R13_REG_mask.assignFrom(_INT_REG_mask); _INT_NO_RBP_R13_REG_mask.remove(OptoReg::as_OptoReg(rbp->as_VMReg())); _INT_NO_RBP_R13_REG_mask.remove(OptoReg::as_OptoReg(r13->as_VMReg())); // _FLOAT_REG_LEGACY_mask/_FLOAT_REG_EVEX_mask is generated by adlc // from the float_reg_legacy/float_reg_evex register class. - _FLOAT_REG_mask = VM_Version::supports_evex() ? _FLOAT_REG_EVEX_mask : _FLOAT_REG_LEGACY_mask; + _FLOAT_REG_mask.assignFrom(VM_Version::supports_evex() ? _FLOAT_REG_EVEX_mask : _FLOAT_REG_LEGACY_mask); } static bool generate_vzeroupper(Compile* C) { @@ -1678,22 +1678,22 @@ bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) { } // Register for DIVI projection of divmodI -RegMask Matcher::divI_proj_mask() { +const RegMask& Matcher::divI_proj_mask() { return INT_RAX_REG_mask(); } // Register for MODI projection of divmodI -RegMask Matcher::modI_proj_mask() { +const RegMask& Matcher::modI_proj_mask() { return INT_RDX_REG_mask(); } // Register for DIVL projection of divmodL -RegMask Matcher::divL_proj_mask() { +const RegMask& Matcher::divL_proj_mask() { return LONG_RAX_REG_mask(); } // Register for MODL projection of divmodL -RegMask Matcher::modL_proj_mask() { +const RegMask& Matcher::modL_proj_mask() { return LONG_RDX_REG_mask(); } diff --git a/src/hotspot/share/opto/chaitin.hpp b/src/hotspot/share/opto/chaitin.hpp index 22cc4259c6f..b477c54fcae 100644 --- a/src/hotspot/share/opto/chaitin.hpp +++ b/src/hotspot/share/opto/chaitin.hpp @@ -128,7 +128,7 @@ public: // count of bits in the current mask. 
int get_invalid_mask_size() const { return _mask_size; } const RegMask &mask() const { return _mask; } - void set_mask( const RegMask &rm ) { _mask = rm; DEBUG_ONLY(_msize_valid=0;)} + void set_mask(const RegMask& rm) { _mask.assignFrom(rm); DEBUG_ONLY(_msize_valid = 0;) } void init_mask(Arena* arena) { new (&_mask) RegMask(arena); } void and_with( const RegMask &rm ) { _mask.and_with(rm); DEBUG_ONLY(_msize_valid=0;)} void subtract( const RegMask &rm ) { _mask.subtract(rm); DEBUG_ONLY(_msize_valid=0;)} diff --git a/src/hotspot/share/opto/divnode.cpp b/src/hotspot/share/opto/divnode.cpp index 823745ea8e7..06ba1856941 100644 --- a/src/hotspot/share/opto/divnode.cpp +++ b/src/hotspot/share/opto/divnode.cpp @@ -1668,10 +1668,10 @@ Node *DivModINode::match( const ProjNode *proj, const Matcher *match ) { uint ideal_reg = proj->ideal_reg(); RegMask rm; if (proj->_con == div_proj_num) { - rm = match->divI_proj_mask(); + rm.assignFrom(match->divI_proj_mask()); } else { assert(proj->_con == mod_proj_num, "must be div or mod projection"); - rm = match->modI_proj_mask(); + rm.assignFrom(match->modI_proj_mask()); } return new MachProjNode(this, proj->_con, rm, ideal_reg); } @@ -1683,10 +1683,10 @@ Node *DivModLNode::match( const ProjNode *proj, const Matcher *match ) { uint ideal_reg = proj->ideal_reg(); RegMask rm; if (proj->_con == div_proj_num) { - rm = match->divL_proj_mask(); + rm.assignFrom(match->divL_proj_mask()); } else { assert(proj->_con == mod_proj_num, "must be div or mod projection"); - rm = match->modL_proj_mask(); + rm.assignFrom(match->modL_proj_mask()); } return new MachProjNode(this, proj->_con, rm, ideal_reg); } @@ -1721,10 +1721,10 @@ Node* UDivModINode::match( const ProjNode *proj, const Matcher *match ) { uint ideal_reg = proj->ideal_reg(); RegMask rm; if (proj->_con == div_proj_num) { - rm = match->divI_proj_mask(); + rm.assignFrom(match->divI_proj_mask()); } else { assert(proj->_con == mod_proj_num, "must be div or mod projection"); - rm = match->modI_proj_mask(); + rm.assignFrom(match->modI_proj_mask()); } return new MachProjNode(this, proj->_con, rm, ideal_reg); } @@ -1736,10 +1736,10 @@ Node* UDivModLNode::match( const ProjNode *proj, const Matcher *match ) { uint ideal_reg = proj->ideal_reg(); RegMask rm; if (proj->_con == div_proj_num) { - rm = match->divL_proj_mask(); + rm.assignFrom(match->divL_proj_mask()); } else { assert(proj->_con == mod_proj_num, "must be div or mod projection"); - rm = match->modL_proj_mask(); + rm.assignFrom(match->modL_proj_mask()); } return new MachProjNode(this, proj->_con, rm, ideal_reg); } diff --git a/src/hotspot/share/opto/ifg.cpp b/src/hotspot/share/opto/ifg.cpp index 681d2f28cb1..1480e806f76 100644 --- a/src/hotspot/share/opto/ifg.cpp +++ b/src/hotspot/share/opto/ifg.cpp @@ -729,7 +729,7 @@ void PhaseChaitin::remove_bound_register_from_interfering_live_ranges(LRG& lrg, } // Remove bound register(s) from 'l's choices - old = interfering_lrg.mask(); + old.assignFrom(interfering_lrg.mask()); uint old_size = interfering_lrg.mask_size(); // Remove the bits from LRG 'mask' from LRG 'l' so 'l' no @@ -738,7 +738,7 @@ void PhaseChaitin::remove_bound_register_from_interfering_live_ranges(LRG& lrg, assert(!interfering_lrg._is_vector || !interfering_lrg._fat_proj, "sanity"); if (interfering_lrg.num_regs() > 1 && !interfering_lrg._fat_proj) { - r2mask = mask; + r2mask.assignFrom(mask); // Leave only aligned set of bits. r2mask.smear_to_sets(interfering_lrg.num_regs()); // It includes vector case. 
diff --git a/src/hotspot/share/opto/matcher.cpp b/src/hotspot/share/opto/matcher.cpp index 7621fc1bb3e..c63cefe7ac2 100644 --- a/src/hotspot/share/opto/matcher.cpp +++ b/src/hotspot/share/opto/matcher.cpp @@ -195,7 +195,7 @@ void Matcher::match( ) { OptoRegPair regs = return_value(ireg); // And mask for same - _return_value_mask = RegMask(regs.first()); + _return_value_mask.assignFrom(RegMask(regs.first())); if( OptoReg::is_valid(regs.second()) ) _return_value_mask.insert(regs.second()); } @@ -422,11 +422,11 @@ static RegMask *init_input_masks( uint size, RegMask &ret_adr, RegMask &fp ) { new (rms + i) RegMask(Compile::current()->comp_arena()); } // Do all the pre-defined register masks - rms[TypeFunc::Control ] = RegMask::EMPTY; - rms[TypeFunc::I_O ] = RegMask::EMPTY; - rms[TypeFunc::Memory ] = RegMask::EMPTY; - rms[TypeFunc::ReturnAdr] = ret_adr; - rms[TypeFunc::FramePtr ] = fp; + rms[TypeFunc::Control ].assignFrom(RegMask::EMPTY); + rms[TypeFunc::I_O ].assignFrom(RegMask::EMPTY); + rms[TypeFunc::Memory ].assignFrom(RegMask::EMPTY); + rms[TypeFunc::ReturnAdr].assignFrom(ret_adr); + rms[TypeFunc::FramePtr ].assignFrom(fp); return rms; } @@ -488,44 +488,44 @@ void Matcher::init_first_stack_mask() { assert(aligned_stack_mask.is_infinite_stack(), "should be infinite stack"); RegMask scalable_stack_mask(aligned_stack_mask, C->comp_arena()); - *idealreg2spillmask[Op_RegP] = *idealreg2regmask[Op_RegP]; + idealreg2spillmask[Op_RegP]->assignFrom(*idealreg2regmask[Op_RegP]); #ifdef _LP64 - *idealreg2spillmask[Op_RegN] = *idealreg2regmask[Op_RegN]; - idealreg2spillmask[Op_RegN]->or_with(C->FIRST_STACK_mask()); - idealreg2spillmask[Op_RegP]->or_with(aligned_stack_mask); + idealreg2spillmask[Op_RegN]->assignFrom(*idealreg2regmask[Op_RegN]); + idealreg2spillmask[Op_RegN]->or_with(C->FIRST_STACK_mask()); + idealreg2spillmask[Op_RegP]->or_with(aligned_stack_mask); #else idealreg2spillmask[Op_RegP]->or_with(C->FIRST_STACK_mask()); #endif - *idealreg2spillmask[Op_RegI] = *idealreg2regmask[Op_RegI]; - idealreg2spillmask[Op_RegI]->or_with(C->FIRST_STACK_mask()); - *idealreg2spillmask[Op_RegL] = *idealreg2regmask[Op_RegL]; - idealreg2spillmask[Op_RegL]->or_with(aligned_stack_mask); - *idealreg2spillmask[Op_RegF] = *idealreg2regmask[Op_RegF]; - idealreg2spillmask[Op_RegF]->or_with(C->FIRST_STACK_mask()); - *idealreg2spillmask[Op_RegD] = *idealreg2regmask[Op_RegD]; - idealreg2spillmask[Op_RegD]->or_with(aligned_stack_mask); + idealreg2spillmask[Op_RegI]->assignFrom(*idealreg2regmask[Op_RegI]); + idealreg2spillmask[Op_RegI]->or_with(C->FIRST_STACK_mask()); + idealreg2spillmask[Op_RegL]->assignFrom(*idealreg2regmask[Op_RegL]); + idealreg2spillmask[Op_RegL]->or_with(aligned_stack_mask); + idealreg2spillmask[Op_RegF]->assignFrom(*idealreg2regmask[Op_RegF]); + idealreg2spillmask[Op_RegF]->or_with(C->FIRST_STACK_mask()); + idealreg2spillmask[Op_RegD]->assignFrom(*idealreg2regmask[Op_RegD]); + idealreg2spillmask[Op_RegD]->or_with(aligned_stack_mask); if (Matcher::has_predicated_vectors()) { - *idealreg2spillmask[Op_RegVectMask] = *idealreg2regmask[Op_RegVectMask]; - idealreg2spillmask[Op_RegVectMask]->or_with(aligned_stack_mask); + idealreg2spillmask[Op_RegVectMask]->assignFrom(*idealreg2regmask[Op_RegVectMask]); + idealreg2spillmask[Op_RegVectMask]->or_with(aligned_stack_mask); } else { - *idealreg2spillmask[Op_RegVectMask] = RegMask::EMPTY; + idealreg2spillmask[Op_RegVectMask]->assignFrom(RegMask::EMPTY); } if (Matcher::vector_size_supported(T_BYTE,4)) { - *idealreg2spillmask[Op_VecS] = *idealreg2regmask[Op_VecS]; 
- idealreg2spillmask[Op_VecS]->or_with(C->FIRST_STACK_mask()); + idealreg2spillmask[Op_VecS]->assignFrom(*idealreg2regmask[Op_VecS]); + idealreg2spillmask[Op_VecS]->or_with(C->FIRST_STACK_mask()); } else { - *idealreg2spillmask[Op_VecS] = RegMask::EMPTY; + idealreg2spillmask[Op_VecS]->assignFrom(RegMask::EMPTY); } if (Matcher::vector_size_supported(T_FLOAT,2)) { // For VecD we need dual alignment and 8 bytes (2 slots) for spills. // RA guarantees such alignment since it is needed for Double and Long values. - *idealreg2spillmask[Op_VecD] = *idealreg2regmask[Op_VecD]; - idealreg2spillmask[Op_VecD]->or_with(aligned_stack_mask); + idealreg2spillmask[Op_VecD]->assignFrom(*idealreg2regmask[Op_VecD]); + idealreg2spillmask[Op_VecD]->or_with(aligned_stack_mask); } else { - *idealreg2spillmask[Op_VecD] = RegMask::EMPTY; + idealreg2spillmask[Op_VecD]->assignFrom(RegMask::EMPTY); } if (Matcher::vector_size_supported(T_FLOAT,4)) { @@ -541,12 +541,12 @@ void Matcher::init_first_stack_mask() { aligned_stack_mask.remove(in); in = OptoReg::add(in, -1); } - aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecX); - assert(aligned_stack_mask.is_infinite_stack(), "should be infinite stack"); - *idealreg2spillmask[Op_VecX] = *idealreg2regmask[Op_VecX]; - idealreg2spillmask[Op_VecX]->or_with(aligned_stack_mask); + aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecX); + assert(aligned_stack_mask.is_infinite_stack(), "should be infinite stack"); + idealreg2spillmask[Op_VecX]->assignFrom(*idealreg2regmask[Op_VecX]); + idealreg2spillmask[Op_VecX]->or_with(aligned_stack_mask); } else { - *idealreg2spillmask[Op_VecX] = RegMask::EMPTY; + idealreg2spillmask[Op_VecX]->assignFrom(RegMask::EMPTY); } if (Matcher::vector_size_supported(T_FLOAT,8)) { @@ -556,12 +556,12 @@ void Matcher::init_first_stack_mask() { aligned_stack_mask.remove(in); in = OptoReg::add(in, -1); } - aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecY); - assert(aligned_stack_mask.is_infinite_stack(), "should be infinite stack"); - *idealreg2spillmask[Op_VecY] = *idealreg2regmask[Op_VecY]; - idealreg2spillmask[Op_VecY]->or_with(aligned_stack_mask); + aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecY); + assert(aligned_stack_mask.is_infinite_stack(), "should be infinite stack"); + idealreg2spillmask[Op_VecY]->assignFrom(*idealreg2regmask[Op_VecY]); + idealreg2spillmask[Op_VecY]->or_with(aligned_stack_mask); } else { - *idealreg2spillmask[Op_VecY] = RegMask::EMPTY; + idealreg2spillmask[Op_VecY]->assignFrom(RegMask::EMPTY); } if (Matcher::vector_size_supported(T_FLOAT,16)) { @@ -571,12 +571,12 @@ void Matcher::init_first_stack_mask() { aligned_stack_mask.remove(in); in = OptoReg::add(in, -1); } - aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecZ); - assert(aligned_stack_mask.is_infinite_stack(), "should be infinite stack"); - *idealreg2spillmask[Op_VecZ] = *idealreg2regmask[Op_VecZ]; - idealreg2spillmask[Op_VecZ]->or_with(aligned_stack_mask); + aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecZ); + assert(aligned_stack_mask.is_infinite_stack(), "should be infinite stack"); + idealreg2spillmask[Op_VecZ]->assignFrom(*idealreg2regmask[Op_VecZ]); + idealreg2spillmask[Op_VecZ]->or_with(aligned_stack_mask); } else { - *idealreg2spillmask[Op_VecZ] = RegMask::EMPTY; + idealreg2spillmask[Op_VecZ]->assignFrom(RegMask::EMPTY); } if (Matcher::supports_scalable_vector()) { @@ -593,7 +593,7 @@ void Matcher::init_first_stack_mask() { // For RegVectMask scalable_stack_mask.clear_to_sets(scalable_predicate_reg_slots()); 
assert(scalable_stack_mask.is_infinite_stack(), "should be infinite stack"); - *idealreg2spillmask[Op_RegVectMask] = *idealreg2regmask[Op_RegVectMask]; + idealreg2spillmask[Op_RegVectMask]->assignFrom(*idealreg2regmask[Op_RegVectMask]); idealreg2spillmask[Op_RegVectMask]->or_with(scalable_stack_mask); } @@ -605,12 +605,12 @@ void Matcher::init_first_stack_mask() { } // For VecA - scalable_stack_mask.clear_to_sets(RegMask::SlotsPerVecA); - assert(scalable_stack_mask.is_infinite_stack(), "should be infinite stack"); - *idealreg2spillmask[Op_VecA] = *idealreg2regmask[Op_VecA]; - idealreg2spillmask[Op_VecA]->or_with(scalable_stack_mask); + scalable_stack_mask.clear_to_sets(RegMask::SlotsPerVecA); + assert(scalable_stack_mask.is_infinite_stack(), "should be infinite stack"); + idealreg2spillmask[Op_VecA]->assignFrom(*idealreg2regmask[Op_VecA]); + idealreg2spillmask[Op_VecA]->or_with(scalable_stack_mask); } else { - *idealreg2spillmask[Op_VecA] = RegMask::EMPTY; + idealreg2spillmask[Op_VecA]->assignFrom(RegMask::EMPTY); } if (UseFPUForSpilling) { @@ -639,20 +639,20 @@ void Matcher::init_first_stack_mask() { // Make up debug masks. Any spill slot plus callee-save (SOE) registers. // Caller-save (SOC, AS) registers are assumed to be trashable by the various // inline-cache fixup routines. - *idealreg2debugmask [Op_RegN] = *idealreg2spillmask[Op_RegN]; - *idealreg2debugmask [Op_RegI] = *idealreg2spillmask[Op_RegI]; - *idealreg2debugmask [Op_RegL] = *idealreg2spillmask[Op_RegL]; - *idealreg2debugmask [Op_RegF] = *idealreg2spillmask[Op_RegF]; - *idealreg2debugmask [Op_RegD] = *idealreg2spillmask[Op_RegD]; - *idealreg2debugmask [Op_RegP] = *idealreg2spillmask[Op_RegP]; - *idealreg2debugmask [Op_RegVectMask] = *idealreg2spillmask[Op_RegVectMask]; + idealreg2debugmask[Op_RegN]->assignFrom(*idealreg2spillmask[Op_RegN]); + idealreg2debugmask[Op_RegI]->assignFrom(*idealreg2spillmask[Op_RegI]); + idealreg2debugmask[Op_RegL]->assignFrom(*idealreg2spillmask[Op_RegL]); + idealreg2debugmask[Op_RegF]->assignFrom(*idealreg2spillmask[Op_RegF]); + idealreg2debugmask[Op_RegD]->assignFrom(*idealreg2spillmask[Op_RegD]); + idealreg2debugmask[Op_RegP]->assignFrom(*idealreg2spillmask[Op_RegP]); + idealreg2debugmask[Op_RegVectMask]->assignFrom(*idealreg2spillmask[Op_RegVectMask]); - *idealreg2debugmask [Op_VecA] = *idealreg2spillmask[Op_VecA]; - *idealreg2debugmask [Op_VecS] = *idealreg2spillmask[Op_VecS]; - *idealreg2debugmask [Op_VecD] = *idealreg2spillmask[Op_VecD]; - *idealreg2debugmask [Op_VecX] = *idealreg2spillmask[Op_VecX]; - *idealreg2debugmask [Op_VecY] = *idealreg2spillmask[Op_VecY]; - *idealreg2debugmask [Op_VecZ] = *idealreg2spillmask[Op_VecZ]; + idealreg2debugmask[Op_VecA]->assignFrom(*idealreg2spillmask[Op_VecA]); + idealreg2debugmask[Op_VecS]->assignFrom(*idealreg2spillmask[Op_VecS]); + idealreg2debugmask[Op_VecD]->assignFrom(*idealreg2spillmask[Op_VecD]); + idealreg2debugmask[Op_VecX]->assignFrom(*idealreg2spillmask[Op_VecX]); + idealreg2debugmask[Op_VecY]->assignFrom(*idealreg2spillmask[Op_VecY]); + idealreg2debugmask[Op_VecZ]->assignFrom(*idealreg2spillmask[Op_VecZ]); // Prevent stub compilations from attempting to reference // callee-saved (SOE) registers from debug info @@ -702,8 +702,9 @@ void Matcher::Fixup_Save_On_Entry( ) { RegMask *ret_rms = init_input_masks( ret_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask ); // Returns have 0 or 1 returned values depending on call signature. // Return register is specified by return_value in the AD file. 
- if (ret_edge_cnt > TypeFunc::Parms) - ret_rms[TypeFunc::Parms+0] = _return_value_mask; + if (ret_edge_cnt > TypeFunc::Parms) { + ret_rms[TypeFunc::Parms + 0].assignFrom(_return_value_mask); + } // Input RegMask array shared by all ForwardExceptions uint forw_exc_edge_cnt = TypeFunc::Parms; @@ -715,7 +716,7 @@ void Matcher::Fixup_Save_On_Entry( ) { // Rethrow takes exception oop only, but in the argument 0 slot. OptoReg::Name reg = find_receiver(); if (reg >= 0) { - reth_rms[TypeFunc::Parms] = mreg2regmask[reg]; + reth_rms[TypeFunc::Parms].assignFrom(mreg2regmask[reg]); #ifdef _LP64 // Need two slots for ptrs in 64-bit land reth_rms[TypeFunc::Parms].insert(OptoReg::add(OptoReg::Name(reg), 1)); @@ -737,8 +738,8 @@ void Matcher::Fixup_Save_On_Entry( ) { for( i=1; i < root->req(); i++ ) { MachReturnNode *m = root->in(i)->as_MachReturn(); if( m->ideal_Opcode() == Op_TailCall ) { - tail_call_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0); - tail_call_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1); + tail_call_rms[TypeFunc::Parms + 0].assignFrom(m->MachNode::in_RegMask(TypeFunc::Parms + 0)); + tail_call_rms[TypeFunc::Parms + 1].assignFrom(m->MachNode::in_RegMask(TypeFunc::Parms + 1)); break; } } @@ -750,8 +751,8 @@ void Matcher::Fixup_Save_On_Entry( ) { for( i=1; i < root->req(); i++ ) { MachReturnNode *m = root->in(i)->as_MachReturn(); if( m->ideal_Opcode() == Op_TailJump ) { - tail_jump_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0); - tail_jump_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1); + tail_jump_rms[TypeFunc::Parms + 0].assignFrom(m->MachNode::in_RegMask(TypeFunc::Parms + 0)); + tail_jump_rms[TypeFunc::Parms + 1].assignFrom(m->MachNode::in_RegMask(TypeFunc::Parms + 1)); break; } } @@ -784,14 +785,14 @@ void Matcher::Fixup_Save_On_Entry( ) { if( is_save_on_entry(i) ) { // Add the save-on-entry to the mask array - ret_rms [ ret_edge_cnt] = mreg2regmask[i]; - reth_rms [ reth_edge_cnt] = mreg2regmask[i]; - tail_call_rms[tail_call_edge_cnt] = mreg2regmask[i]; - tail_jump_rms[tail_jump_edge_cnt] = mreg2regmask[i]; - forw_exc_rms [ forw_exc_edge_cnt] = mreg2regmask[i]; + ret_rms [ ret_edge_cnt].assignFrom(mreg2regmask[i]); + reth_rms [ reth_edge_cnt].assignFrom(mreg2regmask[i]); + tail_call_rms[tail_call_edge_cnt].assignFrom(mreg2regmask[i]); + tail_jump_rms[tail_jump_edge_cnt].assignFrom(mreg2regmask[i]); + forw_exc_rms [ forw_exc_edge_cnt].assignFrom(mreg2regmask[i]); // Halts need the SOE registers, but only in the stack as debug info. // A just-prior uncommon-trap or deoptimization will use the SOE regs. 
- halt_rms [ halt_edge_cnt] = *idealreg2spillmask[_register_save_type[i]]; + halt_rms [ halt_edge_cnt].assignFrom(*idealreg2spillmask[_register_save_type[i]]); Node *mproj; @@ -815,12 +816,12 @@ void Matcher::Fixup_Save_On_Entry( ) { _register_save_type[i-1] == Op_RegF && _register_save_type[i ] == Op_RegF && is_save_on_entry(i-1) ) { - ret_rms [ ret_edge_cnt] = RegMask::EMPTY; - reth_rms [ reth_edge_cnt] = RegMask::EMPTY; - tail_call_rms[tail_call_edge_cnt] = RegMask::EMPTY; - tail_jump_rms[tail_jump_edge_cnt] = RegMask::EMPTY; - forw_exc_rms [ forw_exc_edge_cnt] = RegMask::EMPTY; - halt_rms [ halt_edge_cnt] = RegMask::EMPTY; + ret_rms [ ret_edge_cnt].assignFrom(RegMask::EMPTY); + reth_rms [ reth_edge_cnt].assignFrom(RegMask::EMPTY); + tail_call_rms[tail_call_edge_cnt].assignFrom(RegMask::EMPTY); + tail_jump_rms[tail_jump_edge_cnt].assignFrom(RegMask::EMPTY); + forw_exc_rms [ forw_exc_edge_cnt].assignFrom(RegMask::EMPTY); + halt_rms [ halt_edge_cnt].assignFrom(RegMask::EMPTY); mproj = C->top(); } // Is this a RegI low half of a RegL? Double up 2 adjacent RegI's @@ -843,12 +844,12 @@ void Matcher::Fixup_Save_On_Entry( ) { _register_save_type[i-1] == Op_RegI && _register_save_type[i ] == Op_RegI && is_save_on_entry(i-1) ) { - ret_rms [ ret_edge_cnt] = RegMask::EMPTY; - reth_rms [ reth_edge_cnt] = RegMask::EMPTY; - tail_call_rms[tail_call_edge_cnt] = RegMask::EMPTY; - tail_jump_rms[tail_jump_edge_cnt] = RegMask::EMPTY; - forw_exc_rms [ forw_exc_edge_cnt] = RegMask::EMPTY; - halt_rms [ halt_edge_cnt] = RegMask::EMPTY; + ret_rms [ ret_edge_cnt].assignFrom(RegMask::EMPTY); + reth_rms [ reth_edge_cnt].assignFrom(RegMask::EMPTY); + tail_call_rms[tail_call_edge_cnt].assignFrom(RegMask::EMPTY); + tail_jump_rms[tail_jump_edge_cnt].assignFrom(RegMask::EMPTY); + forw_exc_rms [ forw_exc_edge_cnt].assignFrom(RegMask::EMPTY); + halt_rms [ halt_edge_cnt].assignFrom(RegMask::EMPTY); mproj = C->top(); } else { // Make a projection for it off the Start @@ -875,7 +876,7 @@ void Matcher::init_spill_mask( Node *ret ) { if( idealreg2regmask[Op_RegI] ) return; // One time only init OptoReg::c_frame_pointer = c_frame_pointer(); - c_frame_ptr_mask = RegMask(c_frame_pointer()); + c_frame_ptr_mask.assignFrom(RegMask(c_frame_pointer())); #ifdef _LP64 // pointers are twice as big c_frame_ptr_mask.insert(OptoReg::add(c_frame_pointer(), 1)); @@ -1240,8 +1241,8 @@ MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) { } // Do all the pre-defined non-Empty register masks - msfpt->_in_rms[TypeFunc::ReturnAdr] = _return_addr_mask; - msfpt->_in_rms[TypeFunc::FramePtr ] = c_frame_ptr_mask; + msfpt->_in_rms[TypeFunc::ReturnAdr].assignFrom(_return_addr_mask); + msfpt->_in_rms[TypeFunc::FramePtr ].assignFrom(c_frame_ptr_mask); // Place first outgoing argument can possibly be put. 
OptoReg::Name begin_out_arg_area = OptoReg::add(_new_SP, C->out_preserve_stack_slots()); diff --git a/src/hotspot/share/opto/matcher.hpp b/src/hotspot/share/opto/matcher.hpp index 0b609b70ab5..e4396b423ac 100644 --- a/src/hotspot/share/opto/matcher.hpp +++ b/src/hotspot/share/opto/matcher.hpp @@ -408,14 +408,14 @@ public: static int inline_cache_reg_encode(); // Register for DIVI projection of divmodI - static RegMask divI_proj_mask(); + static const RegMask& divI_proj_mask(); // Register for MODI projection of divmodI - static RegMask modI_proj_mask(); + static const RegMask& modI_proj_mask(); // Register for DIVL projection of divmodL - static RegMask divL_proj_mask(); + static const RegMask& divL_proj_mask(); // Register for MODL projection of divmodL - static RegMask modL_proj_mask(); + static const RegMask& modL_proj_mask(); // Use hardware DIV instruction when it is faster than // a code which use multiply for division by constant. diff --git a/src/hotspot/share/opto/postaloc.cpp b/src/hotspot/share/opto/postaloc.cpp index 8eb9167921b..c961340e71a 100644 --- a/src/hotspot/share/opto/postaloc.cpp +++ b/src/hotspot/share/opto/postaloc.cpp @@ -767,7 +767,7 @@ void PhaseChaitin::post_allocate_copy_removal() { if (!is_adjacent) { // Nearly always adjacent // Sparc occasionally has non-adjacent pairs. // Find the actual other value - RegMask tmp = lrgs(lidx).mask(); + RegMask tmp(lrgs(lidx).mask()); tmp.remove(nreg); nreg_lo = tmp.find_first_elem(); } diff --git a/src/hotspot/share/opto/regmask.hpp b/src/hotspot/share/opto/regmask.hpp index 832499d951d..453fbb45d33 100644 --- a/src/hotspot/share/opto/regmask.hpp +++ b/src/hotspot/share/opto/regmask.hpp @@ -299,39 +299,6 @@ class RegMask { } } - // Make us a copy of src - void copy(const RegMask& src) { - assert(_offset == src._offset, "offset mismatch"); - _hwm = src._hwm; - _lwm = src._lwm; - - // Copy base mask - memcpy(_rm_word, src._rm_word, sizeof(uintptr_t) * RM_SIZE_IN_WORDS); - _infinite_stack = src._infinite_stack; - - // Copy extension - if (src._rm_word_ext != nullptr) { - assert(src._rm_size_in_words > RM_SIZE_IN_WORDS, "sanity"); - assert(_original_ext_address == &_rm_word_ext, "clone sanity check"); - grow(src._rm_size_in_words, false); - memcpy(_rm_word_ext, src._rm_word_ext, - sizeof(uintptr_t) * (src._rm_size_in_words - RM_SIZE_IN_WORDS)); - } - - // If the source is smaller than us, we need to set the gap according to - // the sources infinite_stack flag. - if (src._rm_size_in_words < _rm_size_in_words) { - int value = 0; - if (src.is_infinite_stack()) { - value = 0xFF; - _hwm = rm_word_max_index(); - } - set_range(src._rm_size_in_words, value, _rm_size_in_words - src._rm_size_in_words); - } - - assert(valid_watermarks(), "post-condition"); - } - // Make the watermarks as tight as possible. void trim_watermarks() { if (_hwm < _lwm) { @@ -453,21 +420,52 @@ public: } explicit RegMask(OptoReg::Name reg) : RegMask(reg, nullptr) {} - // ---------------------------------------- - // Deep copying constructors and assignment - // ---------------------------------------- + // Make us represent the same set of registers as src. 
+ void assignFrom(const RegMask& src) { + assert(_offset == src._offset, "offset mismatch"); + _hwm = src._hwm; + _lwm = src._lwm; + // Copy base mask + memcpy(_rm_word, src._rm_word, sizeof(uintptr_t) * RM_SIZE_IN_WORDS); + _infinite_stack = src._infinite_stack; + + // Copy extension + if (src._rm_word_ext != nullptr) { + assert(src._rm_size_in_words > RM_SIZE_IN_WORDS, "sanity"); + assert(_original_ext_address == &_rm_word_ext, "clone sanity check"); + grow(src._rm_size_in_words, false); + memcpy(_rm_word_ext, src._rm_word_ext, + sizeof(uintptr_t) * (src._rm_size_in_words - RM_SIZE_IN_WORDS)); + } + + // If the source is smaller than us, we need to set the gap according to + // the sources infinite_stack flag. + if (src._rm_size_in_words < _rm_size_in_words) { + int value = 0; + if (src.is_infinite_stack()) { + value = 0xFF; + _hwm = rm_word_max_index(); + } + set_range(src._rm_size_in_words, value, _rm_size_in_words - src._rm_size_in_words); + } + + assert(valid_watermarks(), "post-condition"); + } + + // Construct from other register mask (deep copy) and register an arena + // for potential register mask extension. Passing nullptr as arena disables + // extension. RegMask(const RegMask& rm, Arena* arena) : _arena(arena), _rm_size_in_words(RM_SIZE_IN_WORDS), _offset(rm._offset) { - copy(rm); + assignFrom(rm); } - RegMask(const RegMask& rm) : RegMask(rm, nullptr) {} + // Copy constructor (deep copy). By default does not allow extension. + explicit RegMask(const RegMask& rm) : RegMask(rm, nullptr) {} - RegMask& operator=(const RegMask& rm) { - copy(rm); - return *this; - } + // Disallow copy assignment (use assignFrom instead) + RegMask& operator=(const RegMask&) = delete; // ---------------- // End deep copying diff --git a/test/hotspot/gtest/opto/test_regmask.cpp b/test/hotspot/gtest/opto/test_regmask.cpp index 55dc020b6d0..f367ca4bef4 100644 --- a/test/hotspot/gtest/opto/test_regmask.cpp +++ b/test/hotspot/gtest/opto/test_regmask.cpp @@ -523,8 +523,8 @@ TEST_VM_ASSERT_MSG(RegMask, offset_mismatch, ".*offset mismatch") { RegMask rm2; rm1.set_infinite_stack(true); rm1.rollover(); - // Cannot copy with different offsets - rm2 = rm1; + // Cannot assign with different offsets + rm2.assignFrom(rm1); } #endif @@ -1241,8 +1241,8 @@ TEST_VM(RegMask, random_copy) { // Randomly initialize source randomize(src); - // Copy source to destination - dst = src; + // Set destination to source + dst.assignFrom(src); // Check equality bool passed = src.gtest_equals(dst);
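For readers skimming the hunks above, here is a minimal call-site sketch of what the change means in practice: the copy constructor becomes explicit and `operator=` is deleted, so copies must be spelled out and assignment goes through the named `assignFrom` member. The snippet is illustrative only (the `example` function and its arguments are invented for this sketch, not part of the patch) and assumes HotSpot's `opto/regmask.hpp` as modified here.

    #include "opto/regmask.hpp"

    // Sketch only -- not part of the patch. Uses the interface shown in the
    // regmask.hpp hunks: explicit deep-copying constructor, named
    // assignFrom(const RegMask&), and deleted copy assignment.
    void example(const RegMask& src, RegMask& dst) {
      // Before this change, both of these compiled:
      //   RegMask tmp = src;   // implicit copy construction
      //   dst = src;           // copy assignment
      //
      // After this change:
      RegMask tmp(src);        // copy construction must now be explicit
      dst.assignFrom(src);     // replaces the deleted copy assignment
    }

This mirrors the mechanical rewrites in the diff itself, e.g. `rm = match->divI_proj_mask()` becoming `rm.assignFrom(match->divI_proj_mask())` in divnode.cpp and `RegMask tmp = lrgs(lidx).mask()` becoming `RegMask tmp(lrgs(lidx).mask())` in postaloc.cpp.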