diff --git a/src/hotspot/cpu/riscv/assembler_riscv.hpp b/src/hotspot/cpu/riscv/assembler_riscv.hpp
index e036cb6b1ec..4773043e1ba 100644
--- a/src/hotspot/cpu/riscv/assembler_riscv.hpp
+++ b/src/hotspot/cpu/riscv/assembler_riscv.hpp
@@ -2323,6 +2323,7 @@ enum Nf {
   }
 
   // Vector Bit-manipulation used in Cryptography (Zvbb) Extension
+  INSN(vandn_vx, 0b1010111, 0b100, 0b000001);
   INSN(vrol_vx, 0b1010111, 0b100, 0b010101);
   INSN(vror_vx, 0b1010111, 0b100, 0b010100);
diff --git a/src/hotspot/cpu/riscv/riscv_v.ad b/src/hotspot/cpu/riscv/riscv_v.ad
index 6fea439954c..7df3c857a4d 100644
--- a/src/hotspot/cpu/riscv/riscv_v.ad
+++ b/src/hotspot/cpu/riscv/riscv_v.ad
@@ -1187,6 +1187,70 @@ instruct vand_notL_masked(vReg dst_src1, vReg src2, immL_M1 m1, vRegMask_V0 v0)
   ins_pipe(pipe_slow);
 %}
 
+instruct vand_notI_vx(vReg dst, vReg src1, iRegIorL2I src2, immI_M1 m1) %{
+  predicate(UseZvbb);
+  predicate(Matcher::vector_element_basic_type(n) == T_INT ||
+            Matcher::vector_element_basic_type(n) == T_BYTE ||
+            Matcher::vector_element_basic_type(n) == T_SHORT);
+  match(Set dst (AndV src1 (Replicate (XorI src2 m1))));
+  format %{ "vand_notI_vx $dst, $src1, $src2" %}
+  ins_encode %{
+    BasicType bt = Matcher::vector_element_basic_type(this);
+    __ vsetvli_helper(bt, Matcher::vector_length(this));
+    __ vandn_vx(as_VectorRegister($dst$$reg),
+                as_VectorRegister($src1$$reg),
+                as_Register($src2$$reg));
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct vand_notL_vx(vReg dst, vReg src1, iRegL src2, immL_M1 m1) %{
+  predicate(UseZvbb);
+  predicate(Matcher::vector_element_basic_type(n) == T_LONG);
+  match(Set dst (AndV src1 (Replicate (XorL src2 m1))));
+  format %{ "vand_notL_vx $dst, $src1, $src2" %}
+  ins_encode %{
+    __ vsetvli_helper(T_LONG, Matcher::vector_length(this));
+    __ vandn_vx(as_VectorRegister($dst$$reg),
+                as_VectorRegister($src1$$reg),
+                as_Register($src2$$reg));
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct vand_notI_vx_masked(vReg dst_src1, iRegIorL2I src2, immI_M1 m1, vRegMask_V0 v0) %{
+  predicate(UseZvbb);
+  predicate(Matcher::vector_element_basic_type(n) == T_INT ||
+            Matcher::vector_element_basic_type(n) == T_BYTE ||
+            Matcher::vector_element_basic_type(n) == T_SHORT);
+  match(Set dst_src1 (AndV (Binary dst_src1 (Replicate (XorI src2 m1))) v0));
+  format %{ "vand_notI_vx_masked $dst_src1, $dst_src1, $src2, $v0" %}
+  ins_encode %{
+    BasicType bt = Matcher::vector_element_basic_type(this);
+    __ vsetvli_helper(bt, Matcher::vector_length(this));
+    __ vandn_vx(as_VectorRegister($dst_src1$$reg),
+                as_VectorRegister($dst_src1$$reg),
+                as_Register($src2$$reg),
+                Assembler::v0_t);
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct vand_notL_vx_masked(vReg dst_src1, iRegL src2, immL_M1 m1, vRegMask_V0 v0) %{
+  predicate(UseZvbb);
+  predicate(Matcher::vector_element_basic_type(n) == T_LONG);
+  match(Set dst_src1 (AndV (Binary dst_src1 (Replicate (XorL src2 m1))) v0));
+  format %{ "vand_notL_vx_masked $dst_src1, $dst_src1, $src2, $v0" %}
+  ins_encode %{
+    __ vsetvli_helper(T_LONG, Matcher::vector_length(this));
+    __ vandn_vx(as_VectorRegister($dst_src1$$reg),
+                as_VectorRegister($dst_src1$$reg),
+                as_Register($src2$$reg),
+                Assembler::v0_t);
+  %}
+  ins_pipe(pipe_slow);
+%}
+
 // ------------------------------ Vector not -----------------------------------
 
 // vector not
diff --git a/test/hotspot/jtreg/compiler/lib/ir_framework/IRNode.java b/test/hotspot/jtreg/compiler/lib/ir_framework/IRNode.java
index 0b224523561..5dabcf0f828 100644
--- a/test/hotspot/jtreg/compiler/lib/ir_framework/IRNode.java
+++ b/test/hotspot/jtreg/compiler/lib/ir_framework/IRNode.java
@@ -2136,6 +2136,26 @@ public class IRNode {
         machOnlyNameRegex(VAND_NOT_L_MASKED, "vand_notL_masked");
     }
 
+    public static final String RISCV_VAND_NOTI_VX = PREFIX + "RISCV_VAND_NOTI_VX" + POSTFIX;
+    static {
+        machOnlyNameRegex(RISCV_VAND_NOTI_VX, "vand_notI_vx");
+    }
+
+    public static final String RISCV_VAND_NOTL_VX = PREFIX + "RISCV_VAND_NOTL_VX" + POSTFIX;
+    static {
+        machOnlyNameRegex(RISCV_VAND_NOTL_VX, "vand_notL_vx");
+    }
+
+    public static final String RISCV_VAND_NOTI_VX_MASKED = PREFIX + "RISCV_VAND_NOTI_VX_MASKED" + POSTFIX;
+    static {
+        machOnlyNameRegex(RISCV_VAND_NOTI_VX_MASKED, "vand_notI_vx_masked");
+    }
+
+    public static final String RISCV_VAND_NOTL_VX_MASKED = PREFIX + "RISCV_VAND_NOTL_VX_MASKED" + POSTFIX;
+    static {
+        machOnlyNameRegex(RISCV_VAND_NOTL_VX_MASKED, "vand_notL_vx_masked");
+    }
+
     public static final String VECTOR_BLEND_B = VECTOR_PREFIX + "VECTOR_BLEND_B" + POSTFIX;
     static {
         vectorNode(VECTOR_BLEND_B, "VectorBlend", TYPE_BYTE);
diff --git a/test/hotspot/jtreg/compiler/vectorapi/AllBitsSetVectorMatchRuleTest.java b/test/hotspot/jtreg/compiler/vectorapi/AllBitsSetVectorMatchRuleTest.java
index f33dd24e726..bb88f60dd21 100644
--- a/test/hotspot/jtreg/compiler/vectorapi/AllBitsSetVectorMatchRuleTest.java
+++ b/test/hotspot/jtreg/compiler/vectorapi/AllBitsSetVectorMatchRuleTest.java
@@ -154,6 +154,68 @@ public class AllBitsSetVectorMatchRuleTest {
         }
     }
 
+    @Test
+    @Warmup(10000)
+    @IR(counts = { IRNode.RISCV_VAND_NOTI_VX, " >= 1" }, applyIfPlatform = {"riscv64", "true"})
+    public static void testAllBitsSetVectorRegI() {
+        IntVector av = IntVector.fromArray(I_SPECIES, ia, 0);
+        int bs = ib[0];
+        av.not().lanewise(VectorOperators.AND_NOT, bs).intoArray(ir, 0);
+
+        // Verify results
+        for (int i = 0; i < I_SPECIES.length(); i++) {
+            Asserts.assertEquals((~ia[i]) & (~bs), ir[i]);
+        }
+    }
+
+    @Test
+    @Warmup(10000)
+    @IR(counts = { IRNode.RISCV_VAND_NOTL_VX, " >= 1" }, applyIfPlatform = {"riscv64", "true"})
+    public static void testAllBitsSetVectorRegL() {
+        LongVector av = LongVector.fromArray(L_SPECIES, la, 0);
+        long bs = lb[0];
+        av.not().lanewise(VectorOperators.AND_NOT, bs).intoArray(lr, 0);
+
+        // Verify results
+        for (int i = 0; i < L_SPECIES.length(); i++) {
+            Asserts.assertEquals((~la[i]) & (~bs), lr[i]);
+        }
+    }
+
+    @Test
+    @Warmup(10000)
+    @IR(counts = { IRNode.RISCV_VAND_NOTI_VX_MASKED, " >= 1" }, applyIfPlatform = {"riscv64", "true"})
+    public static void testAllBitsSetVectorRegIMask() {
+        VectorMask<Integer> avm = VectorMask.fromArray(I_SPECIES, ma, 0);
+        IntVector av = IntVector.fromArray(I_SPECIES, ia, 0);
+        int bs = ib[0];
+        av.not().lanewise(VectorOperators.AND_NOT, bs, avm).intoArray(ir, 0);
+
+        // Verify results
+        for (int i = 0; i < I_SPECIES.length(); i++) {
+            if (ma[i] == true) {
+                Asserts.assertEquals((~ia[i]) & (~bs), ir[i]);
+            }
+        }
+    }
+
+    @Test
+    @Warmup(10000)
+    @IR(counts = { IRNode.RISCV_VAND_NOTL_VX_MASKED, " >= 1" }, applyIfPlatform = {"riscv64", "true"})
+    public static void testAllBitsSetVectorRegLMask() {
+        VectorMask<Long> avm = VectorMask.fromArray(L_SPECIES, ma, 0);
+        LongVector av = LongVector.fromArray(L_SPECIES, la, 0);
+        long bs = lb[0];
+        av.not().lanewise(VectorOperators.AND_NOT, bs, avm).intoArray(lr, 0);
+
+        // Verify results
+        for (int i = 0; i < L_SPECIES.length(); i++) {
+            if (ma[i] == true) {
+                Asserts.assertEquals((~la[i]) & (~bs), lr[i]);
+            }
+        }
+    }
+
     @Test
     @Warmup(10000)
     @IR(counts = { IRNode.VAND_NOT_L, " >= 1" }, applyIfPlatform = {"aarch64", "true"}, applyIf = {"UseSVE", "0"})