8355074: RISC-V: C2: Support Vector-Scalar version of Zvbb Vector And-Not instruction

Reviewed-by: fjiang, fyang
This commit is contained in:
Anjian-Wen 2025-04-25 12:50:12 +00:00 committed by Fei Yang
parent a2f9c24844
commit 5c067232bf
4 changed files with 147 additions and 0 deletions

View File

@ -2323,6 +2323,7 @@ enum Nf {
}
// Vector Bit-manipulation used in Cryptography (Zvbb) Extension
// Vector-scalar (.vx) forms; the third argument (0b100) selects the
// vector-scalar funct3 encoding, the last is the instruction's funct6
// (field meaning per the local INSN macro, defined above this hunk).
INSN(vandn_vx, 0b1010111, 0b100, 0b000001); // vandn.vx: per Zvbb spec, vd[i] = vs2[i] & ~rs1
INSN(vrol_vx, 0b1010111, 0b100, 0b010101);  // vrol.vx: rotate-left by scalar rs1
INSN(vror_vx, 0b1010111, 0b100, 0b010100);  // vror.vx: rotate-right by scalar rs1

View File

@ -1187,6 +1187,70 @@ instruct vand_notL_masked(vReg dst_src1, vReg src2, immL_M1 m1, vRegMask_V0 v0)
ins_pipe(pipe_slow);
%}
// Vector-scalar and-not for int/short/byte lanes: dst = src1 & ~broadcast(src2).
// XorI(src2, m1) with immI_M1 (constant -1) is scalar bitwise-not, so the
// matched tree AndV(src1, Replicate(~src2)) maps directly to Zvbb vandn.vx.
instruct vand_notI_vx(vReg dst, vReg src1, iRegIorL2I src2, immI_M1 m1) %{
  predicate(UseZvbb);
  // NOTE(review): rendered as two separate predicate() clauses -- confirm
  // against the original source that ADLC accepts this (vs. a single
  // predicate combined with &&).
  predicate(Matcher::vector_element_basic_type(n) == T_INT ||
            Matcher::vector_element_basic_type(n) == T_BYTE ||
            Matcher::vector_element_basic_type(n) == T_SHORT);
  match(Set dst (AndV src1 (Replicate (XorI src2 m1))));
  format %{ "vand_notI_vx $dst, $src1, $src2" %}
  ins_encode %{
    // Configure vtype for the node's element type and vector length,
    // then emit vandn.vx: dst = src1 & ~src2 (src2 is the scalar rs1).
    BasicType bt = Matcher::vector_element_basic_type(this);
    __ vsetvli_helper(bt, Matcher::vector_length(this));
    __ vandn_vx(as_VectorRegister($dst$$reg),
                as_VectorRegister($src1$$reg),
                as_Register($src2$$reg));
  %}
  ins_pipe(pipe_slow);
%}
// Vector-scalar and-not for long lanes: dst = src1 & ~broadcast(src2).
// XorL(src2, m1) with immL_M1 (constant -1) is scalar bitwise-not.
instruct vand_notL_vx(vReg dst, vReg src1, iRegL src2, immL_M1 m1) %{
  predicate(UseZvbb);
  // NOTE(review): two predicate() clauses as rendered -- confirm ADLC accepts this.
  predicate(Matcher::vector_element_basic_type(n) == T_LONG);
  match(Set dst (AndV src1 (Replicate (XorL src2 m1))));
  format %{ "vand_notL_vx $dst, $src1, $src2" %}
  ins_encode %{
    // Element type is fixed to T_LONG by the predicate, so no bt lookup needed.
    __ vsetvli_helper(T_LONG, Matcher::vector_length(this));
    __ vandn_vx(as_VectorRegister($dst$$reg),
                as_VectorRegister($src1$$reg),
                as_Register($src2$$reg));
  %}
  ins_pipe(pipe_slow);
%}
// Masked vector-scalar and-not for int/short/byte lanes: under the v0 mask,
// dst_src1 = dst_src1 & ~broadcast(src2). Two-address form: dst_src1 is both
// the first vector source and the destination.
instruct vand_notI_vx_masked(vReg dst_src1, iRegIorL2I src2, immI_M1 m1, vRegMask_V0 v0) %{
  predicate(UseZvbb);
  // NOTE(review): two predicate() clauses as rendered -- confirm ADLC accepts this.
  predicate(Matcher::vector_element_basic_type(n) == T_INT ||
            Matcher::vector_element_basic_type(n) == T_BYTE ||
            Matcher::vector_element_basic_type(n) == T_SHORT);
  match(Set dst_src1 (AndV (Binary dst_src1 (Replicate (XorI src2 m1))) v0));
  format %{ "vand_notI_vx_masked $dst_src1, $dst_src1, $src2, $v0" %}
  ins_encode %{
    BasicType bt = Matcher::vector_element_basic_type(this);
    __ vsetvli_helper(bt, Matcher::vector_length(this));
    __ vandn_vx(as_VectorRegister($dst_src1$$reg),
                as_VectorRegister($dst_src1$$reg),
                as_Register($src2$$reg),
                Assembler::v0_t);   // execute under the v0 mask
  %}
  ins_pipe(pipe_slow);
%}
// Masked vector-scalar and-not for long lanes: under the v0 mask,
// dst_src1 = dst_src1 & ~broadcast(src2). Two-address form.
instruct vand_notL_vx_masked(vReg dst_src1, iRegL src2, immL_M1 m1, vRegMask_V0 v0) %{
  predicate(UseZvbb);
  // NOTE(review): two predicate() clauses as rendered -- confirm ADLC accepts this.
  predicate(Matcher::vector_element_basic_type(n) == T_LONG);
  match(Set dst_src1 (AndV (Binary dst_src1 (Replicate (XorL src2 m1))) v0));
  format %{ "vand_notL_vx_masked $dst_src1, $dst_src1, $src2, $v0" %}
  ins_encode %{
    // Element type is fixed to T_LONG by the predicate.
    __ vsetvli_helper(T_LONG, Matcher::vector_length(this));
    __ vandn_vx(as_VectorRegister($dst_src1$$reg),
                as_VectorRegister($dst_src1$$reg),
                as_Register($src2$$reg),
                Assembler::v0_t);   // execute under the v0 mask
  %}
  ins_pipe(pipe_slow);
%}
// ------------------------------ Vector not -----------------------------------
// vector not

View File

@ -2136,6 +2136,26 @@ public class IRNode {
machOnlyNameRegex(VAND_NOT_L_MASKED, "vand_notL_masked");
}
// IR-framework node names for the RISC-V vector-scalar and-not match rules
// added in riscv_v.ad. machOnlyNameRegex registers them as machine-node name
// matches, so these constants are only meaningful for riscv64 @IR checks.
public static final String RISCV_VAND_NOTI_VX = PREFIX + "RISCV_VAND_NOTI_VX" + POSTFIX;
static {
    // Unmasked int/short/byte vector-scalar rule.
    machOnlyNameRegex(RISCV_VAND_NOTI_VX, "vand_notI_vx");
}
public static final String RISCV_VAND_NOTL_VX = PREFIX + "RISCV_VAND_NOTL_VX" + POSTFIX;
static {
    // Unmasked long vector-scalar rule.
    machOnlyNameRegex(RISCV_VAND_NOTL_VX, "vand_notL_vx");
}
public static final String RISCV_VAND_NOTI_VX_MASKED = PREFIX + "RISCV_VAND_NOTI_VX_MASKED" + POSTFIX;
static {
    // v0-masked int/short/byte vector-scalar rule.
    machOnlyNameRegex(RISCV_VAND_NOTI_VX_MASKED, "vand_notI_vx_masked");
}
public static final String RISCV_VAND_NOTL_VX_MASKED = PREFIX + "RISCV_VAND_NOTL_VX_MASKED" + POSTFIX;
static {
    // v0-masked long vector-scalar rule.
    machOnlyNameRegex(RISCV_VAND_NOTL_VX_MASKED, "vand_notL_vx_masked");
}
public static final String VECTOR_BLEND_B = VECTOR_PREFIX + "VECTOR_BLEND_B" + POSTFIX;
static {
vectorNode(VECTOR_BLEND_B, "VectorBlend", TYPE_BYTE);

View File

@ -154,6 +154,68 @@ public class AllBitsSetVectorMatchRuleTest {
}
}
// Checks that C2 matches (~av) AND_NOT bs -- i.e. AndV(v, Replicate(~scalar))
// -- to the vand_notI_vx rule (Zvbb vandn.vx) on riscv64. The code shape here
// is what the @IR check depends on; do not restructure the vector expression.
@Test
@Warmup(10000)
@IR(counts = { IRNode.RISCV_VAND_NOTI_VX, " >= 1" }, applyIfPlatform = {"riscv64", "true"})
public static void testAllBitsSetVectorRegI() {
    IntVector av = IntVector.fromArray(I_SPECIES, ia, 0);
    int bs = ib[0];
    // AND_NOT with a scalar: lanewise broadcasts bs and computes (~av) & (~bs).
    av.not().lanewise(VectorOperators.AND_NOT, bs).intoArray(ir, 0);
    // Verify results
    for (int i = 0; i < I_SPECIES.length(); i++) {
        Asserts.assertEquals((~ia[i]) & (~bs), ir[i]);
    }
}
// Long-lane variant: checks that (~av) AND_NOT bs is matched to the
// vand_notL_vx rule (Zvbb vandn.vx) on riscv64. The code shape here is what
// the @IR check depends on; do not restructure the vector expression.
@Test
@Warmup(10000)
@IR(counts = { IRNode.RISCV_VAND_NOTL_VX, " >= 1" }, applyIfPlatform = {"riscv64", "true"})
public static void testAllBitsSetVectorRegL() {
    LongVector av = LongVector.fromArray(L_SPECIES, la, 0);
    long bs = lb[0];
    // AND_NOT with a scalar: lanewise broadcasts bs and computes (~av) & (~bs).
    av.not().lanewise(VectorOperators.AND_NOT, bs).intoArray(lr, 0);
    // Verify results
    for (int i = 0; i < L_SPECIES.length(); i++) {
        Asserts.assertEquals((~la[i]) & (~bs), lr[i]);
    }
}
// Masked variant: checks that the masked (~av) AND_NOT bs is matched to the
// vand_notI_vx_masked rule (Zvbb vandn.vx under the v0 mask) on riscv64.
// The vector-expression shape is what the @IR check depends on; only the
// scalar verification loop below may be restructured.
@Test
@Warmup(10000)
@IR(counts = { IRNode.RISCV_VAND_NOTI_VX_MASKED, " >= 1" }, applyIfPlatform = {"riscv64", "true"})
public static void testAllBitsSetVectorRegIMask() {
    VectorMask<Integer> avm = VectorMask.fromArray(I_SPECIES, ma, 0);
    IntVector av = IntVector.fromArray(I_SPECIES, ia, 0);
    int bs = ib[0];
    av.not().lanewise(VectorOperators.AND_NOT, bs, avm).intoArray(ir, 0);
    // Verify results: only lanes with the mask set are computed; unset lanes
    // are not checked (their ir[] content is whatever was there before).
    for (int i = 0; i < I_SPECIES.length(); i++) {
        if (ma[i]) {   // idiomatic boolean test instead of 'ma[i] == true'
            Asserts.assertEquals((~ia[i]) & (~bs), ir[i]);
        }
    }
}
// Masked long-lane variant: checks that the masked (~av) AND_NOT bs is matched
// to the vand_notL_vx_masked rule (Zvbb vandn.vx under the v0 mask) on riscv64.
// The vector-expression shape is what the @IR check depends on; only the
// scalar verification loop below may be restructured.
@Test
@Warmup(10000)
@IR(counts = { IRNode.RISCV_VAND_NOTL_VX_MASKED, " >= 1" }, applyIfPlatform = {"riscv64", "true"})
public static void testAllBitsSetVectorRegLMask() {
    VectorMask<Long> avm = VectorMask.fromArray(L_SPECIES, ma, 0);
    LongVector av = LongVector.fromArray(L_SPECIES, la, 0);
    long bs = lb[0];
    av.not().lanewise(VectorOperators.AND_NOT, bs, avm).intoArray(lr, 0);
    // Verify results: only lanes with the mask set are computed; unset lanes
    // are not checked (their lr[] content is whatever was there before).
    for (int i = 0; i < L_SPECIES.length(); i++) {
        if (ma[i]) {   // idiomatic boolean test instead of 'ma[i] == true'
            Asserts.assertEquals((~la[i]) & (~bs), lr[i]);
        }
    }
}
@Test
@Warmup(10000)
@IR(counts = { IRNode.VAND_NOT_L, " >= 1" }, applyIfPlatform = {"aarch64", "true"}, applyIf = {"UseSVE", "0"})