diff --git a/test/hotspot/jtreg/compiler/lib/ir_framework/IRNode.java b/test/hotspot/jtreg/compiler/lib/ir_framework/IRNode.java
index 965b0083c49..96b62a2b12a 100644
--- a/test/hotspot/jtreg/compiler/lib/ir_framework/IRNode.java
+++ b/test/hotspot/jtreg/compiler/lib/ir_framework/IRNode.java
@@ -2466,21 +2466,41 @@ public class IRNode {
         machOnlyNameRegex(VFMSB_MASKED, "vfmsb_masked");
     }
 
+    public static final String RISCV_VFNMSUB_MASKED = PREFIX + "RISCV_VFNMSUB_MASKED" + POSTFIX;
+    static {
+        machOnlyNameRegex(RISCV_VFNMSUB_MASKED, "vfnmsub_masked");
+    }
+
     public static final String VFNMAD_MASKED = PREFIX + "VFNMAD_MASKED" + POSTFIX;
     static {
         machOnlyNameRegex(VFNMAD_MASKED, "vfnmad_masked");
     }
 
+    public static final String RISCV_VFNMADD_MASKED = PREFIX + "RISCV_VFNMADD_MASKED" + POSTFIX;
+    static {
+        machOnlyNameRegex(RISCV_VFNMADD_MASKED, "vfnmadd_masked");
+    }
+
     public static final String VFNMSB_MASKED = PREFIX + "VFNMSB_MASKED" + POSTFIX;
     static {
         machOnlyNameRegex(VFNMSB_MASKED, "vfnmsb_masked");
     }
 
+    public static final String RISCV_VFMSUB_MASKED = PREFIX + "RISCV_VFMSUB_MASKED" + POSTFIX;
+    static {
+        machOnlyNameRegex(RISCV_VFMSUB_MASKED, "vfmsub_masked");
+    }
+
     public static final String VFMAD_MASKED = PREFIX + "VFMAD_MASKED" + POSTFIX;
     static {
         machOnlyNameRegex(VFMAD_MASKED, "vfmad_masked");
     }
 
+    public static final String RISCV_VFMADD_MASKED = PREFIX + "RISCV_VFMADD_MASKED" + POSTFIX;
+    static {
+        machOnlyNameRegex(RISCV_VFMADD_MASKED, "vfmadd_masked");
+    }
+
     public static final String VMASK_AND_NOT_L = PREFIX + "VMASK_AND_NOT_L" + POSTFIX;
     static {
         machOnlyNameRegex(VMASK_AND_NOT_L, "vmask_and_notL");
diff --git a/test/hotspot/jtreg/compiler/vectorapi/VectorFusedMultiplyAddSubTest.java b/test/hotspot/jtreg/compiler/vectorapi/VectorFusedMultiplyAddSubTest.java
index cc65fa69e5d..ff0d5b65667 100644
--- a/test/hotspot/jtreg/compiler/vectorapi/VectorFusedMultiplyAddSubTest.java
+++ b/test/hotspot/jtreg/compiler/vectorapi/VectorFusedMultiplyAddSubTest.java
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2022, 2023, Arm Limited. All rights reserved.
+ * Copyright (c) 2025, Rivos Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,6 +39,7 @@ import jdk.incubator.vector.VectorOperators;
 import jdk.incubator.vector.VectorSpecies;
 
 import jdk.test.lib.Asserts;
+import jdk.test.lib.Platform;
 import jdk.test.lib.Utils;
 
 /**
@@ -45,8 +47,8 @@ import jdk.test.lib.Utils;
  * @bug 8282431
  * @key randomness
  * @library /test/lib /
- * @requires vm.cpu.features ~= ".*sve.*"
- * @summary AArch64: Add optimized rules for masked vector multiply-add/sub for SVE
+ * @requires vm.cpu.features ~= ".*sve.*" | vm.cpu.features ~= ".*rvv.*"
+ * @summary Add optimized rules for masked vector multiply-add/sub for SVE and RVV
  * @modules jdk.incubator.vector
  *
  * @run driver compiler.vectorapi.VectorFusedMultiplyAddSubTest
@@ -244,7 +246,7 @@ public class VectorFusedMultiplyAddSubTest {
     }
 
     @Test
-    @IR(counts = { IRNode.VMLA_MASKED, ">= 1" })
+    @IR(applyIfPlatformOr = {"aarch64", "true", "riscv64", "true"}, counts = { IRNode.VMLA_MASKED, ">= 1" })
     public static void testByteMultiplyAddMasked() {
         VectorMask<Byte> mask = VectorMask.fromArray(B_SPECIES, m, 0);
         for (int i = 0; i < LENGTH; i += B_SPECIES.length()) {
@@ -257,7 +259,7 @@ public class VectorFusedMultiplyAddSubTest {
     }
 
     @Test
-    @IR(counts = { IRNode.VMLS_MASKED, ">= 1" })
+    @IR(applyIfPlatformOr = {"aarch64", "true", "riscv64", "true"}, counts = { IRNode.VMLS_MASKED, ">= 1" })
     public static void testByteMultiplySubMasked() {
         VectorMask<Byte> mask = VectorMask.fromArray(B_SPECIES, m, 0);
         for (int i = 0; i < LENGTH; i += B_SPECIES.length()) {
@@ -270,7 +272,7 @@ public class VectorFusedMultiplyAddSubTest {
     }
 
     @Test
-    @IR(counts = { IRNode.VMLA_MASKED, ">= 1" })
+    @IR(applyIfPlatformOr = {"aarch64", "true", "riscv64", "true"}, counts = { IRNode.VMLA_MASKED, ">= 1" })
     public static void testShortMultiplyAddMasked() {
         VectorMask<Short> mask = VectorMask.fromArray(S_SPECIES, m, 0);
         for (int i = 0; i < LENGTH; i += S_SPECIES.length()) {
@@ -283,7 +285,7 @@ public class VectorFusedMultiplyAddSubTest {
     }
 
     @Test
-    @IR(counts = { IRNode.VMLS_MASKED, ">= 1" })
+    @IR(applyIfPlatformOr = {"aarch64", "true", "riscv64", "true"}, counts = { IRNode.VMLS_MASKED, ">= 1" })
     public static void testShortMultiplySubMasked() {
         VectorMask<Short> mask = VectorMask.fromArray(S_SPECIES, m, 0);
         for (int i = 0; i < LENGTH; i += S_SPECIES.length()) {
@@ -296,7 +298,7 @@ public class VectorFusedMultiplyAddSubTest {
     }
 
     @Test
-    @IR(counts = { IRNode.VMLA_MASKED, ">= 1" })
+    @IR(applyIfPlatformOr = {"aarch64", "true", "riscv64", "true"}, counts = { IRNode.VMLA_MASKED, ">= 1" })
     public static void testIntMultiplyAddMasked() {
         VectorMask<Integer> mask = VectorMask.fromArray(I_SPECIES, m, 0);
         for (int i = 0; i < LENGTH; i += I_SPECIES.length()) {
@@ -309,7 +311,7 @@ public class VectorFusedMultiplyAddSubTest {
     }
 
     @Test
-    @IR(counts = { IRNode.VMLS_MASKED, ">= 1" })
+    @IR(applyIfPlatformOr = {"aarch64", "true", "riscv64", "true"}, counts = { IRNode.VMLS_MASKED, ">= 1" })
     public static void testIntMultiplySubMasked() {
         VectorMask<Integer> mask = VectorMask.fromArray(I_SPECIES, m, 0);
         for (int i = 0; i < LENGTH; i += I_SPECIES.length()) {
@@ -322,7 +324,7 @@ public class VectorFusedMultiplyAddSubTest {
     }
 
     @Test
-    @IR(counts = { IRNode.VMLA_MASKED, ">= 1" })
+    @IR(applyIfPlatformOr = {"aarch64", "true", "riscv64", "true"}, counts = { IRNode.VMLA_MASKED, ">= 1" })
     public static void testLongMultiplyAddMasked() {
         VectorMask<Long> mask = VectorMask.fromArray(L_SPECIES, m, 0);
         for (int i = 0; i < LENGTH; i += L_SPECIES.length()) {
@@ -335,7 +337,7 @@ public class VectorFusedMultiplyAddSubTest {
     }
 
     @Test
-    @IR(counts = { IRNode.VMLS_MASKED, ">= 1" })
+    @IR(applyIfPlatformOr = {"aarch64", "true", "riscv64", "true"}, counts = { IRNode.VMLS_MASKED, ">= 1" })
     public static void testLongMultiplySubMasked() {
         VectorMask<Long> mask = VectorMask.fromArray(L_SPECIES, m, 0);
         for (int i = 0; i < LENGTH; i += L_SPECIES.length()) {
@@ -348,7 +350,8 @@ public class VectorFusedMultiplyAddSubTest {
     }
 
     @Test
-    @IR(counts = { IRNode.VFMSB_MASKED, ">= 1" })
+    @IR(applyIfPlatform = {"aarch64", "true"}, counts = { IRNode.VFMSB_MASKED, ">= 1" })
+    @IR(applyIfPlatform = {"riscv64", "true"}, counts = { IRNode.RISCV_VFNMSUB_MASKED, ">= 1" })
     public static void testFloatMultiplySubMasked() {
         VectorMask<Float> mask = VectorMask.fromArray(F_SPECIES, m, 0);
         for (int i = 0; i < LENGTH; i += F_SPECIES.length()) {
@@ -361,7 +364,8 @@ public class VectorFusedMultiplyAddSubTest {
     }
 
     @Test
-    @IR(counts = { IRNode.VFMAD_MASKED, ">= 1" })
+    @IR(applyIfPlatform = {"aarch64", "true"}, counts = { IRNode.VFMAD_MASKED, ">= 1" })
+    @IR(applyIfPlatform = {"riscv64", "true"}, counts = { IRNode.RISCV_VFMADD_MASKED, ">= 1" })
     public static void testFloatMultiplyNegAMasked() {
         VectorMask<Float> mask = VectorMask.fromArray(F_SPECIES, m, 0);
         for (int i = 0; i < LENGTH; i += F_SPECIES.length()) {
@@ -374,7 +378,8 @@ public class VectorFusedMultiplyAddSubTest {
     }
 
     @Test
-    @IR(counts = { IRNode.VFNMAD_MASKED, ">= 1" })
+    @IR(applyIfPlatform = {"aarch64", "true"}, counts = { IRNode.VFNMAD_MASKED, ">= 1" })
+    @IR(applyIfPlatform = {"riscv64", "true"}, counts = { IRNode.RISCV_VFNMADD_MASKED, ">= 1" })
     public static void testFloatNegatedMultiplyAddMasked() {
         VectorMask<Float> mask = VectorMask.fromArray(F_SPECIES, m, 0);
         for (int i = 0; i < LENGTH; i += F_SPECIES.length()) {
@@ -387,7 +392,8 @@ public class VectorFusedMultiplyAddSubTest {
     }
 
     @Test
-    @IR(counts = { IRNode.VFNMSB_MASKED, ">= 1" })
+    @IR(applyIfPlatform = {"aarch64", "true"}, counts = { IRNode.VFNMSB_MASKED, ">= 1" })
+    @IR(applyIfPlatform = {"riscv64", "true"}, counts = { IRNode.RISCV_VFMSUB_MASKED, ">= 1" })
     public static void testFloatNegatedMultiplyNegAMasked() {
         VectorMask<Float> mask = VectorMask.fromArray(F_SPECIES, m, 0);
         for (int i = 0; i < LENGTH; i += F_SPECIES.length()) {
@@ -400,7 +406,8 @@ public class VectorFusedMultiplyAddSubTest {
     }
 
     @Test
-    @IR(counts = { IRNode.VFNMSB_MASKED, ">= 1" })
+    @IR(applyIfPlatform = {"aarch64", "true"}, counts = { IRNode.VFNMSB_MASKED, ">= 1" })
+    @IR(applyIfPlatform = {"riscv64", "true"}, counts = { IRNode.RISCV_VFMSUB_MASKED, ">= 1" })
     public static void testFloatNegatedMultiplySubMasked() {
         VectorMask<Float> mask = VectorMask.fromArray(F_SPECIES, m, 0);
         for (int i = 0; i < LENGTH; i += F_SPECIES.length()) {
@@ -413,7 +420,8 @@ public class VectorFusedMultiplyAddSubTest {
     }
 
     @Test
-    @IR(counts = { IRNode.VFMSB_MASKED, ">= 1" })
+    @IR(applyIfPlatform = {"aarch64", "true"}, counts = { IRNode.VFMSB_MASKED, ">= 1" })
+    @IR(applyIfPlatform = {"riscv64", "true"}, counts = { IRNode.RISCV_VFNMSUB_MASKED, ">= 1" })
     public static void testDoubleMultiplySubMasked() {
         VectorMask<Double> mask = VectorMask.fromArray(D_SPECIES, m, 0);
         for (int i = 0; i < LENGTH; i += D_SPECIES.length()) {
@@ -426,7 +434,8 @@ public class VectorFusedMultiplyAddSubTest {
     }
 
     @Test
-    @IR(counts = { IRNode.VFMAD_MASKED, ">= 1" })
+    @IR(applyIfPlatform = {"aarch64", "true"}, counts = { IRNode.VFMAD_MASKED, ">= 1" })
+    @IR(applyIfPlatform = {"riscv64", "true"}, counts = { IRNode.RISCV_VFMADD_MASKED, ">= 1" })
     public static void testDoubleMultiplyNegAMasked() {
         VectorMask<Double> mask = VectorMask.fromArray(D_SPECIES, m, 0);
         for (int i = 0; i < LENGTH; i += D_SPECIES.length()) {
@@ -439,7 +448,8 @@ public class VectorFusedMultiplyAddSubTest {
     }
 
     @Test
-    @IR(counts = { IRNode.VFNMAD_MASKED, ">= 1" })
+    @IR(applyIfPlatform = {"aarch64", "true"}, counts = { IRNode.VFNMAD_MASKED, ">= 1" })
+    @IR(applyIfPlatform = {"riscv64", "true"}, counts = { IRNode.RISCV_VFNMADD_MASKED, ">= 1" })
     public static void testDoubleNegatedMultiplyAddMasked() {
         VectorMask<Double> mask = VectorMask.fromArray(D_SPECIES, m, 0);
         for (int i = 0; i < LENGTH; i += D_SPECIES.length()) {
@@ -452,7 +462,8 @@ public class VectorFusedMultiplyAddSubTest {
     }
 
     @Test
-    @IR(counts = { IRNode.VFNMSB_MASKED, ">= 1" })
+    @IR(applyIfPlatform = {"aarch64", "true"}, counts = { IRNode.VFNMSB_MASKED, ">= 1" })
+    @IR(applyIfPlatform = {"riscv64", "true"}, counts = { IRNode.RISCV_VFMSUB_MASKED, ">= 1" })
     public static void testDoubleNegatedMultiplyNegAMasked() {
         VectorMask<Double> mask = VectorMask.fromArray(D_SPECIES, m, 0);
         for (int i = 0; i < LENGTH; i += D_SPECIES.length()) {
@@ -465,7 +476,8 @@ public class VectorFusedMultiplyAddSubTest {
     }
 
    @Test
-    @IR(counts = { IRNode.VFNMSB_MASKED, ">= 1" })
+    @IR(applyIfPlatform = {"aarch64", "true"}, counts = { IRNode.VFNMSB_MASKED, ">= 1" })
+    @IR(applyIfPlatform = {"riscv64", "true"}, counts = { IRNode.RISCV_VFMSUB_MASKED, ">= 1" })
     public static void testDoubleNegatedMultiplySubMasked() {
         VectorMask<Double> mask = VectorMask.fromArray(D_SPECIES, m, 0);
         for (int i = 0; i < LENGTH; i += D_SPECIES.length()) {
@@ -479,8 +491,10 @@ public class VectorFusedMultiplyAddSubTest {
 
     public static void main(String[] args) {
         TestFramework testFramework = new TestFramework();
-        testFramework.setDefaultWarmup(5000)
-                     .addFlags("--add-modules=jdk.incubator.vector", "-XX:UseSVE=1")
-                     .start();
+        testFramework.setDefaultWarmup(5000).addFlags("--add-modules=jdk.incubator.vector");
+        if (Platform.isAArch64()) {
+            testFramework.addFlags("-XX:UseSVE=1");
+        }
+        testFramework.start();
     }
 }
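
For readers unfamiliar with the pattern the gated tests exercise, here is a minimal self-contained sketch (an illustrative addition, not part of the patch; the class and method names are made up) of the masked lanewise FMA shape that C2 turns into the masked multiply-add/sub nodes checked above, when run with --add-modules=jdk.incubator.vector on hardware with masked vector support:

import jdk.incubator.vector.FloatVector;
import jdk.incubator.vector.VectorMask;
import jdk.incubator.vector.VectorOperators;
import jdk.incubator.vector.VectorSpecies;

// Hypothetical sketch, not part of the patch.
public class MaskedFmaSketch {
    static final VectorSpecies<Float> F_SPECIES = FloatVector.SPECIES_PREFERRED;

    // Per lane: r[i] = a[i] * b[i] + c[i] where m[i] is true; r[i] = a[i]
    // where m[i] is false (masked lanewise ops keep the receiver's lane
    // value in unset lanes). Assumes the array lengths are a multiple of
    // the species length.
    static void mulAddMasked(float[] a, float[] b, float[] c, boolean[] m, float[] r) {
        for (int i = 0; i < a.length; i += F_SPECIES.length()) {
            VectorMask<Float> mask = VectorMask.fromArray(F_SPECIES, m, i);
            FloatVector av = FloatVector.fromArray(F_SPECIES, a, i);
            FloatVector bv = FloatVector.fromArray(F_SPECIES, b, i);
            FloatVector cv = FloatVector.fromArray(F_SPECIES, c, i);
            av.lanewise(VectorOperators.FMA, bv, cv, mask).intoArray(r, i);
        }
    }
}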