8383881: Cleanup cpu feature enums and test functions for features that should always be available on x86-64 platforms

Reviewed-by: kvn, vlivanov, adinn, shade
This commit is contained in:
Ashutosh Mehra 2026-05-14 01:27:25 +00:00
parent f59ee5acfc
commit 322dedba58
14 changed files with 45 additions and 194 deletions

View File

@ -15091,7 +15091,6 @@ void Assembler::cdqe() {
}
void Assembler::clflush(Address adr) {
assert(VM_Version::supports_clflush(), "should do");
prefix(adr, true /* is_map1 */);
emit_int8((unsigned char)0xAE);
emit_operand(rdi, adr, 0);

View File

@ -99,7 +99,7 @@ define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong);
\
product(int, UseSSE, 4, \
"Highest supported SSE instructions set on x86/x64") \
range(0, 4) \
range(2, 4) \
\
product(int, UseAVX, 3, \
"Highest supported AVX instructions set on x86/x64") \

View File

@ -5355,12 +5355,10 @@ void MacroAssembler::print_CPU_state() {
void MacroAssembler::restore_cpu_control_state_after_jni(Register rscratch) {
// Either restore the MXCSR register after returning from the JNI Call
// or verify that it wasn't changed (with -Xcheck:jni flag).
if (VM_Version::supports_sse()) {
if (RestoreMXCSROnJNICalls) {
ldmxcsr(ExternalAddress(StubRoutines::x86::addr_mxcsr_std()), rscratch);
} else if (CheckJNICalls) {
call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
}
if (RestoreMXCSROnJNICalls) {
ldmxcsr(ExternalAddress(StubRoutines::x86::addr_mxcsr_std()), rscratch);
} else if (CheckJNICalls) {
call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
}
// Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty.
vzeroupper();
@ -9811,7 +9809,6 @@ void MacroAssembler::convert_d2l(Register dst, XMMRegister src) {
void MacroAssembler::cache_wb(Address line)
{
// 64 bit cpus always support clflush
assert(VM_Version::supports_clflush(), "clflush should be available");
bool optimized = VM_Version::supports_clflushopt();
bool no_evict = VM_Version::supports_clwb();
@ -9833,7 +9830,6 @@ void MacroAssembler::cache_wb(Address line)
void MacroAssembler::cache_wbsync(bool is_pre)
{
assert(VM_Version::supports_clflush(), "clflush should be available");
bool optimized = VM_Version::supports_clflushopt();
bool no_evict = VM_Version::supports_clwb();

View File

@ -33,6 +33,7 @@
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/stubCodeGenerator.hpp"
@ -80,20 +81,6 @@ static detect_virt_stub_t detect_virt_stub = nullptr;
static clear_apx_test_state_t clear_apx_test_state_stub = nullptr;
static getCPUIDBrandString_stub_t getCPUIDBrandString_stub = nullptr;
bool VM_Version::supports_clflush() {
// clflush should always be available on x86_64
// if not we are in real trouble because we rely on it
// to flush the code cache.
// Unfortunately, Assembler::clflush is currently called as part
// of generation of the code cache flush routine. This happens
// under Universe::init before the processor features are set
// up. Assembler::flush calls this routine to check that clflush
// is allowed. So, we give the caller a free pass if Universe init
// is still in progress.
assert ((!Universe::is_fully_initialized() || _features.supports_feature(CPU_FLUSH)), "clflush should be available");
return true;
}
#define CPUID_STANDARD_FN 0x0
#define CPUID_STANDARD_FN_1 0x1
#define CPUID_STANDARD_FN_4 0x4
@ -511,7 +498,6 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
// and check upper YMM/ZMM bits after it.
//
int saved_useavx = UseAVX;
int saved_usesse = UseSSE;
// If UseAVX is uninitialized or is set by the user to include EVEX
if (use_evex) {
@ -542,7 +528,6 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
// EVEX setup: run in lowest evex mode
VM_Version::set_evex_cpuFeatures(); // Enable temporary to pass asserts
UseAVX = 3;
UseSSE = 2;
#ifdef _WINDOWS
// xmm5-xmm15 are not preserved by caller on windows
// https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
@ -569,7 +554,6 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
// AVX setup
VM_Version::set_avx_cpuFeatures(); // Enable temporary to pass asserts
UseAVX = 1;
UseSSE = 2;
#ifdef _WINDOWS
__ subptr(rsp, 32);
__ vmovdqu(Address(rsp, 0), xmm7);
@ -623,7 +607,6 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
// EVEX check: run in lowest evex mode
VM_Version::set_evex_cpuFeatures(); // Enable temporary to pass asserts
UseAVX = 3;
UseSSE = 2;
__ lea(rsi, Address(rbp, in_bytes(VM_Version::zmm_save_offset())));
__ evmovdqul(Address(rsi, 0), xmm0, Assembler::AVX_512bit);
__ evmovdqul(Address(rsi, 64), xmm7, Assembler::AVX_512bit);
@ -641,7 +624,6 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
generate_vzeroupper(wrapup);
VM_Version::clean_cpuFeatures();
UseAVX = saved_useavx;
UseSSE = saved_usesse;
__ jmp(wrapup);
}
@ -649,7 +631,6 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
// AVX check
VM_Version::set_avx_cpuFeatures(); // Enable temporary to pass asserts
UseAVX = 1;
UseSSE = 2;
__ lea(rsi, Address(rbp, in_bytes(VM_Version::ymm_save_offset())));
__ vmovdqu(Address(rsi, 0), xmm0);
__ vmovdqu(Address(rsi, 32), xmm7);
@ -668,7 +649,6 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
generate_vzeroupper(wrapup);
VM_Version::clean_cpuFeatures();
UseAVX = saved_useavx;
UseSSE = saved_usesse;
__ bind(wrapup);
__ popf();
@ -905,25 +885,6 @@ void VM_Version::get_processor_features() {
_supports_atomic_getset8 = true;
_supports_atomic_getadd8 = true;
// OS should support SSE for x64 and hardware should support at least SSE2.
if (!VM_Version::supports_sse2()) {
vm_exit_during_initialization("Unknown x64 processor: SSE2 not supported");
}
// in 64 bit the use of SSE2 is the minimum
if (UseSSE < 2) UseSSE = 2;
// flush_icache_stub have to be generated first.
// That is why Icache line size is hard coded in ICache class,
// see icache_x86.hpp. It is also the reason why we can't use
// clflush instruction in 32-bit VM since it could be running
// on CPU which does not support it.
//
// The only thing we can do is to verify that flushed
// ICache::line_size has correct value.
guarantee(_cpuid_info.std_cpuid1_edx.bits.clflush != 0, "clflush is not supported");
// clflush_size is size in quadwords (8 bytes).
guarantee(_cpuid_info.std_cpuid1_ebx.bits.clflush_size == 8, "such clflush size is not supported");
// assigning this field effectively enables Unsafe.writebackMemory()
// by initing UnsafeConstant.DATA_CACHE_LINE_FLUSH_SIZE to non-zero
// that is only implemented on x86_64 and only if the OS plays ball
@ -952,12 +913,6 @@ void VM_Version::get_processor_features() {
clear_feature(CPU_SSE4A);
}
if (UseSSE < 2)
clear_feature(CPU_SSE2);
if (UseSSE < 1)
clear_feature(CPU_SSE);
// ZX cpus specific settings
if (is_zx() && FLAG_IS_DEFAULT(UseAVX)) {
if (cpu_family() == 7) {
@ -972,21 +927,13 @@ void VM_Version::get_processor_features() {
}
// UseSSE is set to the smaller of what hardware supports and what
// the command line requires. I.e., you cannot set UseSSE to 2 on
// older Pentiums which do not support it.
int use_sse_limit = 0;
if (UseSSE > 0) {
if (UseSSE > 3 && supports_sse4_1()) {
use_sse_limit = 4;
} else if (UseSSE > 2 && supports_sse3()) {
use_sse_limit = 3;
} else if (UseSSE > 1 && supports_sse2()) {
use_sse_limit = 2;
} else if (UseSSE > 0 && supports_sse()) {
use_sse_limit = 1;
} else {
use_sse_limit = 0;
}
// the command line requires. i.e., you cannot set UseSSE to 4 on
// older systems which do not support it.
int use_sse_limit = 2;
if (UseSSE > 3 && supports_sse4_1()) {
use_sse_limit = 4;
} else if (UseSSE > 2 && supports_sse3()) {
use_sse_limit = 3;
}
if (FLAG_IS_DEFAULT(UseSSE)) {
FLAG_SET_DEFAULT(UseSSE, use_sse_limit);
@ -1150,7 +1097,6 @@ void VM_Version::get_processor_features() {
_has_intel_jcc_erratum = IntelJccErratumMitigation;
}
assert(supports_clflush(), "Always present");
if (X86ICacheSync == -1) {
// Auto-detect, choosing the best performant one that still flushes
// the cache. We could switch to CPUID/SERIALIZE ("4"/"5") going forward.
@ -1535,7 +1481,7 @@ void VM_Version::get_processor_features() {
}
if (is_amd_family()) { // AMD cpus specific settings
if (supports_sse2() && FLAG_IS_DEFAULT(UseAddressNop)) {
if (FLAG_IS_DEFAULT(UseAddressNop)) {
// Use it on new AMD cpus starting from Opteron.
UseAddressNop = true;
}
@ -1578,7 +1524,7 @@ void VM_Version::get_processor_features() {
if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
}
if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
FLAG_SET_DEFAULT(UseUnalignedLoadStores, true);
}
}
@ -1594,7 +1540,7 @@ void VM_Version::get_processor_features() {
if (cpu_family() >= 0x17) {
// On family >=17h processors use XMM and UnalignedLoadStores
// for Array Copy
if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
FLAG_SET_DEFAULT(UseUnalignedLoadStores, true);
}
#ifdef COMPILER2
@ -1796,8 +1742,6 @@ void VM_Version::get_processor_features() {
if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
if (AllocatePrefetchInstr == 3 && !supports_3dnow_prefetch()) {
FLAG_SET_DEFAULT(AllocatePrefetchInstr, 0);
} else if (!supports_sse() && supports_3dnow_prefetch()) {
FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
}
}
@ -2889,29 +2833,23 @@ int64_t VM_Version::maximum_qualified_cpu_frequency(void) {
VM_Version::VM_Features VM_Version::CpuidInfo::feature_flags() const {
VM_Features vm_features;
// check the features that must be present
guarantee(std_cpuid1_edx.bits.sse2 != 0, "sse2 is not supported");
guarantee(std_cpuid1_edx.bits.clflush != 0, "clflush is not supported");
// clflush_size is size in quadwords (8 bytes).
guarantee(std_cpuid1_ebx.bits.clflush_size == ICache::line_size/8, "clflush size is not supported");
if (std_cpuid1_edx.bits.cmpxchg8 != 0)
vm_features.set_feature(CPU_CX8);
if (std_cpuid1_edx.bits.cmov != 0)
vm_features.set_feature(CPU_CMOV);
if (std_cpuid1_edx.bits.clflush != 0)
vm_features.set_feature(CPU_FLUSH);
// clflush should always be available on x86_64
// if not we are in real trouble because we rely on it
// to flush the code cache.
assert (vm_features.supports_feature(CPU_FLUSH), "clflush should be available");
if (std_cpuid1_edx.bits.fxsr != 0 || (is_amd_family() &&
ext_cpuid1_edx.bits.fxsr != 0))
vm_features.set_feature(CPU_FXSR);
// HT flag is set for multi-core processors also.
if (threads_per_core() > 1)
vm_features.set_feature(CPU_HT);
if (std_cpuid1_edx.bits.mmx != 0 || (is_amd_family() &&
ext_cpuid1_edx.bits.mmx != 0))
vm_features.set_feature(CPU_MMX);
if (std_cpuid1_edx.bits.sse != 0)
vm_features.set_feature(CPU_SSE);
if (std_cpuid1_edx.bits.sse2 != 0)
vm_features.set_feature(CPU_SSE2);
if (std_cpuid1_ecx.bits.sse3 != 0)
vm_features.set_feature(CPU_SSE3);
if (std_cpuid1_ecx.bits.ssse3 != 0)
@ -3243,17 +3181,9 @@ int VM_Version::allocate_prefetch_distance(bool use_watermark_prefetch) {
// It will be used only when AllocatePrefetchStyle > 0
if (is_amd_family()) { // AMD | Hygon
if (supports_sse2()) {
return 256; // Opteron
} else {
return 128; // Athlon
}
return 256; // Opteron
} else if (is_zx()) {
if (supports_sse2()) {
return 256;
} else {
return 128;
}
return 256;
} else { // Intel
if (supports_sse3() && is_intel_server_family()) {
if (is_intel_modern_cpu()) { // Nehalem based cpus
@ -3262,14 +3192,10 @@ int VM_Version::allocate_prefetch_distance(bool use_watermark_prefetch) {
return 384;
}
}
if (supports_sse2()) {
if (is_intel_server_family()) {
return 256; // Pentium M, Core, Core2
} else {
return 512; // Pentium 4
}
if (is_intel_server_family()) {
return 256; // Pentium M, Core, Core2
} else {
return 128; // Pentium 3 (and all other old CPUs)
return 512; // Pentium 4
}
}
}

View File

@ -381,58 +381,43 @@ protected:
decl(CMOV, cmov ) \
decl(FXSR, fxsr ) \
decl(HT, ht ) \
\
decl(MMX, mmx ) \
decl(3DNOW_PREFETCH, 3dnowpref ) /* Processor supports 3dnow prefetch and prefetchw instructions */ \
/* may not necessarily support other 3dnow instructions */ \
decl(SSE, sse ) \
decl(SSE2, sse2 ) \
\
decl(SSE3, sse3 ) /* SSE3 comes from cpuid 1 (ECX) */ \
decl(SSSE3, ssse3 ) \
decl(SSE4A, sse4a ) \
decl(SSE4_1, sse4.1 ) \
\
decl(SSE4_2, sse4.2 ) \
decl(POPCNT, popcnt ) \
decl(LZCNT, lzcnt ) \
decl(TSC, tsc ) \
\
decl(TSCINV_BIT, tscinvbit ) \
decl(TSCINV, tscinv ) \
decl(AVX, avx ) \
decl(AVX2, avx2 ) \
\
decl(AES, aes ) \
decl(ERMS, erms ) /* enhanced 'rep movsb/stosb' instructions */ \
decl(CLMUL, clmul ) /* carryless multiply for CRC */ \
decl(BMI1, bmi1 ) \
\
decl(BMI2, bmi2 ) \
decl(RTM, rtm ) /* Restricted Transactional Memory instructions */ \
decl(ADX, adx ) \
decl(AVX512F, avx512f ) /* AVX 512bit foundation instructions */ \
\
decl(AVX512DQ, avx512dq ) \
decl(AVX512PF, avx512pf ) \
decl(AVX512ER, avx512er ) \
decl(AVX512CD, avx512cd ) \
\
decl(AVX512BW, avx512bw ) /* Byte and word vector instructions */ \
decl(AVX512VL, avx512vl ) /* EVEX instructions with smaller vector length */ \
decl(SHA, sha ) /* SHA instructions */ \
decl(FMA, fma ) /* FMA instructions */ \
\
decl(VZEROUPPER, vzeroupper ) /* Vzeroupper instruction */ \
decl(AVX512_VPOPCNTDQ, avx512_vpopcntdq ) /* Vector popcount */ \
decl(AVX512_VPCLMULQDQ, avx512_vpclmulqdq ) /* Vector carryless multiplication */ \
decl(AVX512_VAES, avx512_vaes ) /* Vector AES instruction */ \
\
decl(AVX512_VNNI, avx512_vnni ) /* Vector Neural Network Instructions */ \
decl(FLUSH, clflush ) /* flush instruction */ \
decl(FLUSHOPT,            clflushopt        ) /* clflushopt instruction */ \
decl(CLWB, clwb ) /* clwb instruction */ \
\
decl(AVX512_VBMI2, avx512_vbmi2 ) /* VBMI2 shift left double instructions */ \
decl(AVX512_VBMI, avx512_vbmi ) /* Vector BMI instructions */ \
decl(HV, hv ) /* Hypervisor instructions */ \
@ -790,16 +775,12 @@ public:
VM_Version::clear_cpu_features();
}
static void set_avx_cpuFeatures() {
_features.set_feature(CPU_SSE);
_features.set_feature(CPU_SSE2);
_features.set_feature(CPU_AVX);
_features.set_feature(CPU_VZEROUPPER);
}
static void set_evex_cpuFeatures() {
_features.set_feature(CPU_AVX10_1);
_features.set_feature(CPU_AVX512F);
_features.set_feature(CPU_SSE);
_features.set_feature(CPU_SSE2);
_features.set_feature(CPU_VZEROUPPER);
}
static void set_apx_cpuFeatures() {
@ -869,9 +850,6 @@ public:
static bool supports_cmov() { return _features.supports_feature(CPU_CMOV); }
static bool supports_fxsr() { return _features.supports_feature(CPU_FXSR); }
static bool supports_ht() { return _features.supports_feature(CPU_HT); }
static bool supports_mmx() { return _features.supports_feature(CPU_MMX); }
static bool supports_sse() { return _features.supports_feature(CPU_SSE); }
static bool supports_sse2() { return _features.supports_feature(CPU_SSE2); }
static bool supports_sse3() { return _features.supports_feature(CPU_SSE3); }
static bool supports_ssse3() { return _features.supports_feature(CPU_SSSE3); }
static bool supports_sse4_1() { return _features.supports_feature(CPU_SSE4_1); }
@ -1010,10 +988,10 @@ public:
static int allocate_prefetch_distance(bool use_watermark_prefetch);
// SSE2 and later processors implement a 'pause' instruction
// that can be used for efficient implementation of
// the intrinsic for java.lang.Thread.onSpinWait()
static bool supports_on_spin_wait() { return supports_sse2(); }
// All currently supported processors support PAUSE instruction
// that can be used for efficient implementation of intrinsic for
// java.lang.Thread.onSpinWait().
static bool supports_on_spin_wait() { return true; }
// x86_64 supports fast class initialization checks
static bool supports_fast_class_init_checks() {
@ -1046,7 +1024,6 @@ public:
// pending in-cache changes.
//
// 64 bit cpus always support clflush which writes back and evicts
// on 32 bit cpus support is recorded via a feature flag
//
// clflushopt is optional and acts like clflush except it does
// not synchronize with other memory ops. it needs a preceding
@ -1057,8 +1034,6 @@ public:
// synchronize with other memory ops. so, it needs preceding
// and trailing StoreStore fences.
static bool supports_clflush(); // Can't inline due to header file conflict
// Note: CPU_FLUSHOPT and CPU_CLWB bits should always be zero for 32-bit
static bool supports_clflushopt() { return (_features.supports_feature(CPU_FLUSHOPT)); }
static bool supports_clwb() { return (_features.supports_feature(CPU_CLWB)); }

View File

@ -61,8 +61,10 @@ public class AMD64HotSpotJVMCIBackendFactory implements HotSpotJVMCIBackendFacto
long featureIndex = idx >>> featuresElementShiftCount;
return Unsafe.getUnsafe().getLong(featuresBitMapAddress + featureIndex * Long.BYTES);
}, renaming);
assert features.contains(AMD64.CPUFeature.SSE) : "minimum config for x64";
assert features.contains(AMD64.CPUFeature.SSE2) : "minimum config for x64";
// SSE and SSE2 are no longer reported as of JDK-8383881, but JVMCI compiler may
// still model instructions using these feature flags, so add them explicitly here.
features.add(AMD64.CPUFeature.SSE);
features.add(AMD64.CPUFeature.SSE2);
return features;
}

View File

@ -24,10 +24,10 @@
/**
* @test
* @bug 8358592
* @summary Regression test for -XX:+UseSSE42Intrinsics -XX:UseSSE=1 crash
* @summary Regression test for -XX:+UseSSE42Intrinsics -XX:UseSSE=2 crash
* @requires os.arch=="amd64" | os.arch=="x86_64"
* @requires vm.debug
* @run main/othervm -XX:+UseSSE42Intrinsics -XX:UseSSE=1 compiler.arguments.TestUseSSE42IntrinsicsWithLowLevelSSE
* @run main/othervm -XX:+UseSSE42Intrinsics -XX:UseSSE=2 compiler.arguments.TestUseSSE42IntrinsicsWithLowLevelSSE
*/
package compiler.arguments;
@ -36,4 +36,4 @@ public class TestUseSSE42IntrinsicsWithLowLevelSSE {
public static void main(String[] args) {
System.out.println("passed");
}
}
}

View File

@ -126,16 +126,6 @@ public class TestStressArrayCopy {
configs.add(List.of("-XX:UseAVX=0", "-XX:UseSSE=2"));
}
// x86_64 always has UseSSE >= 2. These lower configurations only
// make sense for x86_32.
if (Platform.isX86()) {
if (containsFuzzy(cpuFeatures, "sse")) {
configs.add(List.of("-XX:UseAVX=0", "-XX:UseSSE=1"));
}
configs.add(List.of("-XX:UseAVX=0", "-XX:UseSSE=0"));
}
// Alternate configs with other flags
if (Platform.isX64()) {
configs = alternate(configs, "UseCompressedOops");

View File

@ -26,7 +26,7 @@
* @bug 6579789
* @summary Internal error "c1_LinearScan.cpp:1429 Error: assert(false,"")" in debuggee with fastdebug VM
*
* @run main/othervm -Xcomp -XX:+IgnoreUnrecognizedVMOptions -XX:UseSSE=0
* @run main/othervm -Xcomp -XX:+IgnoreUnrecognizedVMOptions -XX:UseSSE=2
* -XX:CompileCommand=compileonly,compiler.c1.Test6579789::bug
* compiler.c1.Test6579789
*/

View File

@ -26,7 +26,7 @@
* @bug 6855215
* @summary Calculation error (NaN) after about 1500 calculations
*
* @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:UseSSE=0 compiler.c1.Test6855215
* @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:UseSSE=2 compiler.c1.Test6855215
*/
package compiler.c1;

View File

@ -29,8 +29,7 @@ import compiler.lib.ir_framework.*;
* @test
* @bug 8279258
* @summary Auto-vectorization enhancement for two-dimensional array operations
* @requires (os.arch != "x86" & os.arch != "i386" & os.arch != "ppc64" & os.arch != "ppc64le" & os.arch != "riscv64")
* | ((os.arch == "x86" | os.arch == "i386") & (vm.opt.UseSSE == "null" | vm.opt.UseSSE >= 2))
* @requires (os.arch != "ppc64" & os.arch != "ppc64le" & os.arch != "riscv64")
* | ((os.arch == "ppc64" | os.arch == "ppc64le") & vm.cpu.features ~= ".*darn.*")
* | (os.arch == "riscv64" & vm.cpu.features ~= ".*rvv.*")
* @library /test/lib /

View File

@ -90,14 +90,6 @@ public class CPUFeaturesClearTest {
outputAnalyzer.shouldNotMatch("[os,cpu] CPU: .* sse3.*");
outputAnalyzer.shouldNotMatch("[os,cpu] CPU: .* ssse3.*");
}
if (isCpuFeatureSupported("sse2")) {
outputAnalyzer = ProcessTools.executeTestJava(generateArgs(prepareNumericFlag("UseSSE", 1)));
outputAnalyzer.shouldNotMatch("[os,cpu] CPU: .* sse2.*");
}
if (isCpuFeatureSupported("sse")) {
outputAnalyzer = ProcessTools.executeTestJava(generateArgs(prepareNumericFlag("UseSSE", 0)));
outputAnalyzer.shouldNotMatch("[os,cpu] CPU: .* sse.*");
}
if (isCpuFeatureSupported("avx512f")) {
outputAnalyzer = ProcessTools.executeTestJava(generateArgs(prepareNumericFlag("UseAVX", 2)));
outputAnalyzer.shouldNotMatch("[os,cpu] CPU: .* avx512.*");

View File

@ -73,35 +73,7 @@ public class NaNTest {
}
public static void main(String args[]) {
// Some platforms are known to strip signaling NaNs.
// The block below can be used to except them.
boolean expectStableFloats = true;
boolean expectStableDoubles = true;
// On x86_32 without relevant SSE-enabled stubs, we are entering
// native methods that use FPU instructions, and those strip the
// signaling NaNs.
if (Platform.isX86()) {
int sse = WHITE_BOX.getIntVMFlag("UseSSE").intValue();
boolean stubsPresent = WHITE_BOX.getBooleanVMFlag("InlineIntrinsics");
expectStableFloats = (sse >= 1) && stubsPresent;
expectStableDoubles = (sse >= 2) && stubsPresent;
}
if (expectStableFloats) {
testFloat();
} else {
System.out.println("Stable floats cannot be expected, skipping");
}
if (expectStableDoubles) {
testDouble();
} else {
System.out.println("Stable doubles cannot be expected, skipping");
}
if (!expectStableFloats && !expectStableDoubles) {
throw new SkippedException("No tests were run.");
}
testFloat();
testDouble();
}
}

View File

@ -107,7 +107,7 @@ public class ClhsdbLongConstant {
// Expected value obtained from the CPU_SHA definition in vm_version_x86.hpp
checkLongValue("VM_Version::CPU_SHA ",
longConstantOutput,
34L);
31L);
}
}