Merge branch 'master' into opt-simploop-8346177

This commit is contained in:
katkerem 2026-04-01 17:56:07 +01:00
commit 22672c078b
92 changed files with 3442 additions and 863 deletions

View File

@ -29,21 +29,21 @@ GTEST_VERSION=1.14.0
JTREG_VERSION=8.2.1+1
LINUX_X64_BOOT_JDK_EXT=tar.gz
LINUX_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk25/bd75d5f9689641da8e1daabeccb5528b/36/GPL/openjdk-25_linux-x64_bin.tar.gz
LINUX_X64_BOOT_JDK_SHA256=59cdcaf255add4721de38eb411d4ecfe779356b61fb671aee63c7dec78054c2b
LINUX_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk26/c3cc523845074aa0af4f5e1e1ed4151d/35/GPL/openjdk-26_linux-x64_bin.tar.gz
LINUX_X64_BOOT_JDK_SHA256=83c78367f8c81257beef72aca4bbbf8e6dac8ca2b3a4546a85879a09e6e4e128
ALPINE_LINUX_X64_BOOT_JDK_EXT=tar.gz
ALPINE_LINUX_X64_BOOT_JDK_URL=https://github.com/adoptium/temurin25-binaries/releases/download/jdk-25%2B36/OpenJDK25U-jdk_x64_alpine-linux_hotspot_25_36.tar.gz
ALPINE_LINUX_X64_BOOT_JDK_SHA256=637e47474d411ed86134f413af7d5fef4180ddb0bf556347b7e74a88cf8904c8
ALPINE_LINUX_X64_BOOT_JDK_URL=https://github.com/adoptium/temurin26-binaries/releases/download/jdk-26%2B35/OpenJDK26U-jdk_x64_alpine-linux_hotspot_26_35.tar.gz
ALPINE_LINUX_X64_BOOT_JDK_SHA256=c105e581fdccb4e7120d889235d1ad8d5b2bed0af4972bc881e0a8ba687c94a4
MACOS_AARCH64_BOOT_JDK_EXT=tar.gz
MACOS_AARCH64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk25/bd75d5f9689641da8e1daabeccb5528b/36/GPL/openjdk-25_macos-aarch64_bin.tar.gz
MACOS_AARCH64_BOOT_JDK_SHA256=2006337bf326fdfdf6117081751ba38c1c8706d63419ecac7ff102ff7c776876
MACOS_AARCH64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk26/c3cc523845074aa0af4f5e1e1ed4151d/35/GPL/openjdk-26_macos-aarch64_bin.tar.gz
MACOS_AARCH64_BOOT_JDK_SHA256=254586bcd1bf6dcd125ad667ac32562cb1e2ab1abf3a61fb117b6fabb571e765
MACOS_X64_BOOT_JDK_EXT=tar.gz
MACOS_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk25/bd75d5f9689641da8e1daabeccb5528b/36/GPL/openjdk-25_macos-x64_bin.tar.gz
MACOS_X64_BOOT_JDK_SHA256=47482ad9888991ecac9b2bcc131e2b53ff78aff275104cef85f66252308e8a09
MACOS_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk26/c3cc523845074aa0af4f5e1e1ed4151d/35/GPL/openjdk-26_macos-x64_bin.tar.gz
MACOS_X64_BOOT_JDK_SHA256=8642b89d889c14ede2c446fd5bbe3621c8a3082e3df02013fd1658e39f52929a
WINDOWS_X64_BOOT_JDK_EXT=zip
WINDOWS_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk25/bd75d5f9689641da8e1daabeccb5528b/36/GPL/openjdk-25_windows-x64_bin.zip
WINDOWS_X64_BOOT_JDK_SHA256=85bcc178461e2cb3c549ab9ca9dfa73afd54c09a175d6510d0884071867137d3
WINDOWS_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk26/c3cc523845074aa0af4f5e1e1ed4151d/35/GPL/openjdk-26_windows-x64_bin.zip
WINDOWS_X64_BOOT_JDK_SHA256=2dd2d92c9374cd49a120fe9d916732840bf6bb9f0e0cc29794917a3c08b99c5f

View File

@ -387,8 +387,8 @@ var getJibProfilesCommon = function (input, data) {
};
};
common.boot_jdk_version = "25";
common.boot_jdk_build_number = "37";
common.boot_jdk_version = "26";
common.boot_jdk_build_number = "35";
common.boot_jdk_home = input.get("boot_jdk", "install_path") + "/jdk-"
+ common.boot_jdk_version
+ (input.build_os == "macosx" ? ".jdk/Contents/Home" : "");

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -37,6 +37,6 @@ DEFAULT_VERSION_DATE=2026-09-15
DEFAULT_VERSION_CLASSFILE_MAJOR=71 # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`"
DEFAULT_VERSION_CLASSFILE_MINOR=0
DEFAULT_VERSION_DOCS_API_SINCE=11
DEFAULT_ACCEPTABLE_BOOT_VERSIONS="25 26 27"
DEFAULT_ACCEPTABLE_BOOT_VERSIONS="26 27"
DEFAULT_JDK_SOURCE_TARGET_VERSION=27
DEFAULT_PROMOTED_VERSION_PRE=ea

View File

@ -0,0 +1,42 @@
;
; Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
; DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
;
; This code is free software; you can redistribute it and/or modify it
; under the terms of the GNU General Public License version 2 only, as
; published by the Free Software Foundation.
;
; This code is distributed in the hope that it will be useful, but WITHOUT
; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
; version 2 for more details (a copy is included in the LICENSE file that
; accompanied this code).
;
; You should have received a copy of the GNU General Public License version
; 2 along with this work; if not, write to the Free Software Foundation,
; Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
;
; Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
; or visit www.oracle.com if you need additional information or have any
; questions.
;
; Support for int get_sve_vector_length();
;
; Returns the current SVE vector length in bytes.
; This function uses the INCB instruction which increments a register
; by the number of bytes in an SVE vector register.
;
; Note: This function will fault if SVE is not available or enabled.
; The caller must ensure SVE support is detected before calling.
ALIGN 4
EXPORT get_sve_vector_length
AREA sve_text, CODE
get_sve_vector_length
mov x0, #0
incb x0
ret
END

View File

@ -26,16 +26,19 @@
#include "runtime/os.hpp"
#include "runtime/vm_version.hpp"
// Assembly function to get SVE vector length using INCB instruction
extern "C" int get_sve_vector_length();
int VM_Version::get_current_sve_vector_length() {
assert(VM_Version::supports_sve(), "should not call this");
ShouldNotReachHere();
return 0;
// Use assembly instruction to get the actual SVE vector length
return VM_Version::supports_sve() ? get_sve_vector_length() : 0; // This value is in bytes
}
int VM_Version::set_and_get_current_sve_vector_length(int length) {
assert(VM_Version::supports_sve(), "should not call this");
ShouldNotReachHere();
return 0;
// Use assembly instruction to get the SVE vector length
return VM_Version::supports_sve() ? get_sve_vector_length() : 0; // This value is in bytes
}
void VM_Version::get_os_cpu_info() {
@ -47,11 +50,29 @@ void VM_Version::get_os_cpu_info() {
set_feature(CPU_AES);
set_feature(CPU_SHA1);
set_feature(CPU_SHA2);
set_feature(CPU_PMULL);
}
if (IsProcessorFeaturePresent(PF_ARM_VFP_32_REGISTERS_AVAILABLE)) {
set_feature(CPU_ASIMD);
}
// No check for CPU_PMULL, CPU_SVE, CPU_SVE2
if (IsProcessorFeaturePresent(PF_ARM_V81_ATOMIC_INSTRUCTIONS_AVAILABLE)) {
set_feature(CPU_LSE);
}
if (IsProcessorFeaturePresent(PF_ARM_SVE_INSTRUCTIONS_AVAILABLE)) {
set_feature(CPU_SVE);
}
if (IsProcessorFeaturePresent(PF_ARM_SVE2_INSTRUCTIONS_AVAILABLE)) {
set_feature(CPU_SVE2);
}
if (IsProcessorFeaturePresent(PF_ARM_SVE_BITPERM_INSTRUCTIONS_AVAILABLE)) {
set_feature(CPU_SVEBITPERM);
}
if (IsProcessorFeaturePresent(PF_ARM_SHA3_INSTRUCTIONS_AVAILABLE)) {
set_feature(CPU_SHA3);
}
if (IsProcessorFeaturePresent(PF_ARM_SHA512_INSTRUCTIONS_AVAILABLE)) {
set_feature(CPU_SHA512);
}
__int64 dczid_el0 = _ReadStatusReg(0x5807 /* ARM64_DCZID_EL0 */);
@ -102,8 +123,8 @@ void VM_Version::get_os_cpu_info() {
SYSTEM_INFO si;
GetSystemInfo(&si);
_model = si.wProcessorLevel;
_variant = si.wProcessorRevision / 0xFF;
_revision = si.wProcessorRevision & 0xFF;
_variant = (si.wProcessorRevision >> 8) & 0xFF; // Variant is the upper byte of wProcessorRevision
_revision = si.wProcessorRevision & 0xFF; // Revision is the lower byte of wProcessorRevision
}
}
}

View File

@ -380,37 +380,110 @@ void AOTCodeCache::init_early_c1_table() {
}
}
// macro to record which flags are set -- flag_type selects the
// relevant accessor e.g. set_flag, set_x86_flag, set_x86_use_flag.
// n.b. flag_enum_name and global_flag_name are both needed because we
// don't have consistent conventions for naming global flags e.g.
// EnableContended vs UseMulAddIntrinsic vs UseCRC32Intrinsics
#define RECORD_FLAG(flag_type, flag_enum_name, global_flag_name) \
if (global_flag_name) { \
set_ ## flag_type ## flag(flag_enum_name); \
}
void AOTCodeCache::Config::record(uint cpu_features_offset) {
_flags = 0;
#ifdef ASSERT
_flags |= debugVM;
set_flag(debugVM);
#endif
if (UseCompressedOops) {
_flags |= compressedOops;
}
if (UseTLAB) {
_flags |= useTLAB;
}
RECORD_FLAG(, compressedOops, UseCompressedOops);
RECORD_FLAG(, useTLAB, UseTLAB);
if (JavaAssertions::systemClassDefault()) {
_flags |= systemClassAssertions;
set_flag(systemClassAssertions);
}
if (JavaAssertions::userClassDefault()) {
_flags |= userClassAssertions;
}
if (EnableContended) {
_flags |= enableContendedPadding;
}
if (RestrictContended) {
_flags |= restrictContendedPadding;
set_flag(userClassAssertions);
}
RECORD_FLAG(, enableContendedPadding, EnableContended);
RECORD_FLAG(, restrictContendedPadding, RestrictContended);
_compressedOopShift = CompressedOops::shift();
_compressedOopBase = CompressedOops::base();
_compressedKlassShift = CompressedKlassPointers::shift();
_contendedPaddingWidth = ContendedPaddingWidth;
_gc = (uint)Universe::heap()->kind();
_optoLoopAlignment = (uint)OptoLoopAlignment;
_codeEntryAlignment = (uint)CodeEntryAlignment;
_allocatePrefetchLines = (uint)AllocatePrefetchLines;
_allocateInstancePrefetchLines = (uint)AllocateInstancePrefetchLines;
_allocatePrefetchDistance = (uint)AllocatePrefetchDistance;
_allocatePrefetchStepSize = (uint)AllocatePrefetchStepSize;
_use_intrinsics_flags = 0;
RECORD_FLAG(use_, useCRC32, UseCRC32Intrinsics);
RECORD_FLAG(use_, useCRC32C, UseCRC32CIntrinsics);
#ifdef COMPILER2
_maxVectorSize = (uint)MaxVectorSize;
_arrayOperationPartialInlineSize = (uint)ArrayOperationPartialInlineSize;
RECORD_FLAG(use_, useMultiplyToLen, UseMultiplyToLenIntrinsic);
RECORD_FLAG(use_, useSquareToLen, UseSquareToLenIntrinsic);
RECORD_FLAG(use_, useMulAdd, UseMulAddIntrinsic);
RECORD_FLAG(use_, useMontgomeryMultiply, UseMontgomeryMultiplyIntrinsic);
RECORD_FLAG(use_, useMontgomerySquare, UseMontgomerySquareIntrinsic);
#endif // COMPILER2
RECORD_FLAG(use_, useChaCha20, UseChaCha20Intrinsics);
RECORD_FLAG(use_, useDilithium, UseDilithiumIntrinsics);
RECORD_FLAG(use_, useKyber, UseKyberIntrinsics);
RECORD_FLAG(use_, useBASE64, UseBASE64Intrinsics);
RECORD_FLAG(use_, useAdler32, UseAdler32Intrinsics);
RECORD_FLAG(use_, useAES, UseAESIntrinsics);
RECORD_FLAG(use_, useAESCTR, UseAESCTRIntrinsics);
RECORD_FLAG(use_, useGHASH, UseGHASHIntrinsics);
RECORD_FLAG(use_, useMD5, UseMD5Intrinsics);
RECORD_FLAG(use_, useSHA1, UseSHA1Intrinsics);
RECORD_FLAG(use_, useSHA256, UseSHA256Intrinsics);
RECORD_FLAG(use_, useSHA512, UseSHA512Intrinsics);
RECORD_FLAG(use_, useSHA3, UseSHA3Intrinsics);
RECORD_FLAG(use_, usePoly1305, UsePoly1305Intrinsics);
RECORD_FLAG(use_, useVectorizedMismatch,UseVectorizedMismatchIntrinsic );
RECORD_FLAG(use_, useSecondarySupersTable, UseSecondarySupersTable);
#if defined(X86) && !defined(ZERO)
_avx3threshold = (uint)AVX3Threshold;
_useAVX = (uint)UseAVX;
_x86_flags = 0;
RECORD_FLAG(x86_, x86_enableX86ECoreOpts, EnableX86ECoreOpts);
RECORD_FLAG(x86_, x86_useUnalignedLoadStores, UseUnalignedLoadStores);
RECORD_FLAG(x86_, x86_useAPX, UseAPX);
_x86_use_intrinsics_flags = 0;
RECORD_FLAG(x86_use_, x86_useLibm, UseLibmIntrinsic);
RECORD_FLAG(x86_use_, x86_useIntPoly, UseIntPolyIntrinsics);
#endif // defined(X86) && !defined(ZERO)
#if defined(AARCH64) && !defined(ZERO)
_prefetchCopyIntervalInBytes = (uint)PrefetchCopyIntervalInBytes;
_blockZeroingLowLimit = (uint)BlockZeroingLowLimit;
_softwarePrefetchHintDistance = (uint)SoftwarePrefetchHintDistance;
_useSVE = (uint)UseSVE;
_aarch64_flags = 0;
RECORD_FLAG(aarch64_, aarch64_avoidUnalignedAccesses, AvoidUnalignedAccesses);
RECORD_FLAG(aarch64_, aarch64_useSIMDForMemoryOps, UseSIMDForMemoryOps);
RECORD_FLAG(aarch64_, aarch64_useSIMDForArrayEquals, UseSIMDForArrayEquals);
RECORD_FLAG(aarch64_, aarch64_useSIMDForSHA3, UseSIMDForSHA3Intrinsic);
RECORD_FLAG(aarch64_, aarch64_useLSE, UseLSE);
_aarch64_use_intrinsics_flags = 0;
RECORD_FLAG(aarch64_use_, aarch64_useBlockZeroing, UseBlockZeroing);
RECORD_FLAG(aarch64_use_, aarch64_useSIMDForBigIntegerShift, UseSIMDForBigIntegerShiftIntrinsics);
RECORD_FLAG(aarch64_use_, aarch64_useSimpleArrayEquals, UseSimpleArrayEquals);
RECORD_FLAG(aarch64_use_, aarch64_useSecondarySupersCache, UseSecondarySupersCache);
#endif // defined(AARCH64) && !defined(ZERO)
#if INCLUDE_JVMCI
_enableJVMCI = (uint)EnableJVMCI;
#endif
_cpu_features_offset = cpu_features_offset;
}
#undef RECORD_FLAG
bool AOTCodeCache::Config::verify_cpu_features(AOTCodeCache* cache) const {
LogStreamHandle(Debug, aot, codecache, init) log;
uint offset = _cpu_features_offset;
@ -451,15 +524,27 @@ bool AOTCodeCache::Config::verify_cpu_features(AOTCodeCache* cache) const {
return true;
}
// macro to do *standard* flag eq checks -- flag_type selects the
// relevant accessor e.g. test_flag, test_x86_flag, test_x86_use_flag.
// n.b. flag_enum_name and global_flag_name are both needed because we
// don't have consistent conventions for naming global flags e.g.
// EnableContended vs UseMulAddIntrinsic vs UseCRC32Intrinsics
#define CHECK_FLAG(flag_type, flag_enum_name, global_flag_name) \
if (test_ ## flag_type ## flag(flag_enum_name) != global_flag_name) { \
log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with " # global_flag_name " = %s vs current %s" , (global_flag_name ? "false" : "true"), (global_flag_name ? "true" : "false")); \
return false; \
}
bool AOTCodeCache::Config::verify(AOTCodeCache* cache) const {
// First checks affect all cached AOT code
#ifdef ASSERT
if ((_flags & debugVM) == 0) {
if (!test_flag(debugVM)) {
log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by product VM, it can't be used by debug VM");
return false;
}
#else
if ((_flags & debugVM) != 0) {
if (test_flag(debugVM)) {
log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created by debug VM, it can't be used by product VM");
return false;
}
@ -476,9 +561,195 @@ bool AOTCodeCache::Config::verify(AOTCodeCache* cache) const {
return false;
}
// check CPU features before checking flags that may be
// auto-configured in response to them
if (!verify_cpu_features(cache)) {
return false;
}
// change to EnableContended can affect validity of nmethods
CHECK_FLAG(, enableContendedPadding, EnableContended);
// change to RestrictContended can affect validity of nmethods
CHECK_FLAG(, restrictContendedPadding, RestrictContended);
// Tests for config options which might affect validity of adapters,
// stubs or nmethods. Currently we take a pessemistic stand and
// drop the whole cache if any of these are changed.
// change to opto alignment can affect performance of array copy
// stubs and nmethods
if (_optoLoopAlignment != (uint)OptoLoopAlignment) {
log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with OptoLoopAlignment = %d vs current %d", (int)_optoLoopAlignment, (int)OptoLoopAlignment);
return false;
}
// change to CodeEntryAlignment can affect performance of array
// copy stubs and nmethods
if (_codeEntryAlignment != CodeEntryAlignment) {
log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with CodeEntryAlignment = %d vs current %d", _codeEntryAlignment, CodeEntryAlignment);
return false;
}
// changing Prefetch configuration can affect validity of nmethods
// and stubs
if (_allocatePrefetchLines != (uint)AllocatePrefetchLines) {
log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with = %d vs current %d", (int)_allocatePrefetchLines, (int)AllocatePrefetchLines);
return false;
}
if (_allocateInstancePrefetchLines != (uint)AllocateInstancePrefetchLines) {
log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with = %d vs current %d", (int)_allocateInstancePrefetchLines, (int)AllocateInstancePrefetchLines);
return false;
}
if (_allocatePrefetchDistance != (uint)AllocatePrefetchDistance) {
log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with = %d vs current %d", (int)_allocatePrefetchDistance, (int)AllocatePrefetchDistance);
return false;
}
if (_allocatePrefetchStepSize != (uint)AllocatePrefetchStepSize) {
log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with = %d vs current %d", (int)_allocatePrefetchStepSize, (int)AllocatePrefetchStepSize);
return false;
}
// check intrinsic use settings are compatible
CHECK_FLAG(use_, useCRC32, UseCRC32Intrinsics);
CHECK_FLAG(use_, useCRC32C, UseCRC32CIntrinsics);
#ifdef COMPILER2
// change to MaxVectorSize can affect validity of array copy/fill
// stubs
if (_maxVectorSize != (uint)MaxVectorSize) {
log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with MaxVectorSize = %d vs current %d", (int)_maxVectorSize, (int)MaxVectorSize);
return false;
}
// changing ArrayOperationPartialInlineSize can affect validity of
// nmethods and stubs
if (_arrayOperationPartialInlineSize != (uint)ArrayOperationPartialInlineSize) {
log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with ArrayOperationPartialInlineSize = %d vs current %d", (int)_arrayOperationPartialInlineSize, (int)ArrayOperationPartialInlineSize);
return false;
}
CHECK_FLAG(use_, useMultiplyToLen, UseMultiplyToLenIntrinsic);
CHECK_FLAG(use_, useSquareToLen, UseSquareToLenIntrinsic);
CHECK_FLAG(use_, useMulAdd, UseMulAddIntrinsic);
CHECK_FLAG(use_, useMontgomeryMultiply,UseMontgomeryMultiplyIntrinsic);
CHECK_FLAG(use_, useMontgomerySquare, UseMontgomerySquareIntrinsic);
#endif // COMPILER2
CHECK_FLAG(use_, useChaCha20, UseChaCha20Intrinsics);
CHECK_FLAG(use_, useDilithium, UseDilithiumIntrinsics);
CHECK_FLAG(use_, useKyber, UseKyberIntrinsics);
CHECK_FLAG(use_, useBASE64, UseBASE64Intrinsics);
CHECK_FLAG(use_, useAES, UseAESIntrinsics);
CHECK_FLAG(use_, useAESCTR, UseAESCTRIntrinsics);
CHECK_FLAG(use_, useGHASH, UseGHASHIntrinsics);
CHECK_FLAG(use_, useMD5, UseMD5Intrinsics);
CHECK_FLAG(use_, useSHA1, UseSHA1Intrinsics);
CHECK_FLAG(use_, useSHA256, UseSHA256Intrinsics);
CHECK_FLAG(use_, useSHA512, UseSHA512Intrinsics);
CHECK_FLAG(use_, useSHA3, UseSHA3Intrinsics);
CHECK_FLAG(use_, usePoly1305, UsePoly1305Intrinsics);
CHECK_FLAG(use_, useVectorizedMismatch, UseVectorizedMismatchIntrinsic);
CHECK_FLAG(use_, useSecondarySupersTable, UseSecondarySupersTable);
#if defined(X86) && !defined(ZERO)
// change to AVX3Threshold may affect validity of array copy stubs
// and nmethods
if (_avx3threshold != (uint)AVX3Threshold) {
log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with AVX3Threshold = %d vs current %d", (int)_avx3threshold, AVX3Threshold);
return false;
}
// change to UseAVX may affect validity of array copy stubs and
// nmethods
if (_useAVX != (uint)UseAVX) {
log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with useAVX = %d vs current %d", (int)_useAVX, UseAVX);
return false;
}
// change to EnableX86ECoreOpts may affect validity of nmethods
CHECK_FLAG(x86_, x86_enableX86ECoreOpts, EnableX86ECoreOpts);
// switching off UseUnalignedLoadStores can affect validity of fill
// stubs
if (test_x86_flag(x86_useUnalignedLoadStores) && !UseUnalignedLoadStores) {
log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseUnalignedLoadStores = true vs current = false");
return false;
}
// change to UseAPX can affect validity of nmethods and stubs
CHECK_FLAG(x86_, x86_useAPX, UseAPX);
// check x86-specific intrinsic use settings are compatible
CHECK_FLAG(x86_use_, x86_useLibm, UseLibmIntrinsic);
CHECK_FLAG(x86_use_, x86_useIntPoly, UseIntPolyIntrinsics);
#endif // defined(X86) && !defined(ZERO)
#if defined(AARCH64) && !defined(ZERO)
// change to PrefetchCopyIntervalInBytes may affect validity of
// array copy stubs
if (_prefetchCopyIntervalInBytes != (uint)PrefetchCopyIntervalInBytes) {
log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with PrefetchCopyIntervalInBytes = %d vs current %d", (int)_prefetchCopyIntervalInBytes, (int)PrefetchCopyIntervalInBytes);
return false;
}
// change to BlockZeroingLowLimit may affect validity of array fill
// stubs
if (_blockZeroingLowLimit != (uint)BlockZeroingLowLimit) {
log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with BlockZeroingLowLimit = %d vs current %d", (int)_blockZeroingLowLimit, (int)BlockZeroingLowLimit);
return false;
}
// change to SoftwarePrefetchHintDistance may affect validity of array fill
// stubs
if (_softwarePrefetchHintDistance != (uint)SoftwarePrefetchHintDistance) {
log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with SoftwarePrefetchHintDistance = %d vs current %d", (int)_softwarePrefetchHintDistance, (int)SoftwarePrefetchHintDistance);
return false;
}
// change to UseSVE may affect validity of stubs and nmethods
if (_useSVE != (uint)UseSVE) {
log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseSVE = %d vs current %d",(int)_useSVE, UseSVE);
return false;
}
// switching on AvoidUnalignedAccesses may affect validity of array
// copy stubs and nmethods
if (!test_aarch64_flag(aarch64_avoidUnalignedAccesses) && AvoidUnalignedAccesses) {
log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with AvoidUnalignedAccesses = false vs current = true");
return false;
}
// change to UseSIMDForMemoryOps may affect validity of array
// copy stubs and nmethods
CHECK_FLAG(aarch64_, aarch64_useSIMDForMemoryOps, UseSIMDForMemoryOps);
// change to UseSIMDForArrayEquals may affect validity of array
// copy stubs and nmethods
CHECK_FLAG(aarch64_, aarch64_useSIMDForArrayEquals, UseSIMDForArrayEquals);
// change to useSIMDForSHA3 may affect validity of SHA3 stubs
CHECK_FLAG(aarch64_, aarch64_useSIMDForSHA3, UseSIMDForSHA3Intrinsic);
// change to UseLSE may affect validity of stubs and nmethods
CHECK_FLAG(aarch64_, aarch64_useLSE, UseLSE);
// check aarch64-specific intrinsic use settings are compatible
CHECK_FLAG(aarch64_use_, aarch64_useBlockZeroing, UseBlockZeroing);
CHECK_FLAG(aarch64_use_, aarch64_useSIMDForBigIntegerShift, UseSIMDForBigIntegerShiftIntrinsics);
CHECK_FLAG(aarch64_use_, aarch64_useSimpleArrayEquals, UseSimpleArrayEquals);
CHECK_FLAG(aarch64_use_, aarch64_useSecondarySupersCache, UseSecondarySupersCache);
#endif // defined(AARCH64) && !defined(ZERO)
#if INCLUDE_JVMCI
// change to EnableJVMCI will affect validity of adapters and
// nmethods
if (_enableJVMCI != (uint)EnableJVMCI) {
log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with EnableJVMCI = %s vs current %s", (_enableJVMCI ? "true" : "false"), (EnableJVMCI ? "true" : "false"));
return false;
}
#endif // INCLUDE_JVMCI
// The following checks do not affect AOT adapters caching
if (((_flags & compressedOops) != 0) != UseCompressedOops) {
if (test_flag(compressedOops) != UseCompressedOops) {
log_debug(aot, codecache, init)("AOT Code Cache disabled: it was created with UseCompressedOops = %s", UseCompressedOops ? "false" : "true");
AOTStubCaching = false;
}
@ -493,12 +764,11 @@ bool AOTCodeCache::Config::verify(AOTCodeCache* cache) const {
AOTStubCaching = false;
}
if (!verify_cpu_features(cache)) {
return false;
}
return true;
}
#undef TEST_FLAG
bool AOTCodeCache::Header::verify(uint load_size) const {
if (_version != AOT_CODE_VERSION) {
log_debug(aot, codecache, init)("AOT Code Cache disabled: different AOT Code version %d vs %d recorded in AOT Code header", AOT_CODE_VERSION, _version);

View File

@ -173,6 +173,16 @@ protected:
uint _compressedKlassShift;
uint _contendedPaddingWidth;
uint _gc;
uint _optoLoopAlignment;
uint _codeEntryAlignment;
uint _allocatePrefetchLines;
uint _allocateInstancePrefetchLines;
uint _allocatePrefetchDistance;
uint _allocatePrefetchStepSize;
#ifdef COMPILER2
uint _maxVectorSize;
uint _arrayOperationPartialInlineSize;
#endif // COMPILER2
enum Flags {
none = 0,
debugVM = 1,
@ -184,8 +194,90 @@ protected:
restrictContendedPadding = 64
};
uint _flags;
enum IntrinsicsUseFlags {
use_none = 0,
useCRC32 = 1 << 0,
useCRC32C = 1 << 1,
useMultiplyToLen = 1 << 2,
useSquareToLen = 1 << 3,
useMulAdd = 1 << 4,
useMontgomeryMultiply = 1 << 5,
useMontgomerySquare = 1 << 6,
useChaCha20 = 1 << 7,
useDilithium = 1 << 8,
useKyber = 1 << 9,
useBASE64 = 1 << 10,
useAdler32 = 1 << 11,
useAES = 1 << 12,
useAESCTR = 1 << 13,
useGHASH = 1 << 14,
useMD5 = 1 << 15,
useSHA1 = 1 << 16,
useSHA256 = 1 << 17,
useSHA512 = 1 << 18,
useSHA3 = 1 << 19,
usePoly1305 = 1 << 20,
useVectorizedMismatch = 1 << 21,
useSecondarySupersTable = 1 << 22,
};
uint _use_intrinsics_flags;
bool test_flag(enum Flags flag) const { return (_flags & flag) != 0; }
bool test_use_flag(enum IntrinsicsUseFlags flag) const { return (_use_intrinsics_flags & flag) != 0; }
void set_flag(enum Flags flag) { _flags |= flag; }
void set_use_flag(enum IntrinsicsUseFlags flag) { _use_intrinsics_flags |= flag; }
#if defined(X86) && !defined(ZERO)
uint _avx3threshold;
uint _useAVX;
enum X86Flags {
x86_none = 0,
x86_enableX86ECoreOpts = 1,
x86_useUnalignedLoadStores = 2,
x86_useAPX = 4
};
uint _x86_flags;
enum X86IntrinsicsUseFlags {
x86_use_none = 0,
x86_useLibm = 1 << 1,
x86_useIntPoly = 1 << 2,
};
uint _x86_use_intrinsics_flags;
bool test_x86_flag(enum X86Flags flag) const { return (_x86_flags & flag) != 0; }
bool test_x86_use_flag(enum X86IntrinsicsUseFlags flag) const { return (_x86_use_intrinsics_flags & flag) != 0; }
void set_x86_flag(enum X86Flags flag) { _x86_flags |= flag; }
void set_x86_use_flag(enum X86IntrinsicsUseFlags flag) { _x86_use_intrinsics_flags |= flag; }
#endif // defined(X86) && !defined(ZERO)
#if defined(AARCH64) && !defined(ZERO)
// this is global but x86 does not use it and aarch64 does
uint _prefetchCopyIntervalInBytes;
uint _blockZeroingLowLimit;
uint _softwarePrefetchHintDistance;
uint _useSVE;
enum AArch64Flags {
aarch64_none = 0,
aarch64_avoidUnalignedAccesses = 1,
aarch64_useSIMDForMemoryOps = 2,
aarch64_useSIMDForArrayEquals = 4,
aarch64_useSIMDForSHA3 = 8,
aarch64_useLSE = 16,
};
uint _aarch64_flags;
enum AArch64IntrinsicsUseFlags {
aarch64_use_none = 0,
aarch64_useBlockZeroing = 1 << 0,
aarch64_useSIMDForBigIntegerShift = 1 << 1,
aarch64_useSimpleArrayEquals = 1 << 2,
aarch64_useSecondarySupersCache = 1 << 3,
};
uint _aarch64_use_intrinsics_flags;
bool test_aarch64_flag(enum AArch64Flags flag) const { return (_aarch64_flags & flag) != 0; }
bool test_aarch64_use_flag(enum AArch64IntrinsicsUseFlags flag) const { return (_aarch64_use_intrinsics_flags & flag) != 0; }
void set_aarch64_flag(enum AArch64Flags flag) { _aarch64_flags |= flag; }
void set_aarch64_use_flag(enum AArch64IntrinsicsUseFlags flag) { _aarch64_use_intrinsics_flags |= flag; }
#endif // defined(AARCH64) && !defined(ZERO)
#if INCLUDE_JVMCI
uint _enableJVMCI;
#endif // INCLUDE_JVMCI
uint _cpu_features_offset; // offset in the cache where cpu features are stored
public:
void record(uint cpu_features_offset);
bool verify_cpu_features(AOTCodeCache* cache) const;

View File

@ -148,8 +148,9 @@ void G1Arguments::initialize_card_set_configuration() {
if (FLAG_IS_DEFAULT(G1RemSetArrayOfCardsEntries)) {
uint max_cards_in_inline_ptr = G1CardSetConfiguration::max_cards_in_inline_ptr(G1HeapRegion::LogCardsPerRegion);
const JVMTypedFlagLimit<uint>* limit = JVMFlagLimit::get_range_at(FLAG_MEMBER_ENUM(G1RemSetArrayOfCardsEntries))->cast<uint>();
FLAG_SET_ERGO(G1RemSetArrayOfCardsEntries, MAX2(max_cards_in_inline_ptr * 2,
G1RemSetArrayOfCardsEntriesBase << region_size_log_mb));
MIN2(G1RemSetArrayOfCardsEntriesBase << region_size_log_mb, limit->max())));
}
// Howl card set container globals.

View File

@ -1652,6 +1652,7 @@ jint G1CollectedHeap::initialize() {
}
void G1CollectedHeap::stop() {
assert_not_at_safepoint();
// Stop all concurrent threads. We do this to make sure these threads
// do not continue to execute and access resources (e.g. logging)
// that are destroyed during shutdown.

View File

@ -372,63 +372,56 @@ void G1CMMarkStack::set_empty() {
G1CMRootMemRegions::G1CMRootMemRegions(uint const max_regions) :
_root_regions(MemRegion::create_array(max_regions, mtGC)),
_max_regions(max_regions),
_num_root_regions(0),
_claimed_root_regions(0),
_scan_in_progress(false),
_should_abort(false) { }
_num_regions(0),
_num_claimed_regions(0) { }
G1CMRootMemRegions::~G1CMRootMemRegions() {
MemRegion::destroy_array(_root_regions, _max_regions);
}
void G1CMRootMemRegions::reset() {
_num_root_regions.store_relaxed(0);
assert_at_safepoint();
assert(G1CollectedHeap::heap()->collector_state()->is_in_concurrent_start_gc(), "must be");
_num_regions.store_relaxed(0);
_num_claimed_regions.store_relaxed(0);
}
void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) {
assert_at_safepoint();
size_t idx = _num_root_regions.fetch_then_add(1u);
assert(idx < _max_regions, "Trying to add more root MemRegions than there is space %zu", _max_regions);
uint idx = _num_regions.fetch_then_add(1u);
assert(idx < _max_regions, "Trying to add more root MemRegions than there is space %u", _max_regions);
assert(start != nullptr && end != nullptr && start <= end, "Start (" PTR_FORMAT ") should be less or equal to "
"end (" PTR_FORMAT ")", p2i(start), p2i(end));
_root_regions[idx].set_start(start);
_root_regions[idx].set_end(end);
}
void G1CMRootMemRegions::prepare_for_scan() {
assert(!scan_in_progress(), "pre-condition");
_scan_in_progress.store_relaxed(num_root_regions() > 0);
_claimed_root_regions.store_relaxed(0);
_should_abort.store_relaxed(false);
}
const MemRegion* G1CMRootMemRegions::claim_next() {
if (_should_abort.load_relaxed()) {
// If someone has set the should_abort flag, we return null to
// force the caller to bail out of their loop.
uint local_num_regions = num_regions();
if (num_claimed_regions() >= local_num_regions) {
return nullptr;
}
uint local_num_root_regions = num_root_regions();
if (_claimed_root_regions.load_relaxed() >= local_num_root_regions) {
return nullptr;
}
size_t claimed_index = _claimed_root_regions.fetch_then_add(1u);
if (claimed_index < local_num_root_regions) {
uint claimed_index = _num_claimed_regions.fetch_then_add(1u);
if (claimed_index < local_num_regions) {
return &_root_regions[claimed_index];
}
return nullptr;
}
uint G1CMRootMemRegions::num_root_regions() const {
return (uint)_num_root_regions.load_relaxed();
bool G1CMRootMemRegions::work_completed() const {
return num_remaining_regions() == 0;
}
uint G1CMRootMemRegions::num_remaining_regions() const {
uint total = num_regions();
uint claimed = num_claimed_regions();
return (total > claimed) ? total - claimed : 0;
}
bool G1CMRootMemRegions::contains(const MemRegion mr) const {
uint local_num_root_regions = num_root_regions();
uint local_num_root_regions = num_regions();
for (uint i = 0; i < local_num_root_regions; i++) {
if (_root_regions[i].equals(mr)) {
return true;
@ -437,42 +430,6 @@ bool G1CMRootMemRegions::contains(const MemRegion mr) const {
return false;
}
void G1CMRootMemRegions::notify_scan_done() {
MutexLocker x(G1RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
_scan_in_progress.store_relaxed(false);
G1RootRegionScan_lock->notify_all();
}
void G1CMRootMemRegions::cancel_scan() {
notify_scan_done();
}
void G1CMRootMemRegions::scan_finished() {
assert(scan_in_progress(), "pre-condition");
if (!_should_abort.load_relaxed()) {
assert(_claimed_root_regions.load_relaxed() >= num_root_regions(),
"we should have claimed all root regions, claimed %zu, length = %u",
_claimed_root_regions.load_relaxed(), num_root_regions());
}
notify_scan_done();
}
bool G1CMRootMemRegions::wait_until_scan_finished() {
if (!scan_in_progress()) {
return false;
}
{
MonitorLocker ml(G1RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
while (scan_in_progress()) {
ml.wait();
}
}
return true;
}
G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
G1RegionToSpaceMapper* bitmap_storage) :
_cm_thread(nullptr),
@ -483,6 +440,7 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
_heap(_g1h->reserved()),
_root_regions(_g1h->max_num_regions()),
_root_region_scan_aborted(false),
_global_mark_stack(),
@ -614,6 +572,7 @@ void G1ConcurrentMark::reset() {
_region_mark_stats[i].clear();
}
_root_region_scan_aborted.store_relaxed(false);
_root_regions.reset();
}
@ -970,8 +929,6 @@ void G1ConcurrentMark::start_full_concurrent_cycle() {
satb_mq_set.set_active_all_threads(true, /* new active value */
false /* expected_active */);
_root_regions.prepare_for_scan();
// update_g1_committed() will be called at the end of an evac pause
// when marking is on. So, it's also called at the end of the
// concurrent start pause to update the heap end, if the heap expands
@ -982,7 +939,11 @@ void G1ConcurrentMark::start_full_concurrent_cycle() {
}
void G1ConcurrentMark::start_undo_concurrent_cycle() {
root_regions()->cancel_scan();
assert_at_safepoint_on_vm_thread();
// At this time this GC is not a concurrent start gc any more, can only check for young only gc/phase.
assert(_g1h->collector_state()->is_in_young_only_phase(), "must be");
abort_root_region_scan_at_safepoint();
// Signal the thread to start work.
cm_thread()->start_undo_cycle();
@ -1094,6 +1055,16 @@ uint G1ConcurrentMark::calc_active_marking_workers() {
return result;
}
// Returns whether the root region scan has been flagged as aborted (set by
// abort_root_region_scan() / abort_root_region_scan_at_safepoint()).
// Relaxed load; callers tolerate races when reading this flag.
bool G1ConcurrentMark::has_root_region_scan_aborted() const {
return _root_region_scan_aborted.load_relaxed();
}
#ifndef PRODUCT
// Debug-only invariant check: root region scanning must either have
// processed all regions or have been aborted.
void G1ConcurrentMark::assert_root_region_scan_completed_or_aborted() {
assert(root_regions()->work_completed() || has_root_region_scan_aborted(), "must be");
}
#endif
void G1ConcurrentMark::scan_root_region(const MemRegion* region, uint worker_id) {
#ifdef ASSERT
HeapWord* last = region->last();
@ -1120,45 +1091,76 @@ void G1ConcurrentMark::scan_root_region(const MemRegion* region, uint worker_id)
class G1CMRootRegionScanTask : public WorkerTask {
G1ConcurrentMark* _cm;
bool _should_yield;
public:
G1CMRootRegionScanTask(G1ConcurrentMark* cm) :
WorkerTask("G1 Root Region Scan"), _cm(cm) { }
G1CMRootRegionScanTask(G1ConcurrentMark* cm, bool should_yield) :
WorkerTask("G1 Root Region Scan"), _cm(cm), _should_yield(should_yield) { }
void work(uint worker_id) {
G1CMRootMemRegions* root_regions = _cm->root_regions();
const MemRegion* region = root_regions->claim_next();
while (region != nullptr) {
SuspendibleThreadSetJoiner sts_join(_should_yield);
while (true) {
if (_cm->has_root_region_scan_aborted()) {
return;
}
G1CMRootMemRegions* root_regions = _cm->root_regions();
const MemRegion* region = root_regions->claim_next();
if (region == nullptr) {
return;
}
_cm->scan_root_region(region, worker_id);
region = root_regions->claim_next();
if (_should_yield) {
SuspendibleThreadSet::yield();
// If we yielded, a GC may have processed all root regions,
// so this loop will naturally exit on the next claim_next() call.
// Same if a Full GC signalled abort of the concurrent mark.
}
}
}
};
void G1ConcurrentMark::scan_root_regions() {
// scan_in_progress() will have been set to true only if there was
// at least one root region to scan. So, if it's false, we
// should not attempt to do any further work.
if (root_regions()->scan_in_progress()) {
assert(!has_aborted(), "Aborting before root region scanning is finished not supported.");
bool G1ConcurrentMark::scan_root_regions(WorkerThreads* workers, bool concurrent) {
// We first check whether there is any work to do as we might have already aborted
// the concurrent cycle, or ran into a GC that did the actual work when we reach here.
// We want to avoid spinning up the worker threads if that happened.
// (Note that due to races reading the abort-flag, we might spin up the threads anyway).
//
// Abort happens if a Full GC occurs right after starting the concurrent cycle or
// a young gc doing the work.
//
// Concurrent gc threads enter an STS when starting the task, so they stop, then
// continue after that safepoint.
bool do_scan = !root_regions()->work_completed() && !has_root_region_scan_aborted();
if (do_scan) {
// Assign one worker to each root-region but subject to the max constraint.
const uint num_workers = MIN2(root_regions()->num_root_regions(),
// The constraint is also important to avoid accesses beyond the allocated per-worker
// marking helper data structures. We might get passed different WorkerThreads with
// different number of threads (potential worker ids) than helper data structures when
// completing this work during GC.
const uint num_workers = MIN2(root_regions()->num_remaining_regions(),
_max_concurrent_workers);
assert(num_workers > 0, "no more remaining root regions to process");
G1CMRootRegionScanTask task(this);
G1CMRootRegionScanTask task(this, concurrent);
log_debug(gc, ergo)("Running %s using %u workers for %u work units.",
task.name(), num_workers, root_regions()->num_root_regions());
_concurrent_workers->run_task(&task, num_workers);
// It's possible that has_aborted() is true here without actually
// aborting the survivor scan earlier. This is OK as it's
// mainly used for sanity checking.
root_regions()->scan_finished();
task.name(), num_workers, root_regions()->num_remaining_regions());
workers->run_task(&task, num_workers);
}
assert_root_region_scan_completed_or_aborted();
return do_scan;
}
bool G1ConcurrentMark::wait_until_root_region_scan_finished() {
return root_regions()->wait_until_scan_finished();
// Run the root region scan from the concurrent mark thread, using the
// concurrent workers (which yield to safepoints; concurrent == true).
void G1ConcurrentMark::scan_root_regions_concurrently() {
assert(Thread::current() == cm_thread(), "must be on Concurrent Mark Thread");
scan_root_regions(_concurrent_workers, true /* concurrent */);
}
// Complete any remaining root region scan work inside a safepoint using the
// STW workers (concurrent == false). Returns true if scanning work was done.
bool G1ConcurrentMark::complete_root_regions_scan_in_safepoint() {
assert_at_safepoint_on_vm_thread();
return scan_root_regions(_g1h->workers(), false /* concurrent */);
}
void G1ConcurrentMark::add_root_region(G1HeapRegion* r) {
@ -1169,9 +1171,16 @@ bool G1ConcurrentMark::is_root_region(G1HeapRegion* r) {
return root_regions()->contains(MemRegion(top_at_mark_start(r), r->top()));
}
void G1ConcurrentMark::root_region_scan_abort_and_wait() {
root_regions()->abort();
root_regions()->wait_until_scan_finished();
// Abort an active root region scan from outside a safepoint. Relaxed store;
// scanning workers observe the flag on their next claim attempt.
void G1ConcurrentMark::abort_root_region_scan() {
assert_not_at_safepoint();
_root_region_scan_aborted.store_relaxed(true);
}
// Abort an active root region scan while at a safepoint on the VM thread.
// The scanning threads will notice the flag when they resume after the
// safepoint.
void G1ConcurrentMark::abort_root_region_scan_at_safepoint() {
assert_at_safepoint_on_vm_thread();
_root_region_scan_aborted.store_relaxed(true);
}
void G1ConcurrentMark::concurrent_cycle_start() {
@ -1948,12 +1957,15 @@ void G1ConcurrentMark::print_stats() {
}
bool G1ConcurrentMark::concurrent_cycle_abort() {
assert_at_safepoint_on_vm_thread();
assert(_g1h->collector_state()->is_in_full_gc(), "must be");
// If we start the compaction before the CM threads finish
// scanning the root regions we might trip them over as we'll
// be moving objects / updating references. So let's wait until
// they are done. By telling them to abort, they should complete
// early.
root_region_scan_abort_and_wait();
// be moving objects / updating references. Since the root region
// scan is synchronized with the safepoint, just tell it to abort.
// It will notice when the threads start up again later.
abort_root_region_scan_at_safepoint();
// We haven't started a concurrent cycle no need to do anything; we might have
// aborted the marking because of shutting down though. In this case the marking
@ -1983,7 +1995,7 @@ bool G1ConcurrentMark::concurrent_cycle_abort() {
}
void G1ConcurrentMark::abort_marking_threads() {
assert(!_root_regions.scan_in_progress(), "still doing root region scan");
assert_root_region_scan_completed_or_aborted();
_has_aborted.store_relaxed(true);
_first_overflow_barrier_sync.abort();
_second_overflow_barrier_sync.abort();

View File

@ -288,57 +288,36 @@ private:
class G1CMRootMemRegions {
// The set of root MemRegions.
MemRegion* _root_regions;
size_t const _max_regions;
uint const _max_regions;
Atomic<size_t> _num_root_regions; // Actual number of root regions.
Atomic<uint> _num_regions; // Actual number of root regions.
Atomic<uint> _num_claimed_regions; // Number of root regions currently claimed.
Atomic<size_t> _claimed_root_regions; // Number of root regions currently claimed.
Atomic<bool> _scan_in_progress;
Atomic<bool> _should_abort;
void notify_scan_done();
uint num_regions() const { return _num_regions.load_relaxed(); }
uint num_claimed_regions() const { return _num_claimed_regions.load_relaxed(); }
public:
G1CMRootMemRegions(uint const max_regions);
~G1CMRootMemRegions();
// Reset the data structure to allow addition of new root regions.
void reset();
void add(HeapWord* start, HeapWord* end);
// Reset the claiming / scanning of the root regions.
void prepare_for_scan();
// Forces get_next() to return null so that the iteration aborts early.
void abort() { _should_abort.store_relaxed(true); }
// Return true if the CM thread are actively scanning root regions,
// false otherwise.
bool scan_in_progress() { return _scan_in_progress.load_relaxed(); }
// Reset data structure to initial state.
void reset();
// Claim the next root MemRegion to scan atomically, or return null if
// all have been claimed.
const MemRegion* claim_next();
// The number of root regions to scan.
uint num_root_regions() const;
// Number of root regions to still process.
uint num_remaining_regions() const;
// Returns whether all root regions have been processed or the processing been aborted.
bool work_completed() const;
// Is the given memregion contained in the root regions; the MemRegion must
// match exactly.
bool contains(const MemRegion mr) const;
void cancel_scan();
// Flag that we're done with root region scanning and notify anyone
// who's waiting on it. If aborted is false, assume that all regions
// have been claimed.
void scan_finished();
// If CM threads are still scanning root regions, wait until they
// are done. Return true if we had to wait, false otherwise.
bool wait_until_scan_finished();
};
// This class manages data structures and methods for doing liveness analysis in
@ -367,6 +346,7 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
// Root region tracking and claiming
G1CMRootMemRegions _root_regions;
Atomic<bool> _root_region_scan_aborted;
// For grey objects
G1CMMarkStack _global_mark_stack; // Grey objects behind global finger
@ -600,7 +580,7 @@ public:
// Notifies marking threads to abort. This is a best-effort notification. Does not
// guarantee or update any state after the call. Root region scan must not be
// running.
// running or being aborted.
void abort_marking_threads();
// Total cpu time spent in mark worker threads in seconds.
@ -651,17 +631,30 @@ public:
// Stop active components/the concurrent mark thread.
void stop();
// Scan all the root regions and mark everything reachable from
// them.
void scan_root_regions();
bool wait_until_root_region_scan_finished();
void add_root_region(G1HeapRegion* r);
bool is_root_region(G1HeapRegion* r);
void root_region_scan_abort_and_wait();
// Scan all the root regions concurrently and mark everything reachable from
// them.
void scan_root_regions_concurrently();
// Complete root region scan work in the safepoint, return if we did some work.
bool complete_root_regions_scan_in_safepoint();
// Abort an active concurrent root region scan outside safepoint.
void abort_root_region_scan();
bool has_root_region_scan_aborted() const;
private:
// Abort an active concurrent root region scan during safepoint.
void abort_root_region_scan_at_safepoint();
void assert_root_region_scan_completed_or_aborted() PRODUCT_RETURN;
G1CMRootMemRegions* root_regions() { return &_root_regions; }
// Perform root region scan until all root regions have been processed, or
// the process has been aborted. Returns true if we did some work.
bool scan_root_regions(WorkerThreads* workers, bool concurrent);
// Scan a single root MemRegion to mark everything reachable from it.
void scan_root_region(const MemRegion* region, uint worker_id);

View File

@ -131,14 +131,11 @@ void G1ConcurrentMarkThread::run_service() {
update_perf_counter_cpu_time();
}
_cm->root_regions()->cancel_scan();
}
void G1ConcurrentMarkThread::stop_service() {
if (is_in_progress()) {
// We are not allowed to abort the marking threads during root region scan.
// Needs to be done separately.
_cm->root_region_scan_abort_and_wait();
_cm->abort_root_region_scan();
_cm->abort_marking_threads();
}
@ -164,7 +161,7 @@ bool G1ConcurrentMarkThread::phase_clear_cld_claimed_marks() {
bool G1ConcurrentMarkThread::phase_scan_root_regions() {
G1ConcPhaseTimer p(_cm, "Concurrent Scan Root Regions");
_cm->scan_root_regions();
_cm->scan_root_regions_concurrently();
update_perf_counter_cpu_time();
return _cm->has_aborted();
}

View File

@ -180,7 +180,7 @@ void G1GCPhaseTimes::reset() {
_cur_post_evacuate_cleanup_2_time_ms = 0.0;
_cur_resize_heap_time_ms = 0.0;
_cur_ref_proc_time_ms = 0.0;
_root_region_scan_wait_time_ms = 0.0;
_root_region_scan_time_ms = 0.0;
_external_accounted_time_ms = 0.0;
_recorded_prepare_heap_roots_time_ms = 0.0;
_recorded_young_cset_choice_time_ms = 0.0;
@ -549,8 +549,8 @@ void G1GCPhaseTimes::print_other(double accounted_ms) const {
// In addition, these are not included in G1GCPhaseTimes::_gc_pause_time_ms.
// See G1YoungCollector::collect().
void G1GCPhaseTimes::print(bool evacuation_failed) {
if (_root_region_scan_wait_time_ms > 0.0) {
debug_time("Root Region Scan Waiting", _root_region_scan_wait_time_ms);
if (_root_region_scan_time_ms > 0.0) {
debug_time("Root Region Scan", _root_region_scan_time_ms);
}
// Check if some time has been recorded for verification and only then print

View File

@ -191,7 +191,7 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
double _cur_ref_proc_time_ms;
// Not included in _gc_pause_time_ms
double _root_region_scan_wait_time_ms;
double _root_region_scan_time_ms;
double _external_accounted_time_ms;
@ -325,8 +325,8 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
_cur_prepare_concurrent_task_time_ms = ms;
}
void record_root_region_scan_wait_time(double time_ms) {
_root_region_scan_wait_time_ms = time_ms;
void record_root_region_scan_time(double time_ms) {
_root_region_scan_time_ms = time_ms;
}
void record_serial_free_cset_time_ms(double time_ms) {
@ -399,8 +399,8 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
return _cur_resize_heap_time_ms;
}
double root_region_scan_wait_time_ms() {
return _root_region_scan_wait_time_ms;
double root_region_scan_time_ms() {
return _root_region_scan_time_ms;
}
double young_cset_choice_time_ms() {

View File

@ -690,11 +690,6 @@ void G1Policy::record_young_collection_start() {
assert(_g1h->collection_set()->verify_young_ages(), "region age verification failed");
}
void G1Policy::record_concurrent_mark_init_end() {
assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
collector_state()->set_in_normal_young_gc();
}
void G1Policy::record_concurrent_mark_remark_end() {
double end_time_sec = os::elapsedTime();
double start_time_sec = cur_pause_start_sec();
@ -795,7 +790,8 @@ void G1Policy::record_young_collection_end(bool concurrent_operation_is_full_mar
bool is_young_only_pause = G1CollectorState::is_young_only_pause(this_pause);
if (G1CollectorState::is_concurrent_start_pause(this_pause)) {
record_concurrent_mark_init_end();
assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
collector_state()->set_in_normal_young_gc();
} else {
maybe_start_marking(allocation_word_size);
}
@ -1245,10 +1241,6 @@ bool G1Policy::force_concurrent_start_if_outside_cycle(GCCause::Cause gc_cause)
}
}
void G1Policy::initiate_conc_mark() {
collector_state()->set_in_concurrent_start_gc();
}
static const char* requester_for_mixed_abort(GCCause::Cause cause) {
if (cause == GCCause::_wb_breakpoint) {
return "run_to breakpoint";
@ -1264,7 +1256,7 @@ void G1Policy::decide_on_concurrent_start_pause() {
// We are about to decide on whether this pause will be a
// concurrent start pause.
// First, collector_state()->in_concurrent_start_gc() should not be already set. We
// First, collector_state()->is_in_concurrent_start_gc() should not already be set. We
// will set it here if we have to. However, it should be cleared by
// the end of the pause (it's only set for the duration of a
// concurrent start pause).
@ -1283,22 +1275,19 @@ void G1Policy::decide_on_concurrent_start_pause() {
log_debug(gc, ergo)("Do not initiate concurrent cycle (whitebox controlled)");
} else if (!about_to_start_mixed_phase() && collector_state()->is_in_young_only_phase()) {
// Initiate a new concurrent start if there is no marking or reclamation going on.
initiate_conc_mark();
collector_state()->set_in_concurrent_start_gc();
log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)");
} else if (_g1h->is_user_requested_concurrent_full_gc(cause) ||
GCCause::is_codecache_requested_gc(cause) ||
(cause == GCCause::_wb_breakpoint)) {
// Initiate a concurrent start. A concurrent start must be a young only
// GC, so the collector state must be updated to reflect this.
collector_state()->set_in_normal_young_gc();
// Force concurrent start.
collector_state()->set_in_concurrent_start_gc();
// We might have ended up coming here about to start a mixed phase with a collection set
// active. The following remark might change the "evacuation efficiency" of
// the regions in this set, leading to failing asserts later.
// Since the concurrent cycle will recreate the collection set anyway, simply drop it here.
abandon_collection_set_candidates();
abort_time_to_mixed_tracking();
initiate_conc_mark();
log_debug(gc, ergo)("Initiate concurrent cycle (%s requested concurrent cycle)",
requester_for_mixed_abort(cause));
} else {

View File

@ -313,9 +313,6 @@ public:
void record_full_collection_start();
void record_full_collection_end(size_t allocation_word_size);
// Must currently be called while the world is stopped.
void record_concurrent_mark_init_end();
void record_concurrent_mark_remark_end();
// Record start, end, and completion of cleanup.
@ -332,11 +329,6 @@ private:
// regions and update the associated members.
void update_survival_estimates_for_next_collection();
// Set the state to start a concurrent marking cycle and clear
// _initiate_conc_mark_if_possible because it has now been
// acted on.
void initiate_conc_mark();
public:
// This sets the initiate_conc_mark_if_possible() flag to start a
// new cycle, as long as we are not already in one. It's best if it

View File

@ -152,8 +152,9 @@ bool VM_G1PauseConcurrent::doit_prologue() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
if (g1h->is_shutting_down()) {
Heap_lock->unlock();
// JVM shutdown has started. This ensures that any further operations will be properly aborted
// and will not interfere with the shutdown process.
// JVM shutdown has started. Abort concurrent marking to ensure that any further
// concurrent VM operations will not try to start and interfere with the shutdown
// process.
g1h->concurrent_mark()->abort_marking_threads();
return false;
}

View File

@ -244,19 +244,13 @@ G1YoungGCAllocationFailureInjector* G1YoungCollector::allocation_failure_injecto
return _g1h->allocation_failure_injector();
}
void G1YoungCollector::wait_for_root_region_scanning() {
void G1YoungCollector::complete_root_region_scan() {
Ticks start = Ticks::now();
// We have to wait until the CM threads finish scanning the
// root regions as it's the only way to ensure that all the
// objects on them have been correctly scanned before we start
// moving them during the GC.
bool waited = concurrent_mark()->wait_until_root_region_scan_finished();
Tickspan wait_time;
if (waited) {
wait_time = (Ticks::now() - start);
// We have to complete root region scan as it's the only way to ensure that all the
// objects on them have been correctly scanned before we start moving them during the GC.
if (concurrent_mark()->complete_root_regions_scan_in_safepoint()) {
phase_times()->record_root_region_scan_time((Ticks::now() - start).seconds() * MILLIUNITS);
}
phase_times()->record_root_region_scan_wait_time(wait_time.seconds() * MILLIUNITS);
}
class G1PrintCollectionSetClosure : public G1HeapRegionClosure {
@ -1147,7 +1141,7 @@ void G1YoungCollector::collect() {
// Wait for root region scan here to make sure that it is done before any
// use of the STW workers to maximize cpu use (i.e. all cores are available
// just to do that).
wait_for_root_region_scanning();
complete_root_region_scan();
G1YoungGCVerifierMark vm(this);
{

View File

@ -89,7 +89,7 @@ class G1YoungCollector {
// returning the total time taken.
Tickspan run_task_timed(WorkerTask* task);
void wait_for_root_region_scanning();
void complete_root_region_scan();
void calculate_collection_set(G1EvacInfo* evacuation_info, double target_pause_time_ms);

View File

@ -705,6 +705,10 @@
develop(bool, TraceIterativeGVN, false, \
"Print progress during Iterative Global Value Numbering") \
\
develop(bool, UseDeepIGVNRevisit, true, \
"Re-process nodes that could benefit from a deep revisit after " \
"the IGVN worklist drains") \
\
develop(uint, VerifyIterativeGVN, 0, \
"Verify Iterative Global Value Numbering =FEDCBA, with:" \
" F: verify Node::Ideal does not return nullptr if the node" \

View File

@ -735,7 +735,7 @@ Node *RegionNode::Ideal(PhaseGVN *phase, bool can_reshape) {
#endif
}
// Remove the RegionNode itself from DefUse info
igvn->remove_dead_node(this);
igvn->remove_dead_node(this, PhaseIterGVN::NodeOrigin::Graph);
return nullptr;
}
return this; // Record progress
@ -1007,7 +1007,7 @@ bool RegionNode::optimize_trichotomy(PhaseIterGVN* igvn) {
BoolNode* new_bol = new BoolNode(bol2->in(1), res);
igvn->replace_input_of(iff2, 1, igvn->transform((proj2->_con == 1) ? new_bol : new_bol->negate(igvn)));
if (new_bol->outcnt() == 0) {
igvn->remove_dead_node(new_bol);
igvn->remove_dead_node(new_bol, PhaseIterGVN::NodeOrigin::Speculative);
}
}
return false;

View File

@ -2277,7 +2277,7 @@ void Compile::remove_root_to_sfpts_edges(PhaseIterGVN& igvn) {
if (n != nullptr && n->is_SafePoint()) {
r->rm_prec(i);
if (n->outcnt() == 0) {
igvn.remove_dead_node(n);
igvn.remove_dead_node(n, PhaseIterGVN::NodeOrigin::Graph);
}
--i;
}
@ -2321,7 +2321,7 @@ void Compile::Optimize() {
#endif
{
TracePhase tp(_t_iterGVN);
igvn.optimize();
igvn.optimize(true);
}
if (failing()) return;
@ -2385,7 +2385,7 @@ void Compile::Optimize() {
PhaseRenumberLive prl(initial_gvn(), *igvn_worklist());
}
igvn.reset();
igvn.optimize();
igvn.optimize(true);
if (failing()) return;
}
@ -2418,7 +2418,7 @@ void Compile::Optimize() {
int mcount = macro_count(); // Record number of allocations and locks before IGVN
// Optimize out fields loads from scalar replaceable allocations.
igvn.optimize();
igvn.optimize(true);
print_method(PHASE_ITER_GVN_AFTER_EA, 2);
if (failing()) return;
@ -2498,7 +2498,7 @@ void Compile::Optimize() {
{
TracePhase tp(_t_iterGVN2);
igvn.reset_from_igvn(&ccp);
igvn.optimize();
igvn.optimize(true);
}
print_method(PHASE_ITER_GVN2, 2);

View File

@ -933,7 +933,7 @@ void ConnectionGraph::reduce_phi_on_castpp_field_load(Node* curr_castpp, Growabl
j = MIN2(j, (int)use->outcnt()-1);
}
_igvn->remove_dead_node(use);
_igvn->remove_dead_node(use, PhaseIterGVN::NodeOrigin::Graph);
}
--i;
i = MIN2(i, (int)curr_castpp->outcnt()-1);

View File

@ -2911,8 +2911,8 @@ Node* Phase::gen_subtype_check(Node* subklass, Node* superklass, Node** ctrl, No
*ctrl = iftrue1; // We need exactly the 1 test above
PhaseIterGVN* igvn = gvn.is_IterGVN();
if (igvn != nullptr) {
igvn->remove_globally_dead_node(r_ok_subtype);
igvn->remove_globally_dead_node(r_not_subtype);
igvn->remove_globally_dead_node(r_ok_subtype, PhaseIterGVN::NodeOrigin::Speculative);
igvn->remove_globally_dead_node(r_not_subtype, PhaseIterGVN::NodeOrigin::Speculative);
}
return not_subtype_ctrl;
}

View File

@ -132,7 +132,7 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
cmp2->set_req(2,con2);
const Type *t = cmp2->Value(igvn);
// This compare is dead, so whack it!
igvn->remove_dead_node(cmp2);
igvn->remove_dead_node(cmp2, PhaseIterGVN::NodeOrigin::Speculative);
if( !t->singleton() ) return nullptr;
// No intervening control, like a simple Call
@ -443,7 +443,7 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
}
l -= uses_found; // we deleted 1 or more copies of this edge
}
igvn->remove_dead_node(p);
igvn->remove_dead_node(p, PhaseIterGVN::NodeOrigin::Graph);
}
// Force the original merge dead
@ -455,14 +455,14 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
r->set_req(0, nullptr);
} else {
assert(u->outcnt() == 0, "only dead users");
igvn->remove_dead_node(u);
igvn->remove_dead_node(u, PhaseIterGVN::NodeOrigin::Graph);
}
l -= 1;
}
igvn->remove_dead_node(r);
igvn->remove_dead_node(r, PhaseIterGVN::NodeOrigin::Graph);
// Now remove the bogus extra edges used to keep things alive
igvn->remove_dead_node( hook );
igvn->remove_dead_node(hook, PhaseIterGVN::NodeOrigin::Speculative);
// Must return either the original node (now dead) or a new node
// (Do not return a top here, since that would break the uniqueness of top.)
@ -905,6 +905,7 @@ bool IfNode::fold_compares_helper(IfProjNode* proj, IfProjNode* success, IfProjN
IfNode* dom_iff = proj->in(0)->as_If();
BoolNode* dom_bool = dom_iff->in(1)->as_Bool();
Node* lo = dom_iff->in(1)->in(1)->in(2);
Node* orig_lo = lo;
Node* hi = this_cmp->in(2);
Node* n = this_cmp->in(1);
IfProjNode* otherproj = proj->other_if_proj();
@ -916,6 +917,7 @@ bool IfNode::fold_compares_helper(IfProjNode* proj, IfProjNode* success, IfProjN
BoolTest::mask hi_test = this_bool->_test._test;
BoolTest::mask cond = hi_test;
PhaseTransform::SpeculativeProgressGuard progress_guard(igvn);
// convert:
//
// dom_bool = x {<,<=,>,>=} a
@ -1053,6 +1055,7 @@ bool IfNode::fold_compares_helper(IfProjNode* proj, IfProjNode* success, IfProjN
// previous if determines the result of this if so
// replace Bool with constant
igvn->replace_input_of(this, 1, igvn->intcon(success->_con));
progress_guard.commit();
return true;
}
}
@ -1087,11 +1090,14 @@ bool IfNode::fold_compares_helper(IfProjNode* proj, IfProjNode* success, IfProjN
// min(limit, max(-2 + min_jint + 1, min_jint))
// = min(limit, min_jint)
// = min_jint
if (lo != orig_lo && lo->outcnt() == 0) {
igvn->remove_dead_node(lo, PhaseIterGVN::NodeOrigin::Speculative);
}
if (adjusted_val->outcnt() == 0) {
igvn->remove_dead_node(adjusted_val);
igvn->remove_dead_node(adjusted_val, PhaseIterGVN::NodeOrigin::Speculative);
}
if (adjusted_lim->outcnt() == 0) {
igvn->remove_dead_node(adjusted_lim);
igvn->remove_dead_node(adjusted_lim, PhaseIterGVN::NodeOrigin::Speculative);
}
igvn->C->record_for_post_loop_opts_igvn(this);
return false;
@ -1103,6 +1109,7 @@ bool IfNode::fold_compares_helper(IfProjNode* proj, IfProjNode* success, IfProjN
igvn->replace_input_of(dom_iff, 1, igvn->intcon(proj->_con));
igvn->replace_input_of(this, 1, newbool);
progress_guard.commit();
return true;
}
@ -1592,11 +1599,11 @@ Node* IfNode::dominated_by(Node* prev_dom, PhaseIterGVN* igvn, bool prev_dom_not
}
} // End for each child of a projection
igvn->remove_dead_node(ifp);
igvn->remove_dead_node(ifp, PhaseIterGVN::NodeOrigin::Graph);
} // End for each IfTrue/IfFalse child of If
// Kill the IfNode
igvn->remove_dead_node(this);
igvn->remove_dead_node(this, PhaseIterGVN::NodeOrigin::Graph);
// Must return either the original node (now dead) or a new node
// (Do not return a top here, since that would break the uniqueness of top.)
@ -1758,7 +1765,7 @@ Node* IfNode::simple_subsuming(PhaseIterGVN* igvn) {
}
if (bol->outcnt() == 0) {
igvn->remove_dead_node(bol); // Kill the BoolNode.
igvn->remove_dead_node(bol, PhaseIterGVN::NodeOrigin::Graph); // Kill the BoolNode.
}
return this;
}
@ -1903,7 +1910,7 @@ static IfNode* idealize_test(PhaseGVN* phase, IfNode* iff) {
Node *prior = igvn->hash_find_insert(iff);
if( prior ) {
igvn->remove_dead_node(iff);
igvn->remove_dead_node(iff, PhaseIterGVN::NodeOrigin::Graph);
iff = (IfNode*)prior;
} else {
// Cannot call transform on it just yet

View File

@ -698,7 +698,7 @@ SafePointNode* PhaseIdealLoop::find_safepoint(Node* back_control, const Node* he
}
#ifdef ASSERT
if (mm != nullptr) {
_igvn.remove_dead_node(mm);
_igvn.remove_dead_node(mm, PhaseIterGVN::NodeOrigin::Speculative);
}
#endif
}
@ -2003,7 +2003,7 @@ bool CountedLoopConverter::stress_long_counted_loop() {
Node* n = iv_nodes.at(i);
Node* clone = old_new[n->_idx];
if (clone != nullptr) {
igvn->remove_dead_node(clone);
igvn->remove_dead_node(clone, PhaseIterGVN::NodeOrigin::Speculative);
}
}
return false;
@ -4742,7 +4742,7 @@ void PhaseIdealLoop::replace_parallel_iv(IdealLoopTree *loop) {
_igvn.replace_node(phi2, result);
// Sometimes an induction variable is unused
if (result->outcnt() == 0) {
_igvn.remove_dead_node(result);
_igvn.remove_dead_node(result, PhaseIterGVN::NodeOrigin::Graph);
}
--i; // deleted this phi; rescan starting with next position
}
@ -5420,7 +5420,7 @@ void PhaseIdealLoop::build_and_optimize() {
// clear out the dead code after build_loop_late
while (_deadlist.size()) {
_igvn.remove_globally_dead_node(_deadlist.pop());
_igvn.remove_globally_dead_node(_deadlist.pop(), PhaseIterGVN::NodeOrigin::Graph);
}
eliminate_useless_zero_trip_guard();

View File

@ -161,7 +161,7 @@ Node* PhaseIdealLoop::split_thru_phi(Node* n, Node* region, int policy) {
}
if (the_clone != x) {
_igvn.remove_dead_node(the_clone);
_igvn.remove_dead_node(the_clone, PhaseIterGVN::NodeOrigin::Speculative);
} else if (region->is_Loop() && i == LoopNode::LoopBackControl &&
n->is_Load() && can_move_to_inner_loop(n, region->as_Loop(), x)) {
// it is not a win if 'x' moved from an outer to an inner loop
@ -172,7 +172,7 @@ Node* PhaseIdealLoop::split_thru_phi(Node* n, Node* region, int policy) {
}
// Too few wins?
if (!wins.profitable(policy)) {
_igvn.remove_dead_node(phi);
_igvn.remove_dead_node(phi, PhaseIterGVN::NodeOrigin::Speculative);
return nullptr;
}
@ -1866,7 +1866,7 @@ void PhaseIdealLoop::try_sink_out_of_loop(Node* n) {
assert(cast != nullptr, "must have added a cast to pin the node");
}
}
_igvn.remove_dead_node(n);
_igvn.remove_dead_node(n, PhaseIterGVN::NodeOrigin::Graph);
}
_dom_lca_tags_round = 0;
}
@ -2082,7 +2082,7 @@ Node* PhaseIdealLoop::clone_iff(PhiNode* phi) {
// Register with optimizer
Node *hit1 = _igvn.hash_find_insert(phi1);
if (hit1) { // Hit, toss just made Phi
_igvn.remove_dead_node(phi1); // Remove new phi
_igvn.remove_dead_node(phi1, PhaseIterGVN::NodeOrigin::Speculative); // Remove new phi
assert(hit1->is_Phi(), "" );
phi1 = (PhiNode*)hit1; // Use existing phi
} else { // Miss
@ -2090,7 +2090,7 @@ Node* PhaseIdealLoop::clone_iff(PhiNode* phi) {
}
Node *hit2 = _igvn.hash_find_insert(phi2);
if (hit2) { // Hit, toss just made Phi
_igvn.remove_dead_node(phi2); // Remove new phi
_igvn.remove_dead_node(phi2, PhaseIterGVN::NodeOrigin::Speculative); // Remove new phi
assert(hit2->is_Phi(), "" );
phi2 = (PhiNode*)hit2; // Use existing phi
} else { // Miss
@ -2165,7 +2165,7 @@ CmpNode*PhaseIdealLoop::clone_bool(PhiNode* phi) {
// Register with optimizer
Node *hit1 = _igvn.hash_find_insert(phi1);
if( hit1 ) { // Hit, toss just made Phi
_igvn.remove_dead_node(phi1); // Remove new phi
_igvn.remove_dead_node(phi1, PhaseIterGVN::NodeOrigin::Speculative); // Remove new phi
assert( hit1->is_Phi(), "" );
phi1 = (PhiNode*)hit1; // Use existing phi
} else { // Miss
@ -2173,7 +2173,7 @@ CmpNode*PhaseIdealLoop::clone_bool(PhiNode* phi) {
}
Node *hit2 = _igvn.hash_find_insert(phi2);
if( hit2 ) { // Hit, toss just made Phi
_igvn.remove_dead_node(phi2); // Remove new phi
_igvn.remove_dead_node(phi2, PhaseIterGVN::NodeOrigin::Speculative); // Remove new phi
assert( hit2->is_Phi(), "" );
phi2 = (PhiNode*)hit2; // Use existing phi
} else { // Miss
@ -2324,7 +2324,7 @@ void PhaseIdealLoop::clone_loop_handle_data_uses(Node* old, Node_List &old_new,
_igvn.register_new_node_with_optimizer(phi); // Register new phi
} else { // or
// Remove the new phi from the graph and use the hit
_igvn.remove_dead_node(phi);
_igvn.remove_dead_node(phi, phi == prev ? PhaseIterGVN::NodeOrigin::Graph : PhaseIterGVN::NodeOrigin::Speculative);
phi = hit; // Use existing phi
}
set_ctrl(phi, prev);
@ -3472,7 +3472,7 @@ void PhaseIdealLoop::insert_phi_for_loop( Node* use, uint idx, Node* lp_entry_va
set_ctrl(phi, lp);
} else {
// Remove the new phi from the graph and use the hit
_igvn.remove_dead_node(phi);
_igvn.remove_dead_node(phi, PhaseIterGVN::NodeOrigin::Speculative);
phi = hit;
}
_igvn.replace_input_of(use, idx, phi);

View File

@ -973,7 +973,7 @@ void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc) {
}
k -= (oc2 - use->outcnt());
}
_igvn.remove_dead_node(use);
_igvn.remove_dead_node(use, PhaseIterGVN::NodeOrigin::Graph);
} else if (use->is_ArrayCopy()) {
// Disconnect ArrayCopy node
ArrayCopyNode* ac = use->as_ArrayCopy();
@ -1008,7 +1008,7 @@ void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc) {
// src can be top at this point if src and dest of the
// arraycopy were the same
if (src->outcnt() == 0 && !src->is_top()) {
_igvn.remove_dead_node(src);
_igvn.remove_dead_node(src, PhaseIterGVN::NodeOrigin::Graph);
}
}
_igvn._worklist.push(ac);
@ -1018,7 +1018,7 @@ void PhaseMacroExpand::process_users_of_allocation(CallNode *alloc) {
j -= (oc1 - res->outcnt());
}
assert(res->outcnt() == 0, "all uses of allocated objects must be deleted");
_igvn.remove_dead_node(res);
_igvn.remove_dead_node(res, PhaseIterGVN::NodeOrigin::Graph);
}
//
@ -1502,7 +1502,7 @@ void PhaseMacroExpand::expand_allocate_common(
transform_later(_callprojs.fallthrough_memproj);
}
migrate_outs(_callprojs.catchall_memproj, _callprojs.fallthrough_memproj);
_igvn.remove_dead_node(_callprojs.catchall_memproj);
_igvn.remove_dead_node(_callprojs.catchall_memproj, PhaseIterGVN::NodeOrigin::Graph);
}
// An allocate node has separate i_o projections for the uses on the control
@ -1521,7 +1521,7 @@ void PhaseMacroExpand::expand_allocate_common(
transform_later(_callprojs.fallthrough_ioproj);
}
migrate_outs(_callprojs.catchall_ioproj, _callprojs.fallthrough_ioproj);
_igvn.remove_dead_node(_callprojs.catchall_ioproj);
_igvn.remove_dead_node(_callprojs.catchall_ioproj, PhaseIterGVN::NodeOrigin::Graph);
}
// if we generated only a slow call, we are done
@ -1585,11 +1585,11 @@ void PhaseMacroExpand::yank_alloc_node(AllocateNode* alloc) {
--i; // back up iterator
}
assert(_callprojs.resproj->outcnt() == 0, "all uses must be deleted");
_igvn.remove_dead_node(_callprojs.resproj);
_igvn.remove_dead_node(_callprojs.resproj, PhaseIterGVN::NodeOrigin::Graph);
}
if (_callprojs.fallthrough_catchproj != nullptr) {
migrate_outs(_callprojs.fallthrough_catchproj, ctrl);
_igvn.remove_dead_node(_callprojs.fallthrough_catchproj);
_igvn.remove_dead_node(_callprojs.fallthrough_catchproj, PhaseIterGVN::NodeOrigin::Graph);
}
if (_callprojs.catchall_catchproj != nullptr) {
_igvn.rehash_node_delayed(_callprojs.catchall_catchproj);
@ -1597,16 +1597,16 @@ void PhaseMacroExpand::yank_alloc_node(AllocateNode* alloc) {
}
if (_callprojs.fallthrough_proj != nullptr) {
Node* catchnode = _callprojs.fallthrough_proj->unique_ctrl_out();
_igvn.remove_dead_node(catchnode);
_igvn.remove_dead_node(_callprojs.fallthrough_proj);
_igvn.remove_dead_node(catchnode, PhaseIterGVN::NodeOrigin::Graph);
_igvn.remove_dead_node(_callprojs.fallthrough_proj, PhaseIterGVN::NodeOrigin::Graph);
}
if (_callprojs.fallthrough_memproj != nullptr) {
migrate_outs(_callprojs.fallthrough_memproj, mem);
_igvn.remove_dead_node(_callprojs.fallthrough_memproj);
_igvn.remove_dead_node(_callprojs.fallthrough_memproj, PhaseIterGVN::NodeOrigin::Graph);
}
if (_callprojs.fallthrough_ioproj != nullptr) {
migrate_outs(_callprojs.fallthrough_ioproj, i_o);
_igvn.remove_dead_node(_callprojs.fallthrough_ioproj);
_igvn.remove_dead_node(_callprojs.fallthrough_ioproj, PhaseIterGVN::NodeOrigin::Graph);
}
if (_callprojs.catchall_memproj != nullptr) {
_igvn.rehash_node_delayed(_callprojs.catchall_memproj);
@ -1625,7 +1625,7 @@ void PhaseMacroExpand::yank_alloc_node(AllocateNode* alloc) {
}
}
#endif
_igvn.remove_dead_node(alloc);
_igvn.remove_dead_node(alloc, PhaseIterGVN::NodeOrigin::Graph);
}
void PhaseMacroExpand::expand_initialize_membar(AllocateNode* alloc, InitializeNode* init,

View File

@ -1156,40 +1156,38 @@ Node* LoadNode::can_see_arraycopy_value(Node* st, PhaseGVN* phase) const {
return nullptr;
}
//---------------------------can_see_stored_value------------------------------
// This routine exists to make sure this set of tests is done the same
// everywhere. We need to make a coordinated change: first LoadNode::Ideal
// will change the graph shape in a way which makes memory alive twice at the
// same time (uses the Oracle model of aliasing), then some
// LoadXNode::Identity will fold things back to the equivalence-class model
// of aliasing.
Node* MemNode::can_see_stored_value(Node* st, PhaseValues* phase) const {
Node* LoadNode::can_see_stored_value_through_membars(Node* st, PhaseValues* phase) const {
Node* ld_adr = in(MemNode::Address);
intptr_t ld_off = 0;
Node* ld_base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ld_off);
Node* ld_alloc = AllocateNode::Ideal_allocation(ld_base);
const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr();
Compile::AliasType* atp = (tp != nullptr) ? phase->C->alias_type(tp) : nullptr;
// This is more general than load from boxing objects.
if (skip_through_membars(atp, tp, phase->C->eliminate_boxing())) {
uint alias_idx = atp->index();
Node* result = nullptr;
Node* current = st;
// Skip through chains of MemBarNodes checking the MergeMems for
// new states for the slice of this load. Stop once any other
// kind of node is encountered. Loads from final memory can skip
// through any kind of MemBar but normal loads shouldn't skip
// through MemBarAcquire since the could allow them to move out of
// a synchronized region. It is not safe to step over MemBarCPUOrder,
// because alias info above them may be inaccurate (e.g., due to
// mixed/mismatched unsafe accesses).
// Skip through chains of MemBarNodes checking the MergeMems for new states for the slice of
// this load. Stop once any other kind of node is encountered.
//
// In principle, folding a load is moving it up until it meets a matching store.
//
// store(ptr, v); store(ptr, v); store(ptr, v);
// membar1; -> membar1; -> load(ptr);
// membar2; load(ptr); membar1;
// load(ptr); membar2; membar2;
//
// So, we can decide which kinds of barriers we can walk past. It is not safe to step over
// MemBarCPUOrder, even if the memory is not rewritable, because alias info above them may be
// inaccurate (e.g., due to mixed/mismatched unsafe accesses).
bool is_final_mem = !atp->is_rewritable();
while (current->is_Proj()) {
int opc = current->in(0)->Opcode();
if ((is_final_mem && (opc == Op_MemBarAcquire ||
opc == Op_MemBarAcquireLock ||
opc == Op_LoadFence)) ||
if ((is_final_mem && (opc == Op_MemBarAcquire || opc == Op_MemBarAcquireLock || opc == Op_LoadFence)) ||
opc == Op_MemBarRelease ||
opc == Op_StoreFence ||
opc == Op_MemBarReleaseLock ||
@ -1216,6 +1214,17 @@ Node* MemNode::can_see_stored_value(Node* st, PhaseValues* phase) const {
}
}
return can_see_stored_value(st, phase);
}
// If st is a store to the same location as this, return the stored value
Node* MemNode::can_see_stored_value(Node* st, PhaseValues* phase) const {
Node* ld_adr = in(MemNode::Address);
intptr_t ld_off = 0;
Node* ld_base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ld_off);
Node* ld_alloc = AllocateNode::Ideal_allocation(ld_base);
const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr();
// Loop around twice in the case Load -> Initialize -> Store.
// (See PhaseIterGVN::add_users_to_worklist, which knows about this case.)
for (int trip = 0; trip <= 1; trip++) {
@ -1344,7 +1353,7 @@ Node* LoadNode::Identity(PhaseGVN* phase) {
// If the previous store-maker is the right kind of Store, and the store is
// to the same address, then we are equal to the value stored.
Node* mem = in(Memory);
Node* value = can_see_stored_value(mem, phase);
Node* value = can_see_stored_value_through_membars(mem, phase);
if( value ) {
// byte, short & char stores truncate naturally.
// A load has to load the truncated value which requires
@ -1889,7 +1898,7 @@ Node* LoadNode::split_through_phi(PhaseGVN* phase, bool ignore_missing_instance_
}
}
if (x != the_clone && the_clone != nullptr) {
igvn->remove_dead_node(the_clone);
igvn->remove_dead_node(the_clone, PhaseIterGVN::NodeOrigin::Speculative);
}
phi->set_req(i, x);
}
@ -2042,7 +2051,7 @@ Node *LoadNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// (c) See if we can fold up on the spot, but don't fold up here.
// Fold-up might require truncation (for LoadB/LoadS/LoadUS) or
// just return a prior value, which is done by Identity calls.
if (can_see_stored_value(prev_mem, phase)) {
if (can_see_stored_value_through_membars(prev_mem, phase)) {
// Make ready for step (d):
set_req_X(MemNode::Memory, prev_mem, phase);
return this;
@ -2099,7 +2108,7 @@ const Type* LoadNode::Value(PhaseGVN* phase) const {
Compile* C = phase->C;
// If load can see a previous constant store, use that.
Node* value = can_see_stored_value(mem, phase);
Node* value = can_see_stored_value_through_membars(mem, phase);
if (value != nullptr && value->is_Con()) {
assert(value->bottom_type()->higher_equal(_type), "sanity");
return value->bottom_type();
@ -2350,7 +2359,7 @@ uint LoadNode::match_edge(uint idx) const {
//
Node* LoadBNode::Ideal(PhaseGVN* phase, bool can_reshape) {
Node* mem = in(MemNode::Memory);
Node* value = can_see_stored_value(mem,phase);
Node* value = can_see_stored_value_through_membars(mem, phase);
if (value != nullptr) {
Node* narrow = Compile::narrow_value(T_BYTE, value, _type, phase, false);
if (narrow != value) {
@ -2363,7 +2372,7 @@ Node* LoadBNode::Ideal(PhaseGVN* phase, bool can_reshape) {
const Type* LoadBNode::Value(PhaseGVN* phase) const {
Node* mem = in(MemNode::Memory);
Node* value = can_see_stored_value(mem,phase);
Node* value = can_see_stored_value_through_membars(mem, phase);
if (value != nullptr && value->is_Con() &&
!value->bottom_type()->higher_equal(_type)) {
// If the input to the store does not fit with the load's result type,
@ -2384,7 +2393,7 @@ const Type* LoadBNode::Value(PhaseGVN* phase) const {
//
Node* LoadUBNode::Ideal(PhaseGVN* phase, bool can_reshape) {
Node* mem = in(MemNode::Memory);
Node* value = can_see_stored_value(mem, phase);
Node* value = can_see_stored_value_through_membars(mem, phase);
if (value != nullptr) {
Node* narrow = Compile::narrow_value(T_BOOLEAN, value, _type, phase, false);
if (narrow != value) {
@ -2397,7 +2406,7 @@ Node* LoadUBNode::Ideal(PhaseGVN* phase, bool can_reshape) {
const Type* LoadUBNode::Value(PhaseGVN* phase) const {
Node* mem = in(MemNode::Memory);
Node* value = can_see_stored_value(mem,phase);
Node* value = can_see_stored_value_through_membars(mem, phase);
if (value != nullptr && value->is_Con() &&
!value->bottom_type()->higher_equal(_type)) {
// If the input to the store does not fit with the load's result type,
@ -2418,7 +2427,7 @@ const Type* LoadUBNode::Value(PhaseGVN* phase) const {
//
Node* LoadUSNode::Ideal(PhaseGVN* phase, bool can_reshape) {
Node* mem = in(MemNode::Memory);
Node* value = can_see_stored_value(mem,phase);
Node* value = can_see_stored_value_through_membars(mem, phase);
if (value != nullptr) {
Node* narrow = Compile::narrow_value(T_CHAR, value, _type, phase, false);
if (narrow != value) {
@ -2431,7 +2440,7 @@ Node* LoadUSNode::Ideal(PhaseGVN* phase, bool can_reshape) {
const Type* LoadUSNode::Value(PhaseGVN* phase) const {
Node* mem = in(MemNode::Memory);
Node* value = can_see_stored_value(mem,phase);
Node* value = can_see_stored_value_through_membars(mem, phase);
if (value != nullptr && value->is_Con() &&
!value->bottom_type()->higher_equal(_type)) {
// If the input to the store does not fit with the load's result type,
@ -2452,7 +2461,7 @@ const Type* LoadUSNode::Value(PhaseGVN* phase) const {
//
Node* LoadSNode::Ideal(PhaseGVN* phase, bool can_reshape) {
Node* mem = in(MemNode::Memory);
Node* value = can_see_stored_value(mem,phase);
Node* value = can_see_stored_value_through_membars(mem, phase);
if (value != nullptr) {
Node* narrow = Compile::narrow_value(T_SHORT, value, _type, phase, false);
if (narrow != value) {
@ -2465,7 +2474,7 @@ Node* LoadSNode::Ideal(PhaseGVN* phase, bool can_reshape) {
const Type* LoadSNode::Value(PhaseGVN* phase) const {
Node* mem = in(MemNode::Memory);
Node* value = can_see_stored_value(mem,phase);
Node* value = can_see_stored_value_through_membars(mem, phase);
if (value != nullptr && value->is_Con() &&
!value->bottom_type()->higher_equal(_type)) {
// If the input to the store does not fit with the load's result type,

View File

@ -266,6 +266,7 @@ protected:
const Type* const _type; // What kind of value is loaded?
virtual Node* find_previous_arraycopy(PhaseValues* phase, Node* ld_alloc, Node*& mem, bool can_see_stored_value) const;
Node* can_see_stored_value_through_membars(Node* st, PhaseValues* phase) const;
public:
LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo, ControlDependency control_dependency)

View File

@ -1757,6 +1757,12 @@ static bool match_type_check(PhaseGVN& gvn,
// Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq])
// or the narrowOop equivalent.
(*obj) = extract_obj_from_klass_load(&gvn, val);
// Some klass comparisons are not directly in the form
// Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq]),
// e.g. Bool(CmpP(CastPP(LoadKlass(...)), ConP(klass)), [eq]).
// These patterns with nullable klasses arise, for example, from
// load_array_klass_from_mirror.
if (*obj == nullptr) { return false; }
(*cast_type) = tcon->isa_klassptr()->as_instance_type();
return true; // found
}

View File

@ -575,7 +575,7 @@ PhaseValues::~PhaseValues() {
_table.dump();
// Statistics for value progress and efficiency
if( PrintCompilation && Verbose && WizardMode ) {
tty->print("\n%sValues: %d nodes ---> %d/%d (%d)",
tty->print("\n%sValues: %d nodes ---> " UINT64_FORMAT "/%d (%d)",
is_IterGVN() ? "Iter" : " ", C->unique(), made_progress(), made_transforms(), made_new_values());
if( made_transforms() != 0 ) {
tty->print_cr(" ratio %f", made_progress()/(float)made_transforms() );
@ -731,14 +731,14 @@ Node* PhaseGVN::transform(Node* n) {
}
if (t->singleton() && !k->is_Con()) {
NOT_PRODUCT(set_progress();)
set_progress();
return makecon(t); // Turn into a constant
}
// Now check for Identities
i = k->Identity(this); // Look for a nearby replacement
if (i != k) { // Found? Return replacement!
NOT_PRODUCT(set_progress();)
set_progress();
return i;
}
@ -746,7 +746,7 @@ Node* PhaseGVN::transform(Node* n) {
i = hash_find_insert(k); // Insert if new
if (i && (i != k)) {
// Return the pre-existing node
NOT_PRODUCT(set_progress();)
set_progress();
return i;
}
@ -977,7 +977,7 @@ void PhaseIterGVN::init_verifyPhaseIterGVN() {
#endif
}
void PhaseIterGVN::verify_PhaseIterGVN() {
void PhaseIterGVN::verify_PhaseIterGVN(bool deep_revisit_converged) {
#ifdef ASSERT
// Verify nodes with changed inputs.
Unique_Node_List* modified_list = C->modified_nodes();
@ -1010,7 +1010,7 @@ void PhaseIterGVN::verify_PhaseIterGVN() {
}
}
verify_optimize();
verify_optimize(deep_revisit_converged);
#endif
}
#endif /* PRODUCT */
@ -1040,38 +1040,54 @@ void PhaseIterGVN::trace_PhaseIterGVN_verbose(Node* n, int num_processed) {
}
#endif /* ASSERT */
void PhaseIterGVN::optimize() {
DEBUG_ONLY(uint num_processed = 0;)
NOT_PRODUCT(init_verifyPhaseIterGVN();)
NOT_PRODUCT(C->reset_igv_phase_iter(PHASE_AFTER_ITER_GVN_STEP);)
C->print_method(PHASE_BEFORE_ITER_GVN, 3);
if (StressIGVN) {
shuffle_worklist();
bool PhaseIterGVN::needs_deep_revisit(const Node* n) const {
// LoadNode::Value() -> can_see_stored_value() walks up through many memory
// nodes. LoadNode::Ideal() -> find_previous_store() also walks up to 50
// nodes through stores and arraycopy nodes.
if (n->is_Load()) {
return true;
}
// CmpPNode::sub() -> detect_ptr_independence() -> all_controls_dominate()
// walks CFG dominator relationships extensively. This only triggers when
// both inputs are oop pointers (subnode.cpp:984).
if (n->Opcode() == Op_CmpP) {
const Type* t1 = type_or_null(n->in(1));
const Type* t2 = type_or_null(n->in(2));
return t1 != nullptr && t1->isa_oopptr() &&
t2 != nullptr && t2->isa_oopptr();
}
// IfNode::Ideal() -> search_identical() walks up the CFG dominator tree.
// RangeCheckNode::Ideal() scans up to ~999 nodes up the chain.
// CountedLoopEndNode/LongCountedLoopEndNode::Ideal() via simple_subsuming
// looks for dominating test that subsumes the current test.
switch (n->Opcode()) {
case Op_If:
case Op_RangeCheck:
case Op_CountedLoopEnd:
case Op_LongCountedLoopEnd:
return true;
default:
break;
}
return false;
}
// The node count check in the loop below (check_node_count) assumes that we
// increase the live node count with at most
// max_live_nodes_increase_per_iteration in between checks. If this
// assumption does not hold, there is a risk that we exceed the max node
// limit in between checks and trigger an assert during node creation.
bool PhaseIterGVN::drain_worklist() {
uint loop_count = 1;
const int max_live_nodes_increase_per_iteration = NodeLimitFudgeFactor * 3;
uint loop_count = 0;
// Pull from worklist and transform the node. If the node has changed,
// update edge info and put uses on worklist.
while (_worklist.size() > 0) {
while (_worklist.size() != 0) {
if (C->check_node_count(max_live_nodes_increase_per_iteration, "Out of nodes")) {
C->print_method(PHASE_AFTER_ITER_GVN, 3);
return;
return true;
}
Node* n = _worklist.pop();
if (loop_count >= K * C->live_nodes()) {
DEBUG_ONLY(dump_infinite_loop_info(n, "PhaseIterGVN::optimize");)
C->record_method_not_compilable("infinite loop in PhaseIterGVN::optimize");
DEBUG_ONLY(dump_infinite_loop_info(n, "PhaseIterGVN::drain_worklist");)
C->record_method_not_compilable("infinite loop in PhaseIterGVN::drain_worklist");
C->print_method(PHASE_AFTER_ITER_GVN, 3);
return;
return true;
}
DEBUG_ONLY(trace_PhaseIterGVN_verbose(n, num_processed++);)
DEBUG_ONLY(trace_PhaseIterGVN_verbose(n, _num_processed++);)
if (n->outcnt() != 0) {
NOT_PRODUCT(const Type* oldtype = type_or_null(n));
// Do the transformation
@ -1079,7 +1095,7 @@ void PhaseIterGVN::optimize() {
Node* nn = transform_old(n);
DEBUG_ONLY(int live_nodes_after = C->live_nodes();)
// Ensure we did not increase the live node count with more than
// max_live_nodes_increase_per_iteration during the call to transform_old
// max_live_nodes_increase_per_iteration during the call to transform_old.
DEBUG_ONLY(int increase = live_nodes_after - live_nodes_before;)
assert(increase < max_live_nodes_increase_per_iteration,
"excessive live node increase in single iteration of IGVN: %d "
@ -1087,16 +1103,115 @@ void PhaseIterGVN::optimize() {
increase, max_live_nodes_increase_per_iteration);
NOT_PRODUCT(trace_PhaseIterGVN(n, nn, oldtype);)
} else if (!n->is_top()) {
remove_dead_node(n);
remove_dead_node(n, NodeOrigin::Graph);
}
loop_count++;
}
NOT_PRODUCT(verify_PhaseIterGVN();)
return false;
}
void PhaseIterGVN::push_deep_revisit_candidates() {
ResourceMark rm;
Unique_Node_List all_nodes;
all_nodes.push(C->root());
for (uint j = 0; j < all_nodes.size(); j++) {
Node* n = all_nodes.at(j);
if (needs_deep_revisit(n)) {
_worklist.push(n);
}
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
all_nodes.push(n->fast_out(i));
}
}
}
// Re-process nodes that inspect the graph deeply. After the main worklist drains, walk
// the graph to find all live deep-inspection nodes and push them to the worklist
// for re-evaluation. If any produce changes, drain the worklist again.
// Repeat until stable. This mirrors PhaseCCP::analyze()'s revisit loop.
// Returns true if a fixed point was reached (converged), false if we hit the
// round limit or drain_worklist() aborted the compilation.
bool PhaseIterGVN::deep_revisit() {
  const uint max_deep_revisit_rounds = 10; // typically converges in <2 rounds
  uint round = 0;
  for (; round < max_deep_revisit_rounds; round++) {
    push_deep_revisit_candidates();
    if (_worklist.size() == 0) {
      break; // No deep-inspection nodes to revisit, done.
    }
#ifndef PRODUCT
    // Per-opcode candidate counts, gathered only under -XX:+TraceIterativeGVN.
    uint candidates = _worklist.size();
    uint n_if = 0; uint n_rc = 0; uint n_load = 0; uint n_cmpp = 0; uint n_cle = 0; uint n_lcle = 0;
    if (TraceIterativeGVN) {
      for (uint i = 0; i < _worklist.size(); i++) {
        Node* n = _worklist.at(i);
        switch (n->Opcode()) {
          case Op_If: n_if++; break;
          case Op_RangeCheck: n_rc++; break;
          case Op_CountedLoopEnd: n_cle++; break;
          case Op_LongCountedLoopEnd: n_lcle++; break;
          case Op_CmpP: n_cmpp++; break;
          default: if (n->is_Load()) n_load++; break;
        }
      }
    }
#endif
    // Convergence: if the drain does not make progress (no Ideal, Value, Identity or GVN changes),
    // we are at a fixed point. We use made_progress() rather than live_nodes because live_nodes
    // misses non-structural changes like a LoadNode dropping its control input.
    // NOTE(review): made_progress() returns uint64_t; narrowing to uint is benign
    // here because only the per-round delta (always < 2^32) is used — confirm.
    uint progress_before = made_progress();
    if (drain_worklist()) {
      // drain_worklist() aborted (node limit or infinite-loop bailout).
      return false;
    }
    uint progress = made_progress() - progress_before;
#ifndef PRODUCT
    if (TraceIterativeGVN) {
      tty->print("deep_revisit round %u: %u candidates (If=%u RC=%u Load=%u CmpP=%u CLE=%u LCLE=%u), progress=%u (%s)",
                 round, candidates, n_if, n_rc, n_load, n_cmpp, n_cle, n_lcle, progress, progress != 0 ? "changed" : "converged");
      if (C->method() != nullptr) {
        tty->print(", ");
        C->method()->print_short_name(tty);
      }
      tty->cr();
    }
#endif
    if (progress == 0) {
      break; // Fixed point reached.
    }
  }
  // Converged iff we broke out before exhausting the round budget.
  return round < max_deep_revisit_rounds;
}
// Main IGVN driver: drain the worklist to a fixed point, optionally followed
// by deep-revisit rounds that re-evaluate far-inspecting nodes.
void PhaseIterGVN::optimize(bool deep) {
  // NOTE(review): in product builds deep_revisit_converged is only consumed by
  // the NOT_PRODUCT verify call below — confirm this does not trigger an
  // unused-variable warning there.
  bool deep_revisit_converged = false;
  DEBUG_ONLY(_num_processed = 0;)
  NOT_PRODUCT(init_verifyPhaseIterGVN();)
  NOT_PRODUCT(C->reset_igv_phase_iter(PHASE_AFTER_ITER_GVN_STEP);)
  C->print_method(PHASE_BEFORE_ITER_GVN, 3);
  if (StressIGVN) {
    // Randomize processing order to shake out order-dependent bugs.
    shuffle_worklist();
  }
  // Pull from worklist and transform the node.
  if (drain_worklist()) {
    // Aborted: node limit reached or infinite-loop bailout.
    return;
  }
  if (deep && UseDeepIGVNRevisit) {
    deep_revisit_converged = deep_revisit();
    if (C->failing()) {
      return;
    }
  }
  // Verification is stricter when deep revisit reached a fixed point.
  NOT_PRODUCT(verify_PhaseIterGVN(deep_revisit_converged);)
  C->print_method(PHASE_AFTER_ITER_GVN, 3);
}
#ifdef ASSERT
void PhaseIterGVN::verify_optimize() {
void PhaseIterGVN::verify_optimize(bool deep_revisit_converged) {
assert(_worklist.size() == 0, "igvn worklist must be empty before verify");
if (is_verify_Value() ||
@ -1114,11 +1229,11 @@ void PhaseIterGVN::verify_optimize() {
// in PhaseIterGVN::add_users_to_worklist to update it again or add an exception
// in the verification methods below if that is not possible for some reason (like Load nodes).
if (is_verify_Value()) {
verify_Value_for(n);
verify_Value_for(n, deep_revisit_converged /* strict */);
}
if (is_verify_Ideal()) {
verify_Ideal_for(n, false);
verify_Ideal_for(n, true);
verify_Ideal_for(n, false /* can_reshape */, deep_revisit_converged);
verify_Ideal_for(n, true /* can_reshape */, deep_revisit_converged);
}
if (is_verify_Identity()) {
verify_Identity_for(n);
@ -1240,52 +1355,15 @@ void PhaseIterGVN::verify_Value_for(const Node* n, bool strict) {
// Check that all Ideal optimizations that could be done were done.
// Asserts if it found missed optimization opportunities or encountered unexpected changes, and
// returns normally otherwise (no missed optimization, or skipped verification).
void PhaseIterGVN::verify_Ideal_for(Node* n, bool can_reshape) {
void PhaseIterGVN::verify_Ideal_for(Node* n, bool can_reshape, bool deep_revisit_converged) {
if (!deep_revisit_converged && needs_deep_revisit(n)) {
return;
}
// First, we check a list of exceptions, where we skip verification,
// because there are known cases where Ideal can optimize after IGVN.
// Some may be expected and cannot be fixed, and others should be fixed.
switch (n->Opcode()) {
// RangeCheckNode::Ideal looks up the chain for about 999 nodes
// (see "Range-Check scan limit"). So, it is possible that something
// is optimized in that input subgraph, and the RangeCheck was not
// added to the worklist because it would be too expensive to walk
// down the graph for 1000 nodes and put all on the worklist.
//
// Found with:
// java -XX:VerifyIterativeGVN=0100 -Xbatch --version
case Op_RangeCheck:
return;
// IfNode::Ideal does:
// Node* prev_dom = search_identical(dist, igvn);
// which means we seach up the CFG, traversing at most up to a distance.
// If anything happens rather far away from the If, we may not put the If
// back on the worklist.
//
// Found with:
// java -XX:VerifyIterativeGVN=0100 -Xcomp --version
case Op_If:
return;
// IfNode::simple_subsuming
// Looks for dominating test that subsumes the current test.
// Notification could be difficult because of larger distance.
//
// Found with:
// runtime/exceptionMsgs/ArrayIndexOutOfBoundsException/ArrayIndexOutOfBoundsExceptionTest.java#id1
// -XX:VerifyIterativeGVN=1110
case Op_CountedLoopEnd:
return;
// LongCountedLoopEndNode::Ideal
// Probably same issue as above.
//
// Found with:
// compiler/predicates/assertion/TestAssertionPredicates.java#NoLoopPredicationXbatch
// -XX:StressLongCountedLoop=2000000 -XX:+IgnoreUnrecognizedVMOptions -XX:VerifyIterativeGVN=1110
case Op_LongCountedLoopEnd:
return;
// RegionNode::Ideal does "Skip around the useless IF diamond".
// 245 IfTrue === 244
// 258 If === 245 257
@ -1757,22 +1835,6 @@ void PhaseIterGVN::verify_Ideal_for(Node* n, bool can_reshape) {
return;
}
if (n->is_Load()) {
// LoadNode::Ideal uses tries to find an earlier memory state, and
// checks can_see_stored_value for it.
//
// Investigate why this was not already done during IGVN.
// A similar issue happens with Identity.
//
// There seem to be other cases where loads go up some steps, like
// LoadNode::Ideal going up 10x steps to find dominating load.
//
// Found with:
// test/hotspot/jtreg/compiler/arraycopy/TestCloneAccess.java
// -XX:VerifyIterativeGVN=1110
return;
}
if (n->is_Store()) {
// StoreNode::Ideal can do this:
// // Capture an unaliased, unconditional, simple store into an initializer.
@ -1857,8 +1919,16 @@ void PhaseIterGVN::verify_Ideal_for(Node* n, bool can_reshape) {
return;
}
// The number of nodes shoud not increase.
uint old_unique = C->unique();
// Ideal should not make progress if it returns nullptr.
// We use made_progress() rather than unique() or live_nodes() because some
// Ideal implementations speculatively create nodes and kill them before
// returning nullptr (e.g. split_if clones a Cmp to check is_canonical).
// unique() is a high-water mark that is not decremented by remove_dead_node,
// so it would cause false-positives. live_nodes() accounts for dead nodes but can
// decrease when Ideal removes existing nodes as side effects.
// made_progress() precisely tracks meaningful transforms, and speculative
// work killed via NodeOrigin::Speculative does not increment it.
uint old_progress = made_progress();
// The hash of a node should not change, this would indicate different inputs
uint old_hash = n->hash();
// Remove 'n' from hash table in case it gets modified. We want to avoid
@ -1870,14 +1940,15 @@ void PhaseIterGVN::verify_Ideal_for(Node* n, bool can_reshape) {
Node* i = n->Ideal(this, can_reshape);
// If there was no new Idealization, we are probably happy.
if (i == nullptr) {
if (old_unique < C->unique()) {
uint progress = made_progress() - old_progress;
if (progress != 0) {
stringStream ss; // Print as a block without tty lock.
ss.cr();
ss.print_cr("Ideal optimization did not make progress but created new unused nodes.");
ss.print_cr(" old_unique = %d, unique = %d", old_unique, C->unique());
ss.print_cr("Ideal optimization did not make progress but had side effects.");
ss.print_cr(" %u transforms made progress", progress);
n->dump_bfs(1, nullptr, "", &ss);
tty->print_cr("%s", ss.as_string());
assert(false, "Unexpected new unused nodes from applying Ideal optimization on %s", n->Name());
assert(false, "Unexpected side effects from applying Ideal optimization on %s", n->Name());
}
if (old_hash != n->hash()) {
@ -2152,6 +2223,9 @@ Node *PhaseIterGVN::transform_old(Node* n) {
#endif
DEBUG_ONLY(uint loop_count = 1;)
if (i != nullptr) {
set_progress();
}
while (i != nullptr) {
#ifdef ASSERT
if (loop_count >= K + C->live_nodes()) {
@ -2197,10 +2271,8 @@ Node *PhaseIterGVN::transform_old(Node* n) {
// cache Value. Later requests for the local phase->type of this Node can
// use the cached Value instead of suffering with 'bottom_type'.
if (type_or_null(k) != t) {
#ifndef PRODUCT
inc_new_values();
NOT_PRODUCT(inc_new_values();)
set_progress();
#endif
set_type(k, t);
// If k is a TypeNode, capture any more-precise type permanently into Node
k->raise_bottom_type(t);
@ -2209,7 +2281,7 @@ Node *PhaseIterGVN::transform_old(Node* n) {
}
// If 'k' computes a constant, replace it with a constant
if (t->singleton() && !k->is_Con()) {
NOT_PRODUCT(set_progress();)
set_progress();
Node* con = makecon(t); // Make a constant
add_users_to_worklist(k);
subsume_node(k, con); // Everybody using k now uses con
@ -2219,7 +2291,7 @@ Node *PhaseIterGVN::transform_old(Node* n) {
// Now check for Identities
i = k->Identity(this); // Look for a nearby replacement
if (i != k) { // Found? Return replacement!
NOT_PRODUCT(set_progress();)
set_progress();
add_users_to_worklist(k);
subsume_node(k, i); // Everybody using k now uses i
return i;
@ -2229,7 +2301,7 @@ Node *PhaseIterGVN::transform_old(Node* n) {
i = hash_find_insert(k); // Check for pre-existing node
if (i && (i != k)) {
// Return the pre-existing node if it isn't dead
NOT_PRODUCT(set_progress();)
set_progress();
add_users_to_worklist(k);
subsume_node(k, i); // Everybody using k now uses i
return i;
@ -2248,7 +2320,7 @@ const Type* PhaseIterGVN::saturate(const Type* new_type, const Type* old_type,
//------------------------------remove_globally_dead_node----------------------
// Kill a globally dead Node. All uses are also globally dead and are
// aggressively trimmed.
void PhaseIterGVN::remove_globally_dead_node( Node *dead ) {
void PhaseIterGVN::remove_globally_dead_node(Node* dead, NodeOrigin origin) {
enum DeleteProgress {
PROCESS_INPUTS,
PROCESS_OUTPUTS
@ -2265,11 +2337,13 @@ void PhaseIterGVN::remove_globally_dead_node( Node *dead ) {
uint progress_state = stack.index();
assert(dead != C->root(), "killing root, eh?");
assert(!dead->is_top(), "add check for top when pushing");
NOT_PRODUCT( set_progress(); )
if (progress_state == PROCESS_INPUTS) {
// After following inputs, continue to outputs
stack.set_index(PROCESS_OUTPUTS);
if (!dead->is_Con()) { // Don't kill cons but uses
if (origin != NodeOrigin::Speculative) {
set_progress();
}
bool recurse = false;
// Remove from hash table
_table.hash_delete( dead );
@ -2379,7 +2453,7 @@ void PhaseIterGVN::subsume_node( Node *old, Node *nn ) {
// Smash all inputs to 'old', isolating him completely
Node *temp = new Node(1);
temp->init_req(0,nn); // Add a use to nn to prevent him from dying
remove_dead_node( old );
remove_dead_node(old, NodeOrigin::Graph);
temp->del_req(0); // Yank bogus edge
if (nn != nullptr && nn->outcnt() == 0) {
_worklist.push(nn);

View File

@ -187,8 +187,8 @@ public:
class PhaseTransform : public Phase {
public:
PhaseTransform(PhaseNumber pnum) : Phase(pnum) {
#ifndef PRODUCT
clear_progress();
#ifndef PRODUCT
clear_transforms();
set_allow_progress(true);
#endif
@ -201,12 +201,31 @@ public:
// true if CFG node d dominates CFG node n
virtual bool is_dominator(Node *d, Node *n) { fatal("unimplemented for this pass"); return false; };
#ifndef PRODUCT
uint _count_progress; // For profiling, count transforms that make progress
void set_progress() { ++_count_progress; assert( allow_progress(),"No progress allowed during verification"); }
void clear_progress() { _count_progress = 0; }
uint made_progress() const { return _count_progress; }
uint64_t _count_progress; // Count transforms that make progress
void set_progress() { ++_count_progress; assert(allow_progress(), "No progress allowed during verification"); }
void clear_progress() { _count_progress = 0; }
uint64_t made_progress() const { return _count_progress; }
  // RAII guard for speculative transforms. Restores _count_progress in the destructor
  // unless commit() is called, so that abandoned speculative work does not count as progress.
  // In case multiple nodes are created and only some are speculative, commit() should still be called.
  // NOTE(review): the guard is copyable and its single-arg constructor is not
  // explicit; consider deleting copy operations and marking the ctor explicit
  // so two guards cannot race to restore the same counter — confirm intent.
  class SpeculativeProgressGuard {
    PhaseTransform* _phase;     // Phase whose progress counter is guarded.
    uint64_t _saved_progress;   // Counter value captured at construction.
    bool _committed;            // True once the speculative work is kept.
  public:
    SpeculativeProgressGuard(PhaseTransform* phase) :
      _phase(phase), _saved_progress(phase->made_progress()), _committed(false) {}
    ~SpeculativeProgressGuard() {
      // Roll back any progress recorded since construction unless committed.
      if (!_committed) {
        _phase->_count_progress = _saved_progress;
      }
    }
    void commit() { _committed = true; }
  };
#ifndef PRODUCT
uint _count_transforms; // For profiling, count transforms performed
void set_transforms() { ++_count_transforms; }
void clear_transforms() { _count_transforms = 0; }
@ -446,10 +465,30 @@ class PhaseIterGVN : public PhaseGVN {
private:
bool _delay_transform; // When true simply register the node when calling transform
// instead of actually optimizing it
DEBUG_ONLY(uint _num_processed;) // Running count for trace_PhaseIterGVN_verbose
// Idealize old Node 'n' with respect to its inputs and its value
virtual Node *transform_old( Node *a_node );
// Drain the IGVN worklist: process nodes until the worklist is empty.
// Returns true if compilation was aborted (node limit or infinite loop),
// false on normal completion.
bool drain_worklist();
// Walk all live nodes and push deep-inspection candidates to _worklist.
void push_deep_revisit_candidates();
// After the main worklist drains, re-process deep-inspection nodes to
// catch optimization opportunities from far-away changes. Repeats until
// convergence (no progress made) or max rounds reached.
// Returns true if converged.
bool deep_revisit();
// Returns true for nodes that inspect the graph beyond their direct
// inputs, and therefore may miss optimization opportunities when
// changes happen far away.
bool needs_deep_revisit(const Node* n) const;
// Subsume users of node 'old' into node 'nn'
void subsume_node( Node *old, Node *nn );
@ -493,11 +532,16 @@ public:
// Given def-use info and an initial worklist, apply Node::Ideal,
// Node::Value, Node::Identity, hash-based value numbering, Node::Ideal_DU
// and dominator info to a fixed point.
void optimize();
// When deep is true, after the main worklist drains, re-process
// nodes that inspect the graph deeply (Load, CmpP, If, RangeCheck,
// CountedLoopEnd, LongCountedLoopEnd) to catch optimization opportunities
// from changes far away that the normal notification mechanism misses.
void optimize(bool deep = false);
#ifdef ASSERT
void verify_optimize();
void verify_optimize(bool deep_revisit_converged);
void verify_Value_for(const Node* n, bool strict = false);
void verify_Ideal_for(Node* n, bool can_reshape);
void verify_Ideal_for(Node* n, bool can_reshape, bool deep_revisit_converged);
void verify_Identity_for(Node* n);
void verify_node_invariants_for(const Node* n);
void verify_empty_worklist(Node* n);
@ -506,7 +550,7 @@ public:
#ifndef PRODUCT
void trace_PhaseIterGVN(Node* n, Node* nn, const Type* old_type);
void init_verifyPhaseIterGVN();
void verify_PhaseIterGVN();
void verify_PhaseIterGVN(bool deep_revisit_converged);
#endif
#ifdef ASSERT
@ -522,15 +566,21 @@ public:
// It is significant only for debugging and profiling.
Node* register_new_node_with_optimizer(Node* n, Node* orig = nullptr);
// Kill a globally dead Node. All uses are also globally dead and are
// Origin of a dead node, describing why it is dying.
// Speculative: a temporarily created node that was never part of the graph
// (e.g., a speculative clone in split_if to test constant foldability).
// Its death does not count as progress for convergence tracking.
enum class NodeOrigin { Graph, Speculative };
// Kill a globally dead Node. All uses are also globally dead and are
// aggressively trimmed.
void remove_globally_dead_node( Node *dead );
void remove_globally_dead_node(Node* dead, NodeOrigin origin);
// Kill all inputs to a dead node, recursively making more dead nodes.
// The Node must be dead locally, i.e., have no uses.
void remove_dead_node( Node *dead ) {
void remove_dead_node(Node* dead, NodeOrigin origin) {
assert(dead->outcnt() == 0 && !dead->is_top(), "node must be dead");
remove_globally_dead_node(dead);
remove_globally_dead_node(dead, origin);
}
// Add users of 'n' to worklist

View File

@ -85,7 +85,7 @@ bool PhaseIdealLoop::split_up( Node *n, Node *blk1, Node *blk2 ) {
if( split_up( n->in(i), blk1, blk2 ) ) {
// Got split recursively and self went dead?
if (n->outcnt() == 0)
_igvn.remove_dead_node(n);
_igvn.remove_dead_node(n, PhaseIterGVN::NodeOrigin::Graph);
return true;
}
}
@ -273,7 +273,7 @@ void PhaseIdealLoop::clone_loadklass_nodes_at_cmp_index(const Node* n, Node* cmp
_igvn.replace_input_of(decode_clone, 1, loadklass_clone);
_igvn.replace_input_of(loadklass_clone, MemNode::Address, addp_clone);
if (decode->outcnt() == 0) {
_igvn.remove_dead_node(decode);
_igvn.remove_dead_node(decode, PhaseIterGVN::NodeOrigin::Graph);
}
}
}
@ -290,7 +290,7 @@ void PhaseIdealLoop::clone_loadklass_nodes_at_cmp_index(const Node* n, Node* cmp
_igvn.replace_input_of(cmp, i, loadklass_clone);
_igvn.replace_input_of(loadklass_clone, MemNode::Address, addp_clone);
if (loadklass->outcnt() == 0) {
_igvn.remove_dead_node(loadklass);
_igvn.remove_dead_node(loadklass, PhaseIterGVN::NodeOrigin::Graph);
}
}
}
@ -369,7 +369,7 @@ bool PhaseIdealLoop::clone_cmp_down(Node* n, const Node* blk1, const Node* blk2)
_igvn.replace_input_of(x2, 1, x1);
_igvn.replace_input_of(iff, 1, x2);
}
_igvn.remove_dead_node(u);
_igvn.remove_dead_node(u, PhaseIterGVN::NodeOrigin::Graph);
--j;
} else {
// We might see an Opaque1 from a loop limit check here
@ -385,7 +385,7 @@ bool PhaseIdealLoop::clone_cmp_down(Node* n, const Node* blk1, const Node* blk2)
--j;
}
}
_igvn.remove_dead_node(bol);
_igvn.remove_dead_node(bol, PhaseIterGVN::NodeOrigin::Graph);
--i;
}
}
@ -403,7 +403,7 @@ bool PhaseIdealLoop::clone_cmp_down(Node* n, const Node* blk1, const Node* blk2)
register_new_node(x, ctrl_or_self(use));
_igvn.replace_input_of(use, pos, x);
}
_igvn.remove_dead_node(n);
_igvn.remove_dead_node(n, PhaseIterGVN::NodeOrigin::Graph);
return true;
}
@ -517,7 +517,7 @@ Node *PhaseIdealLoop::spinup( Node *iff_dom, Node *new_false, Node *new_true, No
Node *t = _igvn.hash_find_insert(phi_post);
if( t ) { // See if we already have this one
// phi_post will not be used, so kill it
_igvn.remove_dead_node(phi_post);
_igvn.remove_dead_node(phi_post, PhaseIterGVN::NodeOrigin::Speculative);
phi_post->destruct(&_igvn);
phi_post = t;
} else {
@ -647,7 +647,7 @@ void PhaseIdealLoop::do_split_if(Node* iff, RegionNode** new_false_region, Regio
Node* m = n->out(j);
// If m is dead, throw it away, and declare progress
if (_loop_or_ctrl[m->_idx] == nullptr) {
_igvn.remove_dead_node(m);
_igvn.remove_dead_node(m, PhaseIterGVN::NodeOrigin::Graph);
// fall through
}
else if (m != iff && split_up(m, region, iff)) {
@ -704,7 +704,7 @@ void PhaseIdealLoop::do_split_if(Node* iff, RegionNode** new_false_region, Regio
new_true = ifpx;
}
}
_igvn.remove_dead_node(new_iff);
_igvn.remove_dead_node(new_iff, PhaseIterGVN::NodeOrigin::Speculative);
// Lazy replace IDOM info with the region's dominator
replace_node_and_forward_ctrl(iff, region_dom);
// Break the self-cycle. Required for forward_ctrl to work on region.
@ -720,7 +720,7 @@ void PhaseIdealLoop::do_split_if(Node* iff, RegionNode** new_false_region, Regio
for (DUIterator k = region->outs(); region->has_out(k); k++) {
Node* phi = region->out(k);
if (!phi->in(0)) { // Dead phi? Remove it
_igvn.remove_dead_node(phi);
_igvn.remove_dead_node(phi, PhaseIterGVN::NodeOrigin::Graph);
} else if (phi == region) { // Found the self-reference
continue; // No roll-back of DUIterator
} else if (phi->is_Phi()) { // Expected common case: Phi hanging off of Region
@ -739,7 +739,7 @@ void PhaseIdealLoop::do_split_if(Node* iff, RegionNode** new_false_region, Regio
handle_use(use, phi, &phi_cache, region_dom, new_false, new_true, old_false, old_true);
} // End of while phi has uses
// Remove the dead Phi
_igvn.remove_dead_node( phi );
_igvn.remove_dead_node(phi, PhaseIterGVN::NodeOrigin::Graph);
} else {
assert(phi->in(0) == region, "Inconsistent graph");
// Random memory op guarded by Region. Compute new DEF for USE.
@ -752,7 +752,7 @@ void PhaseIdealLoop::do_split_if(Node* iff, RegionNode** new_false_region, Regio
--k;
} // End of while merge point has phis
_igvn.remove_dead_node(region);
_igvn.remove_dead_node(region, PhaseIterGVN::NodeOrigin::Graph);
// Control is updated here to a region, which is not a test, so any node that
// depends_only_on_test must be pinned

View File

@ -105,7 +105,6 @@ Mutex* G1MarkStackFreeList_lock = nullptr;
Monitor* G1OldGCCount_lock = nullptr;
Mutex* G1OldSets_lock = nullptr;
Mutex* G1ReviseYoungLength_lock = nullptr;
Monitor* G1RootRegionScan_lock = nullptr;
Mutex* G1RareEvent_lock = nullptr;
Mutex* G1Uncommit_lock = nullptr;
#endif
@ -216,7 +215,6 @@ void mutex_init() {
MUTEX_DEFN(G1MarkStackChunkList_lock , PaddedMutex , nosafepoint);
MUTEX_DEFN(G1MarkStackFreeList_lock , PaddedMutex , nosafepoint);
MUTEX_DEFN(G1OldSets_lock , PaddedMutex , nosafepoint);
MUTEX_DEFN(G1RootRegionScan_lock , PaddedMonitor, nosafepoint-1);
MUTEX_DEFN(G1Uncommit_lock , PaddedMutex , service-2);
}
#endif

View File

@ -101,7 +101,6 @@ extern Monitor* G1OldGCCount_lock; // in support of "concurrent" f
extern Mutex* G1OldSets_lock; // protects the G1 old region sets
extern Mutex* G1RareEvent_lock; // Synchronizes (rare) parallel GC operations.
extern Mutex* G1ReviseYoungLength_lock; // Protects access to young gen length revising operations.
extern Monitor* G1RootRegionScan_lock; // used to notify that the G1 CM threads have finished scanning the root regions
extern Mutex* G1Uncommit_lock; // protects the G1 uncommit list when not at safepoints
#endif

View File

@ -155,7 +155,7 @@ static const jlong MAX_RECHECK_INTERVAL = 1000;
//
// Succession is provided for by a policy of competitive handoff.
// The exiting thread does _not_ grant or pass ownership to the
// successor thread. (This is also referred to as "handoff succession").
// successor thread. (This is also referred to as "handoff succession").
// Instead the exiting thread releases ownership and possibly wakes
// a successor, so the successor can (re)compete for ownership of the lock.
//
@ -189,7 +189,7 @@ static const jlong MAX_RECHECK_INTERVAL = 1000;
//
// Once we have formed a doubly linked list it's easy to find the
// successor (A), wake it up, have it remove itself, and update the
// tail pointer, as seen in and 3) below.
// tail pointer, see 3) below.
//
// 3) entry_list ->F<=>E<=>D<=>C<=>B->null
// entry_list_tail ------------------^
@ -223,7 +223,7 @@ static const jlong MAX_RECHECK_INTERVAL = 1000;
// remove itself) or update the tail.
//
// * The monitor entry list operations avoid locks, but strictly speaking
// they're not lock-free. Enter is lock-free, exit is not.
// they're not lock-free. Enter is lock-free, exit is not.
// For a description of 'Methods and apparatus providing non-blocking access
// to a resource,' see U.S. Pat. No. 7844973.
//
@ -387,7 +387,7 @@ bool ObjectMonitor::try_lock_with_contention_mark(JavaThread* locking_thread, Ob
prev_owner = try_set_owner_from(DEFLATER_MARKER, locking_thread);
if (prev_owner == DEFLATER_MARKER) {
// We successfully cancelled the in-progress async deflation by
// changing owner from DEFLATER_MARKER to current. We now extend
// changing owner from DEFLATER_MARKER to current. We now extend
// the lifetime of the contention_mark (e.g. contentions++) here
// to prevent the deflater thread from winning the last part of
// the 2-part async deflation protocol after the regular
@ -633,14 +633,14 @@ void ObjectMonitor::enter_with_contention_mark(JavaThread* current, ObjectMonito
// The thread -- now the owner -- is back in vm mode.
// Report the glorious news via TI,DTrace and jvmstat.
// The probe effect is non-trivial. All the reportage occurs
// The probe effect is non-trivial. All the reportage occurs
// while we hold the monitor, increasing the length of the critical
// section. Amdahl's parallel speedup law comes vividly into play.
// section. Amdahl's parallel speedup law comes vividly into play.
//
// Another option might be to aggregate the events (thread local or
// per-monitor aggregation) and defer reporting until a more opportune
// time -- such as next time some thread encounters contention but has
// yet to acquire the lock. While spinning that thread could
// yet to acquire the lock. While spinning that thread could
// spinning we could increment JVMStat counters, etc.
DTRACE_MONITOR_PROBE(contended__entered, this, object(), current);
@ -739,11 +739,11 @@ bool ObjectMonitor::try_lock_or_add_to_entry_list(JavaThread* current, ObjectWai
return false;
}
// Interference - the CAS failed because _entry_list changed. Before
// Interference - the CAS failed because _entry_list changed. Before
// retrying the CAS retry taking the lock as it may now be free.
if (try_lock(current) == TryLockResult::Success) {
assert(!has_successor(current), "invariant");
assert(has_owner(current), "invariant");
assert(!has_successor(current), "invariant");
node->TState = ObjectWaiter::TS_RUN;
return true;
}
@ -953,8 +953,8 @@ bool ObjectMonitor::try_enter_fast(JavaThread* current, ObjectWaiter* current_no
// Try the lock - TATAS
if (try_lock(current) == TryLockResult::Success) {
assert(!has_successor(current), "invariant");
assert(has_owner(current), "invariant");
assert(!has_successor(current), "invariant");
return true;
}
@ -964,7 +964,7 @@ bool ObjectMonitor::try_enter_fast(JavaThread* current, ObjectWaiter* current_no
//
// If the _owner is ready but OFFPROC we could use a YieldTo()
// operation to donate the remainder of this thread's quantum
// to the owner. This has subtle but beneficial affinity
// to the owner. This has subtle but beneficial affinity
// effects.
if (try_spin(current)) {
@ -974,15 +974,15 @@ bool ObjectMonitor::try_enter_fast(JavaThread* current, ObjectWaiter* current_no
}
// The Spin failed -- Enqueue and park the thread ...
assert(!has_successor(current), "invariant");
assert(!has_owner(current), "invariant");
assert(!has_successor(current), "invariant");
// Enqueue "current" on ObjectMonitor's _entry_list.
//
// current_node acts as a proxy for current.
// As an aside, if were to ever rewrite the synchronization code mostly
// in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
// Java objects. This would avoid awkward lifecycle and liveness issues,
// Java objects. This would avoid awkward lifecycle and liveness issues,
// as well as eliminate a subset of ABA issues.
// TODO: eliminate ObjectWaiter and enqueue either Threads or Events.
@ -995,7 +995,7 @@ bool ObjectMonitor::try_enter_fast(JavaThread* current, ObjectWaiter* current_no
// This thread is now added to the _entry_list.
// The lock might have been released while this thread was occupied queueing
// itself onto _entry_list. To close the race and avoid "stranding" and
// itself onto _entry_list. To close the race and avoid "stranding" and
// progress-liveness failure the caller must resample-retry _owner before parking.
// Note the Dekker/Lamport duality: ST _entry_list; MEMBAR; LD Owner.
// In this case the ST-MEMBAR is accomplished with CAS() in try_lock_or_add_to_entry_list.
@ -1051,13 +1051,13 @@ void ObjectMonitor::enter_internal(JavaThread* current, ObjectWaiter* current_no
}
// Try again, but just so we distinguish between futile wakeups and
// successful wakeups. The following test isn't algorithmically
// successful wakeups. The following test isn't algorithmically
// necessary, but it helps us maintain sensible statistics.
if (try_lock(current) == TryLockResult::Success) {
break;
}
// The lock is still contested.
// The lock is still contended.
if (!reenter_path) {
// Assuming this is not a spurious wakeup we'll normally find _succ == current.
@ -1070,9 +1070,9 @@ void ObjectMonitor::enter_internal(JavaThread* current, ObjectWaiter* current_no
}
// We can find that we were unpark()ed and redesignated _succ while
// we were spinning. That's harmless. If we iterate and call park(),
// we were spinning. That's harmless. If we iterate and call park(),
// park() will consume the event and return immediately and we'll
// just spin again. This pattern can repeat, leaving _succ to simply
// just spin again. This pattern can repeat, leaving _succ to simply
// spin on a CPU.
if (has_successor(current)) {
@ -1092,9 +1092,9 @@ void ObjectMonitor::enter_internal(JavaThread* current, ObjectWaiter* current_no
// Current has acquired the lock -- Unlink current from the _entry_list.
unlink_after_acquire(current, current_node);
if (has_successor(current)) {
clear_successor();
// Note that we don't need to do OrderAccess::fence() after clearing
// _succ here, since we own the lock.
clear_successor();
}
// We've acquired ownership with CAS().
@ -1146,7 +1146,9 @@ bool ObjectMonitor::vthread_monitor_enter(JavaThread* current, ObjectWaiter* wai
if (try_lock_or_add_to_entry_list(current, node)) {
// We got the lock.
if (waiter == nullptr) delete node; // for Object.wait() don't delete yet
if (waiter == nullptr) {
delete node; // for Object.wait() don't delete yet
}
dec_unmounted_vthreads();
return true;
}
@ -1157,8 +1159,14 @@ bool ObjectMonitor::vthread_monitor_enter(JavaThread* current, ObjectWaiter* wai
if (try_lock(current) == TryLockResult::Success) {
assert(has_owner(current), "invariant");
unlink_after_acquire(current, node);
if (has_successor(current)) clear_successor();
if (waiter == nullptr) delete node; // for Object.wait() don't delete yet
if (has_successor(current)) {
// Note that we don't need to do OrderAccess::fence() after clearing
// _succ here, since we own the lock.
clear_successor();
}
if (waiter == nullptr) {
delete node; // for Object.wait() don't delete yet
}
dec_unmounted_vthreads();
return true;
}
@ -1182,7 +1190,9 @@ bool ObjectMonitor::resume_operation(JavaThread* current, ObjectWaiter* node, Co
if (node->is_wait() && !node->at_reenter()) {
bool acquired_monitor = vthread_wait_reenter(current, node, cont);
if (acquired_monitor) return true;
if (acquired_monitor) {
return true;
}
}
// Retry acquiring monitor...
@ -1196,7 +1206,9 @@ bool ObjectMonitor::resume_operation(JavaThread* current, ObjectWaiter* node, Co
}
oop vthread = current->vthread();
if (has_successor(current)) clear_successor();
if (has_successor(current)) {
clear_successor();
}
// Invariant: after clearing _succ a thread *must* retry acquiring the monitor.
OrderAccess::fence();
@ -1217,7 +1229,11 @@ void ObjectMonitor::vthread_epilog(JavaThread* current, ObjectWaiter* node) {
add_to_contentions(-1);
dec_unmounted_vthreads();
if (has_successor(current)) clear_successor();
if (has_successor(current)) {
// Note that we don't need to do OrderAccess::fence() after clearing
// _succ here, since we own the lock.
clear_successor();
}
guarantee(_recursions == 0, "invariant");
@ -1419,7 +1435,7 @@ void ObjectMonitor::unlink_after_acquire(JavaThread* current, ObjectWaiter* curr
// inopportune) reclamation of "this".
//
// We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ;
// There's one exception to the claim above, however. enter_internal() can call
// There's one exception to the claim above, however. enter_internal() can call
// exit() to drop a lock if the acquirer has been externally suspended.
// In that case exit() is called with _thread_state == _thread_blocked,
// but the monitor's _contentions field is > 0, which inhibits reclamation.
@ -1507,12 +1523,12 @@ void ObjectMonitor::exit(JavaThread* current, bool not_suspended) {
w = entry_list_tail(current);
// I'd like to write: guarantee (w->_thread != current).
// But in practice an exiting thread may find itself on the entry_list.
// Let's say thread T1 calls O.wait(). Wait() enqueues T1 on O's waitset and
// then calls exit(). Exit release the lock by setting O._owner to null.
// Let's say T1 then stalls. T2 acquires O and calls O.notify(). The
// Let's say thread T1 calls O.wait(). Wait() enqueues T1 on O's waitset and
// then calls exit(). Exit releases the lock by setting O._owner to null.
// Let's say T1 then stalls. T2 acquires O and calls O.notify(). The
// notify() operation moves T1 from O's waitset to O's entry_list. T2 then
// release the lock "O". T1 resumes immediately after the ST of null into
// _owner, above. T1 notices that the entry_list is populated, so it
// releases the lock "O". T1 resumes immediately after the ST of null into
// _owner, above. T1 notices that the entry_list is populated, so it
// reacquires the lock and then finds itself on the entry_list.
// Given all that, we have to tolerate the circumstance where "w" is
// associated with current.
@ -1534,26 +1550,26 @@ void ObjectMonitor::exit(JavaThread* current, bool not_suspended) {
// Normally the exiting thread is responsible for ensuring succession,
// but if this thread observes other successors are ready or other
// entering threads are spinning after it has stored null into _owner
// then it can exit without waking a successor. The existence of
// then it can exit without waking a successor. The existence of
// spinners or ready successors guarantees proper succession (liveness).
// Responsibility passes to the ready or running successors. The exiting
// thread delegates the duty. More precisely, if a successor already
// Responsibility passes to the ready or running successors. The exiting
// thread delegates the duty. More precisely, if a successor already
// exists this thread is absolved of the responsibility of waking
// (unparking) one.
// The _succ variable is critical to reducing futile wakeup frequency.
// _succ identifies the "heir presumptive" thread that has been made
// ready (unparked) but that has not yet run. We need only one such
// ready (unparked) but that has not yet run. We need only one such
// successor thread to guarantee progress.
// See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
// section 3.3 "Futile Wakeup Throttling" for details.
//
// Note that spinners in Enter() also set _succ non-null.
// In the current implementation spinners opportunistically set
// Note that spinners in enter(), try_enter_fast() and enter_internal() also
// set _succ non-null. In the current implementation spinners opportunistically set
// _succ so that exiting threads might avoid waking a successor.
// Which means that the exiting thread could exit immediately without
// waking a successor, if it observes a successor after it has dropped
// the lock. Note that the dropped lock needs to become visible to the
// the lock. Note that the dropped lock needs to become visible to the
// spinner.
if (_entry_list == nullptr || has_successor()) {
@ -1730,7 +1746,7 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
if (JvmtiExport::should_post_monitor_wait()) {
JvmtiExport::post_monitor_wait(current, object(), millis);
}
// post monitor waited event. Note that this is past-tense, we are done waiting.
// post monitor waited event. Note that this is past-tense, we are done waiting.
if (JvmtiExport::should_post_monitor_waited()) {
// Note: 'false' parameter is passed here because the
// wait was not timed out due to thread interrupt.
@ -1791,9 +1807,9 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
// Enter the waiting queue, which is a circular doubly linked list in this case
// but it could be a priority queue or any data structure.
// _wait_set_lock protects the wait queue. Normally the wait queue is accessed only
// _wait_set_lock protects the wait queue. Normally the wait queue is accessed only
// by the owner of the monitor *except* in the case where park()
// returns because of a timeout of interrupt. Contention is exceptionally rare
// returns because of a timeout of interrupt. Contention is exceptionally rare
// so we use a simple spin-lock instead of a heavier-weight blocking lock.
{
@ -1850,7 +1866,7 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
// written by the is thread. (perhaps the fetch might even be satisfied
// by a look-aside into the processor's own store buffer, although given
// the length of the code path between the prior ST and this load that's
// highly unlikely). If the following LD fetches a stale TS_WAIT value
// highly unlikely). If the following LD fetches a stale TS_WAIT value
// then we'll acquire the lock and then re-fetch a fresh TState value.
// That is, we fail toward safety.
@ -1868,7 +1884,12 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
// No other threads will asynchronously modify TState.
guarantee(node.TState != ObjectWaiter::TS_WAIT, "invariant");
OrderAccess::loadload();
if (has_successor(current)) clear_successor();
if (has_successor(current)) {
clear_successor();
// Note that we do not need a fence here, as, regardless of the path taken,
// there is a fence either in ThreadBlockInVM's destructor or
// right after a call to post_monitor_wait_event().
}
// Reentry phase -- reacquire the monitor.
// re-enter contended monitor after object.wait().
@ -2046,11 +2067,11 @@ bool ObjectMonitor::notify_internal(JavaThread* current) {
}
}
// _wait_set_lock protects the wait queue, not the entry_list. We could
// _wait_set_lock protects the wait queue, not the entry_list. We could
// move the add-to-entry_list operation, above, outside the critical section
// protected by _wait_set_lock. In practice that's not useful. With the
// protected by _wait_set_lock. In practice that's not useful. With the
// exception of wait() timeouts and interrupts the monitor owner
// is the only thread that grabs _wait_set_lock. There's almost no contention
// is the only thread that grabs _wait_set_lock. There's almost no contention
// on _wait_set_lock so it's not profitable to reduce the length of the
// critical section.
}
@ -2151,9 +2172,9 @@ void ObjectMonitor::vthread_wait(JavaThread* current, jlong millis, bool interru
// Enter the waiting queue, which is a circular doubly linked list in this case
// but it could be a priority queue or any data structure.
// _wait_set_lock protects the wait queue. Normally the wait queue is accessed only
// _wait_set_lock protects the wait queue. Normally the wait queue is accessed only
// by the owner of the monitor *except* in the case where park()
// returns because of a timeout or interrupt. Contention is exceptionally rare
// returns because of a timeout or interrupt. Contention is exceptionally rare
// so we use a simple spin-lock instead of a heavier-weight blocking lock.
{
@ -2243,25 +2264,25 @@ bool ObjectMonitor::vthread_wait_reenter(JavaThread* current, ObjectWaiter* node
// algorithm.
//
// Broadly, we can fix the spin frequency -- that is, the % of contended lock
// acquisition attempts where we opt to spin -- at 100% and vary the spin count
// acquisition attempts where we opt to spin -- at 100% and vary the spin count
// (duration) or we can fix the count at approximately the duration of
// a context switch and vary the frequency. Of course we could also
// a context switch and vary the frequency. Of course we could also
// vary both satisfying K == Frequency * Duration, where K is adaptive by monitor.
// For a description of 'Adaptive spin-then-block mutual exclusion in
// multi-threaded processing,' see U.S. Pat. No. 8046758.
//
// This implementation varies the duration "D", where D varies with
// the success rate of recent spin attempts. (D is capped at approximately
// length of a round-trip context switch). The success rate for recent
// length of a round-trip context switch). The success rate for recent
// spin attempts is a good predictor of the success rate of future spin
// attempts. The mechanism adapts automatically to varying critical
// attempts. The mechanism adapts automatically to varying critical
// section length (lock modality), system load and degree of parallelism.
// D is maintained per-monitor in _SpinDuration and is initialized
// optimistically. Spin frequency is fixed at 100%.
// optimistically. Spin frequency is fixed at 100%.
//
// Note that _SpinDuration is volatile, but we update it without locks
// or atomics. The code is designed so that _SpinDuration stays within
// a reasonable range even in the presence of races. The arithmetic
// or atomics. The code is designed so that _SpinDuration stays within
// a reasonable range even in the presence of races. The arithmetic
// operations on _SpinDuration are closed over the domain of legal values,
// so at worst a race will install and older but still legal value.
// At the very worst this introduces some apparent non-determinism.
@ -2269,28 +2290,28 @@ bool ObjectMonitor::vthread_wait_reenter(JavaThread* current, ObjectWaiter* node
// count are relatively short, even in the worst case, the effect is harmless.
//
// Care must be taken that a low "D" value does not become an
// an absorbing state. Transient spinning failures -- when spinning
// an absorbing state. Transient spinning failures -- when spinning
// is overall profitable -- should not cause the system to converge
// on low "D" values. We want spinning to be stable and predictable
// on low "D" values. We want spinning to be stable and predictable
// and fairly responsive to change and at the same time we don't want
// it to oscillate, become metastable, be "too" non-deterministic,
// or converge on or enter undesirable stable absorbing states.
//
// We implement a feedback-based control system -- using past behavior
// to predict future behavior. We face two issues: (a) if the
// to predict future behavior. We face two issues: (a) if the
// input signal is random then the spin predictor won't provide optimal
// results, and (b) if the signal frequency is too high then the control
// system, which has some natural response lag, will "chase" the signal.
// (b) can arise from multimodal lock hold times. Transient preemption
// (b) can arise from multimodal lock hold times. Transient preemption
// can also result in apparent bimodal lock hold times.
// Although sub-optimal, neither condition is particularly harmful, as
// in the worst-case we'll spin when we shouldn't or vice-versa.
// The maximum spin duration is rather short so the failure modes aren't bad.
// To be conservative, I've tuned the gain in system to bias toward
// _not spinning. Relatedly, the system can sometimes enter a mode where it
// "rings" or oscillates between spinning and not spinning. This happens
// _not spinning. Relatedly, the system can sometimes enter a mode where it
// "rings" or oscillates between spinning and not spinning. This happens
// when spinning is just on the cusp of profitability, however, so the
// situation is not dire. The state is benign -- there's no need to add
// situation is not dire. The state is benign -- there's no need to add
// hysteresis control to damp the transition rate between spinning and
// not spinning.
@ -2322,7 +2343,9 @@ inline static int adjust_down(int spin_duration) {
// Consider an AIMD scheme like: x -= (x >> 3) + 100
// This is globally sample and tends to damp the response.
x -= Knob_Penalty;
if (x < 0) { x = 0; }
if (x < 0) {
x = 0;
}
return x;
} else {
return spin_duration;
@ -2348,7 +2371,7 @@ bool ObjectMonitor::short_fixed_spin(JavaThread* current, int spin_count, bool a
// Spinning: Fixed frequency (100%), vary duration
bool ObjectMonitor::try_spin(JavaThread* current) {
// Dumb, brutal spin. Good for comparative measurements against adaptive spinning.
// Dumb, brutal spin. Good for comparative measurements against adaptive spinning.
int knob_fixed_spin = Knob_FixedSpin; // 0 (don't spin: default), 2000 good test
if (knob_fixed_spin > 0) {
return short_fixed_spin(current, knob_fixed_spin, false);
@ -2357,7 +2380,7 @@ bool ObjectMonitor::try_spin(JavaThread* current) {
// Admission control - verify preconditions for spinning
//
// We always spin a little bit, just to prevent _SpinDuration == 0 from
// becoming an absorbing state. Put another way, we spin briefly to
// becoming an absorbing state. Put another way, we spin briefly to
// sample, just in case the system load, parallelism, contention, or lock
// modality changed.
@ -2369,7 +2392,7 @@ bool ObjectMonitor::try_spin(JavaThread* current) {
//
// Consider the following alternative:
// Periodically set _SpinDuration = _SpinLimit and try a long/full
// spin attempt. "Periodically" might mean after a tally of
// spin attempt. "Periodically" might mean after a tally of
// the # of failed spin attempts (or iterations) reaches some threshold.
// This takes us into the realm of 1-out-of-N spinning, where we
// hold the duration constant but vary the frequency.
@ -2416,9 +2439,9 @@ bool ObjectMonitor::try_spin(JavaThread* current) {
// If this thread observes the monitor transition or flicker
// from locked to unlocked to locked, then the odds that this
// thread will acquire the lock in this spin attempt go down
// considerably. The same argument applies if the CAS fails
// considerably. The same argument applies if the CAS fails
// or if we observe _owner change from one non-null value to
// another non-null value. In such cases we might abort
// another non-null value. In such cases we might abort
// the spin without prejudice or apply a "penalty" to the
// spin count-down variable "ctr", reducing it by 100, say.
@ -2429,6 +2452,8 @@ bool ObjectMonitor::try_spin(JavaThread* current) {
// The CAS succeeded -- this thread acquired ownership
// Take care of some bookkeeping to exit spin state.
if (has_successor(current)) {
// Note that we don't need to do OrderAccess::fence() after clearing
// _succ here, since we own the lock.
clear_successor();
}
@ -2470,7 +2495,7 @@ bool ObjectMonitor::try_spin(JavaThread* current) {
if (has_successor(current)) {
clear_successor();
// Invariant: after setting succ=null a contending thread
// must recheck-retry _owner before parking. This usually happens
// must recheck-retry _owner before parking. This usually happens
// in the normal usage of try_spin(), but it's safest
// to make try_spin() as foolproof as possible.
OrderAccess::fence();

View File

@ -833,7 +833,7 @@ public sealed interface Linker permits AbstractLinker {
* <p>
* Captured state can be stored in, or retrieved from the capture state segment by
* constructing var handles from the {@linkplain #captureStateLayout capture state layout}.
* Some functions require this state the be initialized to a particular value before
* Some functions require this state to be initialized to a particular value before
* invoking the downcall.
* <p>
* The following example demonstrates the use of this linker option:

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1996, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1996, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -257,6 +257,11 @@ public class ZipOutputStream extends DeflaterOutputStream implements ZipConstant
default:
throw new ZipException("unsupported compression method");
}
// Verify that entry name and comment can be encoded
byte[] nameBytes = checkEncodable(e.name, "unmappable character in ZIP entry name");
if (e.comment != null) {
checkEncodable(e.comment, "unmappable character in ZIP entry comment");
}
if (! names.add(e.name)) {
throw new ZipException("duplicate entry: " + e.name);
}
@ -270,7 +275,16 @@ public class ZipOutputStream extends DeflaterOutputStream implements ZipConstant
}
current = new XEntry(e, written);
xentries.add(current);
writeLOC(current);
writeLOC(current, nameBytes);
}
// Throws ZipException if the given string cannot be encoded
private byte[] checkEncodable(String str, String msg) throws ZipException {
try {
return zc.getBytes(str);
} catch (IllegalArgumentException ex) {
throw (ZipException) new ZipException(msg).initCause(ex);
}
}
/**
@ -424,7 +438,7 @@ public class ZipOutputStream extends DeflaterOutputStream implements ZipConstant
/*
* Writes local file (LOC) header for specified entry.
*/
private void writeLOC(XEntry xentry) throws IOException {
private void writeLOC(XEntry xentry, byte[] nameBytes) throws IOException {
ZipEntry e = xentry.entry;
int flag = e.flag;
boolean hasZip64 = false;
@ -461,7 +475,6 @@ public class ZipOutputStream extends DeflaterOutputStream implements ZipConstant
writeInt(e.size); // uncompressed size
}
}
byte[] nameBytes = zc.getBytes(e.name);
writeShort(nameBytes.length);
int elenEXTT = 0; // info-zip extended timestamp

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -185,5 +185,12 @@ class UTF_32Coder {
doneBOM = !doBOM;
}
public boolean canEncode(char c) {
return !Character.isSurrogate(c);
}
public boolean canEncode(CharSequence cs) {
return Unicode.isValidUnicode(cs);
}
}
}

View File

@ -424,6 +424,10 @@ public final class UTF_8 extends Unicode {
return !Character.isSurrogate(c);
}
public boolean canEncode(CharSequence cs) {
return Unicode.isValidUnicode(cs);
}
public boolean isLegalReplacement(byte[] repl) {
return ((repl.length == 1 && repl[0] >= 0) ||
super.isLegalReplacement(repl));

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -95,4 +95,23 @@ abstract class Unicode extends Charset
|| (cs.name().equals("x-Johab"))
|| (cs.name().equals("Shift_JIS")));
}
static boolean isValidUnicode(CharSequence cs) {
int length = cs.length();
for (int i = 0; i < length;) {
char c = cs.charAt(i++);
if (Character.isHighSurrogate(c)) {
if (i == length) {
return false;
}
char low = cs.charAt(i++);
if (!Character.isLowSurrogate(low)) {
return false;
}
} else if (Character.isLowSurrogate(c)) {
return false;
}
}
return true;
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -108,4 +108,8 @@ public abstract class UnicodeEncoder extends CharsetEncoder {
public boolean canEncode(char c) {
return ! Character.isSurrogate(c);
}
public boolean canEncode(CharSequence cs) {
return Unicode.isValidUnicode(cs);
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1995, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1995, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -41,7 +41,6 @@ import javax.accessibility.AccessibleRole;
import javax.accessibility.AccessibleState;
import javax.accessibility.AccessibleStateSet;
import sun.awt.AppContext;
import sun.awt.SunToolkit;
import sun.awt.util.IdentityArrayList;
@ -1013,30 +1012,12 @@ public class Dialog extends Window {
if (!isModal()) {
conditionalShow(null, null);
} else {
AppContext showAppContext = AppContext.getAppContext();
AtomicLong time = new AtomicLong();
Component predictedFocusOwner = null;
try {
predictedFocusOwner = getMostRecentFocusOwner();
if (conditionalShow(predictedFocusOwner, time)) {
modalFilter = ModalEventFilter.createFilterForDialog(this);
// if this dialog is toolkit-modal, the filter should be added
// to all EDTs (for all AppContexts)
if (modalityType == ModalityType.TOOLKIT_MODAL) {
for (AppContext appContext : AppContext.getAppContexts()) {
if (appContext == showAppContext) {
continue;
}
EventQueue eventQueue = (EventQueue)appContext.get(AppContext.EVENT_QUEUE_KEY);
// it may occur that EDT for appContext hasn't been started yet, so
// we post an empty invocation event to trigger EDT initialization
eventQueue.postEvent(new InvocationEvent(this, () -> {}));
EventDispatchThread edt = eventQueue.getDispatchThread();
edt.addEventFilter(modalFilter);
}
}
modalityPushed();
try {
EventQueue eventQueue = Toolkit.getDefaultToolkit().getSystemEventQueue();
@ -1047,19 +1028,6 @@ public class Dialog extends Window {
} finally {
modalityPopped();
}
// if this dialog is toolkit-modal, its filter must be removed
// from all EDTs (for all AppContexts)
if (modalityType == ModalityType.TOOLKIT_MODAL) {
for (AppContext appContext : AppContext.getAppContexts()) {
if (appContext == showAppContext) {
continue;
}
EventQueue eventQueue = (EventQueue)appContext.get(AppContext.EVENT_QUEUE_KEY);
EventDispatchThread edt = eventQueue.getDispatchThread();
edt.removeEventFilter(modalFilter);
}
}
}
} finally {
if (predictedFocusOwner != null) {
@ -1482,8 +1450,7 @@ public class Dialog extends Window {
return getDocumentRoot() == w.getDocumentRoot();
}
case APPLICATION_MODAL:
return !w.isModalExcluded(ModalExclusionType.APPLICATION_EXCLUDE) &&
(appContext == w.appContext);
return !w.isModalExcluded(ModalExclusionType.APPLICATION_EXCLUDE);
case TOOLKIT_MODAL:
return !w.isModalExcluded(ModalExclusionType.TOOLKIT_EXCLUDE);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -43,11 +43,6 @@ import java.io.Writer;
* for setting and getting all times and for doing whatever
* analysis is interesting; this class is merely a central container
* for those timing values.
* Note that, due to the variables in this class being static,
* use of particular time values by multiple AppContexts will cause
* confusing results. For example, if two contexts run
* simultaneously, the initTime for those will collide
* and the results may be undefined.
* <P>
* To automatically track startup performance in an app
* use the command-line parameter sun.perflog as follows:<BR>

View File

@ -38,11 +38,12 @@ public final class ScriptRunData {
private static final int CHAR_START = 0;
private static final int CHAR_LIMIT = 0x110000;
private static int cache = 0;
private static volatile int cache = 0;
public static int getScript(int cp) {
int lcache = cache;
// optimize for runs of characters in the same script
if (cp >= data[cache] && cp < data[cache+2]) {
return data[cache+1];
if (cp >= data[lcache] && cp < data[lcache+2]) {
return data[lcache+1];
}
if ((cp >= CHAR_START) && (cp < CHAR_LIMIT)) {
int probe = dataPower;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2009, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2009, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,6 +31,7 @@ import java.awt.Color;
import java.awt.Component;
import java.awt.Container;
import java.awt.Dimension;
import java.awt.EventQueue;
import java.awt.Font;
import java.awt.Frame;
import java.awt.GridLayout;
@ -91,7 +92,7 @@ public abstract class InfoWindow extends Window {
// Must be executed on EDT.
@SuppressWarnings("deprecation")
protected void show(Point corner, int indent) {
assert SunToolkit.isDispatchThreadForAppContext(this);
assert EventQueue.isDispatchThread();
pack();
@ -464,7 +465,7 @@ public abstract class InfoWindow extends Window {
ActionEvent aev = new ActionEvent(target, ActionEvent.ACTION_PERFORMED,
liveArguments.getActionCommand(),
e.getWhen(), e.getModifiers());
XToolkit.postEvent(XToolkit.targetToAppContext(aev.getSource()), aev);
XToolkit.postEvent(aev);
}
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -114,8 +114,6 @@ public abstract class XBaseMenuWindow extends XWindow {
protected Point grabInputPoint = null;
protected boolean hasPointerMoved = false;
private AppContext disposeAppContext;
/************************************************
*
* Mapping data
@ -175,8 +173,6 @@ public abstract class XBaseMenuWindow extends XWindow {
XBaseMenuWindow() {
super(new XCreateWindowParams(new Object[] {
DELAYED, Boolean.TRUE}));
disposeAppContext = AppContext.getAppContext();
}
/************************************************
@ -920,7 +916,7 @@ public abstract class XBaseMenuWindow extends XWindow {
public void dispose() {
setDisposed(true);
SunToolkit.invokeLaterOnAppContext(disposeAppContext, new Runnable() {
SunToolkit.invokeLater(new Runnable() {
public void run() {
doDispose();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -201,7 +201,7 @@ public final class XEmbedChildProxyPeer implements ComponentPeer, XEventDispatch
public void updateCursorImmediately() {}
void postEvent(AWTEvent event) {
XToolkit.postEvent(XToolkit.targetToAppContext(proxy), event);
XToolkit.postEvent(event);
}
boolean simulateMotifRequestFocus(Component lightweightChild, boolean temporary,
@ -323,9 +323,9 @@ public final class XEmbedChildProxyPeer implements ComponentPeer, XEventDispatch
}
void childResized() {
XToolkit.postEvent(XToolkit.targetToAppContext(proxy), new ComponentEvent(proxy, ComponentEvent.COMPONENT_RESIZED));
XToolkit.postEvent(new ComponentEvent(proxy, ComponentEvent.COMPONENT_RESIZED));
container.childResized(proxy);
// XToolkit.postEvent(XToolkit.targetToAppContext(proxy), new InvocationEvent(proxy, new Runnable() {
// XToolkit.postEvent(new InvocationEvent(proxy, new Runnable() {
// public void run() {
// getTopLevel(proxy).invalidate();
// getTopLevel(proxy).pack();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -152,7 +152,7 @@ final class XTaskbarPeer implements TaskbarPeer {
mi.getActionCommand());
try {
XToolkit.awtLock();
XToolkit.postEvent(XToolkit.targetToAppContext(ae.getSource()), ae);
XToolkit.postEvent(ae);
} finally {
XToolkit.awtUnlock();
}

View File

@ -625,14 +625,8 @@ public final class XToolkit extends UNIXToolkit implements Runnable {
while(true) {
// Fix for 6829923: we should gracefully handle toolkit thread interruption
if (Thread.currentThread().isInterrupted()) {
// We expect interruption from the AppContext.dispose() method only.
// If the thread is interrupted from another place, let's skip it
// for compatibility reasons. Probably some time later we'll remove
// the check for AppContext.isDisposed() and will unconditionally
// break the loop here.
if (AppContext.getAppContext().isDisposed()) {
break;
}
// for compatibility reasons.
}
awtLock();
try {
@ -2054,14 +2048,6 @@ public final class XToolkit extends UNIXToolkit implements Runnable {
(exclusionType == Dialog.ModalExclusionType.TOOLKIT_EXCLUDE);
}
static EventQueue getEventQueue(Object target) {
AppContext appContext = targetToAppContext(target);
if (appContext != null) {
return (EventQueue)appContext.get(AppContext.EVENT_QUEUE_KEY);
}
return null;
}
static void removeSourceEvents(EventQueue queue,
Object source,
boolean removeAllEvents) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -269,7 +269,7 @@ public final class XTrayIconPeer implements TrayIconPeer,
@Override
public void dispose() {
if (SunToolkit.isDispatchThreadForAppContext(target)) {
if (EventQueue.isDispatchThread()) {
disposeOnEDT();
} else {
try {
@ -329,7 +329,7 @@ public final class XTrayIconPeer implements TrayIconPeer,
}
};
if (!SunToolkit.isDispatchThreadForAppContext(target)) {
if (!EventQueue.isDispatchThread()) {
SunToolkit.executeOnEventHandlerThread(target, r);
} else {
r.run();
@ -355,7 +355,7 @@ public final class XTrayIconPeer implements TrayIconPeer,
if (isDisposed())
return;
assert SunToolkit.isDispatchThreadForAppContext(target);
assert EventQueue.isDispatchThread();
PopupMenu newPopup = target.getPopupMenu();
if (popup != newPopup) {
@ -476,7 +476,7 @@ public final class XTrayIconPeer implements TrayIconPeer,
// other class tries to cast source field to Component).
// We already filter DRAG events out (CR 6565779).
e.setSource(xtiPeer.target);
XToolkit.postEvent(XToolkit.targetToAppContext(e.getSource()), e);
XToolkit.postEvent(e);
}
@Override
@SuppressWarnings("deprecation")
@ -487,7 +487,7 @@ public final class XTrayIconPeer implements TrayIconPeer,
ActionEvent aev = new ActionEvent(xtiPeer.target, ActionEvent.ACTION_PERFORMED,
xtiPeer.target.getActionCommand(), e.getWhen(),
e.getModifiers());
XToolkit.postEvent(XToolkit.targetToAppContext(aev.getSource()), aev);
XToolkit.postEvent(aev);
}
if (xtiPeer.balloon.isVisible()) {
xtiPeer.balloon.hide();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -434,7 +434,7 @@ public abstract class X11InputMethodBase extends InputMethodAdapter {
if (source != null) {
InputMethodEvent event = new InputMethodEvent(source,
id, when, text, committedCharacterCount, caret, visiblePosition);
SunToolkit.postEvent(SunToolkit.targetToAppContext(source), (AWTEvent)event);
SunToolkit.postEvent((AWTEvent)event);
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,7 +38,6 @@ struct ComponentIDs {
jfieldID graphicsConfig;
jfieldID name;
jfieldID isProxyActive;
jfieldID appContext;
jmethodID getParent;
jmethodID getLocationOnScreen;
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -195,10 +195,6 @@ Java_java_awt_Component_initIDs
"Z");
CHECK_NULL(componentIDs.isProxyActive);
componentIDs.appContext =
(*env)->GetFieldID(env, cls, "appContext",
"Lsun/awt/AppContext;");
(*env)->DeleteLocalRef(env, keyclass);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1996, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1996, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -178,7 +178,6 @@ jfieldID AwtComponent::parentID;
jfieldID AwtComponent::graphicsConfigID;
jfieldID AwtComponent::peerGCID;
jfieldID AwtComponent::focusableID;
jfieldID AwtComponent::appContextID;
jfieldID AwtComponent::cursorID;
jfieldID AwtComponent::hwndID;
@ -6573,11 +6572,6 @@ Java_java_awt_Component_initIDs(JNIEnv *env, jclass cls)
DASSERT(AwtComponent::focusableID);
CHECK_NULL(AwtComponent::focusableID);
AwtComponent::appContextID = env->GetFieldID(cls, "appContext",
"Lsun/awt/AppContext;");
DASSERT(AwtComponent::appContextID);
CHECK_NULL(AwtComponent::appContextID);
AwtComponent::peerGCID = env->GetFieldID(peerCls, "winGraphicsConfig",
"Lsun/awt/Win32GraphicsConfig;");
DASSERT(AwtComponent::peerGCID);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1996, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1996, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -112,7 +112,6 @@ public:
static jfieldID graphicsConfigID;
static jfieldID peerGCID;
static jfieldID focusableID;
static jfieldID appContextID;
static jfieldID hwndID;
static jmethodID getFontMID;

View File

@ -28,6 +28,7 @@ package com.sun.source.util;
import com.sun.source.doctree.DocCommentTree;
import com.sun.source.doctree.DocTree;
import com.sun.source.tree.CompilationUnitTree;
import com.sun.source.tree.Tree;
/**
* Provides methods to obtain the position of a DocTree within a javadoc comment.
@ -59,8 +60,31 @@ public interface DocSourcePositions extends SourcePositions {
* position is being sought
* @param tree tree for which a position is sought
* @return the start position of tree
* @deprecated use {@link #getStartPosition(DocCommentTree, DocTree)} instead
*/
long getStartPosition(CompilationUnitTree file, DocCommentTree comment, DocTree tree);
@Deprecated(since = "27", forRemoval = true)
default long getStartPosition(CompilationUnitTree file, DocCommentTree comment, DocTree tree) {
return getStartPosition(comment, tree);
}
/**
* {@return the starting position of the given {@link Tree}. If the starting position is not available, returns
* {@link javax.tools.Diagnostic#NOPOS}}
*
* <p>The given tree should be under the given comment tree. The returned position must be at the start of the
* yield of this tree, that is for any sub-tree of this tree, the following must hold:
*
* <p>
* {@code getStartPosition(comment, tree) <= getStartPosition(comment, subtree)} or <br>
* {@code getStartPosition(comment, tree) == NOPOS} or <br>
* {@code getStartPosition(comment, subtree) == NOPOS}
* </p>
*
* @param comment the comment tree that encloses the tree for which the
* position is being sought
* @param tree tree for which a position is sought
*/
long getStartPosition(DocCommentTree comment, DocTree tree);
/**
* Returns the ending position of the tree within the comment within the file. If tree is not found within
@ -91,7 +115,38 @@ public interface DocSourcePositions extends SourcePositions {
* position is being sought
* @param tree tree for which a position is sought
* @return the end position of tree
* @deprecated use {@link #getEndPosition(DocCommentTree, DocTree)} instead
*/
long getEndPosition(CompilationUnitTree file, DocCommentTree comment, DocTree tree);
@Deprecated(since = "27", forRemoval = true)
default long getEndPosition(CompilationUnitTree file, DocCommentTree comment, DocTree tree) {
return getEndPosition(comment, tree);
}
/**
* {@return the ending position of the given {@link Tree}. If the ending position is not available, returns
* {@link javax.tools.Diagnostic#NOPOS}}
*
* <p>The given tree should be under the given comment tree. The returned position must be at the end of the yield
* of this tree, that is for any sub-tree of this tree, the following must hold:
*
* <p>
* {@code getEndPosition(comment, tree) >= getEndPosition(comment, subtree)} or <br>
* {@code getEndPosition(comment, tree) == NOPOS} or <br>
* {@code getEndPosition(comment, subtree) == NOPOS}
* </p>
*
* In addition, the following must hold:
*
* <p>
* {@code getStartPosition(comment, tree) <= getEndPosition(comment, tree)} or <br>
* {@code getStartPosition(comment, tree) == NOPOS} or <br>
* {@code getEndPosition(comment, tree) == NOPOS}
* </p>
*
* @param comment the comment tree that encloses the tree for which the
* position is being sought
* @param tree tree for which a position is sought
*/
long getEndPosition(DocCommentTree comment, DocTree tree);
}

View File

@ -53,8 +53,29 @@ public interface SourcePositions {
* @param file CompilationUnit in which to find tree
* @param tree tree for which a position is sought
* @return the start position of tree
* @deprecated use {@link #getStartPosition(Tree)} instead
*/
long getStartPosition(CompilationUnitTree file, Tree tree);
@Deprecated(since = "27", forRemoval = true)
default long getStartPosition(CompilationUnitTree file, Tree tree) {
return getStartPosition(tree);
}
/**
* {@return the starting position of the given {@link Tree}, or if the starting position is not available, returns
* {@link javax.tools.Diagnostic#NOPOS}}
*
* <p>The returned position must be at the start of the yield of this tree, that is for any sub-tree of this tree,
* the following must hold:
*
* <p>
* {@code getStartPosition(tree) <= getStartPosition(subtree)} or <br>
* {@code getStartPosition(tree) == NOPOS} or <br>
* {@code getStartPosition(subtree) == NOPOS}
* </p>
*
* @param tree tree for which a position is sought
*/
long getStartPosition(Tree tree);
/**
* Returns the ending position of tree within file. If tree is not found within
@ -80,7 +101,35 @@ public interface SourcePositions {
* @param file CompilationUnit in which to find tree
* @param tree tree for which a position is sought
* @return the end position of tree
* @deprecated use {@link #getEndPosition(Tree)} instead
*/
long getEndPosition(CompilationUnitTree file, Tree tree);
@Deprecated(since = "27", forRemoval = true)
default long getEndPosition(CompilationUnitTree file, Tree tree) {
return getEndPosition(tree);
}
/**
* {@return the ending position of the given {@link Tree}. If the ending position is not available,
* returns {@link javax.tools.Diagnostic#NOPOS}}
*
* <p>The returned position must be at the end of the yield of this tree, that is for any sub-tree of this tree,
* the following must hold:
*
* <p>
* {@code getEndPosition(tree) >= getEndPosition(subtree)} or <br>
* {@code getEndPosition(tree) == NOPOS} or <br>
* {@code getEndPosition(subtree) == NOPOS}
* </p>
*
* In addition, the following must hold:
*
* <p>
* {@code getStartPosition(tree) <= getEndPosition(tree)} or <br>
* {@code getStartPosition(tree) == NOPOS} or <br>
* {@code getEndPosition(tree) == NOPOS}
* </p>
*
* @param tree tree for which a position is sought
*/
long getEndPosition(Tree tree);
}

View File

@ -233,24 +233,24 @@ public class JavacTrees extends DocTrees {
public DocSourcePositions getSourcePositions() {
return new DocSourcePositions() {
@Override @DefinedBy(Api.COMPILER_TREE)
public long getStartPosition(CompilationUnitTree file, Tree tree) {
public long getStartPosition(Tree tree) {
return TreeInfo.getStartPos((JCTree) tree);
}
@Override @DefinedBy(Api.COMPILER_TREE)
public long getEndPosition(CompilationUnitTree file, Tree tree) {
public long getEndPosition(Tree tree) {
return TreeInfo.getEndPos((JCTree) tree);
}
@Override @DefinedBy(Api.COMPILER_TREE)
public long getStartPosition(CompilationUnitTree file, DocCommentTree comment, DocTree tree) {
public long getStartPosition(DocCommentTree comment, DocTree tree) {
DCDocComment dcComment = (DCDocComment) comment;
DCTree dcTree = (DCTree) tree;
return dcComment.getSourcePosition(dcTree.getStartPosition());
}
@Override @DefinedBy(Api.COMPILER_TREE)
public long getEndPosition(CompilationUnitTree file, DocCommentTree comment, DocTree tree) {
public long getEndPosition(DocCommentTree comment, DocTree tree) {
DCDocComment dcComment = (DCDocComment) comment;
DCTree dcTree = (DCTree) tree;
return dcComment.getSourcePosition(dcTree.getEndPosition());

View File

@ -1618,7 +1618,7 @@ public class Utils {
CompilationUnitTree cu = path.getCompilationUnit();
LineMap lineMap = cu.getLineMap();
DocSourcePositions spos = docTrees.getSourcePositions();
long pos = spos.getStartPosition(cu, path.getLeaf());
long pos = spos.getStartPosition(path.getLeaf());
return lineMap.getLineNumber(pos);
}

View File

@ -211,7 +211,7 @@ public class Env {
long getStartPos(TreePath p) {
SourcePositions sp = trees.getSourcePositions();
return sp.getStartPosition(p.getCompilationUnit(), p.getLeaf());
return sp.getStartPosition(p.getLeaf());
}
boolean shouldCheck(CompilationUnitTree unit) {

View File

@ -582,9 +582,9 @@ public class JavadocLog extends Log implements Reporter {
}
CompilationUnitTree compUnit = tp.getCompilationUnit();
JCTree tree = (JCTree) tp.getLeaf();
int start = (int) posns.getStartPosition(compUnit, tree);
int start = (int) posns.getStartPosition(tree);
int pos = tree.getPreferredPosition();
int end = (int) posns.getEndPosition(compUnit, tree);
int end = (int) posns.getEndPosition(tree);
return createDiagnosticPosition(tree, start, pos, end);
}

View File

@ -0,0 +1,45 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.jpackage.internal;
public interface EnvironmentProvider {
String getProperty(String propertyName);
String getenv(String envVarName);
public static EnvironmentProvider DEFAULT = new EnvironmentProvider() {
@Override
public String getenv(String envVarName) {
return System.getenv(envVarName);
}
@Override
public String getProperty(String propertyName) {
return System.getProperty(propertyName);
}
};
}

View File

@ -65,6 +65,14 @@ public final class Globals {
return this;
}
public EnvironmentProvider system() {
return this.<EnvironmentProvider>findProperty(EnvironmentProvider.class).orElse(EnvironmentProvider.DEFAULT);
}
public Globals system(EnvironmentProvider v) {
return setProperty(EnvironmentProvider.class, v);
}
Log.Logger logger() {
return logger;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2024, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,6 +24,7 @@
*/
package jdk.jpackage.internal.util;
import java.nio.file.InvalidPathException;
import java.nio.file.Path;
import java.util.Objects;
import java.util.Optional;
@ -31,18 +32,26 @@ import java.util.function.UnaryOperator;
public final class PathUtils {
private PathUtils() {
}
public static String getSuffix(Path path) {
String filename = replaceSuffix(path.getFileName(), null).toString();
return path.getFileName().toString().substring(filename.length());
}
public static Path addSuffix(Path path, String suffix) {
Objects.requireNonNull(path);
Objects.requireNonNull(suffix);
Path parent = path.getParent();
String filename = path.getFileName().toString() + suffix;
return parent != null ? parent.resolve(filename) : Path.of(filename);
}
public static Path replaceSuffix(Path path, String suffix) {
Objects.requireNonNull(path);
Path parent = path.getParent();
String filename = path.getFileName().toString().replaceAll("\\.[^.]*$",
"") + Optional.ofNullable(suffix).orElse("");
@ -59,18 +68,22 @@ public final class PathUtils {
}
public static Path normalizedAbsolutePath(Path path) {
if (path != null) {
return mapNullablePath(_ -> {
return path.normalize().toAbsolutePath();
} else {
return null;
}
}, path);
}
public static String normalizedAbsolutePathString(Path path) {
if (path != null) {
return normalizedAbsolutePath(path).toString();
} else {
return null;
}
return Optional.ofNullable(normalizedAbsolutePath(path)).map(Path::toString).orElse(null);
}
public static Optional<Path> asPath(String value) {
return Optional.ofNullable(value).map(v -> {
try {
return Path.of(v);
} catch (InvalidPathException ex) {
return null;
}
});
}
}

View File

@ -23,14 +23,14 @@
* questions.
*/
package jdk.jpackage.internal;
import static java.util.stream.Collectors.groupingBy;
import static java.util.stream.Collectors.toMap;
import static java.util.stream.Collectors.toSet;
import java.io.IOException;
import java.nio.file.FileSystems;
import java.nio.file.Files;
import java.nio.file.InvalidPathException;
import java.nio.file.Path;
import java.nio.file.PathMatcher;
import java.text.MessageFormat;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
@ -38,12 +38,13 @@ import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.function.Supplier;
import java.util.stream.Stream;
import jdk.jpackage.internal.WixToolset.WixToolsetType;
import jdk.jpackage.internal.model.ConfigException;
import jdk.jpackage.internal.model.DottedVersion;
import jdk.jpackage.internal.util.PathUtils;
import jdk.jpackage.internal.util.Slot;
/**
* WiX tool.
@ -58,16 +59,20 @@ public enum WixTool {
this.minimalVersion = minimalVersion;
}
interface ToolInfo {
Path fileName() {
return toolFileName;
}
sealed interface ToolInfo {
Path path();
DottedVersion version();
}
interface CandleInfo extends ToolInfo {
sealed interface CandleInfo extends ToolInfo {
boolean fips();
}
private record DefaultToolInfo(Path path, DottedVersion version) implements ToolInfo {
record DefaultToolInfo(Path path, DottedVersion version) implements ToolInfo {
DefaultToolInfo {
Objects.requireNonNull(path);
Objects.requireNonNull(version);
@ -76,9 +81,14 @@ public enum WixTool {
DefaultToolInfo(Path path, String version) {
this(path, DottedVersion.lazy(version));
}
@Override
public String toString() {
return String.format("%s|ver=%s", path, version);
}
}
private record DefaultCandleInfo(Path path, DottedVersion version, boolean fips) implements CandleInfo {
record DefaultCandleInfo(Path path, DottedVersion version, boolean fips) implements CandleInfo {
DefaultCandleInfo {
Objects.requireNonNull(path);
Objects.requireNonNull(version);
@ -87,25 +97,42 @@ public enum WixTool {
DefaultCandleInfo(ToolInfo info, boolean fips) {
this(info.path(), info.version(), fips);
}
@Override
public String toString() {
var sb = new StringBuffer();
sb.append(path);
if (fips) {
sb.append("|fips");
}
sb.append("|ver=").append(version);
return sb.toString();
}
}
static WixToolset createToolset() {
return createToolset(WixTool::findWixInstallDirs, true);
}
static WixToolset createToolset(Supplier<List<Path>> wixInstallDirs, boolean searchInPath) {
Function<List<ToolLookupResult>, Map<WixTool, ToolInfo>> conv = lookupResults -> {
return lookupResults.stream().filter(ToolLookupResult::isValid).collect(Collectors.
groupingBy(lookupResult -> {
return lookupResults.stream().filter(ToolLookupResult::isValid).collect(groupingBy(lookupResult -> {
return lookupResult.info().version().toString();
})).values().stream().filter(sameVersionLookupResults -> {
Set<WixTool> sameVersionTools = sameVersionLookupResults.stream().map(
ToolLookupResult::tool).collect(Collectors.toSet());
if (sameVersionTools.equals(Set.of(Candle3)) || sameVersionTools.equals(Set.of(
Light3))) {
var sameVersionTools = sameVersionLookupResults.stream()
.map(ToolLookupResult::tool)
.collect(toSet());
if (sameVersionTools.equals(Set.of(Candle3)) || sameVersionTools.equals(Set.of(Light3))) {
// There is only one tool from WiX v3 toolset of some version available. Discard it.
return false;
} else {
return true;
}
}).flatMap(List::stream).collect(Collectors.toMap(ToolLookupResult::tool,
ToolLookupResult::info, (ToolInfo x, ToolInfo y) -> {
}).flatMap(List::stream).collect(toMap(
ToolLookupResult::tool,
ToolLookupResult::info,
(ToolInfo x, ToolInfo y) -> {
return Stream.of(x, y).sorted(Comparator.comparing((ToolInfo toolInfo) -> {
return toolInfo.version().toComponentsString();
}).reversed()).findFirst().get();
@ -115,58 +142,53 @@ public enum WixTool {
Function<List<ToolLookupResult>, Optional<WixToolset>> createToolset = lookupResults -> {
var tools = conv.apply(lookupResults);
// Try to build a toolset found in the PATH and in known locations.
return Stream.of(WixToolsetType.values()).map(toolsetType -> {
return WixToolset.create(toolsetType.getTools(), tools);
}).filter(Objects::nonNull).findFirst();
return Stream.of(WixToolsetType.values()).flatMap(toolsetType -> {
return WixToolset.create(toolsetType, tools).stream();
}).findFirst();
};
var toolsInPath = Stream.of(values()).map(tool -> {
return ToolLookupResult.lookup(tool, Optional.empty());
}).filter(Optional::isPresent).map(Optional::get).toList();
final List<ToolLookupResult> toolsInPath;
if (searchInPath) {
toolsInPath = Stream.of(values()).flatMap(tool -> {
return ToolLookupResult.lookup(tool, Optional.empty()).stream();
}).toList();
} else {
toolsInPath = List.of();
}
// Try to build a toolset from tools in the PATH first.
var toolset = createToolset.apply(toolsInPath);
if (toolset.isPresent()) {
return toolset.get();
}
var toolset = createToolset.apply(toolsInPath).orElseGet(() -> {
// Look up for WiX tools in known locations.
var toolsInKnownWiXDirs = wixInstallDirs.get().stream().flatMap(dir -> {
return Stream.of(values()).flatMap(tool -> {
return ToolLookupResult.lookup(tool, Optional.of(dir)).stream();
});
}).toList();
// Look up for WiX tools in known locations.
var toolsInKnownWiXDirs = findWixInstallDirs().stream().map(dir -> {
return Stream.of(values()).map(tool -> {
return ToolLookupResult.lookup(tool, Optional.of(dir));
// Build a toolset found in the PATH and in known locations.
var allValidFoundTools = Stream.of(toolsInPath, toolsInKnownWiXDirs)
.flatMap(List::stream)
.filter(ToolLookupResult::isValid)
.toList();
return createToolset.apply(allValidFoundTools).orElseThrow(() -> {
return new ConfigException(
I18N.getString("error.no-wix-tools"),
I18N.getString("error.no-wix-tools.advice"));
});
}).flatMap(Function.identity()).filter(Optional::isPresent).map(Optional::get).toList();
});
// Build a toolset found in the PATH and in known locations.
var allFoundTools = Stream.of(toolsInPath, toolsInKnownWiXDirs).flatMap(List::stream).filter(
ToolLookupResult::isValid).toList();
toolset = createToolset.apply(allFoundTools);
if (toolset.isPresent()) {
return toolset.get();
} else if (allFoundTools.isEmpty()) {
throw new ConfigException(I18N.getString("error.no-wix-tools"), I18N.getString(
"error.no-wix-tools.advice"));
} else {
var toolOldVerErr = allFoundTools.stream().map(lookupResult -> {
if (lookupResult.versionTooOld) {
return new ConfigException(MessageFormat.format(I18N.getString(
"message.wrong-tool-version"), lookupResult.info().path(),
lookupResult.info().version(), lookupResult.tool().minimalVersion),
I18N.getString("error.no-wix-tools.advice"));
} else {
return null;
}
}).filter(Objects::nonNull).findAny();
if (toolOldVerErr.isPresent()) {
throw toolOldVerErr.get();
} else {
throw new ConfigException(I18N.getString("error.no-wix-tools"), I18N.getString(
"error.no-wix-tools.advice"));
}
}
return toolset;
}
private record ToolLookupResult(WixTool tool, ToolInfo info, boolean versionTooOld) {
static List<Path> findWixInstallDirs() {
return Stream.of(
findWixCurrentInstallDirs(),
findWix3InstallDirs()
).flatMap(List::stream).toList();
}
private record ToolLookupResult(WixTool tool, ToolInfo info) {
ToolLookupResult {
Objects.requireNonNull(tool);
@ -177,58 +199,59 @@ public enum WixTool {
Objects.requireNonNull(tool);
Objects.requireNonNull(lookupDir);
final Path toolPath = lookupDir.map(p -> p.resolve(
tool.toolFileName)).orElse(tool.toolFileName);
final Path toolPath = lookupDir.map(p -> {
return p.resolve(tool.toolFileName);
}).orElse(tool.toolFileName);
final boolean[] tooOld = new boolean[1];
final String[] parsedVersion = new String[1];
final var validator = new ToolValidator(toolPath).setMinimalVersion(tool.minimalVersion);
final var validator = new ToolValidator(toolPath)
.setMinimalVersion(tool.minimalVersion)
.setToolOldVersionErrorHandler((name, version) -> {
tooOld[0] = true;
return null;
});
final Function<Stream<String>, String> versionParser;
if (Set.of(Candle3, Light3).contains(tool)) {
final String printVersionArg;
if (tool == Candle3) {
final var printVersionArg = switch (tool) {
case Candle3 -> {
// Add '-fips' to make "candle.exe" print help message and return
// 0 exit code instead of returning error exit code and printing
// "error CNDL0308 : The Federal Information Processing Standard (FIPS) appears to be enabled on the machine..."
// error message if FIPS is enabled.
// If FIPS is disabled, passing '-fips' parameter still makes
// "candle.exe" print help message and return 0 exit code.
printVersionArg = "-fips";
} else {
printVersionArg = "-?";
yield "-fips";
}
validator.setCommandLine(printVersionArg);
versionParser = output -> {
String firstLineOfOutput = output.findFirst().orElse("");
int separatorIdx = firstLineOfOutput.lastIndexOf(' ');
if (separatorIdx == -1) {
return null;
}
return firstLineOfOutput.substring(separatorIdx + 1);
};
} else {
validator.setCommandLine("--version");
versionParser = output -> {
return output.findFirst().orElse("");
};
}
case Light3 -> {
yield "-?";
}
default -> {
yield "--version";
}
};
validator.setCommandLine(printVersionArg);
final Function<Stream<String>, Optional<String>> versionParser = switch (tool) {
case Candle3, Light3 -> {
yield output -> {
return output.findFirst().map(firstLineOfOutput -> {
int separatorIdx = firstLineOfOutput.lastIndexOf(' ');
if (separatorIdx == -1) {
return null;
}
return firstLineOfOutput.substring(separatorIdx + 1);
});
};
}
default -> {
yield output -> {
return output.findFirst();
};
}
};
final var parsedVersion = Slot.<String>createEmpty();
validator.setVersionParser(output -> {
parsedVersion[0] = versionParser.apply(output);
return parsedVersion[0];
versionParser.apply(output).ifPresent(parsedVersion::set);
return parsedVersion.find().orElse(null);
});
if (validator.validate() == null) {
// Tool found
ToolInfo info = new DefaultToolInfo(toolPath, parsedVersion[0]);
ToolInfo info = new DefaultToolInfo(toolPath, parsedVersion.get());
if (tool == Candle3) {
// Detect FIPS mode
var fips = false;
@ -242,63 +265,52 @@ public enum WixTool {
}
}
} catch (IOException ex) {
Log.verbose(ex);
}
info = new DefaultCandleInfo(info, fips);
}
return Optional.of(new ToolLookupResult(tool, info, tooOld[0]));
return Optional.of(new ToolLookupResult(tool, info));
} else {
return Optional.empty();
}
}
boolean versionTooOld() {
return DottedVersion.compareComponents(info.version(), tool.minimalVersion) < 0;
}
boolean isValid() {
return !versionTooOld;
return !versionTooOld();
}
}
private static Path getSystemDir(String envVar, String knownDir) {
return Optional
.ofNullable(getEnvVariableAsPath(envVar))
.orElseGet(() -> Optional
.ofNullable(getEnvVariableAsPath("SystemDrive"))
.orElseGet(() -> Path.of("C:")).resolve(knownDir));
private static Path getSystemDir(String envVar, Path knownDir) {
return getEnvVariableAsPath(envVar).orElseGet(() -> {
return getEnvVariableAsPath("SystemDrive").orElseGet(() -> {
return Path.of("C:");
}).resolve(knownDir);
});
}
private static Path getEnvVariableAsPath(String envVar) {
String path = System.getenv(envVar);
if (path != null) {
try {
return Path.of(path);
} catch (InvalidPathException ex) {
Log.error(MessageFormat.format(I18N.getString(
"error.invalid-envvar"), envVar));
}
}
return null;
}
private static List<Path> findWixInstallDirs() {
return Stream.of(findWixCurrentInstallDirs(), findWix3InstallDirs()).
flatMap(List::stream).toList();
private static Optional<Path> getEnvVariableAsPath(String envVar) {
Objects.requireNonNull(envVar);
return Optional.ofNullable(Globals.instance().system().getenv(envVar)).flatMap(PathUtils::asPath);
}
private static List<Path> findWixCurrentInstallDirs() {
return Stream.of(getEnvVariableAsPath("USERPROFILE"), Optional.ofNullable(System.
getProperty("user.home")).map(Path::of).orElse(null)).filter(Objects::nonNull).map(
path -> {
return path.resolve(".dotnet/tools");
}).filter(Files::isDirectory).distinct().toList();
return Stream.of(
getEnvVariableAsPath("USERPROFILE"),
Optional.ofNullable(Globals.instance().system().getProperty("user.home")).flatMap(PathUtils::asPath)
).flatMap(Optional::stream).map(path -> {
return path.resolve(".dotnet/tools");
}).filter(Files::isDirectory).distinct().toList();
}
private static List<Path> findWix3InstallDirs() {
PathMatcher wixInstallDirMatcher = FileSystems.getDefault().
getPathMatcher(
"glob:WiX Toolset v*");
var wixInstallDirMatcher = FileSystems.getDefault().getPathMatcher("glob:WiX Toolset v*");
Path programFiles = getSystemDir("ProgramFiles", "\\Program Files");
Path programFilesX86 = getSystemDir("ProgramFiles(x86)",
"\\Program Files (x86)");
var programFiles = getSystemDir("ProgramFiles", Path.of("Program Files"));
var programFilesX86 = getSystemDir("ProgramFiles(x86)", Path.of("Program Files (x86)"));
// Returns list of WiX install directories ordered by WiX version number.
// Newer versions go first.
@ -306,13 +318,11 @@ public enum WixTool {
try (var paths = Files.walk(path, 1)) {
return paths.toList();
} catch (IOException ex) {
Log.verbose(ex);
List<Path> empty = List.of();
return empty;
return List.<Path>of();
}
}).flatMap(List::stream)
.filter(path -> wixInstallDirMatcher.matches(path.getFileName())).
sorted(Comparator.comparing(Path::getFileName).reversed())
.filter(path -> wixInstallDirMatcher.matches(path.getFileName()))
.sorted(Comparator.comparing(Path::getFileName).reversed())
.map(path -> path.resolve("bin"))
.toList();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2024, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,14 +26,20 @@ package jdk.jpackage.internal;
import java.nio.file.Path;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import jdk.jpackage.internal.model.DottedVersion;
final class WixToolset {
record WixToolset(Map<WixTool, WixTool.ToolInfo> tools) {
static enum WixToolsetType {
WixToolset {
tools = Map.copyOf(tools);
}
enum WixToolsetType {
// Wix v4+
Wix4(WixTool.Wix4),
// Wix v3+
@ -50,10 +56,6 @@ final class WixToolset {
private final Set<WixTool> tools;
}
private WixToolset(Map<WixTool, WixTool.ToolInfo> tools) {
this.tools = tools;
}
WixToolsetType getType() {
return Stream.of(WixToolsetType.values()).filter(toolsetType -> {
return toolsetType.getTools().equals(tools.keySet());
@ -75,16 +77,19 @@ final class WixToolset {
.anyMatch(WixTool.CandleInfo::fips);
}
static WixToolset create(Set<WixTool> requiredTools, Map<WixTool, WixTool.ToolInfo> allTools) {
static Optional<WixToolset> create(WixToolsetType type, Map<WixTool, WixTool.ToolInfo> allTools) {
Objects.requireNonNull(type);
Objects.requireNonNull(allTools);
var requiredTools = type.getTools();
var filteredTools = allTools.entrySet().stream().filter(e -> {
return requiredTools.contains(e.getKey());
}).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
if (filteredTools.keySet().equals(requiredTools)) {
return new WixToolset(filteredTools);
return Optional.of(new WixToolset(filteredTools));
} else {
return null;
return Optional.empty();
}
}
private final Map<WixTool, WixTool.ToolInfo> tools;
}

View File

@ -36,8 +36,8 @@ resource.launcher-as-service-wix-file=Service installer WiX project file
resource.wix-src-conv=XSLT stylesheet converting WiX sources from WiX v3 to WiX v4 format
resource.installer-exe=installer executable
error.no-wix-tools=Can not find WiX tools. Was looking for WiX v3 light.exe and candle.exe or WiX v4/v5 wix.exe and none was found
error.no-wix-tools.advice=Download WiX 3.0 or later from https://wixtoolset.org and add it to the PATH.
error.no-wix-tools=No usable WiX Toolset installation found
error.no-wix-tools.advice=Install the latest WiX v3 from https://github.com/wixtoolset/wix3/releases or WiX v4+ from https://github.com/wixtoolset/wix/releases
error.version-string-wrong-format.advice=Set value of --app-version parameter to a valid Windows Installer ProductVersion.
error.msi-product-version-components=Version string [{0}] must have between 2 and 4 components.
error.msi-product-version-major-out-of-range=Major version must be in the range [0, 255]
@ -56,7 +56,6 @@ error.missing-service-installer.advice=Add 'service-installer.exe' service insta
message.icon-not-ico=The specified icon "{0}" is not an ICO file and will not be used. The default icon will be used in it's place.
message.tool-version=Detected [{0}] version [{1}].
message.wrong-tool-version=Detected [{0}] version {1} but version {2} is required.
message.product-code=MSI ProductCode: {0}.
message.upgrade-code=MSI UpgradeCode: {0}.
message.preparing-msi-config=Preparing MSI config: {0}.

View File

@ -543,9 +543,9 @@ public abstract class JavadocHelper implements AutoCloseable {
for (DocTree t : inheritedText.get(0)) {
start = Math.min(start,
sp.getStartPosition(null, inheritedDocTree, t) - offset);
sp.getStartPosition(inheritedDocTree, t) - offset);
end = Math.max(end,
sp.getEndPosition(null, inheritedDocTree, t) - offset);
sp.getEndPosition(inheritedDocTree, t) - offset);
}
String text = end >= 0 ? inherited.substring((int) start, (int) end) : "";
@ -559,8 +559,8 @@ public abstract class JavadocHelper implements AutoCloseable {
} else {
//replace the {@inheritDoc} with the full text from
//the overridden method:
long inheritedStart = sp.getStartPosition(null, dcTree, node);
long inheritedEnd = sp.getEndPosition(null, dcTree, node);
long inheritedStart = sp.getStartPosition(dcTree, node);
long inheritedEnd = sp.getEndPosition(dcTree, node);
int[] span = new int[] {(int) inheritedStart, (int) inheritedEnd};
replace.computeIfAbsent(span, s -> new ArrayList<>())
@ -571,11 +571,11 @@ public abstract class JavadocHelper implements AutoCloseable {
}
@Override
public Void visitLink(LinkTree node, Void p) {
if (sp.isRewrittenTree(null, dcTree, node)) {
if (sp.isRewrittenTree(dcTree, node)) {
//this link is a synthetic rewritten link, replace
//the original span with the new link:
int start = (int) sp.getStartPosition(null, dcTree, node);
int end = (int) sp.getEndPosition(null, dcTree, node);
int start = (int) sp.getStartPosition(dcTree, node);
int end = (int) sp.getEndPosition(dcTree, node);
replace.computeIfAbsent(new int[] {start, end}, _ -> new ArrayList<>())
.add(node.toString());
@ -601,7 +601,7 @@ public abstract class JavadocHelper implements AutoCloseable {
//this tree)
//if there is a newline immediately behind this tree, insert behind
//the newline:
long endPos = sp.getEndPosition(null, dcTree, tree);
long endPos = sp.getEndPosition(dcTree, tree);
if (endPos >= offset) {
if (endPos - offset + 1 < docComment.length() &&
docComment.charAt((int) (endPos - offset + 1)) == '\n') {
@ -744,7 +744,7 @@ public abstract class JavadocHelper implements AutoCloseable {
}
};
DocCommentTree tree = trees.getDocCommentTree(fo);
offset += (int) trees.getSourcePositions().getStartPosition(null, tree, tree);
offset += (int) trees.getSourcePositions().getStartPosition(tree, tree);
return Pair.of(tree, offset);
} catch (URISyntaxException ex) {
throw new IllegalStateException(ex);
@ -939,7 +939,7 @@ public abstract class JavadocHelper implements AutoCloseable {
Iterable<? extends DocTree> trees) {
StringBuilder sourceBuilder = new StringBuilder();
List<int[]> replaceSpans = new ArrayList<>();
int currentSpanStart = (int) sp.getStartPosition(null, comment, trees.iterator().next());
int currentSpanStart = (int) sp.getStartPosition(comment, trees.iterator().next());
DocTree lastTree = null;
for (DocTree tree : trees) {
@ -958,8 +958,8 @@ public abstract class JavadocHelper implements AutoCloseable {
}
sourceBuilder.append(code);
} else {
int treeStart = (int) sp.getStartPosition(null, comment, tree);
int treeEnd = (int) sp.getEndPosition(null, comment, tree);
int treeStart = (int) sp.getStartPosition(comment, tree);
int treeEnd = (int) sp.getEndPosition(comment, tree);
replaceSpans.add(new int[] {currentSpanStart, treeStart});
currentSpanStart = treeEnd;
sourceBuilder.append(PLACEHOLDER);
@ -967,7 +967,7 @@ public abstract class JavadocHelper implements AutoCloseable {
lastTree = tree;
}
int end = (int) sp.getEndPosition(null, comment, lastTree);
int end = (int) sp.getEndPosition(comment, lastTree);
replaceSpans.add(new int[] {currentSpanStart, end});
@ -1006,8 +1006,8 @@ public abstract class JavadocHelper implements AutoCloseable {
}
@Override
public long getStartPosition(CompilationUnitTree file, DocCommentTree comment, DocTree tree) {
ensureAdjustedSpansFilled(file, comment, tree);
public long getStartPosition(DocCommentTree comment, DocTree tree) {
ensureAdjustedSpansFilled(comment, tree);
long[] adjusted = adjustedSpan.get(tree);
@ -1015,12 +1015,12 @@ public abstract class JavadocHelper implements AutoCloseable {
return adjusted[0];
}
return delegate.getStartPosition(file, comment, tree);
return delegate.getStartPosition(comment, tree);
}
@Override
public long getEndPosition(CompilationUnitTree file, DocCommentTree comment, DocTree tree) {
ensureAdjustedSpansFilled(file, comment, tree);
public long getEndPosition(DocCommentTree comment, DocTree tree) {
ensureAdjustedSpansFilled(comment, tree);
long[] adjusted = adjustedSpan.get(tree);
@ -1028,28 +1028,26 @@ public abstract class JavadocHelper implements AutoCloseable {
return adjusted[1];
}
return delegate.getEndPosition(file, comment, tree);
return delegate.getEndPosition(comment, tree);
}
@Override
public long getStartPosition(CompilationUnitTree file, Tree tree) {
return delegate.getStartPosition(file, tree);
public long getStartPosition(Tree tree) {
return delegate.getStartPosition(tree);
}
@Override
public long getEndPosition(CompilationUnitTree file, Tree tree) {
return delegate.getEndPosition(file, tree);
public long getEndPosition(Tree tree) {
return delegate.getEndPosition(tree);
}
boolean isRewrittenTree(CompilationUnitTree file,
DocCommentTree comment,
boolean isRewrittenTree(DocCommentTree comment,
DocTree tree) {
ensureAdjustedSpansFilled(file, comment, tree);
ensureAdjustedSpansFilled(comment, tree);
return rewrittenTrees.contains(tree);
}
private void ensureAdjustedSpansFilled(CompilationUnitTree file,
DocCommentTree comment,
private void ensureAdjustedSpansFilled(DocCommentTree comment,
DocTree tree) {
if (tree.getKind() != DocTree.Kind.LINK &&
tree.getKind() != DocTree.Kind.LINK_PLAIN) {
@ -1057,7 +1055,7 @@ public abstract class JavadocHelper implements AutoCloseable {
}
long[] span;
long treeStart = delegate.getStartPosition(file, comment, tree);
long treeStart = delegate.getStartPosition(comment, tree);
if (treeStart == (-1)) {
LinkTree link = (LinkTree) tree;
@ -1069,15 +1067,15 @@ public abstract class JavadocHelper implements AutoCloseable {
for (DocTree t : nested) {
start = Math.min(start,
delegate.getStartPosition(file, comment, t));
delegate.getStartPosition(comment, t));
end = Math.max(end,
delegate.getEndPosition(file, comment, t));
delegate.getEndPosition(comment, t));
}
span = new long[] {(int) start - 1, (int) end + 1};
rewrittenTrees.add(tree);
} else {
long treeEnd = delegate.getEndPosition(file, comment, tree);
long treeEnd = delegate.getEndPosition(comment, tree);
span = new long[] {treeStart, treeEnd};
}

View File

@ -464,7 +464,7 @@ class SourceCodeAnalysisImpl extends SourceCodeAnalysis {
ImportTree it = findImport(tp);
if (it != null && it.isModule()) {
int selectStart = (int) sp.getStartPosition(topLevel, tp.getLeaf());
int selectStart = (int) sp.getStartPosition(tp.getLeaf());
String qualifiedPrefix = it.getQualifiedIdentifier().getKind() == Kind.MEMBER_SELECT
? ((MemberSelectTree) it.getQualifiedIdentifier()).getExpression().toString() + "."
: "";
@ -634,7 +634,7 @@ class SourceCodeAnalysisImpl extends SourceCodeAnalysis {
Element annotationType = tp.getParentPath().getParentPath().getLeaf().getKind() == Kind.ANNOTATION
? at.trees().getElement(tp.getParentPath().getParentPath())
: at.trees().getElement(tp.getParentPath().getParentPath().getParentPath());
if (sp.getEndPosition(topLevel, tp.getParentPath().getLeaf()) == (-1)) {
if (sp.getEndPosition(tp.getParentPath().getLeaf()) == (-1)) {
//synthetic 'value':
//TODO: filter out existing:
addElements(javadoc, ElementFilter.methodsIn(annotationType.getEnclosedElements()), TRUE, TRUE, cursor, prefix, result);
@ -846,14 +846,14 @@ class SourceCodeAnalysisImpl extends SourceCodeAnalysis {
new TreePathScanner<Void, Void>() {
@Override
public Void visitIdentifier(IdentifierTree node, Void p) {
long start = sp.getStartPosition(cut, node);
long end = sp.getEndPosition(cut, node);
long start = sp.getStartPosition(node);
long end = sp.getEndPosition(node);
handleElement(false, start, end);
return super.visitIdentifier(node, p);
}
@Override
public Void visitMemberSelect(MemberSelectTree node, Void p) {
long exprEnd = sp.getEndPosition(cut, node.getExpression());
long exprEnd = sp.getEndPosition(node.getExpression());
Token ident = findTokensFrom(exprEnd, TokenKind.DOT, TokenKind.IDENTIFIER);
if (ident != null) {
handleElement(false, ident.pos, ident.endPos);
@ -866,16 +866,16 @@ class SourceCodeAnalysisImpl extends SourceCodeAnalysis {
if (mods.getFlags().contains(Modifier.SEALED) ||
mods.getFlags().contains(Modifier.NON_SEALED)) {
List<Token> modifierTokens = new ArrayList<>();
long modsStart = sp.getStartPosition(cut, mods);
long modsEnd = sp.getEndPosition(cut, mods);
long modsStart = sp.getStartPosition(mods);
long modsEnd = sp.getEndPosition(mods);
for (Token t : tokens) {
if (t.pos >= modsStart && t.endPos <= modsEnd) {
modifierTokens.add(t);
}
}
for (AnnotationTree at : mods.getAnnotations()) {
long annStart = sp.getStartPosition(cut, at);
long annEnd = sp.getEndPosition(cut, at);
long annStart = sp.getStartPosition(at);
long annEnd = sp.getEndPosition(at);
modifierTokens.removeIf(t -> t.pos >= annStart && t.endPos <= annEnd);
}
OUTER: for (int i = 0; i < modifierTokens.size(); i++) {
@ -912,7 +912,7 @@ class SourceCodeAnalysisImpl extends SourceCodeAnalysis {
handleElement(true, ident.pos, ident.endPos);
}
if (!node.getPermitsClause().isEmpty()) {
long start = sp.getStartPosition(cut, node.getPermitsClause().get(0));
long start = sp.getStartPosition(node.getPermitsClause().get(0));
Token permitsCandidate = findTokensBefore(start, TokenKind.IDENTIFIER);
if (permitsCandidate != null && permitsCandidate.name().contentEquals("permits")) {
addKeyword.accept(permitsCandidate);
@ -946,7 +946,7 @@ class SourceCodeAnalysisImpl extends SourceCodeAnalysis {
}
@Override
public Void visitYield(YieldTree node, Void p) {
long start = sp.getStartPosition(cut, node);
long start = sp.getStartPosition(node);
Token yield = findTokensFrom(start, TokenKind.IDENTIFIER);
addKeyword.accept(yield);
return super.visitYield(node, p);
@ -961,7 +961,7 @@ class SourceCodeAnalysisImpl extends SourceCodeAnalysis {
@Override
public Void scan(Tree tree, Void p) {
if (tree != null) {
long end = sp.getEndPosition(cut, tree);
long end = sp.getEndPosition(tree);
if (end == (-1)) {
//synthetic
return null;
@ -1072,14 +1072,14 @@ class SourceCodeAnalysisImpl extends SourceCodeAnalysis {
if (tree == null)
return null;
long start = sp.getStartPosition(topLevel, tree);
long end = sp.getEndPosition(topLevel, tree);
long start = sp.getStartPosition(tree);
long end = sp.getEndPosition(tree);
if (end == (-1) && tree.getKind() == Kind.ASSIGNMENT &&
getCurrentPath() != null &&
getCurrentPath().getLeaf().getKind() == Kind.ANNOTATION) {
//the assignment is synthetically generated, take the end pos of the nested tree:
end = sp.getEndPosition(topLevel, ((AssignmentTree) tree).getExpression());
end = sp.getEndPosition(((AssignmentTree) tree).getExpression());
}
if (start <= wrapEndPos && wrapEndPos <= end &&
(deepest[0] == null || deepest[0].getLeaf() == getCurrentPath().getLeaf())) {

View File

@ -119,11 +119,11 @@ class TreeDissector {
}
int getStartPosition(Tree tree) {
return (int) getSourcePositions().getStartPosition(targetCompilationUnit, tree);
return (int) getSourcePositions().getStartPosition(tree);
}
int getEndPosition(Tree tree) {
return (int) getSourcePositions().getEndPosition(targetCompilationUnit, tree);
return (int) getSourcePositions().getEndPosition(tree);
}
Range treeToRange(Tree tree) {

View File

@ -56,7 +56,7 @@ compiler/c2/irTests/TestDuplicateBackedge.java 8318904 generic-all
compiler/codecache/jmx/PoolsIndependenceTest.java 8264632 macosx-all
compiler/vectorapi/reshape/TestVectorReinterpret.java 8320897,8348519 aix-ppc64,linux-ppc64le,linux-s390x
compiler/vectorapi/reshape/TestVectorReinterpret.java 8348519 linux-s390x
compiler/vectorapi/VectorRebracket128Test.java 8330538 generic-all
compiler/vectorization/TestVectorAlgorithms.java#noSuperWord 8376803 aix-ppc64,linux-s390x

View File

@ -23,16 +23,18 @@
/*
* @test
* @bug 8248791
* @bug 8248791 8375442
* @summary Test cloning with more than 8 (=ArrayCopyLoadStoreMaxElem) where loads are wrongly replaced by zero.
* @requires vm.compiler2.enabled | vm.graal.enabled
*
* @run main/othervm -XX:-ReduceBulkZeroing
* -XX:CompileCommand=dontinline,compiler.arraycopy.TestCloneAccess::*
* compiler.arraycopy.TestCloneAccess
* -XX:CompileCommand=dontinline,${test.main.class}::*
* ${test.main.class}
* @run main/othervm -XX:-ReduceBulkZeroing -XX:-ReduceInitialCardMarks
* -XX:CompileCommand=dontinline,compiler.arraycopy.TestCloneAccess::*
* compiler.arraycopy.TestCloneAccess
* -XX:CompileCommand=dontinline,${test.main.class}::*
* ${test.main.class}
* @run main/othervm -XX:-ReduceBulkZeroing -XX:+IgnoreUnrecognizedVMOptions -XX:VerifyIterativeGVN=1110
* ${test.main.class}
*/
package compiler.arraycopy;

View File

@ -0,0 +1,83 @@
/*
* Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package compiler.c2.igvn;
import compiler.lib.ir_framework.*;
/*
* @test
* @bug 8375442
* @summary Test deep IGVN revisit for RangeCheck elimination. Other deep-revisit node types
* (If, Load, CmpP, CountedLoopEnd, LongCountedLoopEnd) benefit in large methods
* but require graph complexity beyond this test.
* @library /test/lib /
* @run driver ${test.main.class}
*/
// IR test for deep IGVN revisit of RangeCheck nodes (8375442). The exact code
// shape below is what produces the duplicated LoadRange/RangeCheck nodes, so
// it should not be refactored.
public class TestDeepIGVNRevisit {
// Branch flags (all false by default) guarding the late-inlined call sites.
static boolean c1, c2, c3, c4;
// Volatile store sits between the two arr[idx] reads; presumably keeps them
// from being merged before inlining completes — confirm against C2 memory edges.
static volatile int volatileField;
public static void main(String[] args) {
TestFramework tf = new TestFramework();
tf.setDefaultWarmup(0);
// AlwaysIncrementalInline defers the lateInline() calls so the two array
// accesses initially see distinct LoadRange nodes (see comment below).
// IgnoreUnrecognizedVMOptions covers builds without UseDeepIGVNRevisit.
tf.addFlags("-XX:+IgnoreUnrecognizedVMOptions",
"-XX:+AlwaysIncrementalInline",
"-XX:-PartialPeelLoop",
"-XX:-LoopUnswitching");
// Cross-product of UseDeepIGVNRevisit and StressIGVN; the IR rules below
// select on UseDeepIGVNRevisit.
tf.addScenarios(
new Scenario(1, "-XX:-StressIGVN", "-XX:+UseDeepIGVNRevisit"),
new Scenario(2, "-XX:+StressIGVN", "-XX:+UseDeepIGVNRevisit"),
new Scenario(3, "-XX:-StressIGVN", "-XX:-UseDeepIGVNRevisit"),
new Scenario(4, "-XX:+StressIGVN", "-XX:-UseDeepIGVNRevisit"));
tf.start();
}
// Empty target for the dontinline/incremental-inline machinery.
static void lateInline() {}
// Deferred calls create separate LoadRange nodes for the two arr[idx]
// accesses. After inlining, LoadRanges CSE but RangeCheck#2 is already
// processed. Deep revisit re-processes it with matching range pointers.
@Setup
static Object[] setupRangeCheck() {
return new Object[] { new int[100], 42 };
}
// With deep revisit, the second RangeCheck is expected to fold into the
// first (count 1 after ITER_GVN2); without it, both remain (count 2).
@Test
@Arguments(setup = "setupRangeCheck")
@IR(phase = CompilePhase.ITER_GVN2,
applyIf = {"UseDeepIGVNRevisit", "true"},
counts = {IRNode.RANGE_CHECK, "1"})
@IR(phase = CompilePhase.ITER_GVN2,
applyIf = {"UseDeepIGVNRevisit", "false"},
counts = {IRNode.RANGE_CHECK, "2"})
static int testRangeCheck(int[] arr, int idx) {
int r = arr[idx]; // RangeCheck #1
if (c1) { lateInline(); }
if (c2) { lateInline(); }
if (c3) { lateInline(); }
if (c4) { lateInline(); }
volatileField = r;
r += arr[idx]; // RangeCheck #2
return r;
}
}

View File

@ -0,0 +1,56 @@
/*
* Copyright Amazon.com Inc. or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/**
* @test
* @bug 8375442
* @summary fold_compares_helper must clean up speculative lo node when bailing out with deep revisit
* @library /test/lib /
* @run main/othervm -XX:-TieredCompilation -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:VerifyIterativeGVN=1110
* -XX:CompileCommand=compileonly,${test.main.class}::test
* ${test.main.class}
*
* @run main ${test.main.class}
*/
package compiler.igvn;
import jdk.test.lib.Asserts;
// Regression test (8375442): fold_compares_helper must clean up its speculative
// lo node when bailing out; VerifyIterativeGVN in the @run tag flags the
// leftover node. Constants and branch shape are deliberate — do not simplify.
public class TestFoldComparesCleanup {
// Constants chosen so that fold_compares_helper computes adjusted_lim which overflows negative.
static final int A = -2_000_000_000;
static final int B = 2_000_000_000;
// For every i in [0, 50_000): i > A and i < B both hold, so both branches
// are taken and the result is always 1 + 2 = 3.
static int test(int z) {
int sum = 0;
if (z > A) sum += 1;
if (z < B) sum += 2;
return sum;
}
public static void main(String[] args) {
// Enough iterations to trigger C2 compilation under -Xbatch, with the
// expected value checked on every call.
for (int i = 0; i < 50_000; i++) {
Asserts.assertEquals(3, test(i));
}
}
}

View File

@ -1776,6 +1776,11 @@ public class IRNode {
trapNodes(PREDICATE_TRAP, "predicate");
}
public static final String RANGE_CHECK = PREFIX + "RANGE_CHECK" + POSTFIX;
static {
beforeMatchingNameRegex(RANGE_CHECK, "RangeCheck");
}
public static final String RANGE_CHECK_TRAP = PREFIX + "RANGE_CHECK_TRAP" + POSTFIX;
static {
trapNodes(RANGE_CHECK_TRAP, "range_check");

View File

@ -0,0 +1,57 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8380579
* @summary Test that C2 match_type_check handles Bool(CmpP(CastPP(LoadKlass(...)), ConP(klass)), eq)
* @run main/othervm
* -XX:-TieredCompilation
* -Xcomp
* -XX:CompileCommand=compileonly,${test.main.class}::test
* ${test.main.class}
*/
package compiler.reflection;
import java.lang.reflect.Array;
// Regression test (8380579) for C2 match_type_check: Array.newInstance with a
// non-constant component class, followed by a getClass() comparison, produces
// the pattern Bool(CmpP(CastPP(LoadKlass(...)), ConP(klass)), eq). The code
// shape below is deliberate — do not refactor.
public class TestSharpenTypeAfterIfMissingTypeCheckInfo {
public static void main(String[] args) {
// Warm up so test() gets C2-compiled (-Xcomp/compileonly in the @run tag).
for (int i = 0; i < 20_000; i++) {
test(i);
}
}
static boolean test(int i) {
Class componentType;
// Alternate component types so the class is not a compile-time constant.
if (i % 2 == 0) {
componentType = Object.class;
} else {
componentType = Integer.class;
}
Object array = Array.newInstance(componentType, 1);
// The getClass() == Object[].class klass comparison under test.
return array.getClass() == Object[].class;
}
}

View File

@ -122,6 +122,7 @@ public class TestVectorAlgorithms {
testGroups.get("dotProductF").put("dotProductF_loop", i -> { return dotProductF_loop(d.aF, d.bF); });
testGroups.get("dotProductF").put("dotProductF_VectorAPI_naive", i -> { return dotProductF_VectorAPI_naive(d.aF, d.bF); });
testGroups.get("dotProductF").put("dotProductF_VectorAPI_reduction_after_loop", i -> { return dotProductF_VectorAPI_reduction_after_loop(d.aF, d.bF); });
testGroups.get("dotProductF").put("dotProductF_VectorAPI_fma", i -> { return dotProductF_VectorAPI_fma(d.aF, d.bF); });
testGroups.put("hashCodeB", new HashMap<String,TestFunction>());
testGroups.get("hashCodeB").put("hashCodeB_loop", i -> { return hashCodeB_loop(d.aB); });
@ -142,6 +143,12 @@ public class TestVectorAlgorithms {
testGroups.get("findI").put("findI_loop", i -> { return findI_loop(d.aI, d.eI_findI[i]); });
testGroups.get("findI").put("findI_VectorAPI", i -> { return findI_VectorAPI(d.aI, d.eI_findI[i]); });
testGroups.put("mismatchB", new HashMap<String,TestFunction>());
testGroups.get("mismatchB").put("mismatchB_loop", i -> { return d.wrap_mismatchB(i, TestVectorAlgorithms::mismatchB_loop); });
testGroups.get("mismatchB").put("mismatchB_Arrays", i -> { return d.wrap_mismatchB(i, TestVectorAlgorithms::mismatchB_Arrays); });
testGroups.get("mismatchB").put("mismatchB_MemorySegment", i -> { return d.wrap_mismatchB(i, TestVectorAlgorithms::mismatchB_MemorySegment); });
testGroups.get("mismatchB").put("mismatchB_VectorAPI", i -> { return d.wrap_mismatchB(i, TestVectorAlgorithms::mismatchB_VectorAPI); });
testGroups.put("reverseI", new HashMap<String,TestFunction>());
testGroups.get("reverseI").put("reverseI_loop", i -> { return reverseI_loop(d.aI, d.rI1); });
testGroups.get("reverseI").put("reverseI_VectorAPI", i -> { return reverseI_VectorAPI(d.aI, d.rI2); });
@ -192,6 +199,7 @@ public class TestVectorAlgorithms {
"dotProductF_loop",
"dotProductF_VectorAPI_naive",
"dotProductF_VectorAPI_reduction_after_loop",
"dotProductF_VectorAPI_fma",
"hashCodeB_loop",
"hashCodeB_Arrays",
"hashCodeB_VectorAPI_v1",
@ -203,6 +211,10 @@ public class TestVectorAlgorithms {
"findMinIndexI_VectorAPI",
"findI_loop",
"findI_VectorAPI",
"mismatchB_loop",
"mismatchB_Arrays",
"mismatchB_MemorySegment",
"mismatchB_VectorAPI",
"reverseI_loop",
"reverseI_VectorAPI",
"filterI_loop",
@ -409,6 +421,16 @@ public class TestVectorAlgorithms {
return VectorAlgorithmsImpl.dotProductF_VectorAPI_reduction_after_loop(a, b);
}
@Test
@IR(counts = {IRNode.LOAD_VECTOR_F, "> 0",
IRNode.ADD_REDUCTION_V, "> 0",
IRNode.FMA_VF, "> 0"},
applyIfCPUFeatureOr = {"avx2", "true", "asimd", "true", "rvv", "true"},
applyIf = {"UseSuperWord", "true"})
public float dotProductF_VectorAPI_fma(float[] a, float[] b) {
return VectorAlgorithmsImpl.dotProductF_VectorAPI_fma(a, b);
}
@Test
public int hashCodeB_loop(byte[] a) {
return VectorAlgorithmsImpl.hashCodeB_loop(a);
@ -509,6 +531,34 @@ public class TestVectorAlgorithms {
return VectorAlgorithmsImpl.findI_VectorAPI(a, e);
}
@Test
@IR(counts = {IRNode.LOAD_VECTOR_B, "= 0"})
// Currently does not vectorize, but might in the future.
public static int mismatchB_loop(byte[] a, byte[] b) {
return VectorAlgorithmsImpl.mismatchB_loop(a, b);
}
@Test
// Inlining makes IR rules difficult. Just keep this as a correctness test.
public static int mismatchB_Arrays(byte[] a, byte[] b) {
return VectorAlgorithmsImpl.mismatchB_Arrays(a, b);
}
@Test
// Inlining makes IR rules difficult. Just keep this as a correctness test.
public static int mismatchB_MemorySegment(byte[] a, byte[] b) {
return VectorAlgorithmsImpl.mismatchB_MemorySegment(a, b);
}
@Test
@IR(counts = {IRNode.LOAD_VECTOR_B, "> 0",
IRNode.VECTOR_MASK_CMP, "> 0",
IRNode.VECTOR_TEST, "> 0"},
applyIfCPUFeatureOr = {"avx", "true", "asimd", "true", "rvv", "true"})
public static int mismatchB_VectorAPI(byte[] a, byte[] b) {
return VectorAlgorithmsImpl.mismatchB_VectorAPI(a, b);
}
@Test
@IR(counts = {IRNode.LOAD_VECTOR_I, "= 0",
IRNode.STORE_VECTOR, "= 0"})

View File

@ -26,6 +26,7 @@ package compiler.vectorization;
import java.util.Arrays;
import java.util.Random;
import java.lang.foreign.MemorySegment;
import jdk.incubator.vector.*;
/**
@ -94,6 +95,15 @@ public class VectorAlgorithmsImpl {
public int[] oopsX4;
public int[] memX4;
// Input for mismatchB
// We set m1B and m2B to have identical data, temporarily edit m2B at one position,
// run the mismatch implementation, and then reset that position. This means we
// perform as little mutation as possible while randomizing the input data.
public byte[] m1B;
public byte[] m2B;
public int[] mismatchB_idx;
public int mismatchB_idx_idx = 0;
public Data(int size, int seed, int numX4Objects, float branchProbability) {
Random random = new Random(seed);
@ -165,6 +175,30 @@ public class VectorAlgorithmsImpl {
? (byte)(random.nextInt(16) + 'A')
: (byte)(random.nextInt(16) + 'a');
}
// Input data for mismatchB
m1B = new byte[size];
m2B = new byte[size];
random.nextBytes(m1B);
System.arraycopy(m1B, 0, m2B, 0, size);
mismatchB_idx = new int[0x10000];
for (int i = 0; i < mismatchB_idx.length; i++) {
// Sometimes make no mutation (-1), sometimes pick index for mutation.
mismatchB_idx[i] = (random.nextInt(10) == 0) ? -1 : random.nextInt(m1B.length);
}
}
// Functional interface over a mismatch implementation under test: returns the
// index of the first differing byte of a and b, or -1 if none differs.
public interface MismatchBImpl {
int run(byte[] a, byte[] b);
}
// Runs a mismatch implementation on (m1B, m2B). Depending on the pre-generated
// index table, either leaves the arrays identical (index -1) or temporarily
// perturbs one byte of m2B, restoring it after the run.
public int wrap_mismatchB(int idx, MismatchBImpl impl) {
    int pos = mismatchB_idx[idx & 0xffff];
    if (pos == -1) {
        // No mutation for this invocation: arrays stay identical.
        return impl.run(m1B, m2B);
    }
    // Perturb a single byte, measure, then undo the perturbation.
    m2B[pos]++;
    int res = impl.run(m1B, m2B);
    m2B[pos]--;
    return res;
}
}
@ -348,6 +382,21 @@ public class VectorAlgorithmsImpl {
return sum;
}
// Dot product of float arrays via the Vector API using fused multiply-add.
// Main loop accumulates SPECIES_F-wide per-lane partial sums; the scalar tail
// uses Math.fma so it matches the single-rounding behavior of the vector lanes.
// NOTE(review): assumes a and b have equal length — confirm against callers.
public static float dotProductF_VectorAPI_fma(float[] a, float[] b) {
var sums = FloatVector.broadcast(SPECIES_F, 0.0f);
int i;
for (i = 0; i < SPECIES_F.loopBound(a.length); i += SPECIES_F.length()) {
var va = FloatVector.fromArray(SPECIES_F, a, i);
var vb = FloatVector.fromArray(SPECIES_F, b, i);
// Per lane: sums = va * vb + sums, with a single rounding step.
sums = va.fma(vb, sums);
}
// Horizontal add of the per-lane partial sums.
float sum = sums.reduceLanes(VectorOperators.ADD);
for (; i < a.length; i++) {
sum = Math.fma(a[i], b[i], sum);
}
return sum;
}
public static int hashCodeB_loop(byte[] a) {
int h = 1;
for (int i = 0; i < a.length; i++) {
@ -656,6 +705,44 @@ public class VectorAlgorithmsImpl {
return -1;
}
// Scalar baseline: scan forward and return the index of the first position
// where a and b differ, or -1 when no position in [0, a.length) differs.
public static int mismatchB_loop(byte[] a, byte[] b) {
    int pos = 0;
    while (pos < a.length) {
        if (a[pos] != b[pos]) {
            return pos;
        }
        pos++;
    }
    return -1;
}
// Library baseline: Arrays.mismatch returns the first differing index,
// or -1 when the arrays are equal.
public static int mismatchB_Arrays(byte[] a, byte[] b) {
    int firstDiff = Arrays.mismatch(a, b);
    return firstDiff;
}
// Baseline via java.lang.foreign: wrap both arrays as heap segments and
// delegate to MemorySegment.mismatch (result narrowed from long to int).
public static int mismatchB_MemorySegment(byte[] a, byte[] b) {
    return (int) MemorySegment.ofArray(a).mismatch(MemorySegment.ofArray(b));
}
// Vector API mismatch: compare SPECIES_B-wide chunks of a and b with NE and,
// when any lane differs, return the index of the first differing byte in that
// chunk. The scalar tail covers the remainder; -1 means no mismatch found.
// Exact loop shape matters: the @IR rules on the test wrapper expect
// LOAD_VECTOR_B / VECTOR_MASK_CMP / VECTOR_TEST nodes from this code.
public static int mismatchB_VectorAPI(byte[] a, byte[] b) {
int i = 0;
for (; i < SPECIES_B.loopBound(a.length); i += SPECIES_B.length()) {
ByteVector va = ByteVector.fromArray(SPECIES_B, a, i);
ByteVector vb = ByteVector.fromArray(SPECIES_B, b, i);
// Lane mask of positions where the two chunks differ.
var mask = va.compare(VectorOperators.NE, vb);
if (mask.anyTrue()) {
// firstTrue() gives the lane offset of the first mismatch in the chunk.
return i + mask.firstTrue();
}
}
for (; i < a.length; i++) {
if (a[i] != b[i]) {
return i;
}
}
return -1;
}
public static Object reverseI_loop(int[] a, int[] r) {
for (int i = 0; i < a.length; i++) {
r[a.length - i - 1] = a[i];

View File

@ -0,0 +1,43 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/**
* @test
* @bug 8379174
* @summary Test for G1 ergonomics deriving an out-of-range
* G1RemSetArrayOfCardsEntries value from G1RemSetArrayOfCardsEntriesBase
* @requires vm.gc.G1
* @run main/othervm -XX:+IgnoreUnrecognizedVMOptions
* -XX:+UseG1GC
* -XX:G1RemSetArrayOfCardsEntriesBase=62117
* -XX:G1HeapRegionSize=4m
* ${test.main.class}
*/
package gc.g1;
// The real check happens at JVM startup: the flag combination in the @run tag
// must not produce an out-of-range derived value. Reaching main means success,
// so it only reports completion.
public class TestG1RemSetArrayOfCardsEntriesErgoLimit {
    public static void main(String[] args) {
        final String message = "passed";
        System.out.println(message);
    }
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2010, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -66,14 +66,21 @@ public class CanEncode {
Charset cs = Charset.forName(csn);
CharsetEncoder ce = cs.newEncoder();
if (cs.name().equals("US-ASCII")) {
ck(ce, 'x', true);
ck(ce, '\u00B6', false);
ck(ce, "x", true);
ck(ce, "\u00B6", false);
ck(ce, "xyzzy", true);
ck(ce, "xy\u00B6", false);
}
// Basic multilingual plane
boolean utf = csn.startsWith("UTF-");
ck(ce, 'x', true);
ck(ce, '\u00B6', utf);
ck(ce, "", true);
ck(ce, "x", true);
ck(ce, "\u00B6", utf);
ck(ce, "xyzzy", true);
ck(ce, "xy\u00B6", utf);
// Paired surrogates
ck(ce, "\uD83D\uDE00", utf);
ck(ce, "XX\uD83D\uDE00", utf);
ck(ce, "\uD83D\uDE00XX", utf);
ck(ce, "X\uD83D\uDE00X", utf);
// Unpaired surrogates should never be encodable
ck(ce, '\ud800', false);
@ -81,15 +88,36 @@ public class CanEncode {
ck(ce, '\udffe', false);
ck(ce, '\udfff', false);
ck(ce, "\ud800", false);
ck(ce, "XX\ud800", false);
ck(ce, "\ud800XX", false);
ck(ce, "X\ud800X", false);
ck(ce, "\ud801", false);
ck(ce, "XX\ud801", false);
ck(ce, "\ud801XX", false);
ck(ce, "X\ud801X", false);
ck(ce, "\udffe", false);
ck(ce, "XX\udffe", false);
ck(ce, "\udffeXX", false);
ck(ce, "X\udffeX", false);
ck(ce, "\udfff", false);
ck(ce, "XX\udfff", false);
ck(ce, "\udfffXX", false);
ck(ce, "X\udfffX", false);
if (errors > 0) {
throw new RuntimeException(errors + " errors for Charset " + csn);
}
}
public static void main(String[] args) throws Exception {
    // One non-Unicode charset plus every standard UTF flavor, exercised
    // in the same order as before.
    final String[] charsets = {
        "US-ASCII",
        "UTF-8",
        "UTF-16",
        "UTF-16LE",
        "UTF-16BE",
        "UTF-32",
        "UTF-32LE",
        "UTF-32BE",
    };
    for (String csn : charsets) {
        test(csn);
    }
}
}

View File

@ -0,0 +1,76 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.zip.ZipEntry;
import java.util.zip.ZipException;
import java.util.zip.ZipOutputStream;
import static java.io.OutputStream.nullOutputStream;
import static org.junit.jupiter.api.Assertions.assertThrows;
/* @test
* @bug 8380452
* @summary Unmappable characters in ZipEntry name or comment should be rejected with ZipException
* @run junit ${test.main.class}
*/
public class UnmappableZipEntryNameOrComment {

    // Charset used by any ZipOutputStream in this test
    static final Charset CHARSET = StandardCharsets.US_ASCII;

    // 'ø' is an unmappable character in US_ASCII
    static final String UNMAPPABLE = "\u00f8";

    /**
     * Asserts that ZipOutputStream.putNextEntry rejects the given entry
     * with a ZipException.
     *
     * @param entry the ZIP entry expected to be rejected
     * @throws IOException if an unexpected IO error occurs
     */
    private static void assertEntryRejected(ZipEntry entry) throws IOException {
        try (var zos = new ZipOutputStream(nullOutputStream(), CHARSET)) {
            assertThrows(ZipException.class, () -> zos.putNextEntry(entry));
        }
    }

    /**
     * Verify that calling ZipOutputStream.putNextEntry with an unmappable ZipEntry
     * name is rejected with a ZipException.
     *
     * @throws IOException if an unexpected IO error occurs
     */
    @Test
    void rejectUnmappableZipEntryName() throws IOException {
        assertEntryRejected(new ZipEntry(UNMAPPABLE));
    }

    /**
     * Verify that calling ZipOutputStream.putNextEntry with an unmappable ZipEntry
     * comment is rejected with a ZipException.
     *
     * @throws IOException if an unexpected IO error occurs
     */
    @Test
    void rejectUnmappableZipEntryComment() throws IOException {
        ZipEntry entry = new ZipEntry("file.txt");
        entry.setComment(UNMAPPABLE);
        assertEntryRejected(entry);
    }
}

View File

@ -160,5 +160,5 @@ abstract sealed class MockingToolProvider implements ToolProviderCommandMock {
private final String name;
private final Iterator<CommandAction> actionIter;
static ToolProviderCommandMock UNREACHABLE = new MockingToolProvider.NonCompletable("<unreachable>", List.of());
static final ToolProviderCommandMock UNREACHABLE = new MockingToolProvider.NonCompletable("<unreachable>", List.of());
}

View File

@ -0,0 +1,64 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.jpackage.test.stdmock;
import jdk.jpackage.internal.EnvironmentProvider;
import java.util.ArrayList;
import java.util.Map;
import java.util.Objects;
/**
 * Test mock of {@code EnvironmentProvider} backed by fixed, in-memory maps of
 * environment variables and system properties.
 */
public record EnvironmentProviderMock(
        Map<String, String> envVariables,
        Map<String, String> systemProperties) implements EnvironmentProvider {

    public EnvironmentProviderMock {
        // Reject null keys and values up front; checks run in the same order
        // as map iteration (keys before values, env before properties).
        for (var key : envVariables.keySet()) {
            Objects.requireNonNull(key);
        }
        for (var value : envVariables.values()) {
            Objects.requireNonNull(value);
        }
        for (var key : systemProperties.keySet()) {
            Objects.requireNonNull(key);
        }
        for (var value : systemProperties.values()) {
            Objects.requireNonNull(value);
        }
    }

    @Override
    public String getenv(String envVarName) {
        Objects.requireNonNull(envVarName);
        return envVariables.get(envVarName);
    }

    @Override
    public String getProperty(String propertyName) {
        Objects.requireNonNull(propertyName);
        return systemProperties.get(propertyName);
    }

    @Override
    public String toString() {
        // Only non-empty maps contribute to the rendered description.
        var tokens = new ArrayList<String>();
        if (!envVariables.isEmpty()) {
            tokens.add(String.format("env=%s", envVariables));
        }
        if (!systemProperties.isEmpty()) {
            tokens.add(String.format("props=%s", systemProperties));
        }
        return String.join(", ", tokens);
    }
}

View File

@ -0,0 +1,173 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.jpackage.test.stdmock;
import java.nio.file.Path;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import jdk.jpackage.internal.util.PathUtils;
import jdk.jpackage.test.mock.CommandActionSpec;
import jdk.jpackage.test.mock.CommandActionSpecs;
import jdk.jpackage.test.mock.CommandMockSpec;
/**
 * Fluent builder producing {@link CommandMockSpec} instances that mimic the
 * WiX command line tools (candle.exe, light.exe, wix.exe) for jpackage tests.
 * The mocked console output deliberately mirrors the real tools' banners.
 */
public final class WixToolMock {
// Builds the mock spec. A tool type (via candle()/light()/wix()) and a
// version must have been configured; otherwise fails with NPE.
public CommandMockSpec create() {
Objects.requireNonNull(type);
Objects.requireNonNull(version);
// Pick the canned behavior matching the configured tool flavor.
CommandActionSpec action = switch (type) {
case CANDLE3 -> {
yield candleAction(fips, version);
}
case LIGHT3 -> {
yield lightAction(version);
}
case WIX4 -> {
yield wixAction(version);
}
};
// Place the mock executable in the configured directory, if any.
var toolPath = Optional.ofNullable(dir).map(d -> {
return d.resolve(type.fileName);
}).orElse(type.fileName);
// Mock name is the tool path with its ".exe" suffix stripped.
var mockName = PathUtils.replaceSuffix(toolPath, "");
return new CommandMockSpec(toolPath, mockName, CommandActionSpecs.build().action(action).create());
}
// Sets whether the mocked candle.exe reports FIPS mode.
public WixToolMock fips(Boolean v) {
fips = v;
return this;
}
// Shorthand for fips(true).
public WixToolMock fips() {
return fips(true);
}
// Directory the mock executable should live in (unset = bare file name).
public WixToolMock dir(Path v) {
dir = v;
return this;
}
// Version string the mocked tool reports.
public WixToolMock version(String v) {
version = v;
return this;
}
// Configures a WiX 3 candle.exe mock with the given version.
public WixToolMock candle(String version) {
return type(WixTool.CANDLE3).version(version);
}
// Configures a WiX 3 light.exe mock with the given version.
public WixToolMock light(String version) {
return type(WixTool.LIGHT3).version(version);
}
// Configures a WiX 4+ wix.exe mock with the given version.
public WixToolMock wix(String version) {
return type(WixTool.WIX4).version(version);
}
private WixToolMock type(WixTool v) {
type = v;
return this;
}
// Candle mock: "-?" prints the compiler usage banner unless fips is set, in
// which case it emits the CNDL0308 FIPS error with exit code 308; "-fips"
// also prints the banner; any other arguments are rejected.
private static CommandActionSpec candleAction(boolean fips, String version) {
Objects.requireNonNull(version);
// Action label: version, optionally tagged with "; fips".
var sb = new StringBuilder();
sb.append(version);
if (fips) {
sb.append("; fips");
}
return CommandActionSpec.create(sb.toString(), context -> {
if (List.of("-?").equals(context.args())) {
if (fips) {
context.err().println("error CNDL0308 : The Federal Information Processing Standard (FIPS) appears to be enabled on the machine");
return Optional.of(308);
}
} else if (!List.of("-fips").equals(context.args())) {
throw context.unexpectedArguments();
}
var out = context.out();
List.of(
"Windows Installer XML Toolset Compiler version " + version,
"Copyright (c) .NET Foundation and contributors. All rights reserved.",
"",
" usage: candle.exe [-?] [-nologo] [-out outputFile] sourceFile [sourceFile ...] [@responseFile]"
).forEach(out::println);
return Optional.of(0);
});
}
// Light mock: only "-?" is accepted, printing the linker usage banner.
private static CommandActionSpec lightAction(String version) {
Objects.requireNonNull(version);
return CommandActionSpec.create(version, context -> {
if (List.of("-?").equals(context.args())) {
var out = context.out();
List.of(
"Windows Installer XML Toolset Linker version " + version,
"Copyright (c) .NET Foundation and contributors. All rights reserved.",
"",
" usage: light.exe [-?] [-b bindPath] [-nologo] [-out outputFile] objectFile [objectFile ...] [@responseFile]"
).forEach(out::println);
return Optional.of(0);
} else {
throw context.unexpectedArguments();
}
});
}
// WiX 4+ mock: only "--version" is accepted, printing the configured version.
private static CommandActionSpec wixAction(String version) {
Objects.requireNonNull(version);
return CommandActionSpec.create(version, context -> {
if (List.of("--version").equals(context.args())) {
context.out().println(version);
return Optional.of(0);
} else {
throw context.unexpectedArguments();
}
});
}
// Supported tool flavors; fileName is the executable name on disk.
private enum WixTool {
CANDLE3("candle"),
LIGHT3("light"),
WIX4("wix"),
;
WixTool(String name) {
this.fileName = Path.of(Objects.requireNonNull(name) + ".exe");
}
final Path fileName;
}
// Builder state; type and version are mandatory, dir and fips optional.
private Path dir;
private WixTool type;
private String version;
private boolean fips;
}

View File

@ -0,0 +1,213 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.jpackage.internal.util;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrowsExactly;
import java.nio.file.Path;
import java.util.Optional;
import java.util.function.UnaryOperator;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.CsvSource;
/**
 * Unit tests for {@code PathUtils} suffix helpers and nullable-path utilities.
 */
class PathUtilsTest {

    @ParameterizedTest
    @CsvSource({
        "foo,''",
        "foo.bar,.bar",
        "foo..bar,.bar",
        ".bar,.bar",
        "foo.bar.buz,.buz",
        ".,.",
        "...,.",
        "..,.",
    })
    void test_getSuffix(Path path, String expected) {
        assertEquals(expected, PathUtils.getSuffix(path));
    }

    @Test
    void test_getSuffix_null() {
        assertThrowsExactly(NullPointerException.class, () -> PathUtils.getSuffix(null));
    }

    @ParameterizedTest
    @CsvSource({
        "foo,'',foo",
        "a/b/foo.exe,.ico,a/b/foo.exe.ico",
        "foo,bar,foobar",
        "'',bar,bar",
        ".,bar,.bar",
    })
    void test_addSuffix(Path path, String suffix, Path expected) {
        assertEquals(expected, PathUtils.addSuffix(path, suffix));
    }

    @Test
    void test_addSuffix_null() {
        assertThrowsExactly(NullPointerException.class, () -> PathUtils.addSuffix(null, "foo"));
        assertThrowsExactly(NullPointerException.class, () -> PathUtils.addSuffix(Path.of("foo"), null));
    }

    @ParameterizedTest
    @CsvSource({
        "foo.exe,.ico,foo.ico",
        "foo.exe,,foo",
        "foo.exe,'',foo",
        "a/b/foo.exe,.ico,a/b/foo.ico",
        "foo,'',foo",
        "foo,bar,foobar",
        "'',bar,bar",
        ".,bar,bar",
        ".,.bar,.bar",
    })
    void test_replaceSuffix(Path path, String newSuffix, Path expected) {
        assertEquals(expected, PathUtils.replaceSuffix(path, newSuffix));
    }

    @Test
    void test_replaceSuffix_null() {
        assertThrowsExactly(NullPointerException.class, () -> PathUtils.replaceSuffix(null, "foo"));
        // A null suffix is accepted and strips the existing suffix.
        assertEquals(Path.of("foo"), PathUtils.replaceSuffix(Path.of("foo.a"), null));
    }

    @ParameterizedTest
    @CsvSource({
        "IDENTITY,a,a",
        "IDENTITY,,",
        "RETURN_NULL,a,",
        "RETURN_NULL,,",
        "FOO,a,foo",
        "FOO,,",
    })
    void test_mapNullablePath(PathMapper mapper, Path path, Path expected) {
        assertEquals(expected, PathUtils.mapNullablePath(mapper, path));
    }

    @Test
    void test_mapNullablePath_null() {
        assertThrowsExactly(NullPointerException.class, () -> PathUtils.mapNullablePath(null, Path.of("")));
    }

    @ParameterizedTest
    @CsvSource(nullValues = {"N/A"}, value = {
        "foo.exe",
        "N/A",
    })
    void test_normalizedAbsolutePath(Path path) {
        // Expected: null stays null, otherwise normalize + absolutize.
        Path expected = (path == null) ? null : path.normalize().toAbsolutePath();
        assertEquals(expected, PathUtils.normalizedAbsolutePath(path));
    }

    @ParameterizedTest
    @CsvSource(nullValues = {"N/A"}, value = {
        "foo.exe",
        "N/A",
    })
    void test_normalizedAbsolutePathString(Path path) {
        // Same as above, rendered as a String.
        String expected = (path == null) ? null : path.normalize().toAbsolutePath().toString();
        assertEquals(expected, PathUtils.normalizedAbsolutePathString(path));
    }

    @ParameterizedTest
    @CsvSource(nullValues = {"N/A"}, value = {
        "N/A",
        "foo",
        "*",
        ":",
    })
    void test_asPath(String str) {
        // Strings that Path.of rejects on this platform should map to empty.
        var expected = Optional.ofNullable(str).flatMap(v -> Result.of(() -> Path.of(v)).value());
        assertEquals(expected, PathUtils.asPath(str));
    }

    /** Fixed path transformations used as inputs for the mapNullablePath tests. */
    enum PathMapper implements UnaryOperator<Path> {
        IDENTITY {
            @Override
            public Path apply(Path path) {
                return path;
            }
        },
        RETURN_NULL {
            @Override
            public Path apply(Path path) {
                return null;
            }
        },
        FOO {
            @Override
            public Path apply(Path path) {
                return Path.of("foo");
            }
        },
        ;
    }
}

View File

@ -0,0 +1,754 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.jpackage.internal;
import static java.util.stream.Collectors.toMap;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import jdk.jpackage.internal.WixTool.ToolInfo;
import jdk.jpackage.internal.WixToolset.WixToolsetType;
import jdk.jpackage.internal.model.ConfigException;
import jdk.jpackage.internal.util.TokenReplace;
import jdk.jpackage.test.CannedFormattedString;
import jdk.jpackage.test.JPackageStringBundle;
import jdk.jpackage.test.mock.CommandActionSpecs;
import jdk.jpackage.test.mock.CommandMock;
import jdk.jpackage.test.mock.CommandMockSpec;
import jdk.jpackage.test.mock.Script;
import jdk.jpackage.test.stdmock.EnvironmentProviderMock;
import jdk.jpackage.test.stdmock.JPackageMockUtils;
import jdk.jpackage.test.stdmock.WixToolMock;
import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource;
class WixToolTest {
@ParameterizedTest
@MethodSource
void testLookup(TestSpec spec, @TempDir Path workDir) throws IOException {
    // Each TestSpec describes a mocked WiX tool layout and either the
    // toolset or the errors expected from WixTool.createToolset().
    spec.run(workDir);
}
@ParameterizedTest
@MethodSource
void testLookupDirs(EnvironmentTestSpec spec, @TempDir Path workDir) throws IOException {
    // Each spec mocks environment variables / system properties and lists
    // the directories WixTool.findWixInstallDirs() is expected to return.
    spec.run(workDir);
}
/**
 * Test cases for {@link #testLookup}. Each spec configures mock WiX tools,
 * optionally placed in lookup directories, and states either the expected
 * resolved toolset or the expected lookup errors.
 */
private static Collection<TestSpec> testLookup() {
    List<TestSpec> testCases = new ArrayList<>();

    Consumer<TestSpec.Builder> appendTestCases = builder -> {
        testCases.add(builder.create());
    };

    // Cases where lookup succeeds.
    Stream.of(
            // Simple WiX3 of a minimal acceptable version
            TestSpec.build()
                    .expect(toolset().version("3.0").put(WixToolsetType.Wix3, "foo"))
                    .tool(tool("foo").candle("3.0"))
                    .tool(tool("foo").light("3.0")),
            // Simple WiX3 with FIPS
            TestSpec.build()
                    .expect(toolset().version("3.14.1.8722").put(WixToolsetType.Wix3, "foo").fips())
                    .tool(tool("foo").candle("3.14.1.8722").fips())
                    .tool(tool("foo").light("3.14.1.8722")),
            // Simple WiX4+ of a minimal acceptable version
            TestSpec.build()
                    .expect(toolset().version("4.0.4").put(WixToolsetType.Wix4, "foo"))
                    .tool(tool("foo").wix("4.0.4")),
            // WiX3 with light and candle from different directories and non-existent directory
            TestSpec.build()
                    .expect(toolset().version("3.11.2").put(WixTool.Candle3, "foo").put(WixTool.Light3, "bar"))
                    .lookupDir("buz")
                    .tool(tool("foo").candle("3.11.2"))
                    .tool(tool("bar").light("3.11.2"))
                    .tool(tool("bar").candle("3.11.1"))
                    .tool(tool("foo").light("3.11.1")),
            // WiX3, WiX4+ same directory
            TestSpec.build()
                    .expect(toolset().version("5.0.2+aa65968c").put(WixToolsetType.Wix4, "foo"))
                    .tool(tool("foo").candle("3.14.1.8722"))
                    .tool(tool("foo").light("3.14.1.8722"))
                    .tool(tool("foo").wix("5.0.2+aa65968c")),
            // WiX3 (good), WiX4+ (bad version)
            TestSpec.build()
                    .expect(toolset().version("3.14.1.8722").put(WixToolsetType.Wix3, "foo"))
                    .tool(tool("foo").candle("3.14.1.8722"))
                    .tool(tool("foo").light("3.14.1.8722"))
                    .tool(tool("foo").wix("Blah-blah-blah")),
            // WiX3 (incomplete), WiX4+ (good)
            TestSpec.build()
                    .expect(toolset().version("5.0").put(WixToolsetType.Wix4, "foo"))
                    .tool(tool("foo").candle("3.14.1.8722"))
                    .tool(tool("foo").wix("5.0")),
            // WiX5 in the PATH and in the directory, same version; PATH always wins
            TestSpec.build()
                    .expect(toolset().version("5.0").put(WixToolsetType.Wix4))
                    .tool(tool().wix("5.0"))
                    .tool(tool("foo").wix("5.0")),
            // WiX5 in the PATH and in the directory; the one in the directory is newer; PATH always wins
            TestSpec.build()
                    .expect(toolset().version("5.0").put(WixToolsetType.Wix4))
                    .tool(tool().wix("5.0"))
                    .tool(tool("foo").wix("5.1")),
            // WiX5 in the PATH and in the directory; the one in the PATH is newer; PATH always wins
            TestSpec.build()
                    .expect(toolset().version("5.1").put(WixToolsetType.Wix4))
                    .tool(tool().wix("5.1"))
                    .tool(tool("foo").wix("5.0")),
            // WiX3 in the PATH, WiX3 in the directory; PATH always wins
            TestSpec.build()
                    .expect(toolset().version("3.20").put(WixToolsetType.Wix3))
                    .tool(tool().candle("3.20"))
                    .tool(tool().light("3.20"))
                    .tool(tool("foo").wix("5.0")),
            // Old WiX3 in the PATH, WiX3 in the directory
            TestSpec.build()
                    .expect(toolset().version("3.20").put(WixToolsetType.Wix3, "foo"))
                    .tool(tool().candle("2.9"))
                    .tool(tool().light("2.9"))
                    .tool(tool("foo").candle("3.20"))
                    .tool(tool("foo").light("3.20"))
    ).forEach(appendTestCases);

    // Exhaustively combine the status (good/missing/unexpected output) of
    // light.exe and candle.exe across an "old" (3.11) and a "new" (3.14)
    // WiX3 installation. Combinations where neither installation yields a
    // complete toolset are skipped; otherwise expect the new installation
    // to win when it is complete, and the old one as fallback.
    for (var oldLightStatus : ToolStatus.values()) {
        for (var oldCandleStatus : ToolStatus.values()) {
            for (var newLightStatus : ToolStatus.values()) {
                for (var newCandleStatus : ToolStatus.values()) {
                    boolean newGood = ToolStatus.isAllGood(newLightStatus, newCandleStatus);
                    if (!ToolStatus.isAllGood(oldLightStatus, oldCandleStatus) && !newGood) {
                        continue;
                    }
                    var builder = TestSpec.build();
                    if (newGood) {
                        builder.expect(toolset().version("3.14").put(WixToolsetType.Wix3, "new"));
                    } else {
                        builder.expect(toolset().version("3.11").put(WixToolsetType.Wix3, "old"));
                    }
                    oldCandleStatus.map(tool("old").candle("3.11")).ifPresent(builder::tool);
                    oldLightStatus.map(tool("old").light("3.11")).ifPresent(builder::tool);
                    newCandleStatus.map(tool("new").candle("3.14")).ifPresent(builder::tool);
                    newLightStatus.map(tool("new").light("3.14")).ifPresent(builder::tool);
                    appendTestCases.accept(builder);
                }
            }
        }
    }

    // Failure cases: no complete toolset of an acceptable version exists.
    Stream.of(
            // No WiX tools
            TestSpec.build(),
            TestSpec.build()
                    .lookupDir("foo"),
            TestSpec.build()
                    .lookupDir(LOOKUP_IN_PATH),
            // Incomplete WiX3: missing candle.exe
            TestSpec.build()
                    .tool(tool("foo").light("3.14.1.8722")),
            // Incomplete WiX3: missing light.exe
            TestSpec.build()
                    .tool(tool("foo").candle("3.14.1.8722")),
            // Incomplete WiX3: version mismatch of light.exe and candle.exe
            TestSpec.build()
                    .tool(tool("foo").candle("3.14"))
                    .tool(tool("foo").light("3.15")),
            // WiX3 too old
            TestSpec.build()
                    .tool(tool("foo").candle("2.9"))
                    .tool(tool("foo").light("2.9")),
            // WiX4+ too old
            TestSpec.build()
                    .tool(tool("foo").wix("4.0.3"))
    ).forEach(appendTestCases);

    return testCases;
}
/**
 * Test cases for {@link #testLookupDirs}. Each spec configures mocked
 * environment variables / system properties and lists the expected WiX
 * install directories in the expected order. In values, "@@" is replaced
 * with the test work directory and "@NAME@" with the value of the
 * corresponding variable or property (see {@code EnvironmentTestSpec#resolve}).
 */
private static Collection<EnvironmentTestSpec> testLookupDirs() {
    List<EnvironmentTestSpec> testCases = new ArrayList<>();

    Stream.of(
            EnvironmentTestSpec.build()
                    .env(EnvironmentVariable.USERPROFILE, "@@/foo")
                    .expect("@USERPROFILE@/.dotnet/tools"),
            EnvironmentTestSpec.build()
                    .env(SystemProperty.USER_HOME, "@@/bar")
                    .expect("@user.home@/.dotnet/tools"),
            // "USERPROFILE" environment variable and "user.home" system property set to different values,
            // the order should be "USERPROFILE" followed by "user.home".
            EnvironmentTestSpec.build()
                    .env(EnvironmentVariable.USERPROFILE, "@@/foo")
                    .env(SystemProperty.USER_HOME, "@@/bar")
                    .expect("@USERPROFILE@/.dotnet/tools")
                    .expect("@user.home@/.dotnet/tools"),
            // "USERPROFILE" environment variable and "user.home" system property set to the same value.
            // The directory must be reported only once.
            EnvironmentTestSpec.build()
                    .env(EnvironmentVariable.USERPROFILE, "@@/buz")
                    .env(SystemProperty.USER_HOME, "@@/buz")
                    .expect("@USERPROFILE@/.dotnet/tools"),
            // WiX3: newer versions first; 32bit after 64bit
            EnvironmentTestSpec.build()
                    .standardEnv(EnvironmentVariable.PROGRAM_FILES_X86)
                    .standardEnv(EnvironmentVariable.PROGRAM_FILES)
                    .expect(String.format("@%s@/WiX Toolset v3.11/bin", EnvironmentVariable.PROGRAM_FILES_X86.variableName()))
                    .expect(String.format("@%s@/WiX Toolset v3.10/bin", EnvironmentVariable.PROGRAM_FILES.variableName()))
                    .expect(String.format("@%s@/WiX Toolset v3.10/bin", EnvironmentVariable.PROGRAM_FILES_X86.variableName())),
            // Malformed installation directory should be accepted
            EnvironmentTestSpec.build()
                    .standardEnv(EnvironmentVariable.PROGRAM_FILES_X86)
                    .expect(String.format("@%s@/WiX Toolset vb/bin", EnvironmentVariable.PROGRAM_FILES_X86.variableName()))
                    .expect(String.format("@%s@/WiX Toolset va/bin", EnvironmentVariable.PROGRAM_FILES_X86.variableName()))
                    .expect(String.format("@%s@/WiX Toolset v/bin", EnvironmentVariable.PROGRAM_FILES_X86.variableName())),
            // No directories
            EnvironmentTestSpec.build()
    ).map(EnvironmentTestSpec.Builder::create).forEach(testCases::add);

    return testCases;
}
// The simulated condition of a single mocked WiX executable.
private enum ToolStatus {
    GOOD,              // tool present and reports the expected version output
    MISSING,           // tool not present at all
    UNEXPECTED_STDOUT, // tool present but prints unparsable output
    ;

    // True if every given status is GOOD.
    static boolean isAllGood(ToolStatus... status) {
        for (var s : status) {
            if (s != GOOD) {
                return false;
            }
        }
        return true;
    }

    // Turns the given mock builder into a command mock spec matching this
    // status; empty when the tool should not exist at all.
    Optional<CommandMockSpec> map(WixToolMock builder) {
        switch (this) {
            case MISSING:
                return Optional.empty();
            case UNEXPECTED_STDOUT: {
                // Keep the tool's name/mock-name, but replace its actions
                // with ones emitting garbage on stdout.
                var mock = builder.create();
                return Optional.of(new CommandMockSpec(
                        mock.name(),
                        mock.mockName(),
                        CommandActionSpecs.build().stdout("Blah-Blah-Blah").exit().create()));
            }
            default:
                return Optional.of(builder.create());
        }
    }
}
/**
 * A single WiX toolset lookup scenario.
 *
 * @param expected       the toolset lookup should produce; empty when a failure is expected
 * @param lookupDirs     relative directories (resolved against the work dir) to search
 * @param lookupInPATH   whether tools located via the PATH participate
 * @param mocks          mocked WiX executables to materialize for the run
 * @param expectedErrors expected error message (and optionally advice) when lookup fails
 */
record TestSpec(
        Optional<WixToolset> expected,
        List<Path> lookupDirs,
        boolean lookupInPATH,
        Collection<CommandMockSpec> mocks,
        List<CannedFormattedString> expectedErrors) {

    TestSpec {
        Objects.requireNonNull(expected);
        lookupDirs.forEach(Objects::requireNonNull);
        mocks.forEach(Objects::requireNonNull);
        expectedErrors.forEach(Objects::requireNonNull);
        if (expected.isEmpty() == expectedErrors.isEmpty()) {
            // Exactly one of the expected toolset and the expected errors must be set.
            throw new IllegalArgumentException();
        }
        lookupDirs.forEach(WixToolTest::assertIsRelative);
        lookupDirs.forEach(path -> {
            assertNotEquals(LOOKUP_IN_PATH, path);
        });
        // Ensure tool paths are unique; toMap() throws on duplicate keys.
        mocks.stream().map(CommandMockSpec::name).collect(toMap(x -> x, x -> x));
    }

    @Override
    public String toString() {
        // Compact description used as the parameterized test display name.
        var tokens = new ArrayList<String>();
        expected.map(Object::toString).ifPresent(tokens::add);
        if (!expectedErrors.isEmpty()) {
            tokens.add(String.format("errors=%s", expectedErrors));
        }
        List<Path> lookupPaths;
        if (lookupInPATH) {
            lookupPaths = new ArrayList<>();
            lookupPaths.add(Path.of("${PATH}"));
            lookupPaths.addAll(lookupDirs);
        } else {
            lookupPaths = lookupDirs;
        }
        if (!lookupPaths.isEmpty()) {
            tokens.add(String.format("lookup-dirs=%s", lookupPaths));
        }
        if (!mocks.isEmpty()) {
            tokens.add(mocks.toString());
        }
        return String.join(", ", tokens);
    }

    // Materializes the mocks under the given work directory, runs
    // WixTool.createToolset(), and verifies the resolved toolset or errors.
    void run(Path workDir) {
        var scriptBuilder = Script.build().commandMockBuilderMutator(CommandMock.Builder::repeatInfinitely);
        mocks.stream().map(mockSpec -> {
            // Tool paths with a directory component are anchored at the work dir;
            // single-name paths stand for tools found via the PATH.
            Path toolPath = mockSpec.name();
            if (toolPath.getNameCount() > 1) {
                toolPath = workDir.resolve(toolPath);
            }
            return new CommandMockSpec(toolPath, mockSpec.mockName(), mockSpec.actions());
        }).forEach(scriptBuilder::map);
        // Any command not explicitly mocked behaves as non-existent.
        scriptBuilder.map(_ -> true, CommandMock.ioerror("non-existent"));
        var script = scriptBuilder.createLoop();
        Supplier<WixToolset> createToolset = () -> {
            return WixTool.createToolset(() -> {
                return lookupDirs.stream().map(workDir::resolve).toList();
            }, lookupInPATH());
        };
        Globals.main(() -> {
            JPackageMockUtils.buildJPackage()
                    .script(script)
                    .listener(System.out::println)
                    .applyToGlobals();
            expected.ifPresentOrElse(expectedToolset -> {
                var toolset = createToolset.get();
                assertEquals(resolveAt(expectedToolset, workDir), toolset);
            }, () -> {
                // First expected error is the exception message; for
                // ConfigException the second one is the advice.
                var ex = assertThrows(RuntimeException.class, createToolset::get);
                assertEquals(expectedErrors.getFirst().getValue(), ex.getMessage());
                if (ex instanceof ConfigException cfgEx) {
                    assertEquals(expectedErrors.getLast().getValue(), cfgEx.getAdvice());
                    assertEquals(2, expectedErrors.size());
                } else {
                    assertEquals(1, expectedErrors.size());
                }
            });
            return 0;
        });
    }

    static Builder build() {
        return new Builder();
    }

    static final class Builder {

        TestSpec create() {
            if (expected == null && expectedErrors.isEmpty()) {
                // Neither a toolset nor errors configured: default to the
                // "no WiX tools found" error pair.
                return copy()
                        .expect("error.no-wix-tools")
                        .expect("error.no-wix-tools.advice")
                        .create();
            } else {
                // Derive lookup directories from the configured tool paths;
                // a single-name tool path means "look up in the PATH".
                var allLookupDirs = Stream.concat(
                        lookupDirs.stream(),
                        tools.stream().map(CommandMockSpec::name).map(toolPath -> {
                            if (toolPath.getNameCount() == 1) {
                                return LOOKUP_IN_PATH;
                            } else {
                                return toolPath.getParent();
                            }
                        })
                ).distinct().collect(Collectors.toCollection(ArrayList::new));
                var lookupInPATH = allLookupDirs.contains(LOOKUP_IN_PATH);
                if (lookupInPATH) {
                    allLookupDirs.remove(LOOKUP_IN_PATH);
                }
                return new TestSpec(
                        Optional.ofNullable(expected),
                        Collections.unmodifiableList(allLookupDirs),
                        lookupInPATH,
                        List.copyOf(tools),
                        List.copyOf(expectedErrors));
            }
        }

        Builder copy() {
            return new Builder(this);
        }

        private Builder() {
            expectedErrors = new ArrayList<>();
            lookupDirs = new ArrayList<>();
            tools = new ArrayList<>();
        }

        // Deep-ish copy constructor; the mutable lists are duplicated.
        private Builder(Builder other) {
            expected = other.expected;
            expectedErrors = new ArrayList<>(other.expectedErrors);
            lookupDirs = new ArrayList<>(other.lookupDirs);
            tools = new ArrayList<>(other.tools);
        }

        Builder expect(WixToolset v) {
            expected = v;
            return this;
        }

        // Adds an expected error from an i18n resource key with format args.
        Builder expect(String formatKey, Object ... args) {
            expectedErrors.add(JPackageStringBundle.MAIN.cannedFormattedString(formatKey, args));
            return this;
        }

        Builder expect(WixToolsetBuilder builder) {
            return expect(builder.create());
        }

        Builder lookupDir(String v) {
            return lookupDir(Path.of(v));
        }

        Builder lookupDir(Path v) {
            lookupDirs.add(Objects.requireNonNull(v));
            return this;
        }

        Builder tool(CommandMockSpec v) {
            tools.add(Objects.requireNonNull(v));
            return this;
        }

        Builder tool(WixToolMock v) {
            return tool(v.create());
        }

        private WixToolset expected;
        private final List<CannedFormattedString> expectedErrors;
        private final List<Path> lookupDirs;
        private final List<CommandMockSpec> tools;
    }
}
/**
 * Builds the expected {@link WixToolset} of a test case. Tools are keyed by
 * {@link WixTool}; every tool gets the configured version, and candle.exe
 * additionally carries the configured FIPS flag.
 */
private static final class WixToolsetBuilder {

    WixToolset create() {
        return new WixToolset(tools.entrySet().stream().collect(toMap(Map.Entry::getKey, e -> {
            ToolInfo toolInfo = new WixTool.DefaultToolInfo(e.getValue(), version);
            if (e.getKey() == WixTool.Candle3) {
                // Only candle.exe carries FIPS information.
                toolInfo = new WixTool.DefaultCandleInfo(toolInfo, fips);
            }
            return toolInfo;
        })));
    }

    WixToolsetBuilder version(String v) {
        version = v;
        return this;
    }

    // Registers the tool at <path>/<tool file name>.
    WixToolsetBuilder put(WixTool tool, String path) {
        return put(tool, Path.of(path));
    }

    WixToolsetBuilder put(WixTool tool, Path path) {
        tools.put(Objects.requireNonNull(tool), path.resolve(tool.fileName()));
        return this;
    }

    // Registers the tool as picked up from the PATH.
    WixToolsetBuilder put(WixTool tool) {
        return put(tool, LOOKUP_IN_PATH);
    }

    // Registers all tools of the given toolset type in the given directory.
    WixToolsetBuilder put(WixToolsetType type, Path path) {
        type.getTools().forEach(tool -> {
            put(tool, path);
        });
        return this;
    }

    WixToolsetBuilder put(WixToolsetType type, String path) {
        return put(type, Path.of(path));
    }

    WixToolsetBuilder put(WixToolsetType type) {
        return put(type, LOOKUP_IN_PATH);
    }

    WixToolsetBuilder fips(boolean v) {
        // Bug fix: was "fips = true", which ignored the argument and made
        // fips(false) silently set the flag. Store the argument instead.
        fips = v;
        return this;
    }

    WixToolsetBuilder fips() {
        return fips(true);
    }

    private Map<WixTool, Path> tools = new HashMap<>();
    private boolean fips;
    private String version;
}
// Windows environment variables relevant to WiX install directory lookup.
enum EnvironmentVariable {
    USERPROFILE("USERPROFILE"),
    PROGRAM_FILES("ProgramFiles"),
    PROGRAM_FILES_X86("ProgramFiles(x86)"),
    SYSTEM_DRIVE("SystemDrive"),
    ;

    /** Actual environment variable name backing this constant. */
    private final String variableName;

    EnvironmentVariable(String variableName) {
        this.variableName = Objects.requireNonNull(variableName);
    }

    String variableName() {
        return variableName;
    }
}
// Java system properties relevant to WiX install directory lookup.
enum SystemProperty {
    USER_HOME("user.home"),
    ;

    /** Actual system property name backing this constant. */
    private final String propertyName;

    SystemProperty(String propertyName) {
        this.propertyName = Objects.requireNonNull(propertyName);
    }

    String propertyName() {
        return propertyName;
    }
}
/**
 * A single WixTool.findWixInstallDirs() scenario.
 *
 * @param env          mocked environment variables and system properties;
 *                     values may contain "@@" (work dir) and "@NAME@" tokens
 * @param expectedDirs expected install directories (relative; may contain tokens)
 */
record EnvironmentTestSpec(EnvironmentProviderMock env, List<Path> expectedDirs) {

    EnvironmentTestSpec {
        Objects.requireNonNull(env);
        expectedDirs.forEach(dir -> {
            // Expected directories must be relative so they can be anchored
            // at the per-test work directory.
            if (dir.isAbsolute()) {
                throw new IllegalArgumentException();
            }
        });
    }

    @Override
    public String toString() {
        // Compact description used as the parameterized test display name.
        var tokens = new ArrayList<String>();
        tokens.add(String.format("expect=%s", expectedDirs));
        tokens.add(env.toString());
        return String.join(", ", tokens);
    }

    // Resolves all tokens against the work directory, creates the expected
    // directories on disk, and verifies WixTool.findWixInstallDirs().
    void run(Path workDir) throws IOException {
        // Resolve tokens in env values, property values, and expected dirs
        // in one pass; the expected dirs are keyed by their own string form.
        var allResolved = resolve(workDir, Stream.of(
                env.envVariables().entrySet().stream(),
                env.systemProperties().entrySet().stream(),
                expectedDirs.stream().map(Path::toString).map(dir -> {
                    return Map.entry(dir, dir);
                })
        ).flatMap(x -> x).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)));
        // Split the resolved map back into env-variable and property views.
        Function<Supplier<Map<String, String>>, Map<String, String>> filterAllResolved = filterSupplier -> {
            var filter = filterSupplier.get();
            return allResolved.entrySet().stream().filter(e -> {
                return filter.containsKey(e.getKey());
            }).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
        };
        var resolvedEnv = new EnvironmentProviderMock(
                filterAllResolved.apply(env::envVariables),
                filterAllResolved.apply(env::systemProperties));
        var resolvedDirs = expectedDirs.stream().map(Path::toString).map(allResolved::get).map(Path::of).toList();
        for (var dir : resolvedDirs) {
            // findWixInstallDirs() is expected to report existing dirs only.
            Files.createDirectories(dir);
        }
        Globals.main(() -> {
            Globals.instance().system(resolvedEnv);
            assertEquals(resolvedDirs, WixTool.findWixInstallDirs());
            return 0;
        });
    }

    static Builder build() {
        return new Builder();
    }

    static final class Builder {

        EnvironmentTestSpec create() {
            var env = envVariables.entrySet().stream().collect(Collectors.toMap(e -> {
                return e.getKey().variableName();
            }, Map.Entry::getValue));
            var props = systemProperties.entrySet().stream().collect(Collectors.toMap(e -> {
                return e.getKey().propertyName();
            }, Map.Entry::getValue));
            return new EnvironmentTestSpec(new EnvironmentProviderMock(env, props), List.copyOf(expectedDirs));
        }

        Builder expect(List<Path> dirs) {
            expectedDirs.addAll(dirs);
            return this;
        }

        Builder expect(Path... dirs) {
            return expect(List.of(dirs));
        }

        Builder expect(String... dirs) {
            return expect(List.of(dirs).stream().map(Path::of).toList());
        }

        Builder env(SystemProperty k, String v) {
            systemProperties.put(Objects.requireNonNull(k), Objects.requireNonNull(v));
            return this;
        }

        Builder env(EnvironmentVariable k, String v) {
            envVariables.put(Objects.requireNonNull(k), Objects.requireNonNull(v));
            return this;
        }

        // Sets the variable to its conventional value under the work dir.
        Builder standardEnv(EnvironmentVariable k) {
            var value = switch (k) {
                case PROGRAM_FILES -> "Program Files";
                case PROGRAM_FILES_X86 -> "Program Files(x86)";
                default -> {
                    throw new IllegalArgumentException();
                }
            };
            return env(k, "@@/" + value);
        }

        private final Map<EnvironmentVariable, String> envVariables = new HashMap<>();
        private final Map<SystemProperty, String> systemProperties = new HashMap<>();
        private final List<Path> expectedDirs = new ArrayList<>();
    }

    // Recursively replaces "@@" with the work directory and "@NAME@" with
    // the value registered for NAME in the given map; unknown tokens fail.
    private static Map<String, String> resolve(Path workDir, Map<String, String> props) {
        var tokens = new ArrayList<String>();
        Stream.of(
                Stream.of(EnvironmentVariable.values()).map(EnvironmentVariable::variableName),
                Stream.of(SystemProperty.values()).map(SystemProperty::propertyName)
        ).flatMap(x -> x).map(str -> {
            return String.format("@%s@", str);
        }).forEach(tokens::add);
        tokens.add(TOKEN_WORKDIR);
        var tokenReplace = new TokenReplace(tokens.toArray(String[]::new));
        return props.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> {
            return tokenReplace.recursiveApplyTo(e.getValue(), token -> {
                if (token.equals(TOKEN_WORKDIR)) {
                    return workDir;
                } else {
                    // Strip the surrounding '@' characters to get the name.
                    return Objects.requireNonNull(props.get(token.substring(1, token.length() - 1)), () -> {
                        return String.format("Unrecognized token: [%s]", token);
                    });
                }
            });
        }));
    }

    // Token standing for the per-test work directory in spec values.
    static final String TOKEN_WORKDIR = "@@";
}
// Shorthand factory for building the expected toolset of a test case.
private static WixToolsetBuilder toolset() {
    return new WixToolsetBuilder();
}

// A mock WiX tool picked up from the PATH (no directory).
private static WixToolMock tool() {
    return new WixToolMock();
}

// A mock WiX tool installed in the given directory.
private static WixToolMock tool(Path dir) {
    return tool().dir(dir);
}

private static WixToolMock tool(String dir) {
    return tool(Path.of(dir));
}
// Rewrites all relative tool paths of the toolset against the given root.
// Tools with a single-name path (found via the PATH) are left untouched.
private static WixToolset resolveAt(WixToolset toolset, Path root) {
    return new WixToolset(toolset.tools().entrySet().stream().collect(toMap(Map.Entry::getKey, e -> {
        var toolInfo = e.getValue();
        assertIsRelative(toolInfo.path());
        if (toolInfo.path().getNameCount() == 1) {
            // The tool is picked from the PATH.
            return toolInfo;
        }
        ToolInfo newToolInfo = new WixTool.DefaultToolInfo(root.resolve(toolInfo.path()), toolInfo.version());
        if (toolInfo instanceof WixTool.CandleInfo candleInfo) {
            // Preserve the FIPS flag when re-rooting candle.exe.
            newToolInfo = new WixTool.DefaultCandleInfo(newToolInfo, candleInfo.fips());
        }
        return newToolInfo;
    })));
}

// Guards test data: all configured paths must be relative so they can be
// anchored at the per-test work directory.
private static void assertIsRelative(Path path) {
    if (path.isAbsolute()) {
        throw new IllegalArgumentException();
    }
}

// Empty path marking "look the tool up in the PATH" in test data.
static final Path LOOKUP_IN_PATH = Path.of("");
}

View File

@ -45,3 +45,15 @@
* jdk/jpackage/internal/wixui/UISpecTest.java
* @run junit jdk.jpackage/jdk.jpackage.internal.wixui.UISpecTest
*/
/* @test
* @summary Test WiX Toolset lookup algorithm
* @requires (os.family == "windows")
* @library /test/jdk/tools/jpackage/helpers
* @build jdk.jpackage.test.*
* @build jdk.jpackage.test.mock.*
* @build jdk.jpackage.test.stdmock.*
* @compile/module=jdk.jpackage -Xlint:all -Werror
* jdk/jpackage/internal/WixToolTest.java
* @run junit jdk.jpackage/jdk.jpackage.internal.WixToolTest
*/

View File

@ -65,6 +65,9 @@ public class CharsetCanEncode {
// sun.nio.cs.UTF_16LE
private CharsetEncoder utf16le = Charset.forName("UTF-16LE").newEncoder();
// sun.nio.cs.UTF_32LE
private CharsetEncoder utf32le = Charset.forName("UTF-32LE").newEncoder();
@Benchmark
public boolean asciiCanEncodeCharYes() {
return ascii.canEncode('D');
@ -184,4 +187,24 @@ public class CharsetCanEncode {
public boolean utf16leCanEncodeStringNo() {
return utf16le.canEncode(String.valueOf(Character.MIN_SURROGATE));
}
// UTF-32LE canEncode benchmarks; "Yes"/"No" in the name states the expected
// answer ('D' is encodable, a lone surrogate is not).
@Benchmark
public boolean utf32leCanEncodeCharYes() {
    return utf32le.canEncode('D');
}

@Benchmark
public boolean utf32leCanEncodeStringYes() {
    return utf32le.canEncode("D");
}

@Benchmark
public boolean utf32leCanEncodeCharNo() {
    return utf32le.canEncode(Character.MIN_SURROGATE);
}

@Benchmark
public boolean utf32leCanEncodeStringNo() {
    return utf32le.canEncode(String.valueOf(Character.MIN_SURROGATE));
}
}

View File

@ -165,6 +165,11 @@ public class VectorAlgorithms {
return VectorAlgorithmsImpl.dotProductF_VectorAPI_reduction_after_loop(d.aF, d.bF);
}
@Benchmark
public float dotProductF_VectorAPI_fma() {
    // Vector API dot product variant using fused multiply-add accumulation.
    return VectorAlgorithmsImpl.dotProductF_VectorAPI_fma(d.aF, d.bF);
}
@Benchmark
public int hashCodeB_loop() {
return VectorAlgorithmsImpl.hashCodeB_loop(d.aB);
@ -227,6 +232,26 @@ public class VectorAlgorithms {
return VectorAlgorithmsImpl.findI_VectorAPI(d.aI, e);
}
// The mismatchB benchmarks all go through Data.wrap_mismatchB, which mutates
// one byte of the second array (or none), runs the given implementation, and
// restores the byte; mismatchB_idx_idx cycles through precomputed indices.
@Benchmark
public int mismatchB_loop() {
    return d.wrap_mismatchB(d.mismatchB_idx_idx++, VectorAlgorithmsImpl::mismatchB_loop);
}

@Benchmark
public int mismatchB_Arrays() {
    return d.wrap_mismatchB(d.mismatchB_idx_idx++, VectorAlgorithmsImpl::mismatchB_Arrays);
}

@Benchmark
public int mismatchB_MemorySegment() {
    return d.wrap_mismatchB(d.mismatchB_idx_idx++, VectorAlgorithmsImpl::mismatchB_MemorySegment);
}

@Benchmark
public int mismatchB_VectorAPI() {
    return d.wrap_mismatchB(d.mismatchB_idx_idx++, VectorAlgorithmsImpl::mismatchB_VectorAPI);
}
@Benchmark
public Object reverseI_loop() {
return VectorAlgorithmsImpl.reverseI_loop(d.aI, d.rI1);

View File

@ -26,6 +26,7 @@ package org.openjdk.bench.vm.compiler;
import java.util.Arrays;
import java.util.Random;
import java.lang.foreign.MemorySegment;
import jdk.incubator.vector.*;
/**
@ -94,6 +95,15 @@ public class VectorAlgorithmsImpl {
public int[] oopsX4;
public int[] memX4;
// Input for mismatchB
// We set m1B and m2B to have identical data, temporarily edit m2B at one position,
// run the mismatch implementation, and then reset that position. This way we
// perform as little mutation as possible while still randomizing the input data.
public byte[] m1B;
public byte[] m2B;
public int[] mismatchB_idx;
public int mismatchB_idx_idx = 0;
public Data(int size, int seed, int numX4Objects, float branchProbability) {
Random random = new Random(seed);
@ -165,6 +175,30 @@ public class VectorAlgorithmsImpl {
? (byte)(random.nextInt(16) + 'A')
: (byte)(random.nextInt(16) + 'a');
}
// Input data for mismatchB
m1B = new byte[size];
m2B = new byte[size];
random.nextBytes(m1B);
System.arraycopy(m1B, 0, m2B, 0, size);
mismatchB_idx = new int[0x10000];
for (int i = 0; i < mismatchB_idx.length; i++) {
// Sometimes make no mutation (-1), sometimes pick index for mutation.
mismatchB_idx[i] = (random.nextInt(10) == 0) ? -1 : random.nextInt(m1B.length);
}
}
// A mismatch implementation under benchmark: returns the first index where
// the arrays differ, or -1 when no difference is found.
public interface MismatchBImpl {
    int run(byte[] a, byte[] b);
}

// Runs the given mismatch implementation on m1B/m2B. Picks a precomputed
// mutation index (-1 means "no mutation", so some runs see equal arrays),
// increments that byte of m2B before the call and decrements it afterwards —
// this restores m2B exactly even if the byte wraps around.
public int wrap_mismatchB(int idx, MismatchBImpl impl) {
    int i = mismatchB_idx[idx & 0xffff];
    if (i != -1) { m2B[i]++; }
    int res = impl.run(m1B, m2B);
    if (i != -1) { m2B[i]--; }
    return res;
}
}
@ -348,6 +382,21 @@ public class VectorAlgorithmsImpl {
return sum;
}
// Dot product of a and b using the Vector API with fused multiply-add
// accumulation; the non-vector tail is handled with scalar Math.fma.
public static float dotProductF_VectorAPI_fma(float[] a, float[] b) {
    var acc = FloatVector.broadcast(SPECIES_F, 0.0f);
    final int bound = SPECIES_F.loopBound(a.length);
    int idx = 0;
    while (idx < bound) {
        var va = FloatVector.fromArray(SPECIES_F, a, idx);
        var vb = FloatVector.fromArray(SPECIES_F, b, idx);
        acc = va.fma(vb, acc);
        idx += SPECIES_F.length();
    }
    float total = acc.reduceLanes(VectorOperators.ADD);
    while (idx < a.length) {
        total = Math.fma(a[idx], b[idx], total);
        idx++;
    }
    return total;
}
public static int hashCodeB_loop(byte[] a) {
int h = 1;
for (int i = 0; i < a.length; i++) {
@ -656,6 +705,44 @@ public class VectorAlgorithmsImpl {
return -1;
}
// Scalar baseline: returns the first index where a and b differ, or -1 when
// no difference is found within a's length.
public static int mismatchB_loop(byte[] a, byte[] b) {
    int idx = 0;
    while (idx < a.length) {
        if (a[idx] != b[idx]) {
            return idx;
        }
        idx++;
    }
    return -1;
}
// Library baseline: Arrays.mismatch returns the first differing index, or -1
// when the arrays are equal (inputs in this benchmark have equal length).
public static int mismatchB_Arrays(byte[] a, byte[] b) {
    return Arrays.mismatch(a, b);
}
// MemorySegment baseline: wrap both arrays as heap segments and delegate to
// MemorySegment.mismatch, which reports a long offset (-1 when equal).
public static int mismatchB_MemorySegment(byte[] a, byte[] b) {
    return (int) MemorySegment.ofArray(a).mismatch(MemorySegment.ofArray(b));
}
// Vector API implementation: compares a vector's worth of bytes at a time;
// on the first lane-wise difference, returns the position of the first set
// mask lane. The remainder is handled by a scalar tail loop.
public static int mismatchB_VectorAPI(byte[] a, byte[] b) {
    int i = 0;
    for (; i < SPECIES_B.loopBound(a.length); i += SPECIES_B.length()) {
        ByteVector va = ByteVector.fromArray(SPECIES_B, a, i);
        ByteVector vb = ByteVector.fromArray(SPECIES_B, b, i);
        var mask = va.compare(VectorOperators.NE, vb);
        if (mask.anyTrue()) {
            return i + mask.firstTrue();
        }
    }
    // Scalar tail for the elements past the last full vector.
    for (; i < a.length; i++) {
        if (a[i] != b[i]) {
            return i;
        }
    }
    return -1;
}
public static Object reverseI_loop(int[] a, int[] r) {
for (int i = 0; i < a.length; i++) {
r[a.length - i - 1] = a[i];